Merge tag 'pull-tcg-20230313' of https://gitlab.com/rth7680/qemu into staging
accel/tcg: Fix NB_MMU_MODES to 16
Balance of the target/ patchset which eliminates tcg_temp_free
Balance of the target/ patchset which eliminates tcg_const
# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmQPcb0dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV885AgAjDbg1soRBy0THf0X
# CVXmQ4yYyUKAonZBL8Abt9yX01BhLFqEsrju3HiaLNOM9DbwWQ4gdvSrtAZ/K2YG
# d6EvC+rJe79pr58MEEhqO4OO1ymp52amRHtEXva4vcKRNuM9WF5by/Hz2PsZyenG
# ysaLBdddooA9SJeL7xYBMpKWFgUm3C8NzfaRfCBVcG94er9u8RUi0kx+drmOLw0g
# vZ3Hekvi2I8Y5mWqvHeAIOsr8Md9PO3ezWxEteE4qsPNTTRfVD93oSGe9nNCYZTX
# wWU51Vfv9GB6hOylAfMRIeCmkjks/gqLOGElsh1MaVovNDTXS5IKV/HgaLaocJHV
# 2P81uQ==
# =FpIY
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 13 Mar 2023 18:55:57 GMT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F
* tag 'pull-tcg-20230313' of https://gitlab.com/rth7680/qemu: (91 commits)
tcg: Drop tcg_const_*
tcg: Drop tcg_const_*_vec
target/tricore: Use min/max for saturate
target/ppc: Avoid tcg_const_* in translate.c
target/ppc: Fix gen_tlbsx_booke206
target/ppc: Rewrite trans_ADDG6S
target/ppc: Avoid tcg_const_* in power8-pmu-regs.c.inc
target/ppc: Avoid tcg_const_* in fp-impl.c.inc
target/ppc: Avoid tcg_const_* in vsx-impl.c.inc
target/ppc: Avoid tcg_const_* in xxeval
target/ppc: Avoid tcg_const_* in vmx-impl.c.inc
target/ppc: Avoid tcg_const_i64 in do_vcntmb
target/m68k: Use tcg_constant_i32 in gen_ea_mode
target/arm: Avoid tcg_const_ptr in handle_rev
target/arm: Avoid tcg_const_ptr in handle_vec_simd_sqshrn
target/arm: Avoid tcg_const_ptr in disas_simd_zip_trn
target/arm: Avoid tcg_const_* in translate-mve.c
target/arm: Avoid tcg_const_ptr in gen_sve_{ldr,str}
target/arm: Improve trans_BFCI
target/arm: Create gen_set_rmode, gen_restore_rmode
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
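
[Editor's note, not part of the applied patch: a rough sketch of the conversion
pattern used throughout this series, using only TCG API calls that appear in the
diff below. Values that are only read move from tcg_const_*() to the read-only
tcg_constant_*() (never written, never freed); values that are modified
afterwards use a fresh temporary initialized with a movi, as in trans_BFCI or
load_frw0_i64 below. The function gen_example() is hypothetical.]

    #include "qemu/osdep.h"
    #include "tcg/tcg-op.h"

    static void gen_example(TCGv_i32 dst, int imm)
    {
        /*
         * Before: tcg_const_i32() allocated a temp, initialized it, and
         * required an explicit tcg_temp_free_i32() in the translator:
         *
         *     TCGv_i32 t = tcg_const_i32(imm);
         *     tcg_gen_add_i32(dst, dst, t);
         *     tcg_temp_free_i32(t);
         */

        /* After: a value that is only read becomes a read-only constant. */
        TCGv_i32 c = tcg_constant_i32(imm);
        tcg_gen_add_i32(dst, dst, c);

        /* A value that is written afterwards uses a plain temp plus movi. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_movi_i32(t, imm);
        tcg_gen_shli_i32(t, t, 1);
        tcg_gen_or_i32(dst, dst, t);
    }
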
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index c42a436..5efb8db 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -44,6 +44,7 @@
*/
#include "qemu/osdep.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index cd8aa17..e1c498e 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -36,9 +36,6 @@
#ifndef TARGET_LONG_BITS
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
-#ifndef NB_MMU_MODES
-# error NB_MMU_MODES must be defined in cpu-param.h
-#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
@@ -57,6 +54,12 @@
#include "exec/target_long.h"
+/*
+ * Fix the number of mmu modes to 16, which is also the maximum
+ * supported by the softmmu tlb api.
+ */
+#define NB_MMU_MODES 16
+
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* use a fully associative victim tlb of 8 entries */
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 7085614..dff17c7 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -1089,9 +1089,7 @@
#define tcg_gen_extract_tl tcg_gen_extract_i64
#define tcg_gen_sextract_tl tcg_gen_sextract_i64
#define tcg_gen_extract2_tl tcg_gen_extract2_i64
-#define tcg_const_tl tcg_const_i64
#define tcg_constant_tl tcg_constant_i64
-#define tcg_const_local_tl tcg_const_local_i64
#define tcg_gen_movcond_tl tcg_gen_movcond_i64
#define tcg_gen_add2_tl tcg_gen_add2_i64
#define tcg_gen_sub2_tl tcg_gen_sub2_i64
@@ -1205,9 +1203,7 @@
#define tcg_gen_extract_tl tcg_gen_extract_i32
#define tcg_gen_sextract_tl tcg_gen_sextract_i32
#define tcg_gen_extract2_tl tcg_gen_extract2_i32
-#define tcg_const_tl tcg_const_i32
#define tcg_constant_tl tcg_constant_i32
-#define tcg_const_local_tl tcg_const_local_i32
#define tcg_gen_movcond_tl tcg_gen_movcond_i32
#define tcg_gen_add2_tl tcg_gen_add2_i32
#define tcg_gen_sub2_tl tcg_gen_sub2_i32
diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h
new file mode 100644
index 0000000..dded291
--- /dev/null
+++ b/include/tcg/tcg-temp-internal.h
@@ -0,0 +1,83 @@
+/*
+ * TCG internals related to TCG temp allocation
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_TEMP_INTERNAL_H
+#define TCG_TEMP_INTERNAL_H
+
+/*
+ * Allocation and freeing of EBB temps is reserved to TCG internals
+ */
+
+void tcg_temp_free_internal(TCGTemp *);
+
+static inline void tcg_temp_free_i32(TCGv_i32 arg)
+{
+ tcg_temp_free_internal(tcgv_i32_temp(arg));
+}
+
+static inline void tcg_temp_free_i64(TCGv_i64 arg)
+{
+ tcg_temp_free_internal(tcgv_i64_temp(arg));
+}
+
+static inline void tcg_temp_free_i128(TCGv_i128 arg)
+{
+ tcg_temp_free_internal(tcgv_i128_temp(arg));
+}
+
+static inline void tcg_temp_free_ptr(TCGv_ptr arg)
+{
+ tcg_temp_free_internal(tcgv_ptr_temp(arg));
+}
+
+static inline void tcg_temp_free_vec(TCGv_vec arg)
+{
+ tcg_temp_free_internal(tcgv_vec_temp(arg));
+}
+
+static inline TCGv_i32 tcg_temp_ebb_new_i32(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB);
+ return temp_tcgv_i32(t);
+}
+
+static inline TCGv_i64 tcg_temp_ebb_new_i64(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB);
+ return temp_tcgv_i64(t);
+}
+
+static inline TCGv_i128 tcg_temp_ebb_new_i128(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB);
+ return temp_tcgv_i128(t);
+}
+
+static inline TCGv_ptr tcg_temp_ebb_new_ptr(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB);
+ return temp_tcgv_ptr(t);
+}
+
+#endif /* TCG_TEMP_INTERNAL_H */
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index a5cf21b..5cfaa53 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -862,35 +862,9 @@
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, TCGTempKind);
-void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
-static inline void tcg_temp_free_i32(TCGv_i32 arg)
-{
- tcg_temp_free_internal(tcgv_i32_temp(arg));
-}
-
-static inline void tcg_temp_free_i64(TCGv_i64 arg)
-{
- tcg_temp_free_internal(tcgv_i64_temp(arg));
-}
-
-static inline void tcg_temp_free_i128(TCGv_i128 arg)
-{
- tcg_temp_free_internal(tcgv_i128_temp(arg));
-}
-
-static inline void tcg_temp_free_ptr(TCGv_ptr arg)
-{
- tcg_temp_free_internal(tcgv_ptr_temp(arg));
-}
-
-static inline void tcg_temp_free_vec(TCGv_vec arg)
-{
- tcg_temp_free_internal(tcgv_vec_temp(arg));
-}
-
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
const char *name)
{
@@ -898,13 +872,6 @@
return temp_tcgv_i32(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_i32 tcg_temp_ebb_new_i32(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB);
- return temp_tcgv_i32(t);
-}
-
static inline TCGv_i32 tcg_temp_new_i32(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB);
@@ -918,26 +885,12 @@
return temp_tcgv_i64(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_i64 tcg_temp_ebb_new_i64(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB);
- return temp_tcgv_i64(t);
-}
-
static inline TCGv_i64 tcg_temp_new_i64(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB);
return temp_tcgv_i64(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_i128 tcg_temp_ebb_new_i128(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB);
- return temp_tcgv_i128(t);
-}
-
static inline TCGv_i128 tcg_temp_new_i128(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB);
@@ -951,13 +904,6 @@
return temp_tcgv_ptr(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_ptr tcg_temp_ebb_new_ptr(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB);
- return temp_tcgv_ptr(t);
-}
-
static inline TCGv_ptr tcg_temp_new_ptr(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB);
@@ -1050,14 +996,6 @@
void tcg_optimize(TCGContext *s);
-/* Allocate a new temporary and initialize it with a constant. */
-TCGv_i32 tcg_const_i32(int32_t val);
-TCGv_i64 tcg_const_i64(int64_t val);
-TCGv_vec tcg_const_zeros_vec(TCGType);
-TCGv_vec tcg_const_ones_vec(TCGType);
-TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
-TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
-
/*
* Locate or create a read-only temporary that is a constant.
* This kind of temporary need not be freed, but for convenience
@@ -1079,10 +1017,8 @@
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
#if UINTPTR_MAX == UINT32_MAX
-# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i32((intptr_t)(x)))
#else
-# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i64((intptr_t)(x)))
#endif
diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h
index 17cd14e..68c46f7 100644
--- a/target/alpha/cpu-param.h
+++ b/target/alpha/cpu-param.h
@@ -15,6 +15,4 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 44
#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
-#define NB_MMU_MODES 3
-
#endif
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index b7bde18..b3b35f7 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -45,6 +45,4 @@
bool guarded;
#endif
-#define NB_MMU_MODES 12
-
#endif
diff --git a/target/arm/internals.h b/target/arm/internals.h
index b1ef059..673519a 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -193,16 +193,22 @@
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
-enum arm_fprounding {
+typedef enum ARMFPRounding {
FPROUNDING_TIEEVEN,
FPROUNDING_POSINF,
FPROUNDING_NEGINF,
FPROUNDING_ZERO,
FPROUNDING_TIEAWAY,
FPROUNDING_ODD
-};
+} ARMFPRounding;
-int arm_rmode_to_sf(int rmode);
+extern const FloatRoundMode arm_rmode_to_sf_map[6];
+
+static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
+{
+ assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
+ return arm_rmode_to_sf_map[rmode];
+}
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 2c2ea45..dff391b 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -6146,13 +6146,12 @@
case 0xb: /* FRINTZ */
case 0xc: /* FRINTA */
{
- TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
+ TCGv_i32 tcg_rmode;
+
fpst = fpstatus_ptr(FPST_FPCR_F16);
-
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(opcode & 7, fpst);
gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
-
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
break;
}
case 0xe: /* FRINTX */
@@ -6202,7 +6201,7 @@
case 0xa: /* FRINTM */
case 0xb: /* FRINTZ */
case 0xc: /* FRINTA */
- rmode = arm_rmode_to_sf(opcode & 7);
+ rmode = opcode & 7;
gen_fpst = gen_helper_rints;
break;
case 0xe: /* FRINTX */
@@ -6212,14 +6211,14 @@
gen_fpst = gen_helper_rints;
break;
case 0x10: /* FRINT32Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint32_s;
break;
case 0x11: /* FRINT32X */
gen_fpst = gen_helper_frint32_s;
break;
case 0x12: /* FRINT64Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint64_s;
break;
case 0x13: /* FRINT64X */
@@ -6231,10 +6230,9 @@
fpst = fpstatus_ptr(FPST_FPCR);
if (rmode >= 0) {
- TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
gen_fpst(tcg_res, tcg_op, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
} else {
gen_fpst(tcg_res, tcg_op, fpst);
}
@@ -6275,7 +6273,7 @@
case 0xa: /* FRINTM */
case 0xb: /* FRINTZ */
case 0xc: /* FRINTA */
- rmode = arm_rmode_to_sf(opcode & 7);
+ rmode = opcode & 7;
gen_fpst = gen_helper_rintd;
break;
case 0xe: /* FRINTX */
@@ -6285,14 +6283,14 @@
gen_fpst = gen_helper_rintd;
break;
case 0x10: /* FRINT32Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint32_d;
break;
case 0x11: /* FRINT32X */
gen_fpst = gen_helper_frint32_d;
break;
case 0x12: /* FRINT64Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint64_d;
break;
case 0x13: /* FRINT64X */
@@ -6304,10 +6302,9 @@
fpst = fpstatus_ptr(FPST_FPCR);
if (rmode >= 0) {
- TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
gen_fpst(tcg_res, tcg_op, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
} else {
gen_fpst(tcg_res, tcg_op, fpst);
}
@@ -6944,9 +6941,7 @@
rmode = FPROUNDING_TIEAWAY;
}
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
-
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
switch (type) {
case 1: /* float64 */
@@ -7023,7 +7018,7 @@
g_assert_not_reached();
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
@@ -7447,10 +7442,10 @@
bool part = extract32(insn, 14, 1);
bool is_q = extract32(insn, 30, 1);
int esize = 8 << size;
- int i, ofs;
+ int i;
int datasize = is_q ? 128 : 64;
int elements = datasize / esize;
- TCGv_i64 tcg_res, tcg_resl, tcg_resh;
+ TCGv_i64 tcg_res[2], tcg_ele;
if (opcode == 0 || (size == 3 && !is_q)) {
unallocated_encoding(s);
@@ -7461,37 +7456,39 @@
return;
}
- tcg_resl = tcg_const_i64(0);
- tcg_resh = is_q ? tcg_const_i64(0) : NULL;
- tcg_res = tcg_temp_new_i64();
+ tcg_res[0] = tcg_temp_new_i64();
+ tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
+ tcg_ele = tcg_temp_new_i64();
for (i = 0; i < elements; i++) {
+ int o, w;
+
switch (opcode) {
case 1: /* UZP1/2 */
{
int midpoint = elements / 2;
if (i < midpoint) {
- read_vec_element(s, tcg_res, rn, 2 * i + part, size);
+ read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
} else {
- read_vec_element(s, tcg_res, rm,
+ read_vec_element(s, tcg_ele, rm,
2 * (i - midpoint) + part, size);
}
break;
}
case 2: /* TRN1/2 */
if (i & 1) {
- read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
+ read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
} else {
- read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
+ read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
}
break;
case 3: /* ZIP1/2 */
{
int base = part * elements / 2;
if (i & 1) {
- read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
+ read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
} else {
- read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
+ read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
}
break;
}
@@ -7499,19 +7496,18 @@
g_assert_not_reached();
}
- ofs = i * esize;
- if (ofs < 64) {
- tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
- tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
+ w = (i * esize) / 64;
+ o = (i * esize) % 64;
+ if (o == 0) {
+ tcg_gen_mov_i64(tcg_res[w], tcg_ele);
} else {
- tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
- tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
+ tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
+ tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
}
}
- write_vec_element(s, tcg_resl, rd, 0, MO_64);
- if (is_q) {
- write_vec_element(s, tcg_resh, rd, 1, MO_64);
+ for (i = 0; i <= is_q; ++i) {
+ write_vec_element(s, tcg_res[i], rd, i, MO_64);
}
clear_vec_high(s, is_q, rd);
}
@@ -8463,7 +8459,7 @@
tcg_rn = tcg_temp_new_i64();
tcg_rd = tcg_temp_new_i64();
tcg_rd_narrowed = tcg_temp_new_i32();
- tcg_final = tcg_const_i64(0);
+ tcg_final = tcg_temp_new_i64();
if (round) {
tcg_round = tcg_constant_i64(1ULL << (shift - 1));
@@ -8477,7 +8473,11 @@
false, is_u_shift, size+1, shift);
narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
- tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ if (i == 0) {
+ tcg_gen_mov_i64(tcg_final, tcg_rd);
+ } else {
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
}
if (!is_q) {
@@ -8771,9 +8771,8 @@
assert(!(is_scalar && is_q));
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
fracbits = (16 << size) - immhb;
tcg_shift = tcg_constant_i32(fracbits);
@@ -8831,7 +8830,7 @@
}
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
/* AdvSIMD scalar shift by immediate
@@ -10219,12 +10218,11 @@
}
if (is_fcvt) {
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
} else {
- tcg_rmode = NULL;
tcg_fpstatus = NULL;
+ tcg_rmode = NULL;
}
if (size == 3) {
@@ -10276,7 +10274,7 @@
}
if (is_fcvt) {
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
@@ -12005,22 +12003,26 @@
int esize = 8 << size;
int elements = dsize / esize;
TCGv_i64 tcg_rn = tcg_temp_new_i64();
- TCGv_i64 tcg_rd = tcg_const_i64(0);
- TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
+ TCGv_i64 tcg_rd[2];
+
+ for (i = 0; i < 2; i++) {
+ tcg_rd[i] = tcg_temp_new_i64();
+ tcg_gen_movi_i64(tcg_rd[i], 0);
+ }
for (i = 0; i < elements; i++) {
int e_rev = (i & 0xf) ^ revmask;
- int off = e_rev * esize;
+ int w = (e_rev * esize) / 64;
+ int o = (e_rev * esize) % 64;
+
read_vec_element(s, tcg_rn, rn, i, size);
- if (off >= 64) {
- tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
- tcg_rn, off - 64, esize);
- } else {
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
- }
+ tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
}
- write_vec_element(s, tcg_rd, rd, 0, MO_64);
- write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
+
+ for (i = 0; i < 2; i++) {
+ write_vec_element(s, tcg_rd[i], rd, i, MO_64);
+ }
+ clear_vec_high(s, true, rd);
}
}
@@ -12133,7 +12135,6 @@
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
bool need_fpstatus = false;
- bool need_rmode = false;
int rmode = -1;
TCGv_i32 tcg_rmode;
TCGv_ptr tcg_fpstatus;
@@ -12283,7 +12284,6 @@
case 0x7a: /* FCVTPU */
case 0x7b: /* FCVTZU */
need_fpstatus = true;
- need_rmode = true;
rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
if (size == 3 && !is_q) {
unallocated_encoding(s);
@@ -12293,7 +12293,6 @@
case 0x5c: /* FCVTAU */
case 0x1c: /* FCVTAS */
need_fpstatus = true;
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
if (size == 3 && !is_q) {
unallocated_encoding(s);
@@ -12352,7 +12351,6 @@
case 0x19: /* FRINTM */
case 0x38: /* FRINTP */
case 0x39: /* FRINTZ */
- need_rmode = true;
rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
/* fall through */
case 0x59: /* FRINTX */
@@ -12364,7 +12362,6 @@
}
break;
case 0x58: /* FRINTA */
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
need_fpstatus = true;
if (size == 3 && !is_q) {
@@ -12380,7 +12377,6 @@
break;
case 0x1e: /* FRINT32Z */
case 0x1f: /* FRINT64Z */
- need_rmode = true;
rmode = FPROUNDING_ZERO;
/* fall through */
case 0x5e: /* FRINT32X */
@@ -12406,14 +12402,13 @@
return;
}
- if (need_fpstatus || need_rmode) {
+ if (need_fpstatus || rmode >= 0) {
tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
} else {
tcg_fpstatus = NULL;
}
- if (need_rmode) {
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ if (rmode >= 0) {
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
} else {
tcg_rmode = NULL;
}
@@ -12595,8 +12590,8 @@
}
clear_vec_high(s, is_q, rd);
- if (need_rmode) {
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ if (tcg_rmode) {
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
@@ -12625,9 +12620,8 @@
int pass;
TCGv_i32 tcg_rmode = NULL;
TCGv_ptr tcg_fpstatus = NULL;
- bool need_rmode = false;
bool need_fpst = true;
- int rmode;
+ int rmode = -1;
if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
@@ -12676,27 +12670,22 @@
case 0x3f: /* FRECPX */
break;
case 0x18: /* FRINTN */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_TIEEVEN;
break;
case 0x19: /* FRINTM */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_NEGINF;
break;
case 0x38: /* FRINTP */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_POSINF;
break;
case 0x39: /* FRINTZ */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_ZERO;
break;
case 0x58: /* FRINTA */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_TIEAWAY;
break;
@@ -12706,43 +12695,33 @@
/* current rounding mode */
break;
case 0x1a: /* FCVTNS */
- need_rmode = true;
rmode = FPROUNDING_TIEEVEN;
break;
case 0x1b: /* FCVTMS */
- need_rmode = true;
rmode = FPROUNDING_NEGINF;
break;
case 0x1c: /* FCVTAS */
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
break;
case 0x3a: /* FCVTPS */
- need_rmode = true;
rmode = FPROUNDING_POSINF;
break;
case 0x3b: /* FCVTZS */
- need_rmode = true;
rmode = FPROUNDING_ZERO;
break;
case 0x5a: /* FCVTNU */
- need_rmode = true;
rmode = FPROUNDING_TIEEVEN;
break;
case 0x5b: /* FCVTMU */
- need_rmode = true;
rmode = FPROUNDING_NEGINF;
break;
case 0x5c: /* FCVTAU */
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
break;
case 0x7a: /* FCVTPU */
- need_rmode = true;
rmode = FPROUNDING_POSINF;
break;
case 0x7b: /* FCVTZU */
- need_rmode = true;
rmode = FPROUNDING_ZERO;
break;
case 0x2f: /* FABS */
@@ -12775,13 +12754,12 @@
return;
}
- if (need_rmode || need_fpst) {
+ if (rmode >= 0 || need_fpst) {
tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
}
- if (need_rmode) {
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ if (rmode >= 0) {
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
}
if (is_scalar) {
@@ -12881,7 +12859,7 @@
}
if (tcg_rmode) {
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c
index 798b4fd..31fb211 100644
--- a/target/arm/tcg/translate-mve.c
+++ b/target/arm/tcg/translate-mve.c
@@ -588,7 +588,7 @@
DO_VCVT(VCVT_FU, vcvt_hu, vcvt_fu)
static bool do_vcvt_rmode(DisasContext *s, arg_1op *a,
- enum arm_fprounding rmode, bool u)
+ ARMFPRounding rmode, bool u)
{
/*
* Handle VCVT fp to int with specified rounding mode.
@@ -1150,7 +1150,7 @@
MVEGenLongDualAccOpFn *fn)
{
TCGv_ptr qn, qm;
- TCGv_i64 rda;
+ TCGv_i64 rda_i, rda_o;
TCGv_i32 rdalo, rdahi;
if (!dc_isar_feature(aa32_mve, s) ||
@@ -1177,21 +1177,22 @@
* of an A=0 (no-accumulate) insn which does not execute the first
* beat must start with the current rda value, not 0.
*/
+ rda_o = tcg_temp_new_i64();
if (a->a || mve_skip_first_beat(s)) {
- rda = tcg_temp_new_i64();
+ rda_i = rda_o;
rdalo = load_reg(s, a->rdalo);
rdahi = load_reg(s, a->rdahi);
- tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+ tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi);
} else {
- rda = tcg_const_i64(0);
+ rda_i = tcg_constant_i64(0);
}
- fn(rda, cpu_env, qn, qm, rda);
+ fn(rda_o, cpu_env, qn, qm, rda_i);
rdalo = tcg_temp_new_i32();
rdahi = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(rdalo, rda);
- tcg_gen_extrh_i64_i32(rdahi, rda);
+ tcg_gen_extrl_i64_i32(rdalo, rda_o);
+ tcg_gen_extrh_i64_i32(rdahi, rda_o);
store_reg(s, a->rdalo, rdalo);
store_reg(s, a->rdahi, rdahi);
mve_update_eci(s);
@@ -1258,7 +1259,7 @@
static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
{
TCGv_ptr qn, qm;
- TCGv_i32 rda;
+ TCGv_i32 rda_i, rda_o;
if (!dc_isar_feature(aa32_mve, s) ||
!mve_check_qreg_bank(s, a->qn) ||
@@ -1278,13 +1279,14 @@
* beat must start with the current rda value, not 0.
*/
if (a->a || mve_skip_first_beat(s)) {
- rda = load_reg(s, a->rda);
+ rda_o = rda_i = load_reg(s, a->rda);
} else {
- rda = tcg_const_i32(0);
+ rda_i = tcg_constant_i32(0);
+ rda_o = tcg_temp_new_i32();
}
- fn(rda, cpu_env, qn, qm, rda);
- store_reg(s, a->rda, rda);
+ fn(rda_o, cpu_env, qn, qm, rda_i);
+ store_reg(s, a->rda, rda_o);
mve_update_eci(s);
return true;
@@ -1396,7 +1398,7 @@
{ NULL, NULL }
};
TCGv_ptr qm;
- TCGv_i32 rda;
+ TCGv_i32 rda_i, rda_o;
if (!dc_isar_feature(aa32_mve, s) ||
a->size == 3) {
@@ -1413,15 +1415,16 @@
*/
if (a->a || mve_skip_first_beat(s)) {
/* Accumulate input from Rda */
- rda = load_reg(s, a->rda);
+ rda_o = rda_i = load_reg(s, a->rda);
} else {
/* Accumulate starting at zero */
- rda = tcg_const_i32(0);
+ rda_i = tcg_constant_i32(0);
+ rda_o = tcg_temp_new_i32();
}
qm = mve_qreg_ptr(a->qm);
- fns[a->size][a->u](rda, cpu_env, qm, rda);
- store_reg(s, a->rda, rda);
+ fns[a->size][a->u](rda_o, cpu_env, qm, rda_i);
+ store_reg(s, a->rda, rda_o);
mve_update_eci(s);
return true;
@@ -1436,7 +1439,7 @@
* No need to check Qm's bank: it is only 3 bits in decode.
*/
TCGv_ptr qm;
- TCGv_i64 rda;
+ TCGv_i64 rda_i, rda_o;
TCGv_i32 rdalo, rdahi;
if (!dc_isar_feature(aa32_mve, s)) {
@@ -1458,28 +1461,29 @@
* of an A=0 (no-accumulate) insn which does not execute the first
* beat must start with the current value of RdaHi:RdaLo, not zero.
*/
+ rda_o = tcg_temp_new_i64();
if (a->a || mve_skip_first_beat(s)) {
/* Accumulate input from RdaHi:RdaLo */
- rda = tcg_temp_new_i64();
+ rda_i = rda_o;
rdalo = load_reg(s, a->rdalo);
rdahi = load_reg(s, a->rdahi);
- tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+ tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi);
} else {
/* Accumulate starting at zero */
- rda = tcg_const_i64(0);
+ rda_i = tcg_constant_i64(0);
}
qm = mve_qreg_ptr(a->qm);
if (a->u) {
- gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
+ gen_helper_mve_vaddlv_u(rda_o, cpu_env, qm, rda_i);
} else {
- gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
+ gen_helper_mve_vaddlv_s(rda_o, cpu_env, qm, rda_i);
}
rdalo = tcg_temp_new_i32();
rdahi = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(rdalo, rda);
- tcg_gen_extrh_i64_i32(rdahi, rda);
+ tcg_gen_extrl_i64_i32(rdalo, rda_o);
+ tcg_gen_extrh_i64_i32(rdahi, rda_o);
store_reg(s, a->rdalo, rdalo);
store_reg(s, a->rdahi, rdahi);
mve_update_eci(s);
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index 5bf80b2..92ab290 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -4082,7 +4082,7 @@
a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
- int mode, gen_helper_gvec_3_ptr *fn)
+ ARMFPRounding mode, gen_helper_gvec_3_ptr *fn)
{
unsigned vsz;
TCGv_i32 tmode;
@@ -4096,30 +4096,28 @@
}
vsz = vec_full_reg_size(s);
- tmode = tcg_const_i32(mode);
status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
-
- gen_helper_set_rmode(tmode, tmode, status);
+ tmode = gen_set_rmode(mode, status);
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
pred_full_reg_offset(s, a->pg),
status, vsz, vsz, 0, fn);
- gen_helper_set_rmode(tmode, tmode, status);
+ gen_restore_rmode(tmode, status);
return true;
}
TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a,
- float_round_nearest_even, frint_fns[a->esz])
+ FPROUNDING_TIEEVEN, frint_fns[a->esz])
TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a,
- float_round_up, frint_fns[a->esz])
+ FPROUNDING_POSINF, frint_fns[a->esz])
TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a,
- float_round_down, frint_fns[a->esz])
+ FPROUNDING_NEGINF, frint_fns[a->esz])
TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a,
- float_round_to_zero, frint_fns[a->esz])
+ FPROUNDING_ZERO, frint_fns[a->esz])
TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a,
- float_round_ties_away, frint_fns[a->esz])
+ FPROUNDING_TIEAWAY, frint_fns[a->esz])
static gen_helper_gvec_3_ptr * const frecpx_fns[] = {
NULL, gen_helper_sve_frecpx_h,
@@ -4208,8 +4206,9 @@
}
} else {
TCGLabel *loop = gen_new_label();
- TCGv_ptr tp, i = tcg_const_ptr(0);
+ TCGv_ptr tp, i = tcg_temp_new_ptr();
+ tcg_gen_movi_ptr(i, 0);
gen_set_label(loop);
t0 = tcg_temp_new_i64();
@@ -4286,8 +4285,9 @@
}
} else {
TCGLabel *loop = gen_new_label();
- TCGv_ptr tp, i = tcg_const_ptr(0);
+ TCGv_ptr tp, i = tcg_temp_new_ptr();
+ tcg_gen_movi_ptr(i, 0);
gen_set_label(loop);
t0 = tcg_temp_new_i64();
@@ -7145,9 +7145,9 @@
gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR)
TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a,
- float_round_to_odd, gen_helper_sve_fcvt_ds)
+ FPROUNDING_ODD, gen_helper_sve_fcvt_ds)
TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a,
- float_round_to_odd, gen_helper_sve2_fcvtnt_ds)
+ FPROUNDING_ODD, gen_helper_sve2_fcvtnt_ds)
static gen_helper_gvec_3_ptr * const flogb_fns[] = {
NULL, gen_helper_flogb_h,
diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c
index 757a2bf..dd782aa 100644
--- a/target/arm/tcg/translate-vfp.c
+++ b/target/arm/tcg/translate-vfp.c
@@ -464,8 +464,7 @@
fpst = fpstatus_ptr(FPST_FPCR);
}
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(rounding, fpst);
if (sz == 3) {
TCGv_i64 tcg_op;
@@ -489,7 +488,7 @@
vfp_store_reg32(tcg_res, rd);
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
return true;
}
@@ -533,9 +532,7 @@
}
tcg_shift = tcg_constant_i32(0);
-
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(rounding, fpst);
if (sz == 3) {
TCGv_i64 tcg_double, tcg_res;
@@ -572,7 +569,7 @@
vfp_store_reg32(tcg_res, rd);
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
return true;
}
@@ -2783,10 +2780,9 @@
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR_F16);
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rinth(tmp, tmp, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
}
@@ -2808,10 +2804,9 @@
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rints(tmp, tmp, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
}
@@ -2842,10 +2837,9 @@
tmp = tcg_temp_new_i64();
vfp_load_reg64(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rintd(tmp, tmp, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
vfp_store_reg64(tmp, a->vd);
return true;
}
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index b70b628..2cb9368 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -7261,8 +7261,8 @@
static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
{
- TCGv_i32 tmp;
int msb = a->msb, lsb = a->lsb;
+ TCGv_i32 t_in, t_rd;
int width;
if (!ENABLE_ARCH_6T2) {
@@ -7277,16 +7277,14 @@
width = msb + 1 - lsb;
if (a->rn == 15) {
/* BFC */
- tmp = tcg_const_i32(0);
+ t_in = tcg_constant_i32(0);
} else {
/* BFI */
- tmp = load_reg(s, a->rn);
+ t_in = load_reg(s, a->rn);
}
- if (width != 32) {
- TCGv_i32 tmp2 = load_reg(s, a->rd);
- tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
- }
- store_reg(s, a->rd, tmp);
+ t_rd = load_reg(s, a->rd);
+ tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width);
+ store_reg(s, a->rd, t_rd);
return true;
}
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 20f3ca7..f02d468 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -617,6 +617,23 @@
}
/*
+ * Set and reset rounding mode around another operation.
+ */
+static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst)
+{
+ TCGv_i32 new = tcg_constant_i32(arm_rmode_to_sf(rmode));
+ TCGv_i32 old = tcg_temp_new_i32();
+
+ gen_helper_set_rmode(old, new, fpst);
+ return old;
+}
+
+static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst)
+{
+ gen_helper_set_rmode(old, old, fpst);
+}
+
+/*
* Helpers for implementing sets of trans_* functions.
* Defer the implementation of NAME to FUNC, with optional extra arguments.
*/
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index 24e3d82..36906db 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -1104,33 +1104,14 @@
}
/* Convert ARM rounding mode to softfloat */
-int arm_rmode_to_sf(int rmode)
-{
- switch (rmode) {
- case FPROUNDING_TIEAWAY:
- rmode = float_round_ties_away;
- break;
- case FPROUNDING_ODD:
- /* FIXME: add support for TIEAWAY and ODD */
- qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
- rmode);
- /* fall through for now */
- case FPROUNDING_TIEEVEN:
- default:
- rmode = float_round_nearest_even;
- break;
- case FPROUNDING_POSINF:
- rmode = float_round_up;
- break;
- case FPROUNDING_NEGINF:
- rmode = float_round_down;
- break;
- case FPROUNDING_ZERO:
- rmode = float_round_to_zero;
- break;
- }
- return rmode;
-}
+const FloatRoundMode arm_rmode_to_sf_map[] = {
+ [FPROUNDING_TIEEVEN] = float_round_nearest_even,
+ [FPROUNDING_POSINF] = float_round_up,
+ [FPROUNDING_NEGINF] = float_round_down,
+ [FPROUNDING_ZERO] = float_round_to_zero,
+ [FPROUNDING_TIEAWAY] = float_round_ties_away,
+ [FPROUNDING_ODD] = float_round_to_odd,
+};
/*
* Implement float64 to int32_t conversion without saturation;
diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h
index 7ef4e7c..9a92bc7 100644
--- a/target/avr/cpu-param.h
+++ b/target/avr/cpu-param.h
@@ -31,6 +31,5 @@
#define TARGET_PAGE_BITS 8
#define TARGET_PHYS_ADDR_SPACE_BITS 24
#define TARGET_VIRT_ADDR_SPACE_BITS 24
-#define NB_MMU_MODES 2
#endif
diff --git a/target/avr/translate.c b/target/avr/translate.c
index b9506a8..a6aeae6 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -400,7 +400,7 @@
static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv Rr = tcg_const_i32(a->imm);
+ TCGv Rr = tcg_constant_i32(a->imm);
TCGv R = tcg_temp_new_i32();
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */
@@ -425,7 +425,7 @@
TCGv Rd = cpu_r[a->rd];
TCGv Rr = cpu_r[a->rr];
TCGv R = tcg_temp_new_i32();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
tcg_gen_sub_tl(R, R, cpu_Cf);
@@ -453,9 +453,9 @@
static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv Rr = tcg_const_i32(a->imm);
+ TCGv Rr = tcg_constant_i32(a->imm);
TCGv R = tcg_temp_new_i32();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
tcg_gen_sub_tl(R, R, cpu_Cf);
@@ -637,7 +637,7 @@
static bool trans_NEG(DisasContext *ctx, arg_NEG *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
TCGv R = tcg_temp_new_i32();
tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */
@@ -930,19 +930,19 @@
static void gen_push_ret(DisasContext *ctx, int ret)
{
if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
- TCGv t0 = tcg_const_i32((ret & 0x0000ff));
+ TCGv t0 = tcg_constant_i32(ret & 0x0000ff);
tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
} else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
- TCGv t0 = tcg_const_i32((ret & 0x00ffff));
+ TCGv t0 = tcg_constant_i32(ret & 0x00ffff);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
} else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
- TCGv lo = tcg_const_i32((ret & 0x0000ff));
- TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8);
+ TCGv lo = tcg_constant_i32(ret & 0x0000ff);
+ TCGv hi = tcg_constant_i32((ret & 0xffff00) >> 8);
tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 2);
@@ -1211,7 +1211,7 @@
TCGv Rd = cpu_r[a->rd];
TCGv Rr = cpu_r[a->rr];
TCGv R = tcg_temp_new_i32();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
tcg_gen_sub_tl(R, R, cpu_Cf);
@@ -1238,7 +1238,7 @@
{
TCGv Rd = cpu_r[a->rd];
int Imm = a->imm;
- TCGv Rr = tcg_const_i32(Imm);
+ TCGv Rr = tcg_constant_i32(Imm);
TCGv R = tcg_temp_new_i32();
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
@@ -1288,12 +1288,13 @@
*/
static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
{
- TCGv temp = tcg_const_i32(a->reg);
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(temp, cpu_env, temp);
- tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_EQ;
- ctx->skip_var0 = temp;
+ ctx->skip_var0 = data;
return true;
}
@@ -1305,12 +1306,13 @@
*/
static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a)
{
- TCGv temp = tcg_const_i32(a->reg);
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(temp, cpu_env, temp);
- tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_NE;
- ctx->skip_var0 = temp;
+ ctx->skip_var0 = data;
return true;
}
@@ -2122,7 +2124,7 @@
static bool trans_IN(DisasContext *ctx, arg_IN *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv port = tcg_const_i32(a->imm);
+ TCGv port = tcg_constant_i32(a->imm);
gen_helper_inb(Rd, cpu_env, port);
return true;
@@ -2135,7 +2137,7 @@
static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv port = tcg_const_i32(a->imm);
+ TCGv port = tcg_constant_i32(a->imm);
gen_helper_outb(cpu_env, port, Rd);
return true;
@@ -2403,7 +2405,7 @@
static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_const_i32(a->reg);
+ TCGv port = tcg_constant_i32(a->reg);
gen_helper_inb(data, cpu_env, port);
tcg_gen_ori_tl(data, data, 1 << a->bit);
@@ -2418,7 +2420,7 @@
static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_const_i32(a->reg);
+ TCGv port = tcg_constant_i32(a->reg);
gen_helper_inb(data, cpu_env, port);
tcg_gen_andi_tl(data, data, ~(1 << a->bit));
diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h
index 12ec22d..b31b742 100644
--- a/target/cris/cpu-param.h
+++ b/target/cris/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 13
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 2
#endif
diff --git a/target/cris/translate.c b/target/cris/translate.c
index 5172c9b..b2beb99 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -175,10 +175,7 @@
#define t_gen_mov_env_TN(member, tn) \
tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member))
#define t_gen_movi_env_TN(member, c) \
- do { \
- TCGv tc = tcg_const_tl(c); \
- t_gen_mov_env_TN(member, tc); \
- } while (0)
+ t_gen_mov_env_TN(member, tcg_constant_tl(c))
static inline void t_gen_mov_TN_preg(TCGv tn, int r)
{
@@ -268,8 +265,7 @@
static inline void t_gen_raise_exception(uint32_t index)
{
- TCGv_i32 tmp = tcg_const_i32(index);
- gen_helper_raise_exception(cpu_env, tmp);
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
}
static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
@@ -277,7 +273,7 @@
TCGv t0, t_31;
t0 = tcg_temp_new();
- t_31 = tcg_const_tl(31);
+ t_31 = tcg_constant_tl(31);
tcg_gen_shl_tl(d, a, b);
tcg_gen_sub_tl(t0, t_31, b);
@@ -1250,7 +1246,7 @@
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(dc->op1);
+ c = tcg_constant_tl(dc->op1);
cris_alu(dc, CC_OP_ADD,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1274,7 +1270,7 @@
LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(dc->op1);
+ c = tcg_constant_tl(dc->op1);
cris_alu(dc, CC_OP_SUB,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1289,7 +1285,7 @@
LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_CMP,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1304,7 +1300,7 @@
LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_AND,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1318,7 +1314,7 @@
LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_OR,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1330,7 +1326,7 @@
LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
- c = tcg_const_tl(dc->op1);
+ c = tcg_constant_tl(dc->op1);
cris_evaluate_flags(dc);
gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
c, cpu_PR[PR_CCS]);
@@ -1945,8 +1941,8 @@
{
TCGv c2, c1;
LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
- c1 = tcg_const_tl(dc->op1);
- c2 = tcg_const_tl(dc->op2);
+ c1 = tcg_constant_tl(dc->op1);
+ c2 = tcg_constant_tl(dc->op2);
cris_cc_mask(dc, 0);
gen_helper_movl_sreg_reg(cpu_env, c2, c1);
return 2;
@@ -1955,8 +1951,8 @@
{
TCGv c2, c1;
LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
- c1 = tcg_const_tl(dc->op1);
- c2 = tcg_const_tl(dc->op2);
+ c1 = tcg_constant_tl(dc->op1);
+ c2 = tcg_constant_tl(dc->op2);
cris_cc_mask(dc, 0);
gen_helper_movl_reg_sreg(cpu_env, c1, c2);
return 2;
@@ -2237,7 +2233,7 @@
cris_cc_mask(dc, CC_MASK_NZ);
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
- c = tcg_const_tl(0);
+ c = tcg_constant_tl(0);
cris_alu(dc, CC_OP_CMP,
cpu_R[dc->op2], t[1], c, memsize_zz(dc));
do_postinc(dc, memsize);
@@ -2582,7 +2578,7 @@
if (dc->op2 > 15) {
abort();
}
- c = tcg_const_tl(dc->pc + 4);
+ c = tcg_constant_tl(dc->pc + 4);
t_gen_mov_preg_TN(dc, dc->op2, c);
cris_prepare_jmp(dc, JMP_INDIRECT);
@@ -2598,7 +2594,7 @@
LOG_DIS("jas 0x%x\n", imm);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 8);
+ c = tcg_constant_tl(dc->pc + 8);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2616,7 +2612,7 @@
LOG_DIS("jasc 0x%x\n", imm);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 8 + 4);
+ c = tcg_constant_tl(dc->pc + 8 + 4);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2632,7 +2628,7 @@
cris_cc_mask(dc, 0);
/* Store the return address in Pd. */
tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
- c = tcg_const_tl(dc->pc + 4 + 4);
+ c = tcg_constant_tl(dc->pc + 4 + 4);
t_gen_mov_preg_TN(dc, dc->op2, c);
cris_prepare_jmp(dc, JMP_INDIRECT);
return 2;
@@ -2664,7 +2660,7 @@
LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 8);
+ c = tcg_constant_tl(dc->pc + 8);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2681,7 +2677,7 @@
LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 12);
+ c = tcg_constant_tl(dc->pc + 12);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2695,7 +2691,7 @@
cris_cc_mask(dc, 0);
if (dc->op2 == 15) {
- tcg_gen_st_i32(tcg_const_i32(1), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
-offsetof(CRISCPU, env) + offsetof(CPUState, halted));
tcg_gen_movi_tl(env_pc, dc->pc + 2);
t_gen_raise_exception(EXCP_HLT);
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
index b03b2ef..32338bb 100644
--- a/target/cris/translate_v10.c.inc
+++ b/target/cris/translate_v10.c.inc
@@ -251,7 +251,7 @@
LOG_DIS("moveq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -259,7 +259,7 @@
LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -267,7 +267,7 @@
LOG_DIS("addq %d, $r%d\n", imm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -275,7 +275,7 @@
LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_AND, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -285,7 +285,7 @@
cris_cc_mask(dc, CC_MASK_NZVC);
op = imm & (1 << 5);
imm &= 0x1f;
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
if (op) {
cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
@@ -305,7 +305,7 @@
}
imm &= 0x1f;
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, op, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -313,7 +313,7 @@
LOG_DIS("subq %d, $r%d\n", imm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -321,7 +321,7 @@
LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_OR, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -1014,7 +1014,7 @@
cris_alu_m_alloc_temps(t);
insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
- c = tcg_const_tl(0);
+ c = tcg_constant_tl(0);
cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
t[0], c, size);
break;
@@ -1111,7 +1111,7 @@
if (dc->mode == CRISV10_MODE_AUTOINC)
insn_len += size;
- c = tcg_const_tl(dc->pc + insn_len);
+ c = tcg_constant_tl(dc->pc + insn_len);
t_gen_mov_preg_TN(dc, dc->dst, c);
dc->jmp_pc = imm;
cris_prepare_jmp(dc, JMP_DIRECT);
@@ -1121,7 +1121,7 @@
LOG_DIS("break %d\n", dc->src);
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc + 2);
- c = tcg_const_tl(dc->src + 2);
+ c = tcg_constant_tl(dc->src + 2);
t_gen_mov_env_TN(trap_vector, c);
t_gen_raise_exception(EXCP_BREAK);
dc->base.is_jmp = DISAS_NORETURN;
@@ -1130,7 +1130,7 @@
LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
dc->opcode, dc->src, dc->dst);
t[0] = tcg_temp_new();
- c = tcg_const_tl(dc->pc + insn_len);
+ c = tcg_constant_tl(dc->pc + insn_len);
t_gen_mov_preg_TN(dc, dc->dst, c);
crisv10_prepare_memaddr(dc, t[0], size);
gen_load(dc, env_btarget, t[0], 4, 0);
@@ -1153,7 +1153,7 @@
LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n",
dc->pc, dc->opcode, dc->dst, dc->src);
tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]);
- c = tcg_const_tl(dc->pc + insn_len);
+ c = tcg_constant_tl(dc->pc + insn_len);
t_gen_mov_preg_TN(dc, dc->dst, c);
cris_prepare_jmp(dc, JMP_INDIRECT);
dc->delayed_branch--; /* v10 has no dslot here. */
diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h
index e8ed546..71b4a9b 100644
--- a/target/hexagon/cpu-param.h
+++ b/target/hexagon/cpu-param.h
@@ -24,6 +24,4 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 1
-
#endif
diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h
index a48a270..c2791ae 100644
--- a/target/hppa/cpu-param.h
+++ b/target/hppa/cpu-param.h
@@ -29,6 +29,5 @@
# define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 5
#endif
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index cb4fd1f..6a3154e 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -135,8 +135,6 @@
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
-#define tcg_const_reg tcg_const_i64
-#define tcg_const_local_reg tcg_const_local_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
@@ -228,8 +226,6 @@
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
-#define tcg_const_reg tcg_const_i32
-#define tcg_const_local_reg tcg_const_local_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
@@ -574,7 +570,9 @@
static TCGv_i32 load_frw0_i32(unsigned rt)
{
if (rt == 0) {
- return tcg_const_i32(0);
+ TCGv_i32 ret = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ret, 0);
+ return ret;
} else {
return load_frw_i32(rt);
}
@@ -582,15 +580,15 @@
static TCGv_i64 load_frw0_i64(unsigned rt)
{
+ TCGv_i64 ret = tcg_temp_new_i64();
if (rt == 0) {
- return tcg_const_i64(0);
+ tcg_gen_movi_i64(ret, 0);
} else {
- TCGv_i64 ret = tcg_temp_new_i64();
tcg_gen_ld32u_i64(ret, cpu_env,
offsetof(CPUHPPAState, fr[rt & 31])
+ (rt & 32 ? LO_OFS : HI_OFS));
- return ret;
}
+ return ret;
}
static void save_frw_i32(unsigned rt, TCGv_i32 val)
@@ -613,7 +611,9 @@
static TCGv_i64 load_frd0(unsigned rt)
{
if (rt == 0) {
- return tcg_const_i64(0);
+ TCGv_i64 ret = tcg_temp_new_i64();
+ tcg_gen_movi_i64(ret, 0);
+ return ret;
} else {
return load_frd(rt);
}
@@ -3330,7 +3330,8 @@
/* Convert big-endian bit numbering in SAR to left-shift. */
tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
- mask = tcg_const_reg(msb + (msb - 1));
+ mask = tcg_temp_new();
+ tcg_gen_movi_reg(mask, msb + (msb - 1));
tcg_gen_and_reg(tmp, val, mask);
if (rs) {
tcg_gen_shl_reg(mask, mask, shift);
@@ -3547,12 +3548,16 @@
static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
+ uint64_t ret;
+
+ if (TARGET_REGISTER_BITS == 64) {
+ ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
+ } else {
+ ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
+ }
+
nullify_over(ctx);
-#if TARGET_REGISTER_BITS == 64
- save_frd(0, tcg_const_i64(0x13080000000000ULL)); /* PA8700 (PCX-W2) */
-#else
- save_frd(0, tcg_const_i64(0x0f080000000000ULL)); /* PA7300LC (PCX-L2) */
-#endif
+ save_frd(0, tcg_constant_i64(ret));
return nullify_end(ctx);
}
diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h
index abad52a..911b4cd 100644
--- a/target/i386/cpu-param.h
+++ b/target/i386/cpu-param.h
@@ -23,6 +23,5 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 5
#endif
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index fa422eb..9dfad2f 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -884,7 +884,7 @@
live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
if (dead) {
- zero = tcg_const_tl(0);
+ zero = tcg_constant_tl(0);
if (dead & USES_CC_DST) {
dst = zero;
}
@@ -1412,7 +1412,7 @@
/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
- TCGv_i32 tmp = tcg_const_i32(opreg);
+ TCGv_i32 tmp = tcg_constant_i32(opreg);
switch (op) {
case 0:
gen_helper_fadd_STN_ST0(cpu_env, tmp);
@@ -1439,7 +1439,7 @@
{
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
s->base.is_jmp = DISAS_NORETURN;
}
@@ -1633,7 +1633,7 @@
/* Store the results into the CC variables. If we know that the
variable must be dead, store unconditionally. Otherwise we'll
need to not disrupt the current contents. */
- z_tl = tcg_const_tl(0);
+ z_tl = tcg_constant_tl(0);
if (cc_op_live[s->cc_op] & USES_CC_DST) {
tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
result, cpu_cc_dst);
@@ -1657,7 +1657,7 @@
}
/* Conditionally store the CC_OP value. */
- z32 = tcg_const_i32(0);
+ z32 = tcg_constant_i32(0);
s32 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(s32, count);
tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
@@ -1813,7 +1813,7 @@
is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
exactly as we computed above. */
- t0 = tcg_const_i32(0);
+ t0 = tcg_constant_i32(0);
t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, s->T1);
tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
@@ -2497,7 +2497,7 @@
cc.reg = t0;
}
if (!cc.use_reg2) {
- cc.reg2 = tcg_const_tl(cc.imm);
+ cc.reg2 = tcg_constant_tl(cc.imm);
}
tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
@@ -2525,7 +2525,7 @@
{
if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32);
+ gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
stop as a special handling must be done to disable hardware
@@ -4344,7 +4344,7 @@
gen_op_mov_v_reg(s, ot, s->T1, reg);
if (shift) {
- TCGv imm = tcg_const_tl(x86_ldub_code(env, s));
+ TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
gen_shiftd_rm_T1(s, ot, opreg, op, imm);
} else {
gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
@@ -4503,7 +4503,7 @@
break;
case 0x0c: /* fldenv mem */
gen_helper_fldenv(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x0d: /* fldcw mem */
@@ -4514,7 +4514,7 @@
break;
case 0x0e: /* fnstenv mem */
gen_helper_fstenv(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x0f: /* fnstcw mem */
@@ -4532,12 +4532,12 @@
break;
case 0x2c: /* frstor mem */
gen_helper_frstor(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x2e: /* fnsave mem */
gen_helper_fsave(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x2f: /* fnstsw mem */
@@ -4587,12 +4587,12 @@
case 0x08: /* fld sti */
gen_helper_fpush(cpu_env);
gen_helper_fmov_ST0_STN(cpu_env,
- tcg_const_i32((opreg + 1) & 7));
+ tcg_constant_i32((opreg + 1) & 7));
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
- gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
break;
case 0x0a: /* grp d9/2 */
switch (rm) {
@@ -4732,27 +4732,27 @@
}
} else {
gen_helper_fmov_FT0_STN(cpu_env,
- tcg_const_i32(opreg));
+ tcg_constant_i32(opreg));
gen_helper_fp_arith_ST0_FT0(op1);
}
}
break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x15: /* da/5 */
switch (rm) {
case 1: /* fucompp */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
@@ -4786,7 +4786,7 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
@@ -4795,36 +4795,36 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x28: /* ffree sti */
- gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
break;
case 0x2a: /* fst sti */
- gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
- gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* fucom st(i) */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
break;
case 0x2d: /* fucomp st(i) */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x33: /* de/3 */
switch (rm) {
case 1: /* fcompp */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
@@ -4834,7 +4834,7 @@
}
break;
case 0x38: /* ffreep sti, undocumented op */
- gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x3c: /* df/4 */
@@ -4853,7 +4853,7 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
@@ -4863,7 +4863,7 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
@@ -4886,7 +4886,8 @@
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
gen_jcc1_noeob(s, op1, l1);
- gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_ST0_STN(cpu_env,
+ tcg_constant_i32(opreg));
gen_set_label(l1);
}
break;
@@ -5092,8 +5093,8 @@
if (PE(s) && !VM86(s)) {
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
- tcg_const_i32(val));
+ gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
+ tcg_constant_i32(val));
} else {
gen_stack_A0(s);
/* pop offset */
@@ -5120,7 +5121,7 @@
if (!check_vm86_iopl(s)) {
break;
}
- gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
+ gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
} else {
gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
eip_next_i32(s));
@@ -5509,7 +5510,7 @@
if (val == 0) {
gen_exception(s, EXCP00_DIVZ);
} else {
- gen_helper_aam(cpu_env, tcg_const_i32(val));
+ gen_helper_aam(cpu_env, tcg_constant_i32(val));
set_cc_op(s, CC_OP_LOGICB);
}
break;
@@ -5517,7 +5518,7 @@
if (CODE64(s))
goto illegal_op;
val = x86_ldub_code(env, s);
- gen_helper_aad(cpu_env, tcg_const_i32(val));
+ gen_helper_aad(cpu_env, tcg_constant_i32(val));
set_cc_op(s, CC_OP_LOGICB);
break;
/************************/
@@ -5698,7 +5699,7 @@
if (!PE(s)) {
gen_exception_gpf(s);
} else {
- gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
+ gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
s->base.is_jmp = DISAS_EOB_ONLY;
}
break;
@@ -5717,7 +5718,7 @@
if (!PE(s)) {
gen_exception_gpf(s);
} else {
- gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
+ gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (LMA(s)) {
set_cc_op(s, CC_OP_EFLAGS);
@@ -5923,7 +5924,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
+ gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
cur_insn_len_i32(s));
tcg_gen_exit_tb(NULL, 0);
s->base.is_jmp = DISAS_NORETURN;
@@ -5947,7 +5948,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
+ gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
break;
case 0xdb: /* VMSAVE */
@@ -5959,7 +5960,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
+ gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
break;
case 0xdc: /* STGI */
diff --git a/target/loongarch/cpu-param.h b/target/loongarch/cpu-param.h
index 414d8ff..1265dc7 100644
--- a/target/loongarch/cpu-param.h
+++ b/target/loongarch/cpu-param.h
@@ -13,6 +13,5 @@
#define TARGET_VIRT_ADDR_SPACE_BITS 48
#define TARGET_PAGE_BITS 14
-#define NB_MMU_MODES 5
#endif
diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h
index 44a8d19..39dcbce 100644
--- a/target/m68k/cpu-param.h
+++ b/target/m68k/cpu-param.h
@@ -17,6 +17,5 @@
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 2
#endif
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 3055d2d..422f465 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -264,10 +264,7 @@
static void gen_raise_exception(int nr)
{
- TCGv_i32 tmp;
-
- tmp = tcg_const_i32(nr);
- gen_helper_raise_exception(cpu_env, tmp);
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
}
static void gen_raise_exception_format2(DisasContext *s, int nr,
@@ -471,7 +468,7 @@
if ((ext & 0x80) == 0) {
/* base not suppressed */
if (IS_NULL_QREG(base)) {
- base = tcg_const_i32(offset + bd);
+ base = tcg_constant_i32(offset + bd);
bd = 0;
}
if (!IS_NULL_QREG(add)) {
@@ -487,7 +484,7 @@
add = tmp;
}
} else {
- add = tcg_const_i32(bd);
+ add = tcg_constant_i32(bd);
}
if ((ext & 3) != 0) {
/* memory indirect */
@@ -623,8 +620,7 @@
break;
default:
- t0 = tcg_const_i32(s->cc_op);
- gen_helper_flush_flags(cpu_env, t0);
+ gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
s->cc_op_synced = 1;
break;
}
@@ -785,14 +781,14 @@
switch (reg0) {
case 0: /* Absolute short. */
offset = (int16_t)read_im16(env, s);
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
case 1: /* Absolute long. */
offset = read_im32(env, s);
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
case 2: /* pc displacement */
offset = s->pc;
offset += (int16_t)read_im16(env, s);
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
case 3: /* pc index+displacement. */
return gen_lea_indexed(env, s, NULL_QREG);
case 4: /* Immediate. */
@@ -920,7 +916,7 @@
default:
g_assert_not_reached();
}
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
default:
return NULL_QREG;
}
@@ -1167,23 +1163,23 @@
}
switch (opsize) {
case OS_BYTE:
- tmp = tcg_const_i32((int8_t)read_im8(env, s));
+ tmp = tcg_constant_i32((int8_t)read_im8(env, s));
gen_helper_exts32(cpu_env, fp, tmp);
break;
case OS_WORD:
- tmp = tcg_const_i32((int16_t)read_im16(env, s));
+ tmp = tcg_constant_i32((int16_t)read_im16(env, s));
gen_helper_exts32(cpu_env, fp, tmp);
break;
case OS_LONG:
- tmp = tcg_const_i32(read_im32(env, s));
+ tmp = tcg_constant_i32(read_im32(env, s));
gen_helper_exts32(cpu_env, fp, tmp);
break;
case OS_SINGLE:
- tmp = tcg_const_i32(read_im32(env, s));
+ tmp = tcg_constant_i32(read_im32(env, s));
gen_helper_extf32(cpu_env, fp, tmp);
break;
case OS_DOUBLE:
- t64 = tcg_const_i64(read_im64(env, s));
+ t64 = tcg_constant_i64(read_im64(env, s));
gen_helper_extf64(cpu_env, fp, t64);
break;
case OS_EXTENDED:
@@ -1191,9 +1187,9 @@
gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
break;
}
- tmp = tcg_const_i32(read_im32(env, s) >> 16);
+ tmp = tcg_constant_i32(read_im32(env, s) >> 16);
tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
- t64 = tcg_const_i64(read_im64(env, s));
+ t64 = tcg_constant_i64(read_im64(env, s));
tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
break;
case OS_PACKED:
@@ -1253,7 +1249,7 @@
goto done;
case 10: /* PL */
case 11: /* MI */
- c->v2 = tcg_const_i32(0);
+ c->v2 = tcg_constant_i32(0);
c->v1 = tmp = tcg_temp_new();
tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
@@ -1269,7 +1265,7 @@
}
}
- c->v2 = tcg_const_i32(0);
+ c->v2 = tcg_constant_i32(0);
switch (cond) {
case 0: /* T */
@@ -1631,8 +1627,8 @@
* = result with some possible exceeding 0x6
*/
- t0 = tcg_const_i32(0x066);
- tcg_gen_add_i32(t0, t0, src);
+ t0 = tcg_temp_new();
+ tcg_gen_addi_i32(t0, src, 0x066);
t1 = tcg_temp_new();
tcg_gen_add_i32(t1, t0, dest);
@@ -1818,7 +1814,8 @@
SRC_EA(env, src, OS_BYTE, 0, &addr);
- dest = tcg_const_i32(0);
+ dest = tcg_temp_new();
+ tcg_gen_movi_i32(dest, 0);
bcd_sub(dest, src);
DEST_EA(env, insn, OS_BYTE, dest, &addr);
@@ -1896,8 +1893,8 @@
else
tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
- tmp = tcg_const_i32(1);
- tcg_gen_shl_i32(tmp, tmp, src2);
+ tmp = tcg_temp_new();
+ tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
@@ -1999,7 +1996,7 @@
addr = tcg_temp_new();
tcg_gen_mov_i32(addr, tmp);
- incr = tcg_const_i32(opsize_bytes(opsize));
+ incr = tcg_constant_i32(opsize_bytes(opsize));
if (is_load) {
/* memory to register */
@@ -2235,13 +2232,13 @@
opsize = insn_opsize(insn);
switch (opsize) {
case OS_BYTE:
- im = tcg_const_i32((int8_t)read_im8(env, s));
+ im = tcg_constant_i32((int8_t)read_im8(env, s));
break;
case OS_WORD:
- im = tcg_const_i32((int16_t)read_im16(env, s));
+ im = tcg_constant_i32((int16_t)read_im16(env, s));
break;
case OS_LONG:
- im = tcg_const_i32(read_im32(env, s));
+ im = tcg_constant_i32(read_im32(env, s));
break;
default:
g_assert_not_reached();
@@ -2393,7 +2390,6 @@
{
uint16_t ext1, ext2;
TCGv addr1, addr2;
- TCGv regs;
/* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
@@ -2425,13 +2421,13 @@
* Dc2 = (R2)
*/
- regs = tcg_const_i32(REG(ext2, 6) |
- (REG(ext1, 6) << 3) |
- (REG(ext2, 0) << 6) |
- (REG(ext1, 0) << 9));
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_exit_atomic(cpu_env);
} else {
+ TCGv regs = tcg_constant_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
gen_helper_cas2w(cpu_env, regs, addr1, addr2);
}
@@ -2475,10 +2471,10 @@
* Dc2 = (R2)
*/
- regs = tcg_const_i32(REG(ext2, 6) |
- (REG(ext1, 6) << 3) |
- (REG(ext2, 0) << 6) |
- (REG(ext1, 0) << 9));
+ regs = tcg_constant_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
} else {
@@ -2552,7 +2548,7 @@
* (X, N) = -(src + X);
*/
- z = tcg_const_i32(0);
+ z = tcg_constant_i32(0);
tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
@@ -2597,8 +2593,7 @@
int opsize;
TCGv zero;
- zero = tcg_const_i32(0);
-
+ zero = tcg_constant_i32(0);
opsize = insn_opsize(insn);
DEST_EA(env, insn, opsize, zero, NULL);
gen_logic_cc(s, zero, opsize);
@@ -2934,7 +2929,7 @@
}
if ((insn & 0x40) == 0) {
/* jsr */
- gen_push(s, tcg_const_i32(s->pc));
+ gen_push(s, tcg_constant_i32(s->pc));
}
gen_jmp(s, tmp);
}
@@ -2959,7 +2954,7 @@
if (imm == 0) {
imm = 8;
}
- val = tcg_const_i32(imm);
+ val = tcg_constant_i32(imm);
dest = tcg_temp_new();
tcg_gen_mov_i32(dest, src);
if ((insn & 0x38) == 0x08) {
@@ -3003,7 +2998,7 @@
}
if (op == 1) {
/* bsr */
- gen_push(s, tcg_const_i32(s->pc));
+ gen_push(s, tcg_constant_i32(s->pc));
}
if (op > 1) {
/* Bcc */
@@ -3076,7 +3071,7 @@
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
- TCGv tmp;
+ TCGv tmp, zero;
gen_flush_flags(s); /* compute old Z */
@@ -3085,14 +3080,15 @@
* (X, N) = dest - (src + X);
*/
- tmp = tcg_const_i32(0);
- tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
- tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
+ zero = tcg_constant_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
+ tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
/* Compute signed-overflow for subtract. */
+ tmp = tcg_temp_new();
tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
tcg_gen_xor_i32(tmp, dest, src);
tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
@@ -3151,9 +3147,10 @@
int val;
val = (insn >> 9) & 7;
- if (val == 0)
+ if (val == 0) {
val = -1;
- src = tcg_const_i32(val);
+ }
+ src = tcg_constant_i32(val);
gen_logic_cc(s, src, OS_LONG);
DEST_EA(env, insn, OS_LONG, src, NULL);
}
@@ -3279,7 +3276,7 @@
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
- TCGv tmp;
+ TCGv tmp, zero;
gen_flush_flags(s); /* compute old Z */
@@ -3288,13 +3285,14 @@
* (X, N) = src + dest + X;
*/
- tmp = tcg_const_i32(0);
- tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
- tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
+ zero = tcg_constant_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
/* Compute signed-overflow for addition. */
+ tmp = tcg_temp_new();
tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
tcg_gen_xor_i32(tmp, dest, src);
tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
@@ -3430,7 +3428,7 @@
tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
/* Note that C=0 if shift count is 0, and we get that for free. */
} else {
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
@@ -3452,7 +3450,7 @@
* V = ((s ^ t) & (-1 << (bits - 1))) != 0
*/
if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
- TCGv_i64 tt = tcg_const_i64(32);
+ TCGv_i64 tt = tcg_constant_i64(32);
/* if shift is greater than 32, use 32 */
tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
/* Sign extend the input to 64 bits; re-do the shift. */
@@ -3633,7 +3631,7 @@
{
TCGv X, shl, shr, shx, sz, zero;
- sz = tcg_const_i32(size);
+ sz = tcg_constant_i32(size);
shr = tcg_temp_new();
shl = tcg_temp_new();
@@ -3644,7 +3642,7 @@
tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */
/* shx = shx < 0 ? size : shx; */
- zero = tcg_const_i32(0);
+ zero = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
} else {
tcg_gen_mov_i32(shr, shift); /* shr = shift */
@@ -3723,7 +3721,7 @@
/* if shift == 0, register and X are not affected */
- zero = tcg_const_i32(0);
+ zero = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
@@ -3741,7 +3739,7 @@
tmp = 8;
}
- shift = tcg_const_i32(tmp);
+ shift = tcg_constant_i32(tmp);
if (insn & 8) {
rotate(DREG(insn, 0), shift, left, 32);
} else {
@@ -3766,7 +3764,7 @@
tmp = 8;
}
- shift = tcg_const_i32(tmp);
+ shift = tcg_constant_i32(tmp);
if (insn & 8) {
rotate(reg, shift, left, 8);
} else {
@@ -3790,7 +3788,7 @@
tmp = 8;
}
- shift = tcg_const_i32(tmp);
+ shift = tcg_constant_i32(tmp);
if (insn & 8) {
rotate(reg, shift, left, 16);
} else {
@@ -3905,7 +3903,7 @@
SRC_EA(env, src, OS_WORD, 0, &addr);
- shift = tcg_const_i32(1);
+ shift = tcg_constant_i32(1);
if (insn & 0x0200) {
rotate(src, shift, left, 16);
} else {
@@ -3999,12 +3997,12 @@
if (ext & 0x20) {
len = DREG(ext, 0);
} else {
- len = tcg_const_i32(extract32(ext, 0, 5));
+ len = tcg_constant_i32(extract32(ext, 0, 5));
}
if (ext & 0x800) {
ofs = DREG(ext, 6);
} else {
- ofs = tcg_const_i32(extract32(ext, 6, 5));
+ ofs = tcg_constant_i32(extract32(ext, 6, 5));
}
if (is_sign) {
@@ -4024,14 +4022,8 @@
TCGv src = DREG(insn, 0);
int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
int ofs = extract32(ext, 6, 5); /* big bit-endian */
- TCGv mask, tofs, tlen;
-
- tofs = NULL;
- tlen = NULL;
- if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
- tofs = tcg_temp_new();
- tlen = tcg_temp_new();
- }
+ TCGv mask, tofs = NULL, tlen = NULL;
+ bool is_bfffo = (insn & 0x0f00) == 0x0d00;
if ((ext & 0x820) == 0) {
/* Immediate width and offset. */
@@ -4042,45 +4034,49 @@
tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
}
tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
- mask = tcg_const_i32(ror32(maski, ofs));
- if (tofs) {
- tcg_gen_movi_i32(tofs, ofs);
- tcg_gen_movi_i32(tlen, len);
+
+ mask = tcg_constant_i32(ror32(maski, ofs));
+ if (is_bfffo) {
+ tofs = tcg_constant_i32(ofs);
+ tlen = tcg_constant_i32(len);
}
} else {
TCGv tmp = tcg_temp_new();
+
+ mask = tcg_temp_new();
if (ext & 0x20) {
/* Variable width */
tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
tcg_gen_andi_i32(tmp, tmp, 31);
- mask = tcg_const_i32(0x7fffffffu);
- tcg_gen_shr_i32(mask, mask, tmp);
- if (tlen) {
+ tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
+ if (is_bfffo) {
+ tlen = tcg_temp_new();
tcg_gen_addi_i32(tlen, tmp, 1);
}
} else {
/* Immediate width */
- mask = tcg_const_i32(0x7fffffffu >> (len - 1));
- if (tlen) {
- tcg_gen_movi_i32(tlen, len);
+ tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
+ if (is_bfffo) {
+ tlen = tcg_constant_i32(len);
}
}
+
if (ext & 0x800) {
/* Variable offset */
tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
tcg_gen_rotr_i32(mask, mask, tmp);
- if (tofs) {
- tcg_gen_mov_i32(tofs, tmp);
+ if (is_bfffo) {
+ tofs = tmp;
}
} else {
/* Immediate offset (and variable width) */
tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
tcg_gen_rotri_i32(mask, mask, ofs);
- if (tofs) {
- tcg_gen_movi_i32(tofs, ofs);
+ if (is_bfffo) {
+ tofs = tcg_constant_i32(ofs);
}
}
}
@@ -4122,12 +4118,12 @@
if (ext & 0x20) {
len = DREG(ext, 0);
} else {
- len = tcg_const_i32(extract32(ext, 0, 5));
+ len = tcg_constant_i32(extract32(ext, 0, 5));
}
if (ext & 0x800) {
ofs = DREG(ext, 6);
} else {
- ofs = tcg_const_i32(extract32(ext, 6, 5));
+ ofs = tcg_constant_i32(extract32(ext, 6, 5));
}
switch (insn & 0x0f00) {
@@ -4239,12 +4235,12 @@
if (ext & 0x20) {
len = DREG(ext, 0);
} else {
- len = tcg_const_i32(extract32(ext, 0, 5));
+ len = tcg_constant_i32(extract32(ext, 0, 5));
}
if (ext & 0x800) {
ofs = DREG(ext, 6);
} else {
- ofs = tcg_const_i32(extract32(ext, 6, 5));
+ ofs = tcg_constant_i32(extract32(ext, 6, 5));
}
gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
@@ -4377,7 +4373,7 @@
TCGv reg, addr;
reg = AREG(insn, 0);
- addr = tcg_const_i32(read_im32(env, s));
+ addr = tcg_constant_i32(read_im32(env, s));
if ((insn >> 3) & 1) {
/* MOVE16 (xxx).L, (Ay) */
@@ -4567,14 +4563,14 @@
} else {
reg = DREG(ext, 12);
}
- gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
gen_exit_tb(s);
}
DISAS_INSN(m68k_movec)
{
uint16_t ext;
- TCGv reg;
+ TCGv reg, creg;
if (IS_USER(s)) {
gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
@@ -4588,10 +4584,11 @@
} else {
reg = DREG(ext, 12);
}
+ creg = tcg_constant_i32(ext & 0xfff);
if (insn & 1) {
- gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_helper_m68k_movec_to(cpu_env, creg, reg);
} else {
- gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
+ gen_helper_m68k_movec_from(reg, cpu_env, creg);
}
gen_exit_tb(s);
}
@@ -4642,7 +4639,7 @@
return;
}
- opmode = tcg_const_i32((insn >> 3) & 3);
+ opmode = tcg_constant_i32((insn >> 3) & 3);
gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
}
@@ -4654,7 +4651,7 @@
gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
return;
}
- is_read = tcg_const_i32((insn >> 5) & 1);
+ is_read = tcg_constant_i32((insn >> 5) & 1);
gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
}
#endif
@@ -4824,7 +4821,7 @@
gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
return;
}
- tmp = tcg_const_i32(read_im32(env, s));
+ tmp = tcg_constant_i32(read_im32(env, s));
gen_store_fcr(s, tmp, mask);
return;
}
@@ -4961,7 +4958,7 @@
case 2:
if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
/* fmovecr */
- TCGv rom_offset = tcg_const_i32(opmode);
+ TCGv rom_offset = tcg_constant_i32(opmode);
cpu_dest = gen_fp_ptr(REG(ext, 7));
gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
return;
@@ -5185,7 +5182,7 @@
{
TCGv fpsr;
- c->v2 = tcg_const_i32(0);
+ c->v2 = tcg_constant_i32(0);
/* TODO: Raise BSUN exception. */
fpsr = tcg_temp_new();
gen_load_fcr(s, fpsr, M68K_FPSR);
@@ -5405,7 +5402,7 @@
if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
/* always write IDLE */
- TCGv idle = tcg_const_i32(0x41000000);
+ TCGv idle = tcg_constant_i32(0x41000000);
DEST_EA(env, insn, OS_LONG, idle, NULL);
} else {
disas_undef(env, s, insn);
@@ -5535,7 +5532,7 @@
/* Skip the accumulate if the value is already saturated. */
l1 = gen_new_label();
tmp = tcg_temp_new();
- gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
gen_op_jmp_nz32(tmp, l1);
}
#endif
@@ -5546,11 +5543,11 @@
tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
if (s->env->macsr & MACSR_FI)
- gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
else if (s->env->macsr & MACSR_SU)
- gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
else
- gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
/* Disabled because conditional branches clobber temporary vars. */
@@ -5569,7 +5566,7 @@
/* Skip the accumulate if the value is already saturated. */
l1 = gen_new_label();
tmp = tcg_temp_new();
- gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
gen_op_jmp_nz32(tmp, l1);
}
#endif
@@ -5578,18 +5575,18 @@
else
tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
if (s->env->macsr & MACSR_FI)
- gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
else if (s->env->macsr & MACSR_SU)
- gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
else
- gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
/* Disabled because conditional branches clobber temporary vars. */
if (l1 != -1)
gen_set_label(l1);
#endif
}
- gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
+ gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));
if (insn & 0x30) {
TCGv rw;
@@ -5639,8 +5636,8 @@
int src;
TCGv dest;
src = insn & 3;
- dest = tcg_const_i32((insn >> 9) & 3);
- gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
+ dest = tcg_constant_i32((insn >> 9) & 3);
+ gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
gen_mac_clear_flags();
gen_helper_mac_set_flags(cpu_env, dest);
}
@@ -5665,7 +5662,7 @@
TCGv reg;
TCGv acc;
reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
- acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
if (s->env->macsr & MACSR_FI)
gen_helper_get_mac_extf(reg, cpu_env, acc);
else
@@ -5700,7 +5697,7 @@
}
tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
gen_mac_clear_flags();
- gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
+ gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
}
DISAS_INSN(to_macsr)
@@ -5723,7 +5720,7 @@
TCGv val;
TCGv acc;
SRC_EA(env, val, OS_LONG, 0, NULL);
- acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
if (s->env->macsr & MACSR_FI)
gen_helper_set_mac_extf(cpu_env, val, acc);
else if (s->env->macsr & MACSR_SU)
diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h
index 5e54ea0..9770b0e 100644
--- a/target/microblaze/cpu-param.h
+++ b/target/microblaze/cpu-param.h
@@ -28,6 +28,5 @@
/* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 3
#endif
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index f66df02..88324d0 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -394,7 +394,7 @@
#define MMU_NOMMU_IDX 0
#define MMU_KERNEL_IDX 1
#define MMU_USER_IDX 2
-/* See NB_MMU_MODES further up the file. */
+/* See NB_MMU_MODES in cpu-defs.h. */
#include "exec/cpu-all.h"
diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h
index f4c7699..594c91a 100644
--- a/target/mips/cpu-param.h
+++ b/target/mips/cpu-param.h
@@ -29,6 +29,5 @@
#define TARGET_PAGE_BITS_VARY
#define TARGET_PAGE_BITS_MIN 12
#endif
-#define NB_MMU_MODES 4
#endif
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index 632895c..e8b193a 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -704,8 +704,8 @@
gen_base_offset_addr(ctx, t0, base, offset);
- t1 = tcg_const_tl(reglist);
- t2 = tcg_const_i32(ctx->mem_idx);
+ t1 = tcg_constant_tl(reglist);
+ t2 = tcg_constant_i32(ctx->mem_idx);
save_cpu_state(ctx, 1);
switch (opc) {
@@ -724,9 +724,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free_i32(t2);
}
@@ -1018,8 +1015,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
@@ -1067,7 +1062,6 @@
gen_load_gpr(t0, rt);
gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7);
- tcg_temp_free(t0);
}
break;
#endif
@@ -1276,7 +1270,6 @@
* mode.
*/
ctx->base.is_jmp = DISAS_STOP;
- tcg_temp_free(t0);
}
break;
case EI:
@@ -1293,7 +1286,6 @@
*/
gen_save_pc(ctx->base.pc_next + 4);
ctx->base.is_jmp = DISAS_EXIT;
- tcg_temp_free(t0);
}
break;
default:
diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c
index 1bcdbb1..220cd3b 100644
--- a/target/mips/tcg/msa_translate.c
+++ b/target/mips/tcg/msa_translate.c
@@ -217,8 +217,6 @@
/* if some bit is non-zero then some element is zero */
tcg_gen_setcondi_i64(cond, t0, t0, 0);
tcg_gen_trunc_i64_tl(tresult, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond)
@@ -237,7 +235,6 @@
tcg_gen_or_i64(t0, msa_wr_d[wt << 1], msa_wr_d[(wt << 1) + 1]);
tcg_gen_setcondi_i64(cond, t0, t0, 0);
tcg_gen_trunc_i64_tl(bcond, t0);
- tcg_temp_free_i64(t0);
ctx->btarget = ctx->base.pc_next + (sa << 2) + 4;
@@ -545,8 +542,6 @@
gen_load_gpr(telm, a->ws);
gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd));
- tcg_temp_free(telm);
-
return true;
}
@@ -563,8 +558,6 @@
gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws));
gen_store_gpr(telm, a->wd);
- tcg_temp_free(telm);
-
return true;
}
@@ -782,8 +775,6 @@
gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df);
gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr);
- tcg_temp_free(taddr);
-
return true;
}
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
index f52244e..bdd2070 100644
--- a/target/mips/tcg/mxu_translate.c
+++ b/target/mips/tcg/mxu_translate.c
@@ -513,8 +513,6 @@
} else if (XRa == 16) {
gen_store_mxu_cr(t0);
}
-
- tcg_temp_free(t0);
}
/*
@@ -537,8 +535,6 @@
}
gen_store_gpr(t0, Rb);
-
- tcg_temp_free(t0);
}
/*
@@ -613,9 +609,6 @@
}
gen_store_mxu_gpr(t0, XRa);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/*
@@ -664,11 +657,6 @@
}
gen_store_mxu_gpr(t3, XRa);
gen_store_mxu_gpr(t2, XRd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- tcg_temp_free(t3);
}
/*
@@ -741,11 +729,6 @@
}
gen_store_mxu_gpr(t3, XRa);
gen_store_mxu_gpr(t2, XRd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- tcg_temp_free(t3);
}
/*
@@ -821,15 +804,6 @@
gen_store_mxu_gpr(t0, XRd);
gen_store_mxu_gpr(t1, XRa);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- tcg_temp_free(t3);
- tcg_temp_free(t4);
- tcg_temp_free(t5);
- tcg_temp_free(t6);
- tcg_temp_free(t7);
}
/*
@@ -860,9 +834,6 @@
tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP));
gen_store_mxu_gpr(t1, XRa);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -1101,7 +1072,7 @@
uint32_t XRx = XRb ? XRb : XRc;
/* ...and do half-word-wise max/min with one operand 0 */
TCGv_i32 t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 t1 = tcg_constant_i32(0);
/* the left half-word first */
tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFFFF0000);
@@ -1125,9 +1096,6 @@
tcg_gen_shri_i32(t0, t0, 16);
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
} else if (unlikely(XRb == XRc)) {
/* both operands same -> just set destination to one of them */
tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
@@ -1161,9 +1129,6 @@
tcg_gen_shri_i32(t0, t0, 16);
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
}
@@ -1198,7 +1163,7 @@
uint32_t XRx = XRb ? XRb : XRc;
/* ...and do byte-wise max/min with one operand 0 */
TCGv_i32 t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 t1 = tcg_constant_i32(0);
int32_t i;
/* the leftmost byte (byte 3) first */
@@ -1226,9 +1191,6 @@
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
}
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
} else if (unlikely(XRb == XRc)) {
/* both operands same -> just set destination to one of them */
tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
@@ -1266,9 +1228,6 @@
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
}
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
}
@@ -1384,9 +1343,6 @@
tcg_gen_shri_i32(t1, t1, 24);
tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
break;
case MXU_OPTN3_PTN2:
@@ -1410,9 +1366,6 @@
tcg_gen_shri_i32(t1, t1, 16);
tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
break;
case MXU_OPTN3_PTN3:
@@ -1436,9 +1389,6 @@
tcg_gen_shri_i32(t1, t1, 8);
tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
break;
case MXU_OPTN3_PTN4:
@@ -1598,7 +1548,6 @@
}
gen_set_label(l_exit);
- tcg_temp_free(t_mxu_cr);
}
return true;
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index faf6d67..9398e28 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -1005,13 +1005,9 @@
tcg_gen_extr_i64_tl(tmp1, tmp2, tval);
}
gen_store_gpr(tmp1, reg1);
- tcg_temp_free(tmp1);
gen_store_gpr(tmp2, reg2);
- tcg_temp_free(tmp2);
tcg_gen_st_i64(tval, cpu_env, offsetof(CPUMIPSState, llval_wp));
- tcg_temp_free_i64(tval);
tcg_gen_st_tl(taddr, cpu_env, offsetof(CPUMIPSState, lladdr));
- tcg_temp_free(taddr);
}
static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
@@ -1084,9 +1080,6 @@
/* adjust stack pointer */
gen_adjust_sp(ctx, -u);
-
- tcg_temp_free(t0);
- tcg_temp_free(va);
}
static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
@@ -1110,9 +1103,6 @@
/* adjust stack pointer */
gen_adjust_sp(ctx, u);
-
- tcg_temp_free(t0);
- tcg_temp_free(va);
}
static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc,
@@ -1232,8 +1222,6 @@
if (insn_bytes == 2) {
ctx->hflags |= MIPS_HFLAG_B16;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_pool16c_nanomips_insn(DisasContext *ctx)
@@ -1358,7 +1346,6 @@
}
break;
}
- tcg_temp_free(t0);
#endif
} else {
gen_slt(ctx, OPC_SLTU, rd, rs, rt);
@@ -1381,10 +1368,6 @@
/* operands of same sign, result different sign */
tcg_gen_setcondi_tl(TCG_COND_LT, t0, t1, 0);
gen_store_gpr(t0, rd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
break;
case NM_MUL:
@@ -1427,7 +1410,6 @@
gen_load_gpr(t0, rt);
gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3));
- tcg_temp_free(t0);
}
break;
case NM_D_E_MT_VPE:
@@ -1467,8 +1449,6 @@
}
break;
}
-
- tcg_temp_free(t0);
}
break;
case NM_FORK:
@@ -1480,8 +1460,6 @@
gen_load_gpr(t0, rt);
gen_load_gpr(t1, rs);
gen_helper_fork(t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
case NM_MFTR:
@@ -1508,7 +1486,6 @@
gen_load_gpr(t0, rs);
gen_helper_yield(t0, cpu_env, t0);
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
}
break;
#endif
@@ -1557,11 +1534,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free_i32(t0);
-
- tcg_temp_free(v0_t);
- tcg_temp_free(v1_t);
}
@@ -1682,10 +1654,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(v0_t);
}
static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc,
@@ -1802,8 +1770,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free_i32(t0);
}
static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -1855,10 +1821,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_MULT:
@@ -1878,8 +1842,6 @@
tcg_gen_muls2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case NM_EXTRV_W:
@@ -1915,10 +1877,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_MULTU:
@@ -1938,8 +1898,6 @@
tcg_gen_mulu2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case NM_EXTRV_R_W:
@@ -1982,10 +1940,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_EXTRV_RS_W:
@@ -2027,10 +1983,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_EXTRV_S_H:
@@ -2045,12 +1999,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
- tcg_temp_free(v0_t);
- tcg_temp_free(v1_t);
}
static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -2162,7 +2110,6 @@
gen_load_gpr(tv0, rt);
gen_helper_insv(v0_t, cpu_env, v0_t, tv0);
gen_store_gpr(v0_t, ret);
- tcg_temp_free(tv0);
}
break;
case NM_RADDU_W_QB:
@@ -2188,9 +2135,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(v0_t);
- tcg_temp_free(t0);
}
static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -2243,8 +2187,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(rs_t);
}
@@ -2304,7 +2246,6 @@
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
- tcg_temp_free(t0);
}
break;
case NM_EI:
@@ -2317,7 +2258,6 @@
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
- tcg_temp_free(t0);
}
break;
case NM_RDPGPR:
@@ -2374,7 +2314,7 @@
/* Unconditional branch */
} else if (rt == 0 && imm != 0) {
/* Treat as NOP */
- goto out;
+ return;
} else {
cond = TCG_COND_EQ;
}
@@ -2384,12 +2324,12 @@
check_nms(ctx);
if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) {
gen_reserved_instruction(ctx);
- goto out;
+ return;
} else if (rt == 0 && opc == NM_BBEQZC) {
/* Unconditional branch */
} else if (rt == 0 && opc == NM_BBNEZC) {
/* Treat as NOP */
- goto out;
+ return;
} else {
tcg_gen_shri_tl(t0, t0, imm);
tcg_gen_andi_tl(t0, t0, 1);
@@ -2404,7 +2344,7 @@
case NM_BNEIC:
if (rt == 0 && imm == 0) {
/* Treat as NOP */
- goto out;
+ return;
} else if (rt == 0 && imm != 0) {
/* Unconditional branch */
} else {
@@ -2434,7 +2374,7 @@
default:
MIPS_INVAL("Immediate Value Compact branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* branch completion */
@@ -2455,10 +2395,6 @@
gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */
@@ -2488,9 +2424,6 @@
/* unconditional branch to register */
tcg_gen_mov_tl(cpu_PC, btarget);
tcg_gen_lookup_and_goto_ptr();
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* nanoMIPS Branches */
@@ -2540,14 +2473,12 @@
gen_load_gpr(tbase, rt);
tcg_gen_movi_tl(toffset, offset);
gen_op_addr_add(ctx, btarget, tbase, toffset);
- tcg_temp_free(tbase);
- tcg_temp_free(toffset);
}
break;
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
if (bcond_compute == 0) {
@@ -2559,7 +2490,7 @@
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
} else {
/* Conditional compact branch */
@@ -2620,7 +2551,7 @@
default:
MIPS_INVAL("Compact conditional branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* branch completion */
@@ -2633,10 +2564,6 @@
gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -2664,15 +2591,12 @@
default:
MIPS_INVAL("cp1 cond branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
tcg_gen_trunc_i64_tl(bcond, t0);
ctx->btarget = btarget;
-
-out:
- tcg_temp_free_i64(t0);
}
@@ -2709,7 +2633,7 @@
break;
default:
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
}
gen_op_addr_add(ctx, t0, t0, t1);
@@ -2799,10 +2723,6 @@
gen_reserved_instruction(ctx);
break;
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_pool32f_nanomips_insn(DisasContext *ctx)
@@ -3439,21 +3359,19 @@
case 0:
/* PRECR_SRA_PH_W */
{
- TCGv_i32 sa_t = tcg_const_i32(rd);
+ TCGv_i32 sa_t = tcg_constant_i32(rd);
gen_helper_precr_sra_ph_w(v1_t, sa_t, v1_t,
cpu_gpr[rt]);
gen_store_gpr(v1_t, rt);
- tcg_temp_free_i32(sa_t);
}
break;
case 1:
/* PRECR_SRA_R_PH_W */
{
- TCGv_i32 sa_t = tcg_const_i32(rd);
+ TCGv_i32 sa_t = tcg_constant_i32(rd);
gen_helper_precr_sra_r_ph_w(v1_t, sa_t, v1_t,
cpu_gpr[rt]);
gen_store_gpr(v1_t, rt);
- tcg_temp_free_i32(sa_t);
}
break;
}
@@ -3536,8 +3454,6 @@
tcg_gen_movi_tl(tv0, rd >> 3);
tcg_gen_movi_tl(tv1, imm);
gen_helper_shilo(tv0, tv1, cpu_env);
- tcg_temp_free(tv1);
- tcg_temp_free(tv0);
}
break;
case NM_MULEQ_S_W_PHL:
@@ -3652,10 +3568,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(v2_t);
- tcg_temp_free(v1_t);
- tcg_temp_free(t0);
}
static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
@@ -3827,7 +3739,6 @@
tcg_gen_movi_tl(t0, addr);
tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL);
- tcg_temp_free(t0);
}
break;
case NM_SWPC48:
@@ -3844,9 +3755,6 @@
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
default:
@@ -3908,8 +3816,6 @@
gen_load_gpr(t0, rs);
tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, imm);
gen_store_gpr(t0, rt);
-
- tcg_temp_free(t0);
}
break;
case NM_ADDIUNEG:
@@ -3958,18 +3864,15 @@
check_nms(ctx);
if (rt != 0) {
TCGv t0 = tcg_temp_new();
- TCGv_i32 shift = tcg_const_i32(extract32(ctx->opcode, 0, 5));
- TCGv_i32 shiftx = tcg_const_i32(extract32(ctx->opcode, 7, 4)
- << 1);
- TCGv_i32 stripe = tcg_const_i32(extract32(ctx->opcode, 6, 1));
+ TCGv_i32 shift =
+ tcg_constant_i32(extract32(ctx->opcode, 0, 5));
+ TCGv_i32 shiftx =
+ tcg_constant_i32(extract32(ctx->opcode, 7, 4) << 1);
+ TCGv_i32 stripe =
+ tcg_constant_i32(extract32(ctx->opcode, 6, 1));
gen_load_gpr(t0, rs);
gen_helper_rotx(cpu_gpr[rt], t0, shift, shiftx, stripe);
- tcg_temp_free(t0);
-
- tcg_temp_free_i32(shift);
- tcg_temp_free_i32(shiftx);
- tcg_temp_free_i32(stripe);
}
break;
case NM_P_INS:
@@ -4239,8 +4142,6 @@
MO_UNALN);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
case NM_P_LL:
@@ -4432,8 +4333,6 @@
}
counter++;
}
- tcg_temp_free(va);
- tcg_temp_free(t1);
}
break;
default:
@@ -4454,7 +4353,6 @@
gen_load_gpr(t0, rt);
tcg_gen_mov_tl(cpu_gpr[rd], t0);
gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s);
- tcg_temp_free(t0);
}
break;
case NM_P_BAL:
@@ -4604,9 +4502,8 @@
/* make sure instructions are on a halfword boundary */
if (ctx->base.pc_next & 0x1) {
- TCGv tmp = tcg_const_tl(ctx->base.pc_next);
+ TCGv tmp = tcg_constant_tl(ctx->base.pc_next);
tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
- tcg_temp_free(tmp);
generate_exception_end(ctx, EXCP_AdEL);
return 2;
}
@@ -4941,8 +4838,6 @@
gen_load_gpr(t1, rt);
tcg_gen_mov_tl(cpu_gpr[rd], t0);
tcg_gen_mov_tl(cpu_gpr[re], t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
default:
diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c
index 6a207d2..103c304 100644
--- a/target/mips/tcg/octeon_translate.c
+++ b/target/mips/tcg/octeon_translate.c
@@ -40,8 +40,6 @@
ctx->hflags |= MIPS_HFLAG_BC;
ctx->btarget = ctx->base.pc_next + 4 + a->offset * 4;
ctx->hflags |= MIPS_HFLAG_BDS32;
-
- tcg_temp_free(t0);
return true;
}
@@ -61,10 +59,6 @@
tcg_gen_add_tl(t0, t0, t1);
tcg_gen_andi_i64(cpu_gpr[a->rd], t0, 0xff);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
@@ -83,10 +77,6 @@
gen_load_gpr(t1, a->rt);
tcg_gen_mul_i64(cpu_gpr[a->rd], t0, t1);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
@@ -103,8 +93,6 @@
gen_load_gpr(t0, a->rs);
tcg_gen_sextract_tl(t0, t0, a->p, a->lenm1 + 1);
gen_store_gpr(t0, a->rt);
- tcg_temp_free(t0);
-
return true;
}
@@ -121,8 +109,6 @@
gen_load_gpr(t0, a->rs);
tcg_gen_deposit_z_tl(t0, t0, a->p, a->lenm1 + 1);
gen_store_gpr(t0, a->rt);
- tcg_temp_free(t0);
-
return true;
}
@@ -142,8 +128,6 @@
}
tcg_gen_ctpop_tl(t0, t0);
gen_store_gpr(t0, a->rd);
- tcg_temp_free(t0);
-
return true;
}
@@ -167,10 +151,6 @@
} else {
tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr[a->rd], t1, t0);
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
@@ -194,8 +174,5 @@
} else {
tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], t0, imm);
}
-
- tcg_temp_free(t0);
-
return true;
}
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 24993bc..1fb4ef7 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -1274,11 +1274,8 @@
tcg_gen_add_ptr(addr, cpu_env, addr);
tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from);
- tcg_temp_free_ptr(addr);
- tcg_temp_free_i32(t2);
}
gen_store_gpr(t0, to);
- tcg_temp_free(t0);
}
static inline void gen_store_srsgpr(int from, int to)
@@ -1297,9 +1294,6 @@
tcg_gen_add_ptr(addr, cpu_env, addr);
tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to);
- tcg_temp_free_ptr(addr);
- tcg_temp_free_i32(t2);
- tcg_temp_free(t0);
}
}
@@ -1396,7 +1390,6 @@
t64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t64, t);
tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32);
- tcg_temp_free_i64(t64);
}
static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
@@ -1414,7 +1407,6 @@
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t64, t);
tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
- tcg_temp_free_i64(t64);
} else {
gen_store_fpr32(ctx, t, reg | 1);
}
@@ -1439,7 +1431,6 @@
t0 = tcg_temp_new_i64();
tcg_gen_shri_i64(t0, t, 32);
tcg_gen_deposit_i64(fpu_f64[reg | 1], fpu_f64[reg | 1], t0, 0, 32);
- tcg_temp_free_i64(t0);
}
}
@@ -1852,8 +1843,6 @@
default: \
abort(); \
} \
- tcg_temp_free_i##bits(fp0); \
- tcg_temp_free_i##bits(fp1); \
}
FOP_CONDS(, 0, d, FMT_D, 64)
@@ -1946,8 +1935,6 @@
abort(); \
} \
STORE; \
- tcg_temp_free_i ## bits(fp0); \
- tcg_temp_free_i ## bits(fp1); \
}
FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd))
@@ -1967,7 +1954,6 @@
tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
- tcg_temp_free(t0); \
}
#else
#define OP_LD_ATOMIC(insn, fname) \
@@ -2009,11 +1995,65 @@
return pc;
}
+/* LWL or LDL, depending on MemOp. */
+static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr,
+ int mem_idx, MemOp mop)
+{
+ int sizem1 = memop_size(mop) - 1;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, addr, sizem1);
+ if (!cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, sizem1);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, addr, ~sizem1);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop);
+ tcg_gen_shl_tl(t0, t0, t1);
+ tcg_gen_shl_tl(t1, tcg_constant_tl(-1), t1);
+ tcg_gen_andc_tl(t1, reg, t1);
+ tcg_gen_or_tl(reg, t0, t1);
+}
+
+/* LWR or LDR, depending on MemOp. */
+static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr,
+ int mem_idx, MemOp mop)
+{
+ int size = memop_size(mop);
+ int sizem1 = size - 1;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, addr, sizem1);
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, sizem1);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, addr, ~sizem1);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, size * 8 - 1);
+ tcg_gen_shl_tl(t1, tcg_constant_tl(~1), t1);
+ tcg_gen_and_tl(t1, reg, t1);
+ tcg_gen_or_tl(reg, t0, t1);
+}
+
/* Load */
static void gen_ld(DisasContext *ctx, uint32_t opc,
int rt, int base, int offset)
{
- TCGv t0, t1, t2;
+ TCGv t0, t1;
int mem_idx = ctx->mem_idx;
if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F |
@@ -2048,65 +2088,26 @@
break;
case OPC_LDL:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_gpr(t0, rt);
+ gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ);
+ gen_store_gpr(t1, rt);
break;
case OPC_LDR:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 63);
- t2 = tcg_const_tl(0xfffffffffffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_gpr(t0, rt);
+ gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ);
+ gen_store_gpr(t1, rt);
break;
case OPC_LDPC:
- t1 = tcg_const_tl(pc_relative_pc(ctx));
+ t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_temp_free(t1);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
gen_store_gpr(t0, rt);
break;
#endif
case OPC_LWPC:
- t1 = tcg_const_tl(pc_relative_pc(ctx));
+ t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_temp_free(t1);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL);
gen_store_gpr(t0, rt);
break;
@@ -2153,57 +2154,20 @@
/* fall through */
case OPC_LWL:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- tcg_gen_ext32s_tl(t0, t0);
- gen_store_gpr(t0, rt);
+ gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_gpr(t1, rt);
break;
case OPC_LWRE:
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LWR:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 31);
- t2 = tcg_const_tl(0xfffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- tcg_gen_ext32s_tl(t0, t0);
- gen_store_gpr(t0, rt);
+ gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_gpr(t1, rt);
break;
case OPC_LLE:
mem_idx = MIPS_HFLAG_UM;
@@ -2214,7 +2178,6 @@
gen_store_gpr(t0, rt);
break;
}
- tcg_temp_free(t0);
}
/* Store */
@@ -2273,8 +2236,6 @@
gen_helper_0e2i(swr, t1, t0, mem_idx);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -2291,7 +2252,6 @@
/* compare the address against that of the preceding LL */
gen_base_offset_addr(ctx, addr, base, offset);
tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1);
- tcg_temp_free(addr);
tcg_gen_movi_tl(t0, 0);
gen_store_gpr(t0, rt);
tcg_gen_br(done);
@@ -2304,10 +2264,8 @@
eva ? MIPS_HFLAG_UM : ctx->mem_idx, tcg_mo);
tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval);
gen_store_gpr(t0, rt);
- tcg_temp_free(val);
gen_set_label(done);
- tcg_temp_free(t0);
}
/* Load and store */
@@ -2325,7 +2283,6 @@
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, ft);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SWC1:
@@ -2334,7 +2291,6 @@
gen_load_fpr32(ctx, fp0, ft);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
ctx->default_tcg_memop_mask);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_LDC1:
@@ -2343,7 +2299,6 @@
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, fp0, ft);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SDC1:
@@ -2352,7 +2307,6 @@
gen_load_fpr64(ctx, fp0, ft);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free_i64(fp0);
}
break;
default:
@@ -2381,7 +2335,6 @@
} else {
generate_exception_err(ctx, EXCP_CpU, 1);
}
- tcg_temp_free(t0);
}
/* Arithmetic with immediate operand */
@@ -2412,15 +2365,12 @@
tcg_gen_xori_tl(t1, t1, ~uimm);
tcg_gen_xori_tl(t2, t0, uimm);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
tcg_gen_ext32s_tl(t0, t0);
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
}
break;
case OPC_ADDIU:
@@ -2445,14 +2395,11 @@
tcg_gen_xori_tl(t1, t1, ~uimm);
tcg_gen_xori_tl(t2, t0, uimm);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
}
break;
case OPC_DADDIU:
@@ -2535,7 +2482,6 @@
tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm);
break;
}
- tcg_temp_free(t0);
}
/* Shifts with immediate operand */
@@ -2575,7 +2521,6 @@
tcg_gen_trunc_tl_i32(t1, t0);
tcg_gen_rotri_i32(t1, t1, uimm);
tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
- tcg_temp_free_i32(t1);
} else {
tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
}
@@ -2611,7 +2556,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
/* Arithmetic */
@@ -2642,14 +2586,11 @@
tcg_gen_xor_tl(t1, t1, t2);
tcg_gen_xor_tl(t2, t0, t2);
tcg_gen_andc_tl(t1, t2, t1);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_ADDU:
@@ -2678,9 +2619,7 @@
tcg_gen_xor_tl(t2, t1, t2);
tcg_gen_xor_tl(t1, t0, t1);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/*
* operands of different sign, first operand and the result
* of different sign
@@ -2688,7 +2627,6 @@
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_SUBU:
@@ -2718,14 +2656,11 @@
tcg_gen_xor_tl(t1, t1, t2);
tcg_gen_xor_tl(t2, t0, t2);
tcg_gen_andc_tl(t1, t2, t1);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_DADDU:
@@ -2752,9 +2687,7 @@
tcg_gen_xor_tl(t2, t1, t2);
tcg_gen_xor_tl(t1, t0, t1);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/*
* Operands of different sign, first operand and result different
* sign.
@@ -2762,7 +2695,6 @@
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_DSUBU:
@@ -2801,7 +2733,7 @@
t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
- t1 = tcg_const_tl(0);
+ t1 = tcg_constant_tl(0);
t2 = tcg_temp_new();
gen_load_gpr(t2, rs);
switch (opc) {
@@ -2818,9 +2750,6 @@
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1);
break;
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
/* Logic */
@@ -2899,8 +2828,6 @@
tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* Shifts */
@@ -2947,8 +2874,6 @@
tcg_gen_andi_i32(t2, t2, 0x1f);
tcg_gen_rotr_i32(t2, t3, t2);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
#if defined(TARGET_MIPS64)
@@ -2970,8 +2895,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* Arithmetic on HI/LO registers */
@@ -3041,10 +2964,9 @@
static inline void gen_r6_ld(target_long addr, int reg, int memidx,
MemOp memop)
{
- TCGv t0 = tcg_const_tl(addr);
- tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(t0, tcg_constant_tl(addr), memidx, memop);
gen_store_gpr(t0, reg);
- tcg_temp_free(t0);
}
static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
@@ -3141,8 +3063,6 @@
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_MOD:
@@ -3160,34 +3080,28 @@
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_MODU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_MUL:
@@ -3198,8 +3112,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_mul_i32(t2, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case R6_OPC_MUH:
@@ -3210,8 +3122,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_muls2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case R6_OPC_MULU:
@@ -3222,8 +3132,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_mul_i32(t2, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case R6_OPC_MUHU:
@@ -3234,8 +3142,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_mulu2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
#if defined(TARGET_MIPS64)
@@ -3251,8 +3157,6 @@
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMOD:
@@ -3267,28 +3171,22 @@
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DDIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMODU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMUL:
@@ -3298,7 +3196,6 @@
{
TCGv t2 = tcg_temp_new();
tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMULU:
@@ -3308,18 +3205,14 @@
{
TCGv t2 = tcg_temp_new();
tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
}
break;
#endif
default:
MIPS_INVAL("r6 mul/div");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
#if defined(TARGET_MIPS64)
@@ -3351,14 +3244,12 @@
tcg_gen_rem_tl(cpu_HI[1], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case MMI_OPC_DIVU1:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
@@ -3366,18 +3257,13 @@
tcg_gen_remu_tl(cpu_HI[1], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
default:
MIPS_INVAL("div1 TX79");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
#endif
@@ -3414,14 +3300,12 @@
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_DIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
@@ -3429,8 +3313,6 @@
tcg_gen_remu_tl(cpu_HI[acc], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_MULT:
@@ -3442,8 +3324,6 @@
tcg_gen_muls2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case OPC_MULTU:
@@ -3455,8 +3335,6 @@
tcg_gen_mulu2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
#if defined(TARGET_MIPS64)
@@ -3473,19 +3351,15 @@
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_div_tl(cpu_LO[acc], t0, t1);
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_DDIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_divu_i64(cpu_LO[acc], t0, t1);
tcg_gen_remu_i64(cpu_HI[acc], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_DMULT:
@@ -3505,10 +3379,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case OPC_MADDU:
@@ -3523,10 +3395,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case OPC_MSUB:
@@ -3539,10 +3409,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case OPC_MSUBU:
@@ -3557,20 +3425,15 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
default:
MIPS_INVAL("mul/div");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/*
@@ -3625,8 +3488,6 @@
}
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case MMI_OPC_MULTU1:
@@ -3644,8 +3505,6 @@
}
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case MMI_OPC_MADD1:
@@ -3661,13 +3520,11 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
if (rd) {
gen_move_low32(cpu_gpr[rd], t2);
}
- tcg_temp_free_i64(t2);
}
break;
case MMI_OPC_MADDU1:
@@ -3685,24 +3542,18 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
if (rd) {
gen_move_low32(cpu_gpr[rd], t2);
}
- tcg_temp_free_i64(t2);
}
break;
default:
MIPS_INVAL("mul/madd TXx9");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
-
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_cl(DisasContext *ctx, uint32_t opc,
@@ -3924,9 +3775,6 @@
break;
#endif
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* Loongson multimedia instructions */
@@ -4221,7 +4069,6 @@
tcg_gen_xor_i64(t1, t1, t2);
tcg_gen_xor_i64(t2, t2, t0);
tcg_gen_andc_i64(t1, t2, t1);
- tcg_temp_free_i64(t2);
tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(lab);
@@ -4242,7 +4089,6 @@
tcg_gen_xor_i64(t1, t1, t2);
tcg_gen_xor_i64(t2, t2, t0);
tcg_gen_and_i64(t1, t1, t2);
- tcg_temp_free_i64(t2);
tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(lab);
@@ -4284,12 +4130,8 @@
tcg_gen_extrl_i64_i32(t32, t64);
tcg_gen_deposit_i32(fpu_fcr31, fpu_fcr31, t32,
get_fp_bit(cc), 1);
-
- tcg_temp_free_i32(t32);
- tcg_temp_free_i64(t64);
}
- goto no_rd;
- break;
+ return;
default:
MIPS_INVAL("loongson_cp2");
gen_reserved_instruction(ctx);
@@ -4297,16 +4139,12 @@
}
gen_store_fpr64(ctx, t0, rd);
-
-no_rd:
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static void gen_loongson_lswc2(DisasContext *ctx, int rt,
int rs, int rd)
{
- TCGv t0, t1, t2;
+ TCGv t0, t1;
TCGv_i32 fp0;
#if defined(TARGET_MIPS64)
int lsq_rt1 = ctx->opcode & 0x1f;
@@ -4328,7 +4166,6 @@
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rt);
gen_store_gpr(t0, lsq_rt1);
- tcg_temp_free(t1);
break;
case OPC_GSLQC1:
check_cp1_enabled(ctx);
@@ -4341,7 +4178,6 @@
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t1, rt);
gen_store_fpr64(ctx, t0, lsq_rt1);
- tcg_temp_free(t1);
break;
case OPC_GSSQ:
t1 = tcg_temp_new();
@@ -4353,7 +4189,6 @@
gen_load_gpr(t1, lsq_rt1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
case OPC_GSSQC1:
check_cp1_enabled(ctx);
@@ -4366,7 +4201,6 @@
gen_load_fpr64(ctx, t1, lsq_rt1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#endif
case OPC_GSSHFL:
@@ -4374,109 +4208,41 @@
case OPC_GSLWLC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
- t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, rt);
+ t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
-#if defined(TARGET_MIPS64)
- tcg_gen_extrl_i64_i32(fp0, t0);
-#else
- tcg_gen_ext32s_tl(fp0, t0);
-#endif
+ gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
- tcg_temp_free_i32(fp0);
break;
case OPC_GSLWRC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
- t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 31);
- t2 = tcg_const_tl(0xfffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, rt);
+ t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
-#if defined(TARGET_MIPS64)
- tcg_gen_extrl_i64_i32(fp0, t0);
-#else
- tcg_gen_ext32s_tl(fp0, t0);
-#endif
+ gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
- tcg_temp_free_i32(fp0);
break;
#if defined(TARGET_MIPS64)
case OPC_GSLDLC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_fpr64(ctx, t0, rt);
+ gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+ gen_store_fpr64(ctx, t1, rt);
break;
case OPC_GSLDRC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 63);
- t2 = tcg_const_tl(0xfffffffffffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_fpr64(ctx, t0, rt);
+ gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+ gen_store_fpr64(ctx, t1, rt);
break;
#endif
default:
@@ -4495,8 +4261,6 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t1, fp0);
gen_helper_0e2i(swl, t1, t0, ctx->mem_idx);
- tcg_temp_free_i32(fp0);
- tcg_temp_free(t1);
break;
case OPC_GSSWRC1:
check_cp1_enabled(ctx);
@@ -4506,8 +4270,6 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t1, fp0);
gen_helper_0e2i(swr, t1, t0, ctx->mem_idx);
- tcg_temp_free_i32(fp0);
- tcg_temp_free(t1);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDLC1:
@@ -4516,7 +4278,6 @@
gen_base_offset_addr(ctx, t0, rs, shf_offset);
gen_load_fpr64(ctx, t1, rt);
gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx);
- tcg_temp_free(t1);
break;
case OPC_GSSDRC1:
check_cp1_enabled(ctx);
@@ -4524,7 +4285,6 @@
gen_base_offset_addr(ctx, t0, rs, shf_offset);
gen_load_fpr64(ctx, t1, rt);
gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx);
- tcg_temp_free(t1);
break;
#endif
default:
@@ -4538,7 +4298,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
}
/* Loongson EXT LDC2/SDC2 */
@@ -4633,7 +4392,6 @@
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, rt);
- tcg_temp_free_i32(fp0);
break;
#if defined(TARGET_MIPS64)
case OPC_GSLDXC1:
@@ -4650,21 +4408,18 @@
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_SB);
- tcg_temp_free(t1);
break;
case OPC_GSSHX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
case OPC_GSSWX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDX:
@@ -4672,7 +4427,6 @@
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#endif
case OPC_GSSWXC1:
@@ -4680,7 +4434,6 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
ctx->default_tcg_memop_mask);
- tcg_temp_free_i32(fp0);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDXC1:
@@ -4688,14 +4441,11 @@
gen_load_fpr64(ctx, t1, rt);
tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#endif
default:
break;
}
-
- tcg_temp_free(t0);
}
/* Traps */
@@ -4805,8 +4555,6 @@
generate_exception(ctx, EXCP_TRAP);
gen_set_label(l1);
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
@@ -5080,8 +4828,6 @@
if (insn_bytes == 2) {
ctx->hflags |= MIPS_HFLAG_B16;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -5150,13 +4896,9 @@
fail:
MIPS_INVAL("bitops");
gen_reserved_instruction(ctx);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return;
}
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd)
@@ -5174,15 +4916,13 @@
case OPC_WSBH:
{
TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_const_tl(0x00FF00FF);
+ TCGv t2 = tcg_constant_tl(0x00FF00FF);
tcg_gen_shri_tl(t1, t0, 8);
tcg_gen_and_tl(t1, t1, t2);
tcg_gen_and_tl(t0, t0, t2);
tcg_gen_shli_tl(t0, t0, 8);
tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t2);
- tcg_temp_free(t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
}
break;
@@ -5196,21 +4936,19 @@
case OPC_DSBH:
{
TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL);
+ TCGv t2 = tcg_constant_tl(0x00FF00FF00FF00FFULL);
tcg_gen_shri_tl(t1, t0, 8);
tcg_gen_and_tl(t1, t1, t2);
tcg_gen_and_tl(t0, t0, t2);
tcg_gen_shli_tl(t0, t0, 8);
tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
- tcg_temp_free(t1);
}
break;
case OPC_DSHD:
{
TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL);
+ TCGv t2 = tcg_constant_tl(0x0000FFFF0000FFFFULL);
tcg_gen_shri_tl(t1, t0, 16);
tcg_gen_and_tl(t1, t1, t2);
@@ -5220,18 +4958,14 @@
tcg_gen_shri_tl(t1, t0, 32);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
- tcg_temp_free(t1);
}
break;
#endif
default:
MIPS_INVAL("bsfhl");
gen_reserved_instruction(ctx);
- tcg_temp_free(t0);
return;
}
- tcg_temp_free(t0);
}
static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs,
@@ -5270,7 +5004,6 @@
tcg_gen_concat_tl_i64(t2, t1, t0);
tcg_gen_shri_i64(t2, t2, 32 - bits);
gen_move_low32(cpu_gpr[rd], t2);
- tcg_temp_free_i64(t2);
}
break;
#if defined(TARGET_MIPS64)
@@ -5281,10 +5014,7 @@
break;
#endif
}
- tcg_temp_free(t1);
}
-
- tcg_temp_free(t0);
}
void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp)
@@ -5311,7 +5041,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
#ifndef CONFIG_USER_ONLY
@@ -5329,8 +5058,6 @@
tcg_gen_concat32_i64(t1, t1, t0);
#endif
tcg_gen_st_i64(t1, cpu_env, off);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
}
static inline void gen_mthc0_store64(TCGv arg, target_ulong off)
@@ -5342,8 +5069,6 @@
tcg_gen_ld_i64(t1, cpu_env, off);
tcg_gen_concat32_i64(t1, t1, t0);
tcg_gen_st_i64(t1, cpu_env, off);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
}
static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off)
@@ -5357,7 +5082,6 @@
tcg_gen_shri_i64(t0, t0, 32);
#endif
gen_move_low32(arg, t0);
- tcg_temp_free_i64(t0);
}
static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift)
@@ -5367,7 +5091,6 @@
tcg_gen_ld_i64(t0, cpu_env, off);
tcg_gen_shri_i64(t0, t0, 32 + shift);
gen_move_low32(arg, t0);
- tcg_temp_free_i64(t0);
}
static inline void gen_mfc0_load32(TCGv arg, target_ulong off)
@@ -5376,7 +5099,6 @@
tcg_gen_ld_i32(t0, cpu_env, off);
tcg_gen_ext_i32_tl(arg, t0);
- tcg_temp_free_i32(t0);
}
static inline void gen_mfc0_load64(TCGv arg, target_ulong off)
@@ -5391,7 +5113,6 @@
tcg_gen_trunc_tl_i32(t0, arg);
tcg_gen_st_i32(t0, cpu_env, off);
- tcg_temp_free_i32(t0);
}
#define CP0_CHECK(c) \
@@ -5713,7 +5434,6 @@
}
#endif
gen_move_low32(arg, tmp);
- tcg_temp_free_i64(tmp);
}
register_name = "EntryLo0";
break;
@@ -5771,7 +5491,6 @@
}
#endif
gen_move_low32(arg, tmp);
- tcg_temp_free_i64(tmp);
}
register_name = "EntryLo1";
break;
@@ -6300,7 +6019,6 @@
TCGv_i64 tmp = tcg_temp_new_i64();
tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo));
gen_move_low32(arg, tmp);
- tcg_temp_free_i64(tmp);
}
register_name = "TagLo";
break;
@@ -8741,7 +8459,7 @@
case 5:
case 6:
case 7:
- gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel));
+ gen_helper_mftc0_configx(t0, cpu_env, tcg_constant_tl(sel));
break;
default:
goto die;
@@ -8821,13 +8539,11 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
} else {
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32h(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
}
break;
case 3:
@@ -8844,11 +8560,9 @@
}
trace_mips_translate_tr("mftr", rt, u, sel, h);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
return;
die:
- tcg_temp_free(t0);
LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h);
gen_reserved_instruction(ctx);
}
@@ -9025,13 +8739,11 @@
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, rd);
- tcg_temp_free_i32(fp0);
} else {
TCGv_i32 fp0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32h(ctx, fp0, rd);
- tcg_temp_free_i32(fp0);
}
break;
case 3:
@@ -9049,11 +8761,9 @@
}
}
trace_mips_translate_tr("mttr", rd, u, sel, h);
- tcg_temp_free(t0);
return;
die:
- tcg_temp_free(t0);
LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h);
gen_reserved_instruction(ctx);
}
@@ -9079,7 +8789,6 @@
gen_load_gpr(t0, rt);
gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7);
- tcg_temp_free(t0);
}
opn = "mtc0";
break;
@@ -9100,7 +8809,6 @@
gen_load_gpr(t0, rt);
gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7);
- tcg_temp_free(t0);
}
opn = "dmtc0";
break;
@@ -9120,7 +8828,6 @@
TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7);
- tcg_temp_free(t0);
}
opn = "mthc0";
break;
@@ -9254,7 +8961,7 @@
if ((ctx->insn_flags & ISA_MIPS_R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) {
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
if (cc != 0) {
@@ -9294,7 +9001,6 @@
tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
tcg_gen_nand_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9305,7 +9011,6 @@
tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
tcg_gen_or_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9320,7 +9025,6 @@
tcg_gen_and_i32(t0, t0, t1);
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3));
tcg_gen_nand_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9335,7 +9039,6 @@
tcg_gen_or_i32(t0, t0, t1);
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3));
tcg_gen_or_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9345,12 +9048,10 @@
default:
MIPS_INVAL("cp1 cond branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
ctx->btarget = btarget;
ctx->hflags |= MIPS_HFLAG_BDS32;
- out:
- tcg_temp_free_i32(t0);
}
/* R6 CP1 Branches */
@@ -9367,7 +9068,7 @@
"\n", ctx->base.pc_next);
#endif
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
gen_load_fpr64(ctx, t0, ft);
@@ -9387,7 +9088,7 @@
default:
MIPS_INVAL("cp1 cond branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
tcg_gen_trunc_i64_tl(bcond, t0);
@@ -9402,9 +9103,6 @@
ctx->hflags |= MIPS_HFLAG_BDS32;
break;
}
-
-out:
- tcg_temp_free_i64(t0);
}
/* Coprocessor 1 (FPU) */
@@ -9632,7 +9330,6 @@
gen_load_fpr32(ctx, fp0, fs);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
}
gen_store_gpr(t0, rt);
break;
@@ -9643,7 +9340,6 @@
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, fs);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CFC1:
@@ -9673,7 +9369,6 @@
gen_load_fpr32h(ctx, fp0, fs);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
}
gen_store_gpr(t0, rt);
break;
@@ -9684,17 +9379,13 @@
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32h(ctx, fp0, fs);
- tcg_temp_free_i32(fp0);
}
break;
default:
MIPS_INVAL("cp1 move");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
-
- out:
- tcg_temp_free(t0);
}
static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf)
@@ -9718,7 +9409,6 @@
t0 = tcg_temp_new_i32();
tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
tcg_gen_brcondi_i32(cond, t0, 0, l1);
- tcg_temp_free_i32(t0);
gen_load_gpr(cpu_gpr[rd], rs);
gen_set_label(l1);
}
@@ -9741,7 +9431,6 @@
gen_load_fpr32(ctx, t0, fs);
gen_store_fpr32(ctx, t0, fd);
gen_set_label(l1);
- tcg_temp_free_i32(t0);
}
static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc,
@@ -9760,11 +9449,9 @@
tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
tcg_gen_brcondi_i32(cond, t0, 0, l1);
- tcg_temp_free_i32(t0);
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
@@ -9792,14 +9479,13 @@
tcg_gen_brcondi_i32(cond, t0, 0, l2);
gen_load_fpr32h(ctx, t0, fs);
gen_store_fpr32h(ctx, t0, fd);
- tcg_temp_free_i32(t0);
gen_set_label(l2);
}
static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft,
int fs)
{
- TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 t1 = tcg_constant_i32(0);
TCGv_i32 fp0 = tcg_temp_new_i32();
TCGv_i32 fp1 = tcg_temp_new_i32();
TCGv_i32 fp2 = tcg_temp_new_i32();
@@ -9827,16 +9513,12 @@
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(t1);
}
static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft,
int fs)
{
- TCGv_i64 t1 = tcg_const_i64(0);
+ TCGv_i64 t1 = tcg_constant_i64(0);
TCGv_i64 fp0 = tcg_temp_new_i64();
TCGv_i64 fp1 = tcg_temp_new_i64();
TCGv_i64 fp2 = tcg_temp_new_i64();
@@ -9864,10 +9546,6 @@
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp2);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(t1);
}
static void gen_farith(DisasContext *ctx, enum fopcode op1,
@@ -9883,9 +9561,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SUB_S:
@@ -9896,9 +9572,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MUL_S:
@@ -9909,9 +9583,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_DIV_S:
@@ -9922,9 +9594,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SQRT_S:
@@ -9934,7 +9604,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_ABS_S:
@@ -9948,7 +9617,6 @@
gen_helper_float_abs_s(fp0, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MOV_S:
@@ -9957,7 +9625,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_NEG_S:
@@ -9971,7 +9638,6 @@
gen_helper_float_chs_s(fp0, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_ROUND_L_S:
@@ -9986,9 +9652,7 @@
} else {
gen_helper_float_round_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_TRUNC_L_S:
@@ -10003,9 +9667,7 @@
} else {
gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CEIL_L_S:
@@ -10020,9 +9682,7 @@
} else {
gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_FLOOR_L_S:
@@ -10037,9 +9697,7 @@
} else {
gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_ROUND_W_S:
@@ -10053,7 +9711,6 @@
gen_helper_float_round_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_TRUNC_W_S:
@@ -10067,7 +9724,6 @@
gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CEIL_W_S:
@@ -10081,7 +9737,6 @@
gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_FLOOR_W_S:
@@ -10095,7 +9750,6 @@
gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SEL_S:
@@ -10126,7 +9780,6 @@
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
gen_set_label(l1);
}
break;
@@ -10141,7 +9794,6 @@
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
gen_set_label(l1);
}
}
@@ -10153,7 +9805,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_recip_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_RSQRT_S:
@@ -10163,7 +9814,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MADDF_S:
@@ -10177,9 +9827,6 @@
gen_load_fpr32(ctx, fp2, fd);
gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MSUBF_S:
@@ -10193,9 +9840,6 @@
gen_load_fpr32(ctx, fp2, fd);
gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_RINT_S:
@@ -10205,7 +9849,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_rint_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CLASS_S:
@@ -10215,7 +9858,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_class_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MIN_S: /* OPC_RECIP2_S */
@@ -10228,9 +9870,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RECIP2_S */
check_cp1_64bitmode(ctx);
@@ -10241,9 +9880,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10257,9 +9894,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RECIP1_S */
check_cp1_64bitmode(ctx);
@@ -10269,7 +9903,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_recip1_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10282,8 +9915,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp1, fd);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RSQRT1_S */
check_cp1_64bitmode(ctx);
@@ -10293,7 +9924,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10306,8 +9936,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp1, fd);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RSQRT2_S */
check_cp1_64bitmode(ctx);
@@ -10318,9 +9946,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10332,9 +9958,7 @@
gen_load_fpr32(ctx, fp32, fs);
gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CVT_W_S:
@@ -10348,7 +9972,6 @@
gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CVT_L_S:
@@ -10363,9 +9986,7 @@
} else {
gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CVT_PS_S:
@@ -10378,10 +9999,7 @@
gen_load_fpr32(ctx, fp32_0, fs);
gen_load_fpr32(ctx, fp32_1, ft);
tcg_gen_concat_i32_i64(fp64, fp32_1, fp32_0);
- tcg_temp_free_i32(fp32_1);
- tcg_temp_free_i32(fp32_0);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CMP_F_S:
@@ -10416,9 +10034,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SUB_D:
@@ -10430,9 +10046,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MUL_D:
@@ -10444,9 +10058,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_DIV_D:
@@ -10458,9 +10070,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SQRT_D:
@@ -10471,7 +10081,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ABS_D:
@@ -10486,7 +10095,6 @@
gen_helper_float_abs_d(fp0, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MOV_D:
@@ -10496,7 +10104,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_NEG_D:
@@ -10511,7 +10118,6 @@
gen_helper_float_chs_d(fp0, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ROUND_L_D:
@@ -10526,7 +10132,6 @@
gen_helper_float_round_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_TRUNC_L_D:
@@ -10541,7 +10146,6 @@
gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CEIL_L_D:
@@ -10556,7 +10160,6 @@
gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_FLOOR_L_D:
@@ -10571,7 +10174,6 @@
gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ROUND_W_D:
@@ -10586,9 +10188,7 @@
} else {
gen_helper_float_round_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_TRUNC_W_D:
@@ -10603,9 +10203,7 @@
} else {
gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CEIL_W_D:
@@ -10620,9 +10218,7 @@
} else {
gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_FLOOR_W_D:
@@ -10637,9 +10233,7 @@
} else {
gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_SEL_D:
@@ -10670,7 +10264,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
break;
@@ -10685,7 +10278,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
}
@@ -10698,7 +10290,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_recip_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RSQRT_D:
@@ -10709,7 +10300,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MADDF_D:
@@ -10723,9 +10313,6 @@
gen_load_fpr64(ctx, fp2, fd);
gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MSUBF_D:
@@ -10739,9 +10326,6 @@
gen_load_fpr64(ctx, fp2, fd);
gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RINT_D:
@@ -10751,7 +10335,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rint_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CLASS_D:
@@ -10761,7 +10344,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_class_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MIN_D: /* OPC_RECIP2_D */
@@ -10773,8 +10355,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RECIP2_D */
check_cp1_64bitmode(ctx);
@@ -10785,9 +10365,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10800,8 +10378,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RECIP1_D */
check_cp1_64bitmode(ctx);
@@ -10811,7 +10387,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_recip1_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10824,8 +10399,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RSQRT1_D */
check_cp1_64bitmode(ctx);
@@ -10835,7 +10408,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10848,8 +10420,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RSQRT2_D */
check_cp1_64bitmode(ctx);
@@ -10860,9 +10430,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10897,9 +10465,7 @@
gen_load_fpr64(ctx, fp64, fs);
gen_helper_float_cvts_d(fp32, cpu_env, fp64);
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CVT_W_D:
@@ -10914,9 +10480,7 @@
} else {
gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CVT_L_D:
@@ -10931,7 +10495,6 @@
gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_S_W:
@@ -10941,7 +10504,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_cvts_w(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CVT_D_W:
@@ -10952,9 +10514,7 @@
gen_load_fpr32(ctx, fp32, fs);
gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CVT_S_L:
@@ -10965,9 +10525,7 @@
gen_load_fpr64(ctx, fp64, fs);
gen_helper_float_cvts_l(fp32, cpu_env, fp64);
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CVT_D_L:
@@ -10978,7 +10536,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_PS_PW:
@@ -10989,7 +10546,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_cvtps_pw(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ADD_PS:
@@ -11001,9 +10557,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SUB_PS:
@@ -11015,9 +10569,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MUL_PS:
@@ -11029,9 +10581,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ABS_PS:
@@ -11042,7 +10592,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_abs_ps(fp0, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MOV_PS:
@@ -11052,7 +10601,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_NEG_PS:
@@ -11063,7 +10611,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_chs_ps(fp0, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MOVCF_PS:
@@ -11082,7 +10629,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
break;
@@ -11097,7 +10643,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
}
@@ -11111,9 +10656,7 @@
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MULR_PS:
@@ -11125,9 +10668,7 @@
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RECIP2_PS:
@@ -11139,9 +10680,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RECIP1_PS:
@@ -11152,7 +10691,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_recip1_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RSQRT1_PS:
@@ -11163,7 +10701,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RSQRT2_PS:
@@ -11175,9 +10712,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_S_PU:
@@ -11188,7 +10723,6 @@
gen_load_fpr32h(ctx, fp0, fs);
gen_helper_float_cvts_pu(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CVT_PW_PS:
@@ -11199,7 +10733,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_S_PL:
@@ -11210,7 +10743,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_cvts_pl(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_PLL_PS:
@@ -11223,8 +10755,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_store_fpr32h(ctx, fp0, fd);
gen_store_fpr32(ctx, fp1, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_PLU_PS:
@@ -11237,8 +10767,6 @@
gen_load_fpr32h(ctx, fp1, ft);
gen_store_fpr32(ctx, fp1, fd);
gen_store_fpr32h(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_PUL_PS:
@@ -11251,8 +10779,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_store_fpr32(ctx, fp1, fd);
gen_store_fpr32h(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_PUU_PS:
@@ -11265,8 +10791,6 @@
gen_load_fpr32h(ctx, fp1, ft);
gen_store_fpr32(ctx, fp1, fd);
gen_store_fpr32h(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_CMP_F_PS:
@@ -11324,7 +10848,6 @@
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_LDXC1:
@@ -11334,7 +10857,6 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_LUXC1:
@@ -11345,7 +10867,6 @@
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SWXC1:
@@ -11354,7 +10875,6 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SDXC1:
@@ -11364,7 +10884,6 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SUXC1:
@@ -11374,11 +10893,9 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_temp_free_i64(fp0);
}
break;
}
- tcg_temp_free(t0);
}
static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
@@ -11405,7 +10922,6 @@
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
- tcg_temp_free(t0);
if (cpu_is_bigendian(ctx)) {
gen_load_fpr32(ctx, fp, fs);
gen_load_fpr32h(ctx, fph, ft);
@@ -11418,8 +10934,6 @@
gen_store_fpr32h(ctx, fp, fd);
}
gen_set_label(l2);
- tcg_temp_free_i32(fp);
- tcg_temp_free_i32(fph);
}
break;
case OPC_MADD_S:
@@ -11433,10 +10947,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_madd_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_MADD_D:
@@ -11451,10 +10962,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_madd_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_MADD_PS:
@@ -11468,10 +10976,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_madd_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_MSUB_S:
@@ -11485,10 +10990,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_msub_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_MSUB_D:
@@ -11503,10 +11005,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_msub_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_MSUB_PS:
@@ -11520,10 +11019,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_msub_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMADD_S:
@@ -11537,10 +11033,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_nmadd_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_NMADD_D:
@@ -11555,10 +11048,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmadd_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMADD_PS:
@@ -11572,10 +11062,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmadd_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMSUB_S:
@@ -11589,10 +11076,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_nmsub_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_NMSUB_D:
@@ -11607,10 +11091,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmsub_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMSUB_PS:
@@ -11624,10 +11105,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmsub_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
default:
@@ -11716,7 +11194,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
}
static inline void clear_branch_hflags(DisasContext *ctx)
@@ -11775,11 +11252,9 @@
tcg_gen_andi_tl(t0, btarget, 0x1);
tcg_gen_trunc_tl_i32(t1, t0);
- tcg_temp_free(t0);
tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16);
tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT);
tcg_gen_or_i32(hflags, hflags, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
} else {
@@ -11809,7 +11284,7 @@
"\n", ctx->base.pc_next);
#endif
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* Load needed operands and calculate btarget */
@@ -11863,13 +11338,12 @@
gen_load_gpr(tbase, rt);
gen_op_addr_add(ctx, btarget, tbase, toffset);
- tcg_temp_free(tbase);
}
break;
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
if (bcond_compute == 0) {
@@ -11890,7 +11364,7 @@
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* Generating branch here as compact branches don't have delay slot */
@@ -11980,10 +11454,6 @@
/* OPC_BNVC */
tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs);
}
- tcg_temp_free(input_overflow);
- tcg_temp_free(t4);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
} else if (rs < rt && rs == 0) {
/* OPC_BEQZALC, OPC_BNEZALC */
if (opc == OPC_BEQZALC) {
@@ -12013,7 +11483,7 @@
default:
MIPS_INVAL("Compact conditional branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* Generating branch here as compact branches don't have delay slot */
@@ -12022,10 +11492,6 @@
ctx->hflags |= MIPS_HFLAG_FBNSLOT;
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
void gen_addiupc(DisasContext *ctx, int rx, int imm,
@@ -12045,19 +11511,15 @@
if (!is_64_bit) {
tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
}
-
- tcg_temp_free(t0);
}
static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base,
int16_t offset)
{
- TCGv_i32 t0 = tcg_const_i32(op);
+ TCGv_i32 t0 = tcg_constant_i32(op);
TCGv t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t1, base, offset);
gen_helper_cache(cpu_env, t1, t0);
- tcg_temp_free(t1);
- tcg_temp_free_i32(t0);
}
static inline bool is_uhi(DisasContext *ctx, int sdbbp_code)
@@ -12085,9 +11547,6 @@
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t1, rd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_sync(int stype)
@@ -12191,7 +11650,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -12402,19 +11860,17 @@
case OPC_PRECR_SRA_PH_W:
check_dsp_r2(ctx);
{
- TCGv_i32 sa_t = tcg_const_i32(v2);
+ TCGv_i32 sa_t = tcg_constant_i32(v2);
gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t,
cpu_gpr[ret]);
- tcg_temp_free_i32(sa_t);
break;
}
case OPC_PRECR_SRA_R_PH_W:
check_dsp_r2(ctx);
{
- TCGv_i32 sa_t = tcg_const_i32(v2);
+ TCGv_i32 sa_t = tcg_constant_i32(v2);
gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t,
cpu_gpr[ret]);
- tcg_temp_free_i32(sa_t);
break;
}
case OPC_PRECRQ_PH_W:
@@ -12601,17 +12057,15 @@
case OPC_PRECR_SRA_QH_PW:
check_dsp_r2(ctx);
{
- TCGv_i32 ret_t = tcg_const_i32(ret);
+ TCGv_i32 ret_t = tcg_constant_i32(ret);
gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t);
- tcg_temp_free_i32(ret_t);
break;
}
case OPC_PRECR_SRA_R_QH_PW:
check_dsp_r2(ctx);
{
- TCGv_i32 sa_v = tcg_const_i32(ret);
+ TCGv_i32 sa_v = tcg_constant_i32(ret);
gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v);
- tcg_temp_free_i32(sa_v);
break;
}
case OPC_PRECRQ_OB_QH:
@@ -12638,9 +12092,6 @@
break;
#endif
}
-
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc,
@@ -12880,10 +12331,6 @@
break;
#endif
}
-
- tcg_temp_free(t0);
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -13190,10 +12637,6 @@
break;
#endif
}
-
- tcg_temp_free_i32(t0);
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -13330,8 +12773,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(val_t);
}
static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx,
@@ -13514,10 +12955,6 @@
break;
#endif
}
-
- tcg_temp_free(t1);
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx,
@@ -13605,7 +13042,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -13822,10 +13258,6 @@
break;
#endif
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(v1_t);
}
/* End MIPSDSP functions. */
@@ -14676,9 +14108,6 @@
gen_load_gpr(t1, rs);
gen_helper_insv(cpu_gpr[rt], cpu_env, t1, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
break;
}
default: /* Invalid */
@@ -14948,9 +14377,6 @@
gen_load_gpr(t1, rs);
gen_helper_dinsv(cpu_gpr[rt], cpu_env, t1, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
break;
}
default: /* Invalid */
@@ -15177,8 +14603,6 @@
gen_load_gpr(t0, rt);
gen_load_gpr(t1, rs);
gen_helper_fork(t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
case OPC_YIELD:
@@ -15189,7 +14613,6 @@
gen_load_gpr(t0, rs);
gen_helper_yield(t0, cpu_env, t0);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
default:
@@ -15432,7 +14855,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
}
#endif /* !CONFIG_USER_ONLY */
break;
@@ -15880,7 +15302,6 @@
TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rs);
tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16);
- tcg_temp_free(t0);
}
#else
gen_reserved_instruction(ctx);
diff --git a/target/mips/tcg/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c
index 96f4834..a510da4 100644
--- a/target/mips/tcg/translate_addr_const.c
+++ b/target/mips/tcg/translate_addr_const.c
@@ -30,10 +30,6 @@
tcg_gen_shli_tl(t0, t0, sa + 1);
tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
-
return true;
}
@@ -54,8 +50,5 @@
gen_load_gpr(t1, rt);
tcg_gen_shli_tl(t0, t0, sa + 1);
tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
-
return true;
}
diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c
index 4e479c2..3a45a1b 100644
--- a/target/mips/tcg/tx79_translate.c
+++ b/target/mips/tcg/tx79_translate.c
@@ -138,10 +138,6 @@
gen_load_gpr_hi(ax, a->rs);
gen_load_gpr_hi(bx, a->rt);
gen_logic_i64(cpu_gpr_hi[a->rd], ax, bx);
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -247,8 +243,8 @@
return true;
}
- c0 = tcg_const_tl(0);
- c1 = tcg_const_tl(0xffffffff);
+ c0 = tcg_constant_tl(0);
+ c1 = tcg_constant_tl(0xffffffff);
ax = tcg_temp_new_i64();
bx = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
@@ -273,15 +269,6 @@
tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0);
tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], t2, wlen * i, wlen);
}
-
- tcg_temp_free(t2);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
- tcg_temp_free(bx);
- tcg_temp_free(ax);
- tcg_temp_free(c1);
- tcg_temp_free(c0);
-
return true;
}
@@ -362,10 +349,6 @@
tcg_gen_addi_i64(addr, addr, 8);
tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
gen_store_gpr_hi(t0, a->rt);
-
- tcg_temp_free(t0);
- tcg_temp_free(addr);
-
return true;
}
@@ -389,10 +372,6 @@
tcg_gen_addi_i64(addr, addr, 8);
gen_load_gpr_hi(t0, a->rt);
tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
-
- tcg_temp_free(addr);
- tcg_temp_free(t0);
-
return true;
}
@@ -458,11 +437,6 @@
gen_load_gpr_hi(t0, a->rs); /* a1 */
tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], a0, t0, 32, 32);
-
- tcg_temp_free(t0);
- tcg_temp_free(b0);
- tcg_temp_free(a0);
-
return true;
}
@@ -506,10 +480,6 @@
tcg_gen_shri_i64(bx, bx, wlen);
tcg_gen_shri_i64(ax, ax, wlen);
}
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -541,10 +511,6 @@
gen_load_gpr(ax, a->rs);
gen_load_gpr(bx, a->rt);
gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx);
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -564,10 +530,6 @@
gen_load_gpr_hi(ax, a->rs);
gen_load_gpr_hi(bx, a->rt);
gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx);
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -678,8 +640,5 @@
tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rt], ax, 0, 32);
tcg_gen_rotri_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], 32);
-
- tcg_temp_free(ax);
-
return true;
}
diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c
index a7d241e..804672f 100644
--- a/target/mips/tcg/vr54xx_translate.c
+++ b/target/mips/tcg/vr54xx_translate.c
@@ -49,10 +49,6 @@
gen_helper_mult_acc(t0, cpu_env, t0, t1);
gen_store_gpr(t0, a->rd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
diff --git a/target/nios2/cpu-param.h b/target/nios2/cpu-param.h
index 177d720..767bba4 100644
--- a/target/nios2/cpu-param.h
+++ b/target/nios2/cpu-param.h
@@ -16,6 +16,5 @@
#else
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-#define NB_MMU_MODES 2
#endif
diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h
index 73be699..3f08207 100644
--- a/target/openrisc/cpu-param.h
+++ b/target/openrisc/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 13
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 3
#endif
diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h
index ea377b7..0a0416e 100644
--- a/target/ppc/cpu-param.h
+++ b/target/ppc/cpu-param.h
@@ -32,6 +32,5 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 10
#endif
diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc
index 42f2cd0..d900e13 100644
--- a/target/ppc/power8-pmu-regs.c.inc
+++ b/target/ppc/power8-pmu-regs.c.inc
@@ -177,7 +177,7 @@
void spr_read_PMC(DisasContext *ctx, int gprn, int sprn)
{
- TCGv_i32 t_sprn = tcg_const_i32(sprn);
+ TCGv_i32 t_sprn = tcg_constant_i32(sprn);
gen_icount_io_start(ctx);
gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn);
@@ -210,7 +210,7 @@
void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t_sprn = tcg_const_i32(sprn);
+ TCGv_i32 t_sprn = tcg_constant_i32(sprn);
gen_icount_io_start(ctx);
gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]);
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index df324fc..9d05357 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -263,8 +263,8 @@
* faulting instruction
*/
gen_update_nip(ctx, ctx->cia);
- t0 = tcg_const_i32(excp);
- t1 = tcg_const_i32(error);
+ t0 = tcg_constant_i32(excp);
+ t1 = tcg_constant_i32(error);
gen_helper_raise_exception_err(cpu_env, t0, t1);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -278,7 +278,7 @@
* faulting instruction
*/
gen_update_nip(ctx, ctx->cia);
- t0 = tcg_const_i32(excp);
+ t0 = tcg_constant_i32(excp);
gen_helper_raise_exception(cpu_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -289,7 +289,7 @@
TCGv_i32 t0;
gen_update_nip(ctx, nip);
- t0 = tcg_const_i32(excp);
+ t0 = tcg_constant_i32(excp);
gen_helper_raise_exception(cpu_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -386,7 +386,7 @@
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
- TCGv_i32 t0 = tcg_const_i32(sprn);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
gen_helper_load_dump_spr(cpu_env, t0);
#endif
}
@@ -400,7 +400,7 @@
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
- TCGv_i32 t0 = tcg_const_i32(sprn);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
gen_helper_store_dump_spr(cpu_env, t0);
#endif
}
@@ -672,25 +672,25 @@
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}
@@ -712,25 +712,25 @@
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}
@@ -1040,13 +1040,15 @@
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(sprn);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
}
+
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
}
+
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
@@ -1080,9 +1082,9 @@
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
int bit, int sprn, int cause)
{
- TCGv_i32 t1 = tcg_const_i32(bit);
- TCGv_i32 t2 = tcg_const_i32(sprn);
- TCGv_i32 t3 = tcg_const_i32(cause);
+ TCGv_i32 t1 = tcg_constant_i32(bit);
+ TCGv_i32 t2 = tcg_constant_i32(sprn);
+ TCGv_i32 t3 = tcg_constant_i32(cause);
gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
}
@@ -1090,9 +1092,9 @@
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
int bit, int sprn, int cause)
{
- TCGv_i32 t1 = tcg_const_i32(bit);
- TCGv_i32 t2 = tcg_const_i32(sprn);
- TCGv_i32 t3 = tcg_const_i32(cause);
+ TCGv_i32 t1 = tcg_constant_i32(bit);
+ TCGv_i32 t2 = tcg_constant_i32(sprn);
+ TCGv_i32 t3 = tcg_constant_i32(cause);
gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
}
@@ -1388,7 +1390,7 @@
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
- TCGv t0 = tcg_const_tl(arg1);
+ TCGv t0 = tcg_constant_tl(arg1);
gen_op_cmp(arg0, t0, s, crf);
}
@@ -1409,7 +1411,7 @@
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
- TCGv t0 = tcg_const_tl(arg1);
+ TCGv t0 = tcg_constant_tl(arg1);
gen_op_cmp32(arg0, t0, s, crf);
}
@@ -1476,7 +1478,7 @@
tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
tcg_gen_andi_tl(t0, t0, mask);
- zr = tcg_const_tl(0);
+ zr = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
cpu_gpr[rB(ctx->opcode)]);
@@ -1568,7 +1570,7 @@
tcg_gen_mov_tl(ca32, ca);
}
} else {
- TCGv zero = tcg_const_tl(0);
+ TCGv zero = tcg_constant_tl(0);
if (add_ca) {
tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
@@ -1609,7 +1611,7 @@
add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
- TCGv t0 = tcg_const_tl(const_val); \
+ TCGv t0 = tcg_constant_tl(const_val); \
gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
cpu_gpr[rA(ctx->opcode)], t0, \
ca, glue(ca, 32), \
@@ -1636,7 +1638,7 @@
/* addic addic.*/
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
- TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
}
@@ -1709,7 +1711,7 @@
#define GEN_DIVE(name, hlpr, compute_ov) \
static void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i32 t0 = tcg_const_i32(compute_ov); \
+ TCGv_i32 t0 = tcg_constant_i32(compute_ov); \
gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
@@ -1802,8 +1804,8 @@
tcg_gen_rem_i32(t3, t0, t1);
tcg_gen_ext_i32_tl(ret, t3);
} else {
- TCGv_i32 t2 = tcg_const_i32(1);
- TCGv_i32 t3 = tcg_const_i32(0);
+ TCGv_i32 t2 = tcg_constant_i32(1);
+ TCGv_i32 t3 = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
tcg_gen_remu_i32(t3, t0, t1);
tcg_gen_extu_i32_tl(ret, t3);
@@ -1842,8 +1844,8 @@
tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_i64(ret, t0, t1);
} else {
- TCGv_i64 t2 = tcg_const_i64(1);
- TCGv_i64 t3 = tcg_const_i64(0);
+ TCGv_i64 t2 = tcg_constant_i64(1);
+ TCGv_i64 t3 = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
tcg_gen_remu_i64(ret, t0, t1);
}
@@ -2038,7 +2040,7 @@
} else if (add_ca) {
TCGv zero, inv1 = tcg_temp_new();
tcg_gen_not_tl(inv1, arg1);
- zero = tcg_const_tl(0);
+ zero = tcg_constant_tl(0);
tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
@@ -2083,7 +2085,7 @@
add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
- TCGv t0 = tcg_const_tl(const_val); \
+ TCGv t0 = tcg_constant_tl(const_val); \
gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
cpu_gpr[rA(ctx->opcode)], t0, \
add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
@@ -2107,7 +2109,7 @@
/* subfic */
static void gen_subfic(DisasContext *ctx)
{
- TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
c, 0, 1, 0, 0);
}
@@ -2115,7 +2117,7 @@
/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
- TCGv zero = tcg_const_tl(0);
+ TCGv zero = tcg_constant_tl(0);
gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
zero, 0, 0, compute_ov, Rc(ctx->opcode));
}
@@ -2214,7 +2216,7 @@
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(0);
+ TCGv_i32 t0 = tcg_constant_i32(0);
tcg_gen_st_i32(t0, cpu_env,
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
@@ -3256,7 +3258,7 @@
}
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
- t1 = tcg_const_i32(rD(ctx->opcode));
+ t1 = tcg_constant_i32(rD(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
gen_helper_lmw(cpu_env, t0, t1);
}
@@ -3273,7 +3275,7 @@
}
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
- t1 = tcg_const_i32(rS(ctx->opcode));
+ t1 = tcg_constant_i32(rS(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
gen_helper_stmw(cpu_env, t0, t1);
}
@@ -3311,8 +3313,8 @@
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_register(ctx, t0);
- t1 = tcg_const_i32(nb);
- t2 = tcg_const_i32(start);
+ t1 = tcg_constant_i32(nb);
+ t2 = tcg_constant_i32(start);
gen_helper_lsw(cpu_env, t0, t1, t2);
}
@@ -3329,9 +3331,9 @@
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- t1 = tcg_const_i32(rD(ctx->opcode));
- t2 = tcg_const_i32(rA(ctx->opcode));
- t3 = tcg_const_i32(rB(ctx->opcode));
+ t1 = tcg_constant_i32(rD(ctx->opcode));
+ t2 = tcg_constant_i32(rA(ctx->opcode));
+ t3 = tcg_constant_i32(rB(ctx->opcode));
gen_helper_lswx(cpu_env, t0, t1, t2, t3);
}
@@ -3352,8 +3354,8 @@
if (nb == 0) {
nb = 32;
}
- t1 = tcg_const_i32(nb);
- t2 = tcg_const_i32(rS(ctx->opcode));
+ t1 = tcg_constant_i32(nb);
+ t2 = tcg_constant_i32(rS(ctx->opcode));
gen_helper_stsw(cpu_env, t0, t1, t2);
}
@@ -3373,7 +3375,7 @@
t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, cpu_xer);
tcg_gen_andi_i32(t1, t1, 0x7F);
- t2 = tcg_const_i32(rS(ctx->opcode));
+ t2 = tcg_constant_i32(rS(ctx->opcode));
gen_helper_stsw(cpu_env, t0, t1, t2);
}
@@ -3943,7 +3945,7 @@
* to occur.
*/
if (wc == 0) {
- TCGv_i32 t0 = tcg_const_i32(1);
+ TCGv_i32 t0 = tcg_constant_i32(1);
tcg_gen_st_i32(t0, cpu_env,
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
/* Stop translation, as the CPU is supposed to sleep from now */
@@ -3989,7 +3991,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_DOZE);
+ t = tcg_constant_i32(PPC_PM_DOZE);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4004,7 +4006,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_NAP);
+ t = tcg_constant_i32(PPC_PM_NAP);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4019,7 +4021,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_STOP);
+ t = tcg_constant_i32(PPC_PM_STOP);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4034,7 +4036,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_SLEEP);
+ t = tcg_constant_i32(PPC_PM_SLEEP);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4049,7 +4051,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_RVWINKLE);
+ t = tcg_constant_i32(PPC_PM_RVWINKLE);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4506,7 +4508,7 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
}
@@ -4520,8 +4522,8 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_tl(SIMM(ctx->opcode));
- t1 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_tl(SIMM(ctx->opcode));
+ t1 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
@@ -4534,7 +4536,7 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
}
@@ -4548,8 +4550,8 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_tl(SIMM(ctx->opcode));
- t1 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_tl(SIMM(ctx->opcode));
+ t1 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#endif
@@ -5026,7 +5028,7 @@
gen_set_access_type(ctx, ACCESS_CACHE);
tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
}
@@ -5039,7 +5041,7 @@
gen_set_access_type(ctx, ACCESS_CACHE);
tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
}
@@ -5114,7 +5116,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5143,7 +5145,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5175,7 +5177,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5204,7 +5206,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5558,7 +5560,7 @@
TCGv dcrn;
CHK_SV(ctx);
- dcrn = tcg_const_tl(SPR(ctx->opcode));
+ dcrn = tcg_constant_tl(SPR(ctx->opcode));
gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5572,7 +5574,7 @@
TCGv dcrn;
CHK_SV(ctx);
- dcrn = tcg_const_tl(SPR(ctx->opcode));
+ dcrn = tcg_constant_tl(SPR(ctx->opcode));
gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5793,7 +5795,7 @@
case 1:
case 2:
{
- TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
t0, cpu_gpr[rA(ctx->opcode)]);
}
@@ -5839,7 +5841,7 @@
case 1:
case 2:
{
- TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
}
@@ -5875,12 +5877,10 @@
CHK_SV(ctx);
if (rA(ctx->opcode)) {
t0 = tcg_temp_new();
- tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]);
+ tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
} else {
- t0 = tcg_const_tl(0);
+ t0 = cpu_gpr[rB(ctx->opcode)];
}
-
- tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]);
gen_helper_booke206_tlbsx(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5983,7 +5983,7 @@
/* dlmzb */
static void gen_dlmzb(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
+ TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
}
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
index 20ea484..02d86b7 100644
--- a/target/ppc/translate/fixedpoint-impl.c.inc
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -484,33 +484,35 @@
static bool trans_ADDG6S(DisasContext *ctx, arg_X *a)
{
- const uint64_t carry_bits = 0x1111111111111111ULL;
- TCGv t0, t1, carry, zero = tcg_constant_tl(0);
+ const target_ulong carry_bits = (target_ulong)-1 / 0xf;
+ TCGv in1, in2, carryl, carryh, tmp;
+ TCGv zero = tcg_constant_tl(0);
REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
- t0 = tcg_temp_new();
- t1 = tcg_const_tl(0);
- carry = tcg_const_tl(0);
+ in1 = cpu_gpr[a->ra];
+ in2 = cpu_gpr[a->rb];
+ tmp = tcg_temp_new();
+ carryl = tcg_temp_new();
+ carryh = tcg_temp_new();
- for (int i = 0; i < 16; i++) {
- tcg_gen_shri_tl(t0, cpu_gpr[a->ra], i * 4);
- tcg_gen_andi_tl(t0, t0, 0xf);
- tcg_gen_add_tl(t1, t1, t0);
+ /* Addition with carry. */
+ tcg_gen_add2_tl(carryl, carryh, in1, zero, in2, zero);
+ /* Addition without carry. */
+ tcg_gen_xor_tl(tmp, in1, in2);
+ /* Difference between the two is carry in to each bit. */
+ tcg_gen_xor_tl(carryl, carryl, tmp);
- tcg_gen_shri_tl(t0, cpu_gpr[a->rb], i * 4);
- tcg_gen_andi_tl(t0, t0, 0xf);
- tcg_gen_add_tl(t1, t1, t0);
+ /*
+ * The carry-out that we're looking for is the carry-in to
+ * the next nibble. Shift the double-word down one nibble,
+ * which puts all of the bits back into one word.
+ */
+ tcg_gen_extract2_tl(carryl, carryl, carryh, 4);
- tcg_gen_andi_tl(t1, t1, 0x10);
- tcg_gen_setcond_tl(TCG_COND_NE, t1, t1, zero);
-
- tcg_gen_shli_tl(t0, t1, i * 4);
- tcg_gen_or_tl(carry, carry, t0);
- }
-
- tcg_gen_xori_tl(carry, carry, (target_long)carry_bits);
- tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6);
+ /* Invert, isolate the carry bits, and produce 6's. */
+ tcg_gen_andc_tl(carryl, tcg_constant_tl(carry_bits), carryl);
+ tcg_gen_muli_tl(cpu_gpr[a->rt], carryl, 6);
return true;
}
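The loop-free trans_ADDG6S above relies on a word-wide carry extraction. A standalone sketch of the same arithmetic in plain C (the function name is invented for illustration; it assumes a compiler providing unsigned __int128, such as GCC or Clang):

#include <stdint.h>

/*
 * For each 4-bit BCD digit position, yield 6 where adding the corresponding
 * digits of a and b produces no carry out of that nibble, and 0 where it does.
 */
static uint64_t addg6s_sketch(uint64_t a, uint64_t b)
{
    const uint64_t ones = (uint64_t)-1 / 0xf;            /* 0x1111...1 */
    unsigned __int128 sum = (unsigned __int128)a + b;    /* carry-propagating add */
    uint64_t half = a ^ b;                               /* carry-free add */
    /* XOR of the two sums is the carry-in to every bit position. */
    unsigned __int128 cin = sum ^ half;
    /*
     * The carry out of nibble n is the carry in to bit 4*(n+1); shifting the
     * 128-bit value down by 4 aligns it with the low bit of nibble n.
     */
    uint64_t cout = (uint64_t)(cin >> 4);
    /* Invert, keep one bit per nibble, and scale each surviving bit to 6. */
    return (ones & ~cout) * 6;
}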
diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc
index d5d88e7..57d8437 100644
--- a/target/ppc/translate/fp-impl.c.inc
+++ b/target/ppc/translate/fp-impl.c.inc
@@ -348,7 +348,7 @@
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
+ crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
gen_helper_fcmpo(cpu_env, t0, t1, crf);
@@ -368,7 +368,7 @@
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
+ crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
gen_helper_fcmpu(cpu_env, t0, t1, crf);
@@ -541,7 +541,7 @@
tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
~((0xF << shift) & FP_EX_CLEAR_BITS));
/* FEX and VX need to be updated, so don't set fpscr directly */
- tmask = tcg_const_i32(1 << nibble);
+ tmask = tcg_constant_i32(1 << nibble);
gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
}
@@ -681,9 +681,7 @@
crb = 31 - crbD(ctx->opcode);
gen_reset_fpstatus();
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
- TCGv_i32 t0;
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_clrbit(cpu_env, t0);
+ gen_helper_fpscr_clrbit(cpu_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
@@ -703,9 +701,7 @@
crb = 31 - crbD(ctx->opcode);
/* XXX: we pretend we can only do IEEE floating-point computations */
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
- TCGv_i32 t0;
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_setbit(cpu_env, t0);
+ gen_helper_fpscr_setbit(cpu_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
@@ -733,10 +729,12 @@
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
- if (l) {
- t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
+ if (!l) {
+ t0 = tcg_constant_i32(flm << (w * 8));
+ } else if (ctx->insns_flags2 & PPC2_ISA205) {
+ t0 = tcg_constant_i32(0xffff);
} else {
- t0 = tcg_const_i32(flm << (w * 8));
+ t0 = tcg_constant_i32(0xff);
}
t1 = tcg_temp_new_i64();
get_fpr(t1, rB(ctx->opcode));
@@ -767,8 +765,8 @@
return;
}
sh = (8 * w) + 7 - bf;
- t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
- t1 = tcg_const_i32(1 << sh);
+ t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
+ t1 = tcg_constant_i32(1 << sh);
gen_helper_store_fpscr(cpu_env, t0, t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 05ba9c9..112233b 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -171,53 +171,56 @@
gen_helper_mtvscr(cpu_env, val);
}
+static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ TCGv_i64 t2;
+ TCGv_i64 avr;
+ TCGv_i64 ten, z;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ avr = tcg_temp_new_i64();
+ ten = tcg_constant_i64(10);
+ z = tcg_constant_i64(0);
+
+ if (add_cin) {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ get_avr64(avr, rB(ctx->opcode), false);
+ tcg_gen_andi_i64(t2, avr, 0xF);
+ tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(avr, t2, avr, ten);
+ set_avr64(rD(ctx->opcode), avr, false);
+ }
+
+ if (ret_carry) {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ set_avr64(rD(ctx->opcode), z, true);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mul_i64(t0, avr, ten);
+ tcg_gen_add_i64(avr, t0, t2);
+ set_avr64(rD(ctx->opcode), avr, true);
+ }
+}
+
#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_i64 t0; \
- TCGv_i64 t1; \
- TCGv_i64 t2; \
- TCGv_i64 avr; \
- TCGv_i64 ten, z; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- \
- t0 = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- t2 = tcg_temp_new_i64(); \
- avr = tcg_temp_new_i64(); \
- ten = tcg_const_i64(10); \
- z = tcg_const_i64(0); \
- \
- if (add_cin) { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- get_avr64(avr, rB(ctx->opcode), false); \
- tcg_gen_andi_i64(t2, avr, 0xF); \
- tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(avr, t2, avr, ten); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } \
- \
- if (ret_carry) { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- set_avr64(rD(ctx->opcode), z, true); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mul_i64(t0, avr, ten); \
- tcg_gen_add_i64(avr, t0, t2); \
- set_avr64(rD(ctx->opcode), avr, true); \
- } \
-} \
+ static void glue(gen_, name)(DisasContext *ctx) \
+ { gen_vx_vmul10(ctx, add_cin, ret_carry); }
GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
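gen_vx_vmul10() above splits the 128-bit multiply-by-10 across the two 64-bit halves of the AVR. Roughly, in plain C (a sketch only, assuming unsigned __int128 is available; the add_cin/ret_carry variants additionally fold in the low nibble of VRB and return the top carry):

#include <stdint.h>

static void vmul10_sketch(uint64_t hi, uint64_t lo,
                          uint64_t *out_hi, uint64_t *out_lo)
{
    unsigned __int128 lo10 = (unsigned __int128)lo * 10;

    *out_lo = (uint64_t)lo10;
    /*
     * The upper 64 bits of lo*10 (at most 9) carry into the high half;
     * anything that overflows past 128 bits is discarded.
     */
    *out_hi = hi * 10 + (uint64_t)(lo10 >> 64);
}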
@@ -903,7 +906,6 @@
hi = tcg_temp_new_i64();
lo = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- t1 = tcg_const_i64(0);
get_avr64(lo, a->vra, false);
get_avr64(hi, a->vra, true);
@@ -914,7 +916,10 @@
if (right) {
tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
if (alg) {
+ t1 = tcg_temp_new_i64();
tcg_gen_sari_i64(t1, lo, 63);
+ } else {
+ t1 = zero;
}
tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
} else {
@@ -1619,7 +1624,7 @@
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
- uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ uimm = tcg_constant_i32(UIMM5(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(cpu_env, rd, rb, uimm); \
@@ -1960,7 +1965,7 @@
ra = gen_avr_ptr(rA(ctx->opcode));
rb = gen_avr_ptr(rB(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
- sh = tcg_const_i32(VSH(ctx->opcode));
+ sh = tcg_constant_i32(VSH(ctx->opcode));
gen_helper_vsldoi(rd, ra, rb, sh);
}
@@ -2231,24 +2236,25 @@
static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
{
- TCGv_i64 rt, vrb, mask;
- rt = tcg_const_i64(0);
- vrb = tcg_temp_new_i64();
+ TCGv_i64 r[2], mask;
+
+ r[0] = tcg_temp_new_i64();
+ r[1] = tcg_temp_new_i64();
mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
for (int i = 0; i < 2; i++) {
- get_avr64(vrb, a->vrb, i);
+ get_avr64(r[i], a->vrb, i);
if (a->mp) {
- tcg_gen_and_i64(vrb, mask, vrb);
+ tcg_gen_and_i64(r[i], mask, r[i]);
} else {
- tcg_gen_andc_i64(vrb, mask, vrb);
+ tcg_gen_andc_i64(r[i], mask, r[i]);
}
- tcg_gen_ctpop_i64(vrb, vrb);
- tcg_gen_add_i64(rt, rt, vrb);
+ tcg_gen_ctpop_i64(r[i], r[i]);
}
- tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
- tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);
+ tcg_gen_add_i64(r[0], r[0], r[1]);
+ tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece);
+ tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]);
return true;
}
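What do_vcntmb() now computes, shown for the byte case (vece = 0) on a 64-bit target as a plain-C sketch; the helper name and the use of the GCC/Clang popcount builtin are illustrative assumptions:

#include <stdbool.h>
#include <stdint.h>

static uint64_t vcntmb_bytes_sketch(uint64_t vrb_hi, uint64_t vrb_lo, bool mp)
{
    const uint64_t msb = 0x8080808080808080ull;   /* sign bit of every byte */
    /* Keep the sign bits that match mp, then count them in both halves. */
    uint64_t a = mp ? (msb & vrb_hi) : (msb & ~vrb_hi);
    uint64_t b = mp ? (msb & vrb_lo) : (msb & ~vrb_lo);
    uint64_t count = (uint64_t)__builtin_popcountll(a)
                   + (uint64_t)__builtin_popcountll(b);

    /* TARGET_LONG_BITS - 8 + vece, with TARGET_LONG_BITS = 64, vece = 0. */
    return count << 56;
}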
@@ -2569,7 +2575,7 @@
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
}
@@ -2588,7 +2594,7 @@
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, rb, ps); \
}
@@ -2720,7 +2726,7 @@
} \
ra = gen_avr_ptr(rA(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- st_six = tcg_const_i32(rB(ctx->opcode)); \
+ st_six = tcg_constant_i32(rB(ctx->opcode)); \
gen_helper_##op(rd, ra, st_six); \
}
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 6e63403..0f5b005 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -154,7 +154,7 @@
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
TCGv_i64 inh, TCGv_i64 inl)
{
- TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
+ TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF);
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
@@ -825,7 +825,7 @@
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
REQUIRE_VSX(ctx);
- ro = tcg_const_i32(a->rc);
+ ro = tcg_constant_i32(a->rc);
xt = gen_avr_ptr(a->rt);
xb = gen_avr_ptr(a->rb);
@@ -860,7 +860,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
gen_helper_##name(cpu_env, opc); \
}
@@ -900,7 +900,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
gen_helper_##name(cpu_env, opc, xa, xb); \
@@ -915,7 +915,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
gen_helper_##name(cpu_env, opc, xb); \
}
@@ -929,7 +929,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
@@ -945,7 +945,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
gen_helper_##name(cpu_env, opc, xt, xb); \
@@ -960,7 +960,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
gen_helper_##name(cpu_env, opc, xa, xb); \
@@ -1994,8 +1994,8 @@
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(2047);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(2047);
get_cpu_vsr(t1, xB(ctx->opcode), true);
tcg_gen_extract_i64(exp, t1, 52, 11);
@@ -2026,8 +2026,8 @@
get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(32767);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(32767);
tcg_gen_extract_i64(exp, xbh, 48, 15);
tcg_gen_movi_i64(t0, 0x0001000000000000);
@@ -2193,8 +2193,8 @@
get_cpu_vsr(xbl, xB(ctx->opcode), false);
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(2047);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(2047);
tcg_gen_extract_i64(exp, xbh, 52, 11);
tcg_gen_movi_i64(t0, 0x0010000000000000);
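The zr/nan constants in the two hunks above feed the usual IEEE-754 exponent test: for binary64, bits 52..62 are 0 for zeroes and denormals and 2047 (all ones) for infinities and NaNs; the quad-precision hunk does the same with a 15-bit field and 32767. A plain-C restatement (sketch only, name is illustrative):

#include <stdint.h>

static int fp64_exp_class_sketch(uint64_t bits)
{
    unsigned exp = (bits >> 52) & 0x7ff;   /* 11-bit biased exponent */

    if (exp == 0) {
        return 0;        /* zero or denormal */
    }
    if (exp == 2047) {
        return 2;        /* infinity or NaN */
    }
    return 1;            /* normal number */
}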
@@ -2449,7 +2449,8 @@
TCGv_i64 conj, disj;
conj = tcg_temp_new_i64();
- disj = tcg_const_i64(0);
+ disj = tcg_temp_new_i64();
+ tcg_gen_movi_i64(disj, 0);
/* Iterate over set bits from the least to the most significant bit */
while (imm) {
@@ -2492,8 +2493,9 @@
int bit;
TCGv_vec disj, conj;
- disj = tcg_const_zeros_vec_matching(t);
conj = tcg_temp_new_vec_matching(t);
+ disj = tcg_temp_new_vec_matching(t);
+ tcg_gen_dupi_vec(vece, disj, 0);
/* Iterate over set bits from the least to the most significant bit */
while (imm) {
@@ -2546,7 +2548,7 @@
/* Equivalent functions that can be implemented with a single gen_gvec */
switch (a->imm) {
- case 0b00000000: /* true */
+ case 0b00000000: /* false */
set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
break;
diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h
index ebaf26d..b2a9396 100644
--- a/target/riscv/cpu-param.h
+++ b/target/riscv/cpu-param.h
@@ -27,6 +27,5 @@
* - S mode HLV/HLVX/HSV 0b101
* - M mode HLV/HLVX/HSV 0b111
*/
-#define NB_MMU_MODES 8
#endif
diff --git a/target/rx/cpu-param.h b/target/rx/cpu-param.h
index b156ad1..521d669 100644
--- a/target/rx/cpu-param.h
+++ b/target/rx/cpu-param.h
@@ -25,6 +25,4 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 1
-
#endif
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 6624414..70fad98 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -456,7 +456,7 @@
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
TCGv imm, mem;
- imm = tcg_const_i32(a->imm);
+ imm = tcg_constant_i32(a->imm);
mem = tcg_temp_new();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
rx_gen_st(a->sz, imm, mem);
@@ -729,8 +729,8 @@
{
TCGv z;
TCGv _imm;
- z = tcg_const_i32(0);
- _imm = tcg_const_i32(imm);
+ z = tcg_constant_i32(0);
+ _imm = tcg_constant_i32(imm);
tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
_imm, cpu_regs[rd]);
}
@@ -815,7 +815,7 @@
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
- TCGv imm = tcg_const_i32(src2);
+ TCGv imm = tcg_constant_i32(src2);
opr(cpu_regs[dst], cpu_regs[src], imm);
}
@@ -967,14 +967,13 @@
/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
- TCGv z;
- z = tcg_const_i32(0);
+ TCGv z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
- tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
- tcg_gen_xor_i32(z, arg1, arg2);
- tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
+ tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
+ tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_mov_i32(ret, cpu_psw_s);
}
@@ -1006,13 +1005,12 @@
/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
- TCGv z;
- z = tcg_const_i32(0);
+ TCGv z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
- tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
- tcg_gen_xor_i32(z, arg1, arg2);
- tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
+ tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
+ tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_mov_i32(ret, cpu_psw_s);
}
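The reordered flag updates in rx_adc()/rx_add() above (and rx_sub() below) use psw_z as scratch for the overflow computation before it finally receives the result. The formula being generated is the standard signed-overflow test; as a plain-C sketch (helper names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Signed overflow of a + b: operands agree in sign, result disagrees. */
static bool add_overflows_sketch(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    return (((res ^ a) & ~(a ^ b)) >> 31) != 0;
}

/* Signed overflow of a - b: operands differ in sign, result differs from a. */
static bool sub_overflows_sketch(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    return (((res ^ a) & (a ^ b)) >> 31) != 0;
}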
@@ -1042,23 +1040,23 @@
/* ret = arg1 - arg2 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
- TCGv temp;
tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
- tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
- temp = tcg_temp_new_i32();
- tcg_gen_xor_i32(temp, arg1, arg2);
- tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
+ tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
+ tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
/* CMP not required return */
if (ret) {
tcg_gen_mov_i32(ret, cpu_psw_s);
}
}
+
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
rx_sub(NULL, arg1, arg2);
}
+
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
@@ -1126,21 +1124,11 @@
return true;
}
-static void rx_abs(TCGv ret, TCGv arg1)
-{
- TCGv neg;
- TCGv zero;
- neg = tcg_temp_new();
- zero = tcg_const_i32(0);
- tcg_gen_neg_i32(neg, arg1);
- tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
-}
-
/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
- rx_gen_op_rr(rx_abs, a->rd, a->rs);
+ rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs);
return true;
}
@@ -1200,7 +1188,7 @@
/* emul #imm, rd */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
- TCGv imm = tcg_const_i32(a->imm);
+ TCGv imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1227,7 +1215,7 @@
/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
- TCGv imm = tcg_const_i32(a->imm);
+ TCGv imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1325,10 +1313,10 @@
done = gen_new_label();
/* if (cpu_regs[a->rs]) { */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
- count = tcg_const_i32(32);
+ count = tcg_temp_new();
tmp = tcg_temp_new();
tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
- tcg_gen_sub_i32(count, count, tmp);
+ tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
@@ -1597,7 +1585,7 @@
static inline void rx_save_pc(DisasContext *ctx)
{
- TCGv pc = tcg_const_i32(ctx->base.pc_next);
+ TCGv pc = tcg_constant_i32(ctx->base.pc_next);
push(pc);
}
@@ -1680,7 +1668,7 @@
#define STRING(op) \
do { \
- TCGv size = tcg_const_i32(a->sz); \
+ TCGv size = tcg_constant_i32(a->sz); \
gen_helper_##op(cpu_env, size); \
} while (0)
@@ -1811,7 +1799,7 @@
/* racw #imm */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
- TCGv imm = tcg_const_i32(a->imm + 1);
+ TCGv imm = tcg_constant_i32(a->imm + 1);
gen_helper_racw(cpu_env, imm);
return true;
}
@@ -1821,7 +1809,7 @@
{
TCGv tmp, z;
tmp = tcg_temp_new();
- z = tcg_const_i32(0);
+ z = tcg_constant_i32(0);
/* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
/* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
@@ -1843,7 +1831,7 @@
static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
cat3(arg_, name, _ir) * a) \
{ \
- TCGv imm = tcg_const_i32(li(ctx, 0)); \
+ TCGv imm = tcg_constant_i32(li(ctx, 0)); \
gen_helper_##op(cpu_regs[a->rd], cpu_env, \
cpu_regs[a->rd], imm); \
return true; \
@@ -1877,7 +1865,7 @@
/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
- TCGv imm = tcg_const_i32(li(ctx, 0));
+ TCGv imm = tcg_constant_i32(li(ctx, 0));
gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
return true;
}
@@ -1974,7 +1962,7 @@
{ \
TCGv mask, mem, addr; \
mem = tcg_temp_new(); \
- mask = tcg_const_i32(1 << a->imm); \
+ mask = tcg_constant_i32(1 << a->imm); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
cat3(rx_, op, m)(addr, mask); \
return true; \
@@ -1983,7 +1971,7 @@
cat3(arg_, name, _ir) * a) \
{ \
TCGv mask; \
- mask = tcg_const_i32(1 << a->imm); \
+ mask = tcg_constant_i32(1 << a->imm); \
cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
return true; \
} \
@@ -1991,10 +1979,10 @@
cat3(arg_, name, _rr) * a) \
{ \
TCGv mask, b; \
- mask = tcg_const_i32(1); \
+ mask = tcg_temp_new(); \
b = tcg_temp_new(); \
tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
- tcg_gen_shl_i32(mask, mask, b); \
+ tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
return true; \
} \
@@ -2002,10 +1990,10 @@
cat3(arg_, name, _rm) * a) \
{ \
TCGv mask, mem, addr, b; \
- mask = tcg_const_i32(1); \
+ mask = tcg_temp_new(); \
b = tcg_temp_new(); \
tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
- tcg_gen_shl_i32(mask, mask, b); \
+ tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
mem = tcg_temp_new(); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
cat3(rx_, op, m)(addr, mask); \
@@ -2128,7 +2116,7 @@
{
TCGv imm;
- imm = tcg_const_i32(a->imm);
+ imm = tcg_constant_i32(a->imm);
move_to_cr(ctx, imm, a->cr);
return true;
}
@@ -2190,7 +2178,7 @@
TCGv vec;
tcg_debug_assert(a->imm < 0x100);
- vec = tcg_const_i32(a->imm);
+ vec = tcg_constant_i32(a->imm);
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
gen_helper_rxint(cpu_env, vec);
ctx->base.is_jmp = DISAS_NORETURN;
diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h
index bf951a0..84ca086 100644
--- a/target/s390x/cpu-param.h
+++ b/target/s390x/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
-#define NB_MMU_MODES 4
#endif
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index d324c0b..14c3896 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -156,8 +156,6 @@
typedef struct {
TCGCond cond:8;
bool is_64;
- bool g1;
- bool g2;
union {
struct { TCGv_i64 a, b; } s64;
struct { TCGv_i32 a, b; } s32;
@@ -308,8 +306,6 @@
TCGv_i128 r = tcg_temp_new_i128();
tcg_gen_concat_i64_i128(r, l, h);
- tcg_temp_free_i64(h);
- tcg_temp_free_i64(l);
return r;
}
@@ -722,7 +718,6 @@
c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
c->u.s32.a = cc_op;
c->u.s32.b = cc_op;
- c->g1 = c->g2 = true;
c->is_64 = false;
return;
}
@@ -839,7 +834,6 @@
/* Load up the arguments of the comparison. */
c->is_64 = true;
- c->g1 = c->g2 = false;
switch (old_cc_op) {
case CC_OP_LTGT0_32:
c->is_64 = false;
@@ -861,13 +855,11 @@
case CC_OP_FLOGR:
c->u.s64.a = cc_dst;
c->u.s64.b = tcg_constant_i64(0);
- c->g1 = true;
break;
case CC_OP_LTGT_64:
case CC_OP_LTUGTU_64:
c->u.s64.a = cc_src;
c->u.s64.b = cc_dst;
- c->g1 = c->g2 = true;
break;
case CC_OP_TM_32:
@@ -882,7 +874,6 @@
case CC_OP_SUBU:
c->is_64 = true;
c->u.s64.b = tcg_constant_i64(0);
- c->g1 = true;
switch (mask) {
case 8 | 2:
case 4 | 1: /* result */
@@ -900,7 +891,6 @@
case CC_OP_STATIC:
c->is_64 = false;
c->u.s32.a = cc_op;
- c->g1 = true;
switch (mask) {
case 0x8 | 0x4 | 0x2: /* cc != 3 */
cond = TCG_COND_NE;
@@ -916,7 +906,6 @@
break;
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
cond = TCG_COND_EQ;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
@@ -935,7 +924,6 @@
break;
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
cond = TCG_COND_NE;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
@@ -959,7 +947,6 @@
default:
/* CC is masked by something else: (8 >> cc) & mask. */
cond = TCG_COND_NE;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
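The generic fallback above encodes the s390x condition-code mask convention: mask bit 8 selects cc 0, bit 4 selects cc 1, and so on, so a condition matches when (8 >> cc) & mask is non-zero. In plain C (sketch, illustrative name):

#include <stdbool.h>

static bool cc_mask_matches_sketch(unsigned cc, unsigned mask)
{
    /* cc is 0..3; mask is the 4-bit condition mask from the instruction. */
    return ((8u >> cc) & mask) != 0;
}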
@@ -974,24 +961,6 @@
c->cond = cond;
}
-static void free_compare(DisasCompare *c)
-{
- if (!c->g1) {
- if (c->is_64) {
- tcg_temp_free_i64(c->u.s64.a);
- } else {
- tcg_temp_free_i32(c->u.s32.a);
- }
- }
- if (!c->g2) {
- if (c->is_64) {
- tcg_temp_free_i64(c->u.s64.b);
- } else {
- tcg_temp_free_i32(c->u.s32.b);
- }
- }
-}
-
/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N) FMT_##N,
@@ -1092,7 +1061,6 @@
them, and store them back. See the "in1", "in2", "prep", "wout" sets
of routines below for more details. */
typedef struct {
- bool g_out, g_out2, g_in1, g_in2;
TCGv_i64 out, out2, in1, in2;
TCGv_i64 addr1;
TCGv_i128 out_128, in1_128, in2_128;
@@ -1292,17 +1260,14 @@
TCGv_i64 z = tcg_constant_i64(0);
tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
tcg_gen_extu_i32_i64(t1, t0);
- tcg_temp_free_i32(t0);
tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
per_branch_cond(s, TCG_COND_NE, t1, z);
- tcg_temp_free_i64(t1);
}
ret = DISAS_PC_UPDATED;
}
egress:
- free_compare(c);
return ret;
}
@@ -1462,11 +1427,11 @@
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_ori_i64(o->in2, o->in2, ~mask);
- tcg_gen_and_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_ori_i64(t, t, ~mask);
+ tcg_gen_and_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -1555,7 +1520,6 @@
tcg_gen_extu_i32_i64(t, cc_op);
tcg_gen_shli_i64(t, t, 28);
tcg_gen_or_i64(o->out, o->out, t);
- tcg_temp_free_i64(t);
}
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
@@ -1612,8 +1576,6 @@
c.cond = TCG_COND_NE;
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_subi_i64(t, regs[r1], 1);
@@ -1621,7 +1583,6 @@
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1635,8 +1596,6 @@
c.cond = TCG_COND_NE;
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, regs[r1], 32);
@@ -1645,7 +1604,6 @@
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, 1, imm, o->in2);
}
@@ -1659,8 +1617,6 @@
c.cond = TCG_COND_NE;
c.is_64 = true;
- c.g1 = true;
- c.g2 = false;
tcg_gen_subi_i64(regs[r1], regs[r1], 1);
c.u.s64.a = regs[r1];
@@ -1680,8 +1636,6 @@
c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_add_i64(t, regs[r1], regs[r3]);
@@ -1690,7 +1644,6 @@
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
store_reg32_i64(r1, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1708,15 +1661,12 @@
if (r1 == (r3 | 1)) {
c.u.s64.b = load_reg(r3 | 1);
- c.g2 = false;
} else {
c.u.s64.b = regs[r3 | 1];
- c.g2 = true;
}
tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
c.u.s64.a = regs[r1];
- c.g1 = true;
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1731,7 +1681,7 @@
if (s->insn->data) {
c.cond = tcg_unsigned_cond(c.cond);
}
- c.is_64 = c.g1 = c.g2 = true;
+ c.is_64 = true;
c.u.s64.a = o->in1;
c.u.s64.b = o->in2;
@@ -2012,11 +1962,9 @@
gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
set_cc_static(s);
tcg_gen_extr_i128_i64(o->out, len, pair);
- tcg_temp_free_i128(pair);
tcg_gen_add_i64(regs[r2], regs[r2], len);
tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
- tcg_temp_free_i64(len);
return DISAS_NEXT;
}
@@ -2118,7 +2066,6 @@
tcg_gen_extrl_i64_i32(t1, o->in1);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
- tcg_temp_free_i32(t1);
return DISAS_NEXT;
}
@@ -2128,7 +2075,6 @@
gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
- tcg_temp_free_i128(pair);
set_cc_static(s);
return DISAS_NEXT;
@@ -2140,7 +2086,6 @@
tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
tcg_gen_or_i64(o->out, o->out, t);
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -2156,14 +2101,12 @@
addr = get_address(s, 0, b2, d2);
tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
get_mem_index(s), s->insn->data | MO_ALIGN);
- tcg_temp_free_i64(addr);
/* Are the memory and expected values (un)equal? Note that this setcond
produces the output CC value, thus the NE sense of the test. */
cc = tcg_temp_new_i64();
tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
tcg_gen_extrl_i64_i32(cc_op, cc);
- tcg_temp_free_i64(cc);
set_cc_static(s);
return DISAS_NEXT;
@@ -2223,7 +2166,6 @@
tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
get_mem_index(s), mop | MO_ALIGN);
- tcg_temp_free_i64(addr);
/* Are the memory and expected values (un)equal? */
cc = tcg_temp_new_i64();
@@ -2237,14 +2179,12 @@
} else {
tcg_gen_mov_i64(o->out, old);
}
- tcg_temp_free_i64(old);
/* If the comparison was equal, and the LSB of R2 was set,
then we need to flush the TLB (for all cpus). */
tcg_gen_xori_i64(cc, cc, 1);
tcg_gen_and_i64(cc, cc, o->in2);
tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
- tcg_temp_free_i64(cc);
gen_helper_purge(cpu_env);
gen_set_label(lab);
@@ -2259,9 +2199,7 @@
TCGv_i32 t2 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t2, o->in1);
gen_helper_cvd(t1, t2);
- tcg_temp_free_i32(t2);
tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -2363,7 +2301,6 @@
gen_helper_divs64(t, cpu_env, o->in1, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
- tcg_temp_free_i128(t);
return DISAS_NEXT;
}
@@ -2373,7 +2310,6 @@
gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
- tcg_temp_free_i128(t);
return DISAS_NEXT;
}
@@ -2428,8 +2364,6 @@
if (r2 != 0) {
store_reg32_i64(r2, psw_mask);
}
-
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -2569,7 +2503,6 @@
tcg_gen_movi_i64(tmp, ccm);
gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -2592,8 +2525,6 @@
tcg_gen_extu_i32_i64(t2, cc_op);
tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NEXT;
}
@@ -2925,21 +2856,17 @@
if (c.is_64) {
tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
o->in2, o->in1);
- free_compare(&c);
} else {
TCGv_i32 t32 = tcg_temp_new_i32();
TCGv_i64 t, z;
tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
- free_compare(&c);
t = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t, t32);
- tcg_temp_free_i32(t32);
z = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
- tcg_temp_free_i64(t);
}
return DISAS_NEXT;
@@ -2996,8 +2923,6 @@
/* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
tcg_gen_shli_i64(t1, t1, 32);
gen_helper_load_psw(cpu_env, t1, t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NORETURN;
}
@@ -3014,8 +2939,6 @@
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
gen_helper_load_psw(cpu_env, t1, t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NORETURN;
}
#endif
@@ -3040,7 +2963,6 @@
if (unlikely(r1 == r3)) {
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32_i64(r1, t1);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3055,8 +2977,6 @@
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t2);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3069,9 +2989,6 @@
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32_i64(r1, t1);
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3086,7 +3003,6 @@
if (unlikely(r1 == r3)) {
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32h_i64(r1, t1);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3101,8 +3017,6 @@
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t2);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3115,9 +3029,6 @@
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32h_i64(r1, t1);
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3141,11 +3052,9 @@
tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
tcg_gen_mov_i64(regs[r1], t1);
- tcg_temp_free(t2);
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3157,8 +3066,6 @@
tcg_gen_add_i64(o->in2, o->in2, t1);
tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
}
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3180,8 +3087,6 @@
a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
- tcg_temp_free_i64(a1);
- tcg_temp_free_i64(a2);
/* ... and indicate that we performed them while interlocked. */
gen_op_movi_cc(s, 0);
@@ -3253,9 +3158,7 @@
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
o->out = o->in2;
- o->g_out = o->g_in2;
o->in2 = NULL;
- o->g_in2 = false;
return DISAS_NEXT;
}
@@ -3265,9 +3168,7 @@
TCGv ar1 = tcg_temp_new_i64();
o->out = o->in2;
- o->g_out = o->g_in2;
o->in2 = NULL;
- o->g_in2 = false;
switch (s->base.tb->flags & FLAG_MASK_ASC) {
case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
@@ -3289,8 +3190,6 @@
}
tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
- tcg_temp_free_i64(ar1);
-
return DISAS_NEXT;
}
@@ -3298,11 +3197,8 @@
{
o->out = o->in1;
o->out2 = o->in2;
- o->g_out = o->g_in1;
- o->g_out2 = o->g_in2;
o->in1 = NULL;
o->in2 = NULL;
- o->g_in1 = o->g_in2 = false;
return DISAS_NEXT;
}
@@ -3509,7 +3405,6 @@
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3517,7 +3412,6 @@
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3525,7 +3419,6 @@
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3533,7 +3426,6 @@
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3544,7 +3436,6 @@
tcg_gen_neg_i64(n, o->in2);
tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
- tcg_temp_free_i64(n);
return DISAS_NEXT;
}
@@ -3621,10 +3512,10 @@
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_or_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_or_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -3809,7 +3700,6 @@
/* If this is a test-only form, arrange to discard the result. */
if (i3 & 0x80) {
o->out = tcg_temp_new_i64();
- o->g_out = false;
}
i3 &= 63;
@@ -3879,9 +3769,6 @@
tcg_gen_extrl_i64_i32(t2, o->in2);
tcg_gen_rotl_i32(to, t1, t2);
tcg_gen_extu_i32_i64(o->out, to);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(to);
return DISAS_NEXT;
}
@@ -4022,7 +3909,6 @@
} else {
tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
}
- free_compare(&c);
r1 = get_field(s, r1);
a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
@@ -4037,12 +3923,10 @@
h = tcg_temp_new_i64();
tcg_gen_shri_i64(h, regs[r1], 32);
tcg_gen_qemu_st32(h, a, get_mem_index(s));
- tcg_temp_free_i64(h);
break;
default:
g_assert_not_reached();
}
- tcg_temp_free_i64(a);
gen_set_label(lab);
return DISAS_NEXT;
@@ -4059,9 +3943,6 @@
t = o->in1;
}
gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
- if (s->insn->data == 31) {
- tcg_temp_free_i64(t);
- }
tcg_gen_shl_i64(o->out, o->in1, o->in2);
/* The arithmetic left shift is curious in that it does not affect
the sign bit. Copy that over from the source unchanged. */
@@ -4128,8 +4009,6 @@
tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
-
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4170,8 +4049,6 @@
/* store second operand in GR1 */
tcg_gen_mov_i64(regs[1], o->in2);
-
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4231,9 +4108,6 @@
tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
- tcg_temp_free_i64(c1);
- tcg_temp_free_i64(c2);
- tcg_temp_free_i64(todpr);
/* ??? We don't implement clock states. */
gen_op_movi_cc(s, 0);
return DISAS_NEXT;
@@ -4447,7 +4321,6 @@
t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, psw_mask, 56);
tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
- tcg_temp_free_i64(t);
if (s->fields.op == 0xac) {
tcg_gen_andi_i64(psw_mask, psw_mask,
@@ -4558,7 +4431,6 @@
}
break;
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4602,8 +4474,6 @@
tcg_gen_add_i64(o->in2, o->in2, t4);
r1 = (r1 + 1) & 15;
}
-
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -4790,7 +4660,6 @@
gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, pair);
- tcg_temp_free_i128(pair);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4836,7 +4705,6 @@
}
gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
- tcg_temp_free_i32(tst);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4955,10 +4823,10 @@
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_xor_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_xor_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -4989,15 +4857,14 @@
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
- o->out = tcg_const_i64(0);
+ o->out = tcg_constant_i64(0);
return DISAS_NEXT;
}
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
- o->out = tcg_const_i64(0);
+ o->out = tcg_constant_i64(0);
o->out2 = o->out;
- o->g_out2 = true;
return DISAS_NEXT;
}
@@ -5265,7 +5132,6 @@
static void prep_r1(DisasContext *s, DisasOps *o)
{
o->out = regs[get_field(s, r1)];
- o->g_out = true;
}
#define SPEC_prep_r1 0
@@ -5274,7 +5140,6 @@
int r1 = get_field(s, r1);
o->out = regs[r1];
o->out2 = regs[r1 + 1];
- o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even
@@ -5343,7 +5208,6 @@
store_reg32_i64(r1 + 1, o->out);
tcg_gen_shri_i64(t, o->out, 32);
store_reg32_i64(r1, t);
- tcg_temp_free_i64(t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
@@ -5499,7 +5363,6 @@
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r1)];
- o->g_in1 = true;
}
#define SPEC_in1_r1_o 0
@@ -5533,7 +5396,6 @@
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r1) + 1];
- o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even
@@ -5588,7 +5450,6 @@
static void in1_r3_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r3)];
- o->g_in1 = true;
}
#define SPEC_in1_r3_o 0
@@ -5719,7 +5580,6 @@
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
o->in2 = regs[get_field(s, r1)];
- o->g_in2 = true;
}
#define SPEC_in2_r1_o 0
@@ -5754,7 +5614,6 @@
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
o->in2 = regs[get_field(s, r2)];
- o->g_in2 = true;
}
#define SPEC_in2_r2_o 0
@@ -5903,7 +5762,7 @@
int d2 = get_field(s, d2);
if (b2 == 0) {
- o->in2 = tcg_const_i64(d2 & 0x3f);
+ o->in2 = tcg_constant_i64(d2 & 0x3f);
} else {
o->in2 = get_address(s, 0, b2, d2);
tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
@@ -6016,46 +5875,46 @@
static void in2_i2(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64(get_field(s, i2));
+ o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
uint64_t i2 = (uint16_t)get_field(s, i2);
- o->in2 = tcg_const_i64(i2 << s->insn->data);
+ o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
uint64_t i2 = (uint32_t)get_field(s, i2);
- o->in2 = tcg_const_i64(i2 << s->insn->data);
+ o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64(s->fields.raw_insn);
+ o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
@@ -6481,31 +6340,6 @@
}
}
- /* Free any temporaries created by the helpers. */
- if (o.out && !o.g_out) {
- tcg_temp_free_i64(o.out);
- }
- if (o.out2 && !o.g_out2) {
- tcg_temp_free_i64(o.out2);
- }
- if (o.in1 && !o.g_in1) {
- tcg_temp_free_i64(o.in1);
- }
- if (o.in2 && !o.g_in2) {
- tcg_temp_free_i64(o.in2);
- }
- if (o.addr1) {
- tcg_temp_free_i64(o.addr1);
- }
- if (o.out_128) {
- tcg_temp_free_i128(o.out_128);
- }
- if (o.in1_128) {
- tcg_temp_free_i128(o.in1_128);
- }
- if (o.in2_128) {
- tcg_temp_free_i128(o.in2_128);
- }
/* io should be the last instruction in tb when icount is enabled */
if (unlikely(icount && ret == DISAS_NEXT)) {
ret = DISAS_TOO_MANY;
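
The s390x translate.c hunks above reduce to two rules. First, tcg_const_i64() handed back a mutable temporary that the caller had to free, so handlers tracked ownership with the g_in*/g_out* flags; tcg_constant_i64() returns an interned, read-only value that is never freed, which is why those flags and the assert(!o->g_in2) checks disappear. Second, temporaries from tcg_temp_new_i64() now live until the end of the translation block and are reclaimed by the translator itself, so the explicit tcg_temp_free_*() calls, including the large cleanup block removed from the dispatcher above, can simply be dropped. A minimal sketch of the resulting handler shape, mirroring the first hunk in this file (the handler name is invented; this assumes the usual s390x translator context and is not code taken from the patch):

    static DisasJumpType op_or_under_mask(DisasContext *s, DisasOps *o)
    {
        int shift = s->insn->data & 0xff;
        int size = s->insn->data >> 8;
        uint64_t mask = ((1ull << size) - 1) << shift;
        TCGv_i64 t = tcg_temp_new_i64();    /* reclaimed at end of TB */

        /* Shift into a scratch temp instead of clobbering o->in2, which
         * may alias a guest register or a read-only constant. */
        tcg_gen_shli_i64(t, o->in2, shift);
        tcg_gen_or_i64(o->out, o->in1, t);

        /* Produce the CC from only the bits manipulated. */
        tcg_gen_andi_i64(cc_dst, o->out, mask);
        return DISAS_NEXT;
    }
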
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index 3fadc82..43dfbfd 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -183,8 +183,6 @@
/* generate the final ptr by adding cpu_env */
tcg_gen_trunc_i64_ptr(ptr, tmp);
tcg_gen_add_ptr(ptr, ptr, cpu_env);
-
- tcg_temp_free_i64(tmp);
}
#define gen_gvec_2(v1, v2, gen) \
@@ -272,13 +270,6 @@
fn(dl, dh, al, ah, bl, bh);
write_vec_element_i64(dh, d, 0, ES_64);
write_vec_element_i64(dl, d, 1, ES_64);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(dl);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(al);
- tcg_temp_free_i64(bh);
- tcg_temp_free_i64(bl);
}
typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
@@ -305,15 +296,6 @@
fn(dl, dh, al, ah, bl, bh, cl, ch);
write_vec_element_i64(dh, d, 0, ES_64);
write_vec_element_i64(dl, d, 1, ES_64);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(dl);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(al);
- tcg_temp_free_i64(bh);
- tcg_temp_free_i64(bl);
- tcg_temp_free_i64(ch);
- tcg_temp_free_i64(cl);
}
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
@@ -351,7 +333,6 @@
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -386,7 +367,6 @@
write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
tcg_gen_movi_i64(t, generate_byte_mask(i2));
write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(t);
}
return DISAS_NEXT;
}
@@ -427,8 +407,6 @@
tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -451,7 +429,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
gen_gvec_dup_i64(es, get_field(s, v1), tmp);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -469,7 +446,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -486,7 +462,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
gen_gvec_dup_i64(es, get_field(s, v1), tmp);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -518,7 +493,6 @@
write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
write_vec_element_i64(tcg_constant_i64(0), get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -572,9 +546,6 @@
write:
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -592,7 +563,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -647,8 +617,6 @@
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -688,8 +656,6 @@
default:
g_assert_not_reached();
}
- tcg_temp_free_ptr(ptr);
-
return DISAS_NEXT;
}
@@ -730,7 +696,6 @@
tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
gen_gvec_dup_imm(es, get_field(s, v1), 0);
write_vec_element_i64(t, get_field(s, v1), enr, es);
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -768,9 +733,6 @@
/* Store the last element, loaded first */
write_vec_element_i64(t0, v1, 1, ES_64);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -794,8 +756,6 @@
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vll(cpu_env, a0, o->addr1, bytes);
- tcg_temp_free_i64(bytes);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -835,8 +795,6 @@
default:
g_assert_not_reached();
}
- tcg_temp_free_ptr(ptr);
-
return DISAS_NEXT;
}
@@ -856,7 +814,6 @@
tcg_gen_addi_i64(o->in2, o->in2, 1);
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -898,7 +855,6 @@
write_vec_element_i64(tmp, v1, dst_idx, es);
}
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -974,7 +930,6 @@
}
write_vec_element_i64(tmp, v1, dst_idx, dst_es);
}
- tcg_temp_free_i64(tmp);
} else {
gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
}
@@ -1004,8 +959,6 @@
read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -1057,7 +1010,6 @@
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1098,7 +1050,6 @@
write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1116,7 +1067,6 @@
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1134,7 +1084,6 @@
tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1189,9 +1138,6 @@
tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -1209,7 +1155,6 @@
tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1251,9 +1196,6 @@
tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -1284,7 +1226,6 @@
}
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1297,7 +1238,6 @@
tcg_gen_addi_i64(o->in2, o->in2, 1);
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -1335,7 +1275,6 @@
write_vec_element_i64(tmp, v1, dst_idx, dst_es);
}
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1377,10 +1316,6 @@
/* Isolate and shift the carry into position */
tcg_gen_and_i64(d, d, msb_mask);
tcg_gen_shri_i64(d, d, msb_bit_nr);
-
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
}
static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
@@ -1399,7 +1334,6 @@
tcg_gen_add_i32(t, a, b);
tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
- tcg_temp_free_i32(t);
}
static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
@@ -1408,7 +1342,6 @@
tcg_gen_add_i64(t, a, b);
tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
- tcg_temp_free_i64(t);
}
static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
@@ -1422,9 +1355,6 @@
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(th);
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
@@ -1460,8 +1390,6 @@
tcg_gen_extract_i64(tl, cl, 0, 1);
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
tcg_gen_add2_i64(dl, dh, dl, dh, tl, zero);
-
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
@@ -1490,9 +1418,6 @@
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
@@ -1533,9 +1458,6 @@
tcg_gen_addi_i64(t0, t0, 1);
tcg_gen_shri_i64(t0, t0, 1);
tcg_gen_extrl_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
@@ -1550,10 +1472,6 @@
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(bh);
}
static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
@@ -1586,9 +1504,6 @@
tcg_gen_addi_i64(t0, t0, 1);
tcg_gen_shri_i64(t0, t0, 1);
tcg_gen_extrl_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
@@ -1599,8 +1514,6 @@
tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
-
- tcg_temp_free_i64(dh);
}
static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
@@ -1635,9 +1548,6 @@
}
gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
-
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(sum);
return DISAS_NEXT;
}
@@ -1682,9 +1592,6 @@
read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
gen_op_update2_cc_i64(s, CC_OP_VC, low, high);
-
- tcg_temp_free_i64(low);
- tcg_temp_free_i64(high);
}
return DISAS_NEXT;
}
@@ -1853,8 +1760,6 @@
tcg_gen_mul_i32(t0, a, b);
tcg_gen_add_i32(d, t0, c);
-
- tcg_temp_free_i32(t0);
}
static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
@@ -1869,10 +1774,6 @@
tcg_gen_mul_i64(t0, t0, t1);
tcg_gen_add_i64(t0, t0, t2);
tcg_gen_extrh_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
@@ -1887,10 +1788,6 @@
tcg_gen_mul_i64(t0, t0, t1);
tcg_gen_add_i64(t0, t0, t2);
tcg_gen_extrh_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
@@ -1974,7 +1871,6 @@
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_muls2_i32(t, d, a, b);
- tcg_temp_free_i32(t);
}
static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
@@ -1982,7 +1878,6 @@
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_mulu2_i32(t, d, a, b);
- tcg_temp_free_i32(t);
}
static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
@@ -2099,11 +1994,6 @@
/* Store final result into v1. */
write_vec_element_i64(h1, get_field(s, v1), 0, ES_64);
write_vec_element_i64(l1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free_i64(l1);
- tcg_temp_free_i64(h1);
- tcg_temp_free_i64(l2);
- tcg_temp_free_i64(h2);
return DISAS_NEXT;
}
@@ -2169,8 +2059,6 @@
tcg_gen_and_i32(t, t, b);
tcg_gen_andc_i32(d, d, b);
tcg_gen_or_i32(d, d, t);
-
- tcg_temp_free_i32(t);
}
static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
@@ -2181,8 +2069,6 @@
tcg_gen_and_i64(t, t, b);
tcg_gen_andc_i64(d, d, b);
tcg_gen_or_i64(d, d, t);
-
- tcg_temp_free_i64(t);
}
static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
@@ -2291,7 +2177,6 @@
default:
g_assert_not_reached();
}
- tcg_temp_free_i32(shift);
}
return DISAS_NEXT;
}
@@ -2311,7 +2196,6 @@
read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
tcg_gen_andi_i64(shift, shift, byte ? 0x78 : 7);
gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen);
- tcg_temp_free_i64(shift);
}
return DISAS_NEXT;
}
@@ -2367,10 +2251,6 @@
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
return DISAS_NEXT;
}
@@ -2397,10 +2277,6 @@
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
return DISAS_NEXT;
}
@@ -2445,9 +2321,6 @@
/* "invert" the result: -1 -> 0; 0 -> 1 */
tcg_gen_addi_i64(dl, th, 1);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(th);
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
@@ -2482,8 +2355,6 @@
tcg_gen_not_i64(tl, bl);
tcg_gen_not_i64(th, bh);
gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
@@ -2508,9 +2379,6 @@
tcg_gen_not_i64(tl, bl);
tcg_gen_not_i64(th, bh);
gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);
-
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
@@ -2550,8 +2418,6 @@
}
write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
}
- tcg_temp_free_i64(sum);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -2580,10 +2446,6 @@
}
write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free_i64(sumh);
- tcg_temp_free_i64(suml);
- tcg_temp_free_i64(tmpl);
return DISAS_NEXT;
}
@@ -2611,8 +2473,6 @@
}
write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
}
- tcg_temp_free_i32(sum);
- tcg_temp_free_i32(tmp);
return DISAS_NEXT;
}
@@ -3399,9 +3259,6 @@
read_vec_element_i64(tmp, v2, 1, ES_64);
write_vec_element_i64(tmp, v1, 1, ES_64);
}
-
- tcg_temp_free_i64(tmp);
-
return DISAS_NEXT;
}
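
The translate_vx.c.inc conversions are even more uniform: each 128-bit wrapper splits the vector operands into high/low i64 temporaries, calls a callback, writes the result back, and used to end with a block of six to eight tcg_temp_free_i64() calls. With block-lifetime temporaries that trailer goes away and nothing replaces it. A sketch of the surviving shape, reusing the file's read_vec_element_i64()/write_vec_element_i64() helpers and ES_64 as seen in the hunks above (the wrapper name and callback typedef below are stand-ins, not the real ones):

    typedef void (*gen_pair3_fn)(TCGv_i64 dl, TCGv_i64 dh,
                                 TCGv_i64 al, TCGv_i64 ah,
                                 TCGv_i64 bl, TCGv_i64 bh);

    static void gen_gvec128_pair3(uint8_t d, uint8_t a, uint8_t b,
                                  gen_pair3_fn fn)
    {
        TCGv_i64 dh = tcg_temp_new_i64();
        TCGv_i64 dl = tcg_temp_new_i64();
        TCGv_i64 ah = tcg_temp_new_i64();
        TCGv_i64 al = tcg_temp_new_i64();
        TCGv_i64 bh = tcg_temp_new_i64();
        TCGv_i64 bl = tcg_temp_new_i64();

        read_vec_element_i64(ah, a, 0, ES_64);
        read_vec_element_i64(al, a, 1, ES_64);
        read_vec_element_i64(bh, b, 0, ES_64);
        read_vec_element_i64(bl, b, 1, ES_64);
        fn(dl, dh, al, ah, bl, bh);
        write_vec_element_i64(dh, d, 0, ES_64);
        write_vec_element_i64(dl, d, 1, ES_64);
        /* No tcg_temp_free_i64() trailer: the translator reclaims these. */
    }
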
diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h
index 98a0250..a7cdb7e 100644
--- a/target/sh4/cpu-param.h
+++ b/target/sh4/cpu-param.h
@@ -16,6 +16,5 @@
#else
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-#define NB_MMU_MODES 2
#endif
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index ad6de41..97da8bc 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -526,13 +526,13 @@
return;
case 0x9000: /* mov.w @(disp,PC),Rn */
{
- TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
+ TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
}
return;
case 0xd000: /* mov.l @(disp,PC),Rn */
{
- TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
+ TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
}
return;
@@ -694,7 +694,7 @@
case 0x300e: /* addc Rm,Rn */
{
TCGv t0, t1;
- t0 = tcg_const_tl(0);
+ t0 = tcg_constant_tl(0);
t1 = tcg_temp_new();
tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
@@ -754,7 +754,7 @@
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
/* shift left arg1, saving the bit being pushed out and inserting
T on the right */
@@ -849,7 +849,7 @@
return;
case 0x600a: /* negc Rm,Rn */
{
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
REG(B7_4), t0, cpu_sr_t, t0);
tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
@@ -913,7 +913,7 @@
case 0x300a: /* subc Rm,Rn */
{
TCGv t0, t1;
- t0 = tcg_const_tl(0);
+ t0 = tcg_constant_tl(0);
t1 = tcg_temp_new();
tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
@@ -1242,7 +1242,7 @@
TCGv imm;
CHECK_NOT_DELAY_SLOT
gen_save_cpu_state(ctx, true);
- imm = tcg_const_i32(B7_0);
+ imm = tcg_constant_i32(B7_0);
gen_helper_trapa(cpu_env, imm);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -1610,12 +1610,9 @@
tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
return;
case 0x401b: /* tas.b @Rn */
- {
- TCGv val = tcg_const_i32(0x80);
- tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
- ctx->memidx, MO_UB);
- tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
- }
+ tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
+ tcg_constant_i32(0x80), ctx->memidx, MO_UB);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
return;
case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
CHECK_FPU_ENABLED
@@ -1712,8 +1709,8 @@
CHECK_FPU_ENABLED
CHECK_FPSCR_PR_1
{
- TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
- TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
+ TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
gen_helper_fipr(cpu_env, m, n);
return;
}
@@ -1725,7 +1722,7 @@
if ((ctx->opcode & 0x0300) != 0x0100) {
goto do_illegal;
}
- TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
gen_helper_ftrv(cpu_env, n);
return;
}
@@ -1929,7 +1926,7 @@
}
op_dst = B11_8;
op_opc = INDEX_op_xor_i32;
- op_arg = tcg_const_i32(-1);
+ op_arg = tcg_constant_i32(-1);
break;
case 0x7000 ... 0x700f: /* add #imm,Rn */
@@ -1937,7 +1934,7 @@
goto fail;
}
op_opc = INDEX_op_add_i32;
- op_arg = tcg_const_i32(B7_0s);
+ op_arg = tcg_constant_i32(B7_0s);
break;
case 0x3000: /* cmp/eq Rm,Rn */
@@ -1983,7 +1980,7 @@
goto fail;
}
op_opc = INDEX_op_setcond_i32;
- op_arg = tcg_const_i32(0);
+ op_arg = tcg_constant_i32(0);
NEXT_INSN;
if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
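
One consequence of tcg_constant_*() values being read-only shows up in the sh4 tas.b hunk above: the old code used a tcg_const_i32(0x80) temporary both as the OR operand and as the destination of the atomic fetch, which is no longer possible, so the fetched byte now lands directly in cpu_sr_t. Spelling out the data flow of the rewritten sequence (the same two lines as in the hunk, with comments added here for clarity):

    /* tas.b @Rn: atomically OR 0x80 into the byte at Rn; T is set iff the
     * value read back was zero.  The constant is a source operand only. */
    tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
                                tcg_constant_i32(0x80), ctx->memidx, MO_UB);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
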
diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h
index 72ddc4a..cb11980 100644
--- a/target/sparc/cpu-param.h
+++ b/target/sparc/cpu-param.h
@@ -16,13 +16,11 @@
# else
# define TARGET_VIRT_ADDR_SPACE_BITS 44
# endif
-# define NB_MMU_MODES 6
#else
# define TARGET_LONG_BITS 32
# define TARGET_PAGE_BITS 12 /* 4k */
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
-# define NB_MMU_MODES 3
#endif
#endif
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 5ee2933..137bdc5 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -2838,7 +2838,7 @@
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
int width, bool cc, bool left)
{
- TCGv lo1, lo2, t1, t2;
+ TCGv lo1, lo2;
uint64_t amask, tabl, tabr;
int shift, imask, omask;
@@ -2905,10 +2905,8 @@
tcg_gen_shli_tl(lo1, lo1, shift);
tcg_gen_shli_tl(lo2, lo2, shift);
- t1 = tcg_const_tl(tabl);
- t2 = tcg_const_tl(tabr);
- tcg_gen_shr_tl(lo1, t1, lo1);
- tcg_gen_shr_tl(lo2, t2, lo2);
+ tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
+ tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
tcg_gen_andi_tl(dst, lo1, omask);
tcg_gen_andi_tl(lo2, lo2, omask);
@@ -2927,9 +2925,9 @@
lo2 |= -(s1 == s2)
dst &= lo2
*/
- tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
- tcg_gen_neg_tl(t1, t1);
- tcg_gen_or_tl(lo2, lo2, t1);
+ tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2);
+ tcg_gen_neg_tl(lo1, lo1);
+ tcg_gen_or_tl(lo2, lo2, lo1);
tcg_gen_and_tl(dst, dst, lo2);
}
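
The sparc gen_edge() change shows the other side of the constant conversion: a tcg_constant_tl() needs no allocation or free, so it can be fed straight in as a source operand, and the two dedicated temporaries t1/t2 disappear; the later setcond/neg scratch simply reuses lo1, whose previous value is dead by that point. The two shifts, annotated (the same operations as in the hunk above):

    /* Constants are ordinary source operands; no temp, no free. */
    tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);   /* lo1 = tabl >> lo1 */
    tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);   /* lo2 = tabr >> lo2 */
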
diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h
index 2727913..e29d551 100644
--- a/target/tricore/cpu-param.h
+++ b/target/tricore/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 14
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 3
#endif
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 127f9a9..2646cb3 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -124,7 +124,7 @@
/* Makros for generating helpers */
#define gen_helper_1arg(name, arg) do { \
- TCGv_i32 helper_tmp = tcg_const_i32(arg); \
+ TCGv_i32 helper_tmp = tcg_constant_i32(arg); \
gen_helper_##name(cpu_env, helper_tmp); \
} while (0)
@@ -513,7 +513,7 @@
static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_madd32_d(ret, r1, r2, temp);
}
@@ -579,7 +579,7 @@
gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -587,7 +587,7 @@
gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -595,21 +595,22 @@
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -621,21 +622,22 @@
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -647,22 +649,22 @@
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
@@ -682,23 +684,24 @@
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -718,23 +721,24 @@
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -752,22 +756,22 @@
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
@@ -785,22 +789,22 @@
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -813,21 +817,21 @@
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -839,20 +843,20 @@
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);
@@ -872,21 +876,22 @@
static inline void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -899,20 +904,20 @@
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
@@ -932,21 +937,22 @@
static inline void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -957,15 +963,15 @@
static inline void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
- gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp);
+ TCGv t_n = tcg_constant_i32(n);
+ gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, t_n);
}
static inline void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
- gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+ TCGv t_n = tcg_constant_i32(n);
+ gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, t_n);
}
static inline void
@@ -1176,10 +1182,10 @@
TCGv arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
- gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
@@ -1218,7 +1224,7 @@
static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msub32_d(ret, r1, r2, temp);
}
@@ -1254,7 +1260,7 @@
gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -1290,13 +1296,13 @@
gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
- TCGv temp = tcg_const_i32(r2);
+ TCGv temp = tcg_constant_i32(r2);
gen_add_d(ret, r1, temp);
}
@@ -1326,7 +1332,7 @@
static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_add_CC(ret, r1, temp);
}
@@ -1358,7 +1364,7 @@
static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_addc_CC(ret, r1, temp);
}
@@ -1369,7 +1375,7 @@
TCGv temp2 = tcg_temp_new();
TCGv result = tcg_temp_new();
TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
tcg_gen_setcond_tl(cond, mask, r4, t0);
@@ -1398,7 +1404,7 @@
static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
TCGv r3, TCGv r4)
{
- TCGv temp = tcg_const_i32(r2);
+ TCGv temp = tcg_constant_i32(r2);
gen_cond_add(cond, r1, temp, r3, r4);
}
@@ -1486,7 +1492,7 @@
TCGv temp2 = tcg_temp_new();
TCGv result = tcg_temp_new();
TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
tcg_gen_setcond_tl(cond, mask, r4, t0);
@@ -1516,21 +1522,22 @@
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -1542,23 +1549,24 @@
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -1576,22 +1584,22 @@
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -1604,21 +1612,21 @@
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -1630,20 +1638,20 @@
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
@@ -1664,20 +1672,20 @@
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
@@ -1697,14 +1705,14 @@
static inline void
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv temp = tcg_constant_i32(n);
gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
}
static inline void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv temp = tcg_constant_i32(n);
gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
}
@@ -1912,10 +1920,10 @@
TCGv arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
- gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
@@ -1923,21 +1931,22 @@
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -1949,22 +1958,22 @@
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
@@ -1981,21 +1990,22 @@
static inline void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -2007,23 +2017,24 @@
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -2041,22 +2052,22 @@
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
@@ -2072,21 +2083,22 @@
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -2137,13 +2149,13 @@
static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_absdif(ret, r1, temp);
}
static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
}
@@ -2169,7 +2181,7 @@
static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_mul_i32s(ret, r1, temp);
}
@@ -2190,7 +2202,7 @@
static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_mul_i64s(ret_low, ret_high, r1, temp);
}
@@ -2211,31 +2223,32 @@
static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_mul_i64u(ret_low, ret_high, r1, temp);
}
static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_mul_ssov(ret, cpu_env, r1, temp);
}
static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_mul_suov(ret, cpu_env, r1, temp);
}
+
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
}
static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
}
@@ -2358,7 +2371,7 @@
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -2376,19 +2389,19 @@
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
}
static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
}
@@ -2406,7 +2419,7 @@
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -2424,27 +2437,19 @@
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
- TCGv sat_neg = tcg_const_i32(low);
- TCGv temp = tcg_const_i32(up);
-
- /* sat_neg = (arg < low ) ? low : arg; */
- tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
-
- /* ret = (sat_neg > up ) ? up : sat_neg; */
- tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
+ tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low));
+ tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up));
}
static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
- TCGv temp = tcg_const_i32(up);
- /* sat_neg = (arg > up ) ? up : arg; */
- tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
+ tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up));
}
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
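
The gen_saturate()/gen_saturate_u() rewrite in the hunk above is the one non-mechanical change in this file: clamping to [low, up] was previously expressed as a pair of movcond operations on freshly allocated constant temps and is now a signed max followed by a signed min (a single unsigned min in the unsigned case), with the bounds taken from the constant pool. A host-side C model of what the generated code computes, for reference (illustrative only, not part of the patch):

    #include <stdint.h>

    /* Signed saturation: smax(arg, low) then smin(result, up). */
    static inline int32_t saturate_s32(int32_t arg, int32_t low, int32_t up)
    {
        int32_t t = arg < low ? low : arg;   /* tcg_gen_smax_tl */
        return t > up ? up : t;              /* tcg_gen_smin_tl */
    }

    /* Unsigned saturation: umin(arg, up). */
    static inline uint32_t saturate_u32(uint32_t arg, uint32_t up)
    {
        return arg > up ? up : arg;          /* tcg_gen_umin_tl */
    }
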
@@ -2495,8 +2500,8 @@
/* clear PSW.V */
tcg_gen_movi_tl(cpu_PSW_V, 0);
} else if (shift_count > 0) {
- TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
- TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
+ TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
+ TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);
/* calc carry */
msk_start = 32 - shift_count;
@@ -2534,7 +2539,7 @@
static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_shas(ret, r1, temp);
}
@@ -2576,7 +2581,7 @@
static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_sh_cond(cond, ret, r1, temp);
}
@@ -2587,13 +2592,13 @@
static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_add_ssov(ret, cpu_env, r1, temp);
}
static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_add_suov(ret, cpu_env, r1, temp);
}
@@ -2663,7 +2668,7 @@
gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
void(*op)(TCGv, TCGv, TCGv))
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_accumulating_cond(cond, ret, r1, temp, op);
}
@@ -2830,8 +2835,8 @@
static void generate_trap(DisasContext *ctx, int class, int tin)
{
- TCGv_i32 classtemp = tcg_const_i32(class);
- TCGv_i32 tintemp = tcg_const_i32(tin);
+ TCGv_i32 classtemp = tcg_constant_i32(class);
+ TCGv_i32 tintemp = tcg_constant_i32(tin);
gen_save_pc(ctx->base.pc_next);
gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp);
@@ -2853,7 +2858,7 @@
static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
int r2, int16_t address)
{
- TCGv temp = tcg_const_i32(r2);
+ TCGv temp = tcg_constant_i32(r2);
gen_branch_cond(ctx, cond, r1, temp, address);
}
@@ -3182,14 +3187,14 @@
cpu_gpr_d[15]);
break;
case OPC1_16_SRC_CMOV:
- temp = tcg_const_tl(0);
- temp2 = tcg_const_tl(const4);
+ temp = tcg_constant_tl(0);
+ temp2 = tcg_constant_tl(const4);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
case OPC1_16_SRC_CMOVN:
- temp = tcg_const_tl(0);
- temp2 = tcg_const_tl(const4);
+ temp = tcg_constant_tl(0);
+ temp2 = tcg_constant_tl(const4);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
@@ -3255,12 +3260,12 @@
tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_CMOV:
- temp = tcg_const_tl(0);
+ temp = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
case OPC1_16_SRR_CMOVN:
- temp = tcg_const_tl(0);
+ temp = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
@@ -3502,17 +3507,14 @@
{
uint32_t op2;
uint32_t r1;
- TCGv temp;
r1 = MASK_OP_SR_S1D(ctx->opcode);
op2 = MASK_OP_SR_OP2(ctx->opcode);
switch (op2) {
case OPC2_16_SR_RSUB:
- /* overflow only if r1 = -0x80000000 */
- temp = tcg_const_i32(-0x80000000);
- /* calc V bit */
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
+ /* calc V bit -- overflow only if r1 = -0x80000000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -3788,7 +3790,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_LD_A:
@@ -3821,7 +3823,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_LD_B:
@@ -3852,7 +3854,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_LDMST:
@@ -3903,7 +3905,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_ST_A:
@@ -3936,7 +3938,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_ST_B:
@@ -4368,7 +4370,7 @@
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp, temp2, temp3;
+ TCGv temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4377,7 +4379,7 @@
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- temp3 = tcg_const_i32(off10);
+ t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
@@ -4391,7 +4393,7 @@
case OPC2_32_BO_CACHEA_WI_CIRC:
case OPC2_32_BO_CACHEA_W_CIRC:
case OPC2_32_BO_CACHEA_I_CIRC:
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_A_BR:
tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
@@ -4399,7 +4401,7 @@
break;
case OPC2_32_BO_ST_A_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_B_BR:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
@@ -4407,7 +4409,7 @@
break;
case OPC2_32_BO_ST_B_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_D_BR:
CHECK_REG_PAIR(r1);
@@ -4422,7 +4424,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_DA_BR:
CHECK_REG_PAIR(r1);
@@ -4437,7 +4439,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_H_BR:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
@@ -4445,7 +4447,7 @@
break;
case OPC2_32_BO_ST_H_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_Q_BR:
tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
@@ -4455,7 +4457,7 @@
case OPC2_32_BO_ST_Q_CIRC:
tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_W_BR:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
@@ -4463,7 +4465,7 @@
break;
case OPC2_32_BO_ST_W_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
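
For the *_CIRC cases, the context lines already show the effective address being built as A[b] plus the index halfword of A[b+1]; gen_helper_circ_update then advances that index by the signed off10 and wraps it against the buffer length kept in the upper halfword. The helper body is not in this patch, so the following is only an illustrative plain-C sketch of that bookkeeping, with an invented name:

    #include <stdint.h>

    /*
     * A[b]   : buffer base address
     * A[b+1] : { length L in bits 31:16, index I in bits 15:0 }
     * EA = A[b] + I; afterwards I advances by the signed 10-bit offset,
     * wrapping modulo L. Sketch only; the in-tree helper may differ in detail.
     */
    static uint32_t circ_update_sketch(uint32_t a_hi, int32_t off10)
    {
        int32_t length = (int32_t)(a_hi >> 16);
        int32_t index = (int32_t)(a_hi & 0xffff) + off10;

        if (length > 0) {
            index %= length;
            if (index < 0) {
                index += length;
            }
        }
        return (a_hi & 0xffff0000u) | ((uint32_t)index & 0xffffu);
    }

Passing off10 as tcg_constant_i32() works because the helper only reads it.
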
@@ -4607,8 +4609,7 @@
uint32_t op2;
uint32_t off10;
int r1, r2;
-
- TCGv temp, temp2, temp3;
+ TCGv temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4617,7 +4618,7 @@
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- temp3 = tcg_const_i32(off10);
+ t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
@@ -4630,7 +4631,7 @@
break;
case OPC2_32_BO_LD_A_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_B_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
@@ -4638,7 +4639,7 @@
break;
case OPC2_32_BO_LD_B_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_BU_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
@@ -4646,7 +4647,7 @@
break;
case OPC2_32_BO_LD_BU_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_D_BR:
CHECK_REG_PAIR(r1);
@@ -4661,7 +4662,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_DA_BR:
CHECK_REG_PAIR(r1);
@@ -4676,7 +4677,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_H_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
@@ -4684,7 +4685,7 @@
break;
case OPC2_32_BO_LD_H_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_HU_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
@@ -4692,7 +4693,7 @@
break;
case OPC2_32_BO_LD_HU_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_Q_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
@@ -4702,7 +4703,7 @@
case OPC2_32_BO_LD_Q_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_W_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
@@ -4710,7 +4711,7 @@
break;
case OPC2_32_BO_LD_W_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4811,8 +4812,7 @@
uint32_t op2;
uint32_t off10;
int r1, r2;
-
- TCGv temp, temp2, temp3;
+ TCGv temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4821,7 +4821,7 @@
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- temp3 = tcg_const_i32(off10);
+ t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
@@ -4833,7 +4833,7 @@
break;
case OPC2_32_BO_LDMST_CIRC:
gen_ldmst(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_SWAP_W_BR:
gen_swap(ctx, r1, temp2);
@@ -4841,7 +4841,7 @@
break;
case OPC2_32_BO_SWAP_W_CIRC:
gen_swap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_CMPSWAP_W_BR:
gen_cmpswap(ctx, r1, temp2);
@@ -4849,7 +4849,7 @@
break;
case OPC2_32_BO_CMPSWAP_W_CIRC:
gen_cmpswap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_SWAPMSK_W_BR:
gen_swapmsk(ctx, r1, temp2);
@@ -4857,7 +4857,7 @@
break;
case OPC2_32_BO_SWAPMSK_W_CIRC:
gen_swapmsk(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5296,7 +5296,7 @@
case OPC2_32_RCPW_INSERT:
/* if pos + width > 32 undefined result */
if (pos + width <= 32) {
- temp = tcg_const_i32(const4);
+ temp = tcg_constant_i32(const4);
tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
}
break;
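
tcg_gen_deposit_tl(ret, base, val, pos, width) copies base into ret and replaces bits [pos, pos + width) with the low bits of val, which is why a read-only constant is enough for the inserted field and why the guard skips the operation when the field would spill past bit 31. A scalar equivalent, with a name invented for this sketch:

    #include <stdint.h>

    /* ret = base with bits [pos, pos + width) replaced by the low bits of val.
     * Callers keep pos + width <= 32, mirroring the guard above. */
    static uint32_t deposit32_sketch(uint32_t base, uint32_t val,
                                     unsigned pos, unsigned width)
    {
        uint32_t mask = (width == 32 ? ~0u : (1u << width) - 1) << pos;

        return (base & ~mask) | ((val << pos) & mask);
    }
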
@@ -5372,14 +5372,14 @@
cpu_gpr_d[r3]);
break;
case OPC2_32_RCR_SEL:
- temp = tcg_const_i32(0);
- temp2 = tcg_const_i32(const9);
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const9);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
case OPC2_32_RCR_SELN:
- temp = tcg_const_i32(0);
- temp2 = tcg_const_i32(const9);
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const9);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
@@ -6256,7 +6256,7 @@
r1 = MASK_OP_RR1_S1(ctx->opcode);
r2 = MASK_OP_RR1_S2(ctx->opcode);
r3 = MASK_OP_RR1_D(ctx->opcode);
- n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode));
+ n = tcg_constant_i32(MASK_OP_RR1_N(ctx->opcode));
op2 = MASK_OP_RR1_OP2(ctx->opcode);
switch (op2) {
@@ -6550,12 +6550,12 @@
cpu_gpr_d[r3]);
break;
case OPC2_32_RRR_SEL:
- temp = tcg_const_i32(0);
+ temp = tcg_constant_i32(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_SELN:
- temp = tcg_const_i32(0);
+ temp = tcg_constant_i32(0);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
@@ -6907,7 +6907,7 @@
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_const_i32(n);
+ temp = tcg_temp_new();
temp2 = tcg_temp_new();
switch (op2) {
@@ -7389,7 +7389,7 @@
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_const_i32(n);
+ temp = tcg_temp_new();
temp2 = tcg_temp_new();
switch (op2) {
@@ -7957,7 +7957,7 @@
case OPC1_32_ABS_STOREQ:
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
temp2 = tcg_temp_new();
tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
@@ -7966,7 +7966,7 @@
case OPC1_32_ABS_LD_Q:
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
@@ -7982,7 +7982,7 @@
b = MASK_OP_ABSB_B(ctx->opcode);
bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
temp2 = tcg_temp_new();
tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
@@ -8109,7 +8109,7 @@
r2 = MASK_OP_RCRR_S3(ctx->opcode);
r3 = MASK_OP_RCRR_D(ctx->opcode);
const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
- temp = tcg_const_i32(const16);
+ temp = tcg_constant_i32(const16);
temp2 = tcg_temp_new(); /* width */
temp3 = tcg_temp_new(); /* pos */
diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h
index b53e9a3..b1da055 100644
--- a/target/xtensa/cpu-param.h
+++ b/target/xtensa/cpu-param.h
@@ -16,6 +16,5 @@
#else
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-#define NB_MMU_MODES 4
#endif
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 4060a35..4444eb9 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -3651,6 +3651,7 @@
break;
case MO_64:
+ t1 = tcg_temp_new_vec(type);
if (imm <= 32) {
/*
* We can emulate a small sign extend by performing an arithmetic
@@ -3659,24 +3660,22 @@
* does not, so we have to bound the smaller shift -- we get the
* same result in the high half either way.
*/
- t1 = tcg_temp_new_vec(type);
tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31));
tcg_gen_shri_vec(MO_64, v0, v1, imm);
vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
tcgv_vec_arg(v0), tcgv_vec_arg(v0),
tcgv_vec_arg(t1), 0xaa);
- tcg_temp_free_vec(t1);
} else {
/* Otherwise we will need to use a compare vs 0 to produce
* the sign-extend, shift and merge.
*/
- t1 = tcg_const_zeros_vec(type);
- tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, t1, v1);
+ tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1,
+ tcg_constant_vec(type, MO_64, 0), v1);
tcg_gen_shri_vec(MO_64, v0, v1, imm);
tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm);
tcg_gen_or_vec(MO_64, v0, v0, t1);
- tcg_temp_free_vec(t1);
}
+ tcg_temp_free_vec(t1);
break;
default:
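
Hoisting t1 lets a single writable temp serve both branches of the MO_64 sari expansion, and the zero operand of the compare now comes from the constant pool instead of tcg_const_zeros_vec(). A scalar model of the imm <= 32 branch described in the comment, per 64-bit element, in plain C rather than vector code (sketch-only name):

    #include <stdint.h>

    /*
     * 64-bit arithmetic right shift built from a 32-bit arithmetic shift
     * (high halves) and a 64-bit logical shift (low halves), blended per
     * 32-bit lane, for 0 < imm <= 32. Assumes the host's signed >> is
     * arithmetic, as on the platforms QEMU supports.
     */
    static uint64_t sar64_via_32_sketch(uint64_t v, unsigned imm)
    {
        uint32_t hi = (uint32_t)(v >> 32);
        /* TCG forbids a 32-bit shift count of 32, so clamp to 31: the high
         * half of the final result is the same either way. */
        uint32_t hi_sar = (uint32_t)((int32_t)hi >> (imm < 31 ? imm : 31));
        uint64_t lo_shr = v >> imm;              /* logical shift, zero fill */

        return (lo_shr & 0xffffffffu) | ((uint64_t)hi_sar << 32);
    }
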
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 291a65c..047a832 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index 966d41d..aeeb243 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "tcg-internal.h"
@@ -228,32 +229,6 @@
}
}
-TCGv_vec tcg_const_zeros_vec(TCGType type)
-{
- TCGv_vec ret = tcg_temp_new_vec(type);
- tcg_gen_dupi_vec(MO_64, ret, 0);
- return ret;
-}
-
-TCGv_vec tcg_const_ones_vec(TCGType type)
-{
- TCGv_vec ret = tcg_temp_new_vec(type);
- tcg_gen_dupi_vec(MO_64, ret, -1);
- return ret;
-}
-
-TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m)
-{
- TCGTemp *t = tcgv_vec_temp(m);
- return tcg_const_zeros_vec(t->base_type);
-}
-
-TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
-{
- TCGTemp *t = tcgv_vec_temp(m);
- return tcg_const_ones_vec(t->base_type);
-}
-
void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
{
TCGTemp *rt = tcgv_vec_temp(r);
@@ -430,9 +405,7 @@
const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
- TCGv_vec t = tcg_const_ones_vec_matching(r);
- tcg_gen_xor_vec(0, r, a, t);
- tcg_temp_free_vec(t);
+ tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1));
}
tcg_swap_vecop_list(hold_list);
}
@@ -445,9 +418,7 @@
hold_list = tcg_swap_vecop_list(NULL);
if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) {
- TCGv_vec t = tcg_const_zeros_vec_matching(r);
- tcg_gen_sub_vec(vece, r, t, a);
- tcg_temp_free_vec(t);
+ tcg_gen_sub_vec(vece, r, tcg_constant_vec_matching(r, vece, 0), a);
}
tcg_swap_vecop_list(hold_list);
}
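
Both fallbacks keep their shape and only change where the immediate operand comes from: bitwise NOT is XOR with all-ones, negation is subtraction from zero, and tcg_constant_vec_matching() supplies those immutable values without a temp to free. The scalar identities, for reference (sketch-only names):

    #include <stdint.h>

    /* Element-wise identities behind the two vector fallbacks above. */
    static uint64_t not_via_xor(uint64_t a) { return a ^ ~UINT64_C(0); }
    static uint64_t neg_via_sub(uint64_t a) { return UINT64_C(0) - a; }
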
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 77658a8..ddab20a 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -25,6 +25,7 @@
#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "exec/plugin-gen.h"
@@ -1562,9 +1563,7 @@
} else if (is_power_of_2(arg2)) {
tcg_gen_shli_i64(ret, arg1, ctz64(arg2));
} else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_mul_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ tcg_gen_mul_i64(ret, arg1, tcg_constant_i64(arg2));
}
}
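
The surrounding constant-multiplier helper strength-reduces before falling through: a power-of-two multiplier becomes a left shift by ctz64(arg2), visible in the context above, and only the general case multiplies, now against a pooled constant rather than a freshly allocated temp. The power-of-two case in scalar form (sketch, using the compiler builtin that ctz-style helpers typically wrap):

    #include <stdint.h>

    /* x * c == x << ctz(c) whenever c has exactly one bit set. */
    static uint64_t mul_pow2(uint64_t x, uint64_t c)
    {
        return x << __builtin_ctzll(c);   /* requires c != 0 and a power of two */
    }
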
@@ -1961,9 +1960,7 @@
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
tcg_temp_free_i32(t);
} else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_clz_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ tcg_gen_clz_i64(ret, arg1, tcg_constant_i64(arg2));
}
}
@@ -2015,9 +2012,7 @@
tcg_gen_ctpop_i64(ret, t);
tcg_temp_free_i64(t);
} else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_ctz_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ tcg_gen_ctz_i64(ret, arg1, tcg_constant_i64(arg2));
}
}
diff --git a/tcg/tcg.c b/tcg/tcg.c
index e4fccbd..bb52bc0 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -60,6 +60,7 @@
#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"
@@ -1444,22 +1445,6 @@
return tcg_constant_vec(t->base_type, vece, val);
}
-TCGv_i32 tcg_const_i32(int32_t val)
-{
- TCGv_i32 t0;
- t0 = tcg_temp_new_i32();
- tcg_gen_movi_i32(t0, val);
- return t0;
-}
-
-TCGv_i64 tcg_const_i64(int64_t val)
-{
- TCGv_i64 t0;
- t0 = tcg_temp_new_i64();
- tcg_gen_movi_i64(t0, val);
- return t0;
-}
-
/* Return true if OP may appear in the opcode stream.
Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)