target/arm: Avoid tcg_const_i64 in handle_vec_simd_sqshrn

It is easy enough to use mov for the first element instead of
or-ing into zero and relying on the optimizer to fold away the or.
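As a minimal sketch of the pattern (acc and elt are illustrative
names, not the function's actual locals):

    /* Before: deposit every element into a zeroed constant; the
     * i == 0 deposit is an or into zero that the optimizer must
     * fold away. */
    acc = tcg_const_i64(0);
    for (i = 0; i < elements; i++) {
        tcg_gen_deposit_i64(acc, acc, elt, esize * i, esize);
    }

    /* After: mov the first element, deposit the rest; acc is a
     * plain temporary with no initial value. */
    acc = tcg_temp_new_i64();
    for (i = 0; i < elements; i++) {
        if (i == 0) {
            tcg_gen_mov_i64(acc, elt);
        } else {
            tcg_gen_deposit_i64(acc, acc, elt, esize * i, esize);
        }
    }
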
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 2ad7c48..082a8b8 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -8459,7 +8459,7 @@
     tcg_rn = tcg_temp_new_i64();
     tcg_rd = tcg_temp_new_i64();
     tcg_rd_narrowed = tcg_temp_new_i32();
-    tcg_final = tcg_const_i64(0);
+    tcg_final = tcg_temp_new_i64();

     if (round) {
         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
@@ -8473,7 +8473,11 @@
                                 false, is_u_shift, size+1, shift);
         narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
-        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+        if (i == 0) {
+            tcg_gen_mov_i64(tcg_final, tcg_rd);
+        } else {
+            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+        }
     }

     if (!is_q) {