target/loongarch: Implement vhaddw/vhsubw
This patch includes:
- VHADDW.{H.B/W.H/D.W/Q.D/HU.BU/WU.HU/DU.WU/QU.DU};
- VHSUBW.{H.B/W.H/D.W/Q.D/HU.BU/WU.HU/DU.WU/QU.DU}.
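These operations pair the odd-numbered elements of vj with the
even-numbered elements of vk, widen both to the destination element
size, and add or subtract them; the Q.D/QU.DU forms do the same with
64-bit sources and a 128-bit result, using Int128 arithmetic in the
helpers. As a rough reference sketch only (not QEMU code; the function
name and plain-array operands are illustrative), the signed H.B case
behaves like:

    #include <stdint.h>

    /* vhaddw.h.b sketch: 16 x int8 sources -> 8 x int16 results,
     * adding each odd element of vj to the even element of vk
     * below it, with both operands widened before the add. */
    static void vhaddw_h_b_ref(int16_t vd[8], const int8_t vj[16],
                               const int8_t vk[16])
    {
        for (int i = 0; i < 8; i++) {
            vd[i] = (int16_t)vj[2 * i + 1] + (int16_t)vk[2 * i];
        }
    }

The helpers below implement this pattern with the DO_ODD_EVEN macro for
the 8/16/32-bit source widths and with dedicated Int128 helpers for the
64-bit source widths.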
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20230504122810.4094787-9-gaosong@loongson.cn>
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index b7f9320..adfd693 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -848,3 +848,20 @@
INSN_LSX(vssub_hu, vvv)
INSN_LSX(vssub_wu, vvv)
INSN_LSX(vssub_du, vvv)
+
+INSN_LSX(vhaddw_h_b, vvv)
+INSN_LSX(vhaddw_w_h, vvv)
+INSN_LSX(vhaddw_d_w, vvv)
+INSN_LSX(vhaddw_q_d, vvv)
+INSN_LSX(vhaddw_hu_bu, vvv)
+INSN_LSX(vhaddw_wu_hu, vvv)
+INSN_LSX(vhaddw_du_wu, vvv)
+INSN_LSX(vhaddw_qu_du, vvv)
+INSN_LSX(vhsubw_h_b, vvv)
+INSN_LSX(vhsubw_w_h, vvv)
+INSN_LSX(vhsubw_d_w, vvv)
+INSN_LSX(vhsubw_q_d, vvv)
+INSN_LSX(vhsubw_hu_bu, vvv)
+INSN_LSX(vhsubw_wu_hu, vvv)
+INSN_LSX(vhsubw_du_wu, vvv)
+INSN_LSX(vhsubw_qu_du, vvv)
diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h
index 9c01823..6d58dab 100644
--- a/target/loongarch/helper.h
+++ b/target/loongarch/helper.h
@@ -130,3 +130,21 @@
DEF_HELPER_1(ertn, void, env)
DEF_HELPER_1(idle, void, env)
#endif
+
+/* LoongArch LSX */
+DEF_HELPER_4(vhaddw_h_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_w_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_d_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_q_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_hu_bu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_wu_hu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_du_wu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_qu_du, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_h_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_w_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_d_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_q_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_hu_bu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_wu_hu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_du_wu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_qu_du, void, env, i32, i32, i32)
diff --git a/target/loongarch/insn_trans/trans_lsx.c.inc b/target/loongarch/insn_trans/trans_lsx.c.inc
index 082bd73..562096c 100644
--- a/target/loongarch/insn_trans/trans_lsx.c.inc
+++ b/target/loongarch/insn_trans/trans_lsx.c.inc
@@ -157,3 +157,20 @@
TRANS(vssub_hu, gvec_vvv, MO_16, tcg_gen_gvec_ussub)
TRANS(vssub_wu, gvec_vvv, MO_32, tcg_gen_gvec_ussub)
TRANS(vssub_du, gvec_vvv, MO_64, tcg_gen_gvec_ussub)
+
+TRANS(vhaddw_h_b, gen_vvv, gen_helper_vhaddw_h_b)
+TRANS(vhaddw_w_h, gen_vvv, gen_helper_vhaddw_w_h)
+TRANS(vhaddw_d_w, gen_vvv, gen_helper_vhaddw_d_w)
+TRANS(vhaddw_q_d, gen_vvv, gen_helper_vhaddw_q_d)
+TRANS(vhaddw_hu_bu, gen_vvv, gen_helper_vhaddw_hu_bu)
+TRANS(vhaddw_wu_hu, gen_vvv, gen_helper_vhaddw_wu_hu)
+TRANS(vhaddw_du_wu, gen_vvv, gen_helper_vhaddw_du_wu)
+TRANS(vhaddw_qu_du, gen_vvv, gen_helper_vhaddw_qu_du)
+TRANS(vhsubw_h_b, gen_vvv, gen_helper_vhsubw_h_b)
+TRANS(vhsubw_w_h, gen_vvv, gen_helper_vhsubw_w_h)
+TRANS(vhsubw_d_w, gen_vvv, gen_helper_vhsubw_d_w)
+TRANS(vhsubw_q_d, gen_vvv, gen_helper_vhsubw_q_d)
+TRANS(vhsubw_hu_bu, gen_vvv, gen_helper_vhsubw_hu_bu)
+TRANS(vhsubw_wu_hu, gen_vvv, gen_helper_vhsubw_wu_hu)
+TRANS(vhsubw_du_wu, gen_vvv, gen_helper_vhsubw_du_wu)
+TRANS(vhsubw_qu_du, gen_vvv, gen_helper_vhsubw_qu_du)
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index 3a29f0a..10a2085 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -542,3 +542,20 @@
vssub_hu 0111 00000100 11001 ..... ..... ..... @vvv
vssub_wu 0111 00000100 11010 ..... ..... ..... @vvv
vssub_du 0111 00000100 11011 ..... ..... ..... @vvv
+
+vhaddw_h_b 0111 00000101 01000 ..... ..... ..... @vvv
+vhaddw_w_h 0111 00000101 01001 ..... ..... ..... @vvv
+vhaddw_d_w 0111 00000101 01010 ..... ..... ..... @vvv
+vhaddw_q_d 0111 00000101 01011 ..... ..... ..... @vvv
+vhaddw_hu_bu 0111 00000101 10000 ..... ..... ..... @vvv
+vhaddw_wu_hu 0111 00000101 10001 ..... ..... ..... @vvv
+vhaddw_du_wu 0111 00000101 10010 ..... ..... ..... @vvv
+vhaddw_qu_du 0111 00000101 10011 ..... ..... ..... @vvv
+vhsubw_h_b 0111 00000101 01100 ..... ..... ..... @vvv
+vhsubw_w_h 0111 00000101 01101 ..... ..... ..... @vvv
+vhsubw_d_w 0111 00000101 01110 ..... ..... ..... @vvv
+vhsubw_q_d 0111 00000101 01111 ..... ..... ..... @vvv
+vhsubw_hu_bu 0111 00000101 10100 ..... ..... ..... @vvv
+vhsubw_wu_hu 0111 00000101 10101 ..... ..... ..... @vvv
+vhsubw_du_wu 0111 00000101 10110 ..... ..... ..... @vvv
+vhsubw_qu_du 0111 00000101 10111 ..... ..... ..... @vvv
diff --git a/target/loongarch/lsx_helper.c b/target/loongarch/lsx_helper.c
index 9332163..7088b3e 100644
--- a/target/loongarch/lsx_helper.c
+++ b/target/loongarch/lsx_helper.c
@@ -4,3 +4,84 @@
*
* Copyright (c) 2022-2023 Loongson Technology Corporation Limited
*/
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#define DO_ADD(a, b) (a + b)
+#define DO_SUB(a, b) (a - b)
+
+#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ typedef __typeof(Vd->E1(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
+ } \
+}
+
+DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
+DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
+DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)
+
+void HELPER(vhaddw_q_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
+DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
+DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)
+
+void HELPER(vhsubw_q_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
+DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
+DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)
+
+void HELPER(vhaddw_qu_du)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
+DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
+DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)
+
+void HELPER(vhsubw_qu_du)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(0)));
+}