target/riscv: Remove redundant checks for zve32f and zve64f
The require_zve32f()/require_zve64f() helpers (and their scale variants) are
now fully covered by require_rvf()/require_scale_rvf(), so the extra checks
are redundant and can be removed.
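
For context, here is a simplified model of the SEW-gated checks that make the
removed helpers redundant. This is illustrative only, not the exact QEMU
definitions: the enum, struct, and function names below are stand-ins for
QEMU's MemOp, vector config, and require_rvf()/require_scale_rvf(), and are
meant only to show why a per-SEW check subsumes the per-extension ones.

    #include <stdbool.h>

    /* Simplified stand-ins for QEMU's SEW encoding and vector config. */
    typedef enum { MO_8, MO_16, MO_32, MO_64 } ModelSew;

    typedef struct {
        bool ext_zvfh;    /* FP16 vector support */
        bool ext_zve32f;  /* FP32 vector support */
        bool ext_zve64d;  /* FP64 vector support */
    } ModelCfg;

    /* require_rvf-style check: select the required FP extension by SEW. */
    static bool model_require_rvf(const ModelCfg *cfg, ModelSew sew)
    {
        switch (sew) {
        case MO_16: return cfg->ext_zvfh;
        case MO_32: return cfg->ext_zve32f;
        case MO_64: return cfg->ext_zve64d;
        default:    return false;
        }
    }

    /* require_scale_rvf-style check: same idea at twice the SEW. */
    static bool model_require_scale_rvf(const ModelCfg *cfg, ModelSew sew)
    {
        switch (sew) {
        case MO_8:  return cfg->ext_zvfh;
        case MO_16: return cfg->ext_zve32f;
        case MO_32: return cfg->ext_zve64d;
        default:    return false;
        }
    }

Because such a check already selects by SEW, a configuration with only
Zve32f/Zve64f fails the FP64 cases (and, for widening ops, the FP32-source
cases) on its own, which is all the removed require_zve32f()/require_zve64f()
variants enforced.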
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-ID: <20230215020539.4788-11-liweiwei@iscas.ac.cn>
[Palmer: commit text]
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 9b2711b..9053759 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -66,50 +66,6 @@
}
}
-static bool require_zve32f(DisasContext *s)
-{
- /* RVV + Zve32f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
- }
-
- /* Zve32f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true;
-}
-
-static bool require_scale_zve32f(DisasContext *s)
-{
- /* RVV + Zve32f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
- }
-
- /* Zve32f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
-}
-
-static bool require_zve64f(DisasContext *s)
-{
- /* RVV + Zve64f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
- }
-
- /* Zve64f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true;
-}
-
-static bool require_scale_zve64f(DisasContext *s)
-{
- /* RVV + Zve64f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
- }
-
- /* Zve64f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
-}
-
/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
{
@@ -2331,9 +2287,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}
/* OPFVV without GVEC IR */
@@ -2421,9 +2375,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_ss(s, a->rd, a->rs2, a->vm) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_ss(s, a->rd, a->rs2, a->vm);
}
/* OPFVF without GVEC IR */
@@ -2461,9 +2413,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
/* OPFVV with WIDEN */
@@ -2506,9 +2456,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_ds(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_ds(s, a->rd, a->rs2, a->vm);
}
/* OPFVF with WIDEN */
@@ -2540,9 +2488,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}
/* WIDEN OPFVV with WIDEN */
@@ -2585,9 +2531,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_dd(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_dd(s, a->rd, a->rs2, a->vm);
}
/* WIDEN OPFVF with WIDEN */
@@ -2664,9 +2608,7 @@
require_rvf(s) &&
vext_check_isa_ill(s) &&
/* OPFV instructions ignore vs1 check */
- vext_check_ss(s, a->rd, a->rs2, a->vm) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_ss(s, a->rd, a->rs2, a->vm);
}
static bool do_opfv(DisasContext *s, arg_rmr *a,
@@ -2731,9 +2673,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_mss(s, a->rd, a->rs1, a->rs2);
}
GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
@@ -2746,9 +2686,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_ms(s, a->rd, a->rs2) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_ms(s, a->rd, a->rs2);
}
GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
@@ -2769,9 +2707,7 @@
if (require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- require_align(a->rd, s->lmul) &&
- require_zve32f(s) &&
- require_zve64f(s)) {
+ require_align(a->rd, s->lmul)) {
gen_set_rm(s, RISCV_FRM_DYN);
TCGv_i64 t1;
@@ -2856,18 +2792,14 @@
static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
{
return opfv_widen_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
{
return opfv_widen_check(s, a) &&
require_scale_rvf(s) &&
- (s->sew != MO_8) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ (s->sew != MO_8);
}
#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -2918,9 +2850,7 @@
require_scale_rvf(s) &&
vext_check_isa_ill(s) &&
/* OPFV widening instructions ignore vs1 check */
- vext_check_ds(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_ds(s, a->rd, a->rs2, a->vm);
}
#define GEN_OPFXV_WIDEN_TRANS(NAME) \
@@ -2975,18 +2905,14 @@
{
return opfv_narrow_check(s, a) &&
require_rvf(s) &&
- (s->sew != MO_64) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ (s->sew != MO_64);
}
static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
{
return opfv_narrow_check(s, a) &&
require_scale_rvf(s) &&
- (s->sew != MO_8) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ (s->sew != MO_8);
}
#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -3035,9 +2961,7 @@
require_scale_rvf(s) &&
vext_check_isa_ill(s) &&
/* OPFV narrowing instructions ignore vs1 check */
- vext_check_sd(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_sd(s, a->rd, a->rs2, a->vm);
}
#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
@@ -3111,9 +3035,7 @@
static bool freduction_check(DisasContext *s, arg_rmrr *a)
{
return reduction_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
@@ -3540,9 +3462,7 @@
{
if (require_rvv(s) &&
require_rvf(s) &&
- vext_check_isa_ill(s) &&
- require_zve32f(s) &&
- require_zve64f(s)) {
+ vext_check_isa_ill(s)) {
gen_set_rm(s, RISCV_FRM_DYN);
unsigned int ofs = (8 << s->sew);
@@ -3568,9 +3488,7 @@
{
if (require_rvv(s) &&
require_rvf(s) &&
- vext_check_isa_ill(s) &&
- require_zve32f(s) &&
- require_zve64f(s)) {
+ vext_check_isa_ill(s)) {
gen_set_rm(s, RISCV_FRM_DYN);
/* The instructions ignore LMUL and vector register group. */
@@ -3621,17 +3539,13 @@
static bool fslideup_check(DisasContext *s, arg_rmrr *a)
{
return slideup_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
{
return slidedown_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)