Merge patch series "target/riscv: some vector_helper.c cleanups"
Daniel Henrique Barboza <dbarboza@ventanamicro.com> says:
This is a re-send of patch 1, which is already reviewed, with a
follow-up that uses riscv_cpu_cfg() in the rest of the file. This
was suggested by Weiwei Li in the "[PATCH 0/4] RISCVCPUConfig related
cleanups" review. Patch 1 makes the work of patch 2 easier since it
eliminates some uses of env_archcpu() that we want to avoid.
* b4-shazam-merge:
target/riscv/vector_helper.c: avoid env_archcpu() when reading RISCVCPUConfig
target/riscv/vector_helper.c: create vext_set_tail_elems_1s()
Message-ID: <20230226170514.588071-1-dbarboza@ventanamicro.com>
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
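The cleanup the cover letter refers to is swapping env_archcpu() lookups for the
riscv_cpu_cfg() accessor wherever only the RISCVCPUConfig is needed. A minimal
before/after sketch of that pattern, modelled on the csr.c hunks below
(illustrative only, not part of the applied patches):

  /* before: fetch the whole RISCVCPU just to read one config flag */
  RISCVCPU *cpu = env_archcpu(env);
  if (!cpu->cfg.ext_smaia) {
      return RISCV_EXCP_ILLEGAL_INST;
  }

  /* after: read the flag through the config accessor */
  if (!riscv_cpu_cfg(env)->ext_smaia) {
      return RISCV_EXCP_ILLEGAL_INST;
  }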
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index ad3bb35..35a335b 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -99,7 +99,7 @@
MachineState *ms = MACHINE(s);
uint64_t mem_size = ms->ram_size;
void *fdt;
- int cpu, fdt_size;
+ int cpu;
uint32_t *cells;
char *nodename;
uint32_t plic_phandle, prci_phandle, gpio_phandle, phandle = 1;
@@ -112,18 +112,10 @@
"sifive,plic-1.0.0", "riscv,plic0"
};
- if (ms->dtb) {
- fdt = ms->fdt = load_device_tree(ms->dtb, &fdt_size);
- if (!fdt) {
- error_report("load_device_tree() failed");
- exit(1);
- }
- } else {
- fdt = ms->fdt = create_device_tree(&fdt_size);
- if (!fdt) {
- error_report("create_device_tree() failed");
- exit(1);
- }
+ fdt = ms->fdt = create_device_tree(&s->fdt_size);
+ if (!fdt) {
+ error_report("create_device_tree() failed");
+ exit(1);
}
qemu_fdt_setprop_string(fdt, "/", "model", "SiFive HiFive Unleashed A00");
@@ -560,8 +552,16 @@
qdev_connect_gpio_out(DEVICE(&(s->soc.gpio)), 10,
qemu_allocate_irq(sifive_u_machine_reset, NULL, 0));
- /* create device tree */
- create_fdt(s, memmap, riscv_is_32bit(&s->soc.u_cpus));
+ /* load/create device tree */
+ if (machine->dtb) {
+ machine->fdt = load_device_tree(machine->dtb, &s->fdt_size);
+ if (!machine->fdt) {
+ error_report("load_device_tree() failed");
+ exit(1);
+ }
+ } else {
+ create_fdt(s, memmap, riscv_is_32bit(&s->soc.u_cpus));
+ }
if (s->start_in_flash) {
/*
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index 86c4adc..4f81918 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -232,20 +232,21 @@
bool is_32_bit = riscv_is_32bit(&s->soc[0]);
for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
+ RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu];
+
cpu_phandle = (*phandle)++;
cpu_name = g_strdup_printf("/cpus/cpu@%d",
s->soc[socket].hartid_base + cpu);
qemu_fdt_add_subnode(ms->fdt, cpu_name);
- if (riscv_feature(&s->soc[socket].harts[cpu].env,
- RISCV_FEATURE_MMU)) {
+ if (cpu_ptr->cfg.mmu) {
qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type",
(is_32_bit) ? "riscv,sv32" : "riscv,sv48");
} else {
qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type",
"riscv,none");
}
- name = riscv_isa_string(&s->soc[socket].harts[cpu]);
+ name = riscv_isa_string(cpu_ptr);
qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name);
g_free(name);
qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv");
@@ -1008,18 +1009,10 @@
uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1;
uint8_t rng_seed[32];
- if (ms->dtb) {
- ms->fdt = load_device_tree(ms->dtb, &s->fdt_size);
- if (!ms->fdt) {
- error_report("load_device_tree() failed");
- exit(1);
- }
- } else {
- ms->fdt = create_device_tree(&s->fdt_size);
- if (!ms->fdt) {
- error_report("create_device_tree() failed");
- exit(1);
- }
+ ms->fdt = create_device_tree(&s->fdt_size);
+ if (!ms->fdt) {
+ error_report("create_device_tree() failed");
+ exit(1);
}
qemu_fdt_setprop_string(ms->fdt, "/", "model", "riscv-virtio,qemu");
@@ -1504,8 +1497,16 @@
}
virt_flash_map(s, system_memory);
- /* create device tree */
- create_fdt(s, memmap);
+ /* load/create device tree */
+ if (machine->dtb) {
+ machine->fdt = load_device_tree(machine->dtb, &s->fdt_size);
+ if (!machine->fdt) {
+ error_report("load_device_tree() failed");
+ exit(1);
+ }
+ } else {
+ create_fdt(s, memmap);
+ }
s->machine_done.notify = virt_machine_done;
qemu_add_machine_init_done_notifier(&s->machine_done);
diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h
index 65af306..0696f85 100644
--- a/include/hw/riscv/sifive_u.h
+++ b/include/hw/riscv/sifive_u.h
@@ -68,6 +68,7 @@
/*< public >*/
SiFiveUSoCState soc;
+ int fdt_size;
bool start_in_flash;
uint32_t msel;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 93b52b8..cac9f1d 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -74,6 +74,7 @@
static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v),
+ ISA_EXT_DATA_ENTRY(zicond, true, PRIV_VERSION_1_12_0, ext_zicond),
ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
@@ -101,12 +102,16 @@
ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f),
ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f),
+ ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_12_0, ext_zve64d),
+ ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh),
+ ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin),
ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia),
ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia),
ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf),
ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc),
+ ISA_EXT_DATA_ENTRY(svadu, true, PRIV_VERSION_1_12_0, ext_svadu),
ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval),
ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
@@ -613,6 +618,11 @@
env->bins = 0;
env->two_stage_lookup = false;
+ env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
+ (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
+ env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
+ (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
+
/* Initialized default priorities of local interrupts. */
for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
iprio = riscv_cpu_default_priority(i);
@@ -637,7 +647,7 @@
set_default_nan_mode(1, &env->fp_status);
#ifndef CONFIG_USER_ONLY
- if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
+ if (cpu->cfg.debug) {
riscv_trigger_init(env);
}
@@ -729,7 +739,11 @@
return;
}
- if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) {
+ if (cpu->cfg.ext_zfh) {
+ cpu->cfg.ext_zfhmin = true;
+ }
+
+ if (cpu->cfg.ext_zfhmin && !cpu->cfg.ext_f) {
error_setg(errp, "Zfh/Zfhmin extensions require F extension");
return;
}
@@ -739,19 +753,51 @@
return;
}
- if (cpu->cfg.ext_v && !cpu->cfg.ext_d) {
- error_setg(errp, "V extension requires D extension");
+ /* The V vector extension depends on the Zve64d extension */
+ if (cpu->cfg.ext_v) {
+ cpu->cfg.ext_zve64d = true;
+ }
+
+ /* The Zve64d extension depends on the Zve64f extension */
+ if (cpu->cfg.ext_zve64d) {
+ cpu->cfg.ext_zve64f = true;
+ }
+
+ /* The Zve64f extension depends on the Zve32f extension */
+ if (cpu->cfg.ext_zve64f) {
+ cpu->cfg.ext_zve32f = true;
+ }
+
+ if (cpu->cfg.ext_zve64d && !cpu->cfg.ext_d) {
+ error_setg(errp, "Zve64d/V extensions require D extension");
return;
}
- if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) {
+ if (cpu->cfg.ext_zve32f && !cpu->cfg.ext_f) {
error_setg(errp, "Zve32f/Zve64f extensions require F extension");
return;
}
+ if (cpu->cfg.ext_zvfh) {
+ cpu->cfg.ext_zvfhmin = true;
+ }
+
+ if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
+ error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
+ return;
+ }
+
+ if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
+ error_setg(errp, "Zvfh extensions requires Zfhmin extension");
+ return;
+ }
+
/* Set the ISA extensions, checks should have happened above */
- if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx ||
- cpu->cfg.ext_zhinxmin) {
+ if (cpu->cfg.ext_zhinx) {
+ cpu->cfg.ext_zhinxmin = true;
+ }
+
+ if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) {
cpu->cfg.ext_zfinx = true;
}
@@ -762,7 +808,7 @@
}
if (cpu->cfg.ext_f) {
error_setg(errp,
- "Zfinx cannot be supported together with F extension");
+ "Zfinx cannot be supported together with F extension");
return;
}
}
@@ -825,40 +871,40 @@
ext |= RVV;
if (!is_power_of_2(cpu->cfg.vlen)) {
error_setg(errp,
- "Vector extension VLEN must be power of 2");
+ "Vector extension VLEN must be power of 2");
return;
}
if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
error_setg(errp,
- "Vector extension implementation only supports VLEN "
- "in the range [128, %d]", RV_VLEN_MAX);
+ "Vector extension implementation only supports VLEN "
+ "in the range [128, %d]", RV_VLEN_MAX);
return;
}
if (!is_power_of_2(cpu->cfg.elen)) {
error_setg(errp,
- "Vector extension ELEN must be power of 2");
+ "Vector extension ELEN must be power of 2");
return;
}
- if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
- error_setg(errp,
- "Vector extension implementation only supports ELEN "
- "in the range [8, 64]");
- return;
- }
- if (cpu->cfg.vext_spec) {
- if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
- vext_version = VEXT_VERSION_1_00_0;
- } else {
+ if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
error_setg(errp,
- "Unsupported vector spec version '%s'",
- cpu->cfg.vext_spec);
+ "Vector extension implementation only supports ELEN "
+ "in the range [8, 64]");
return;
}
- } else {
- qemu_log("vector version is not specified, "
- "use the default value v1.0\n");
- }
- set_vext_version(env, vext_version);
+ if (cpu->cfg.vext_spec) {
+ if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
+ vext_version = VEXT_VERSION_1_00_0;
+ } else {
+ error_setg(errp,
+ "Unsupported vector spec version '%s'",
+ cpu->cfg.vext_spec);
+ return;
+ }
+ } else {
+ qemu_log("vector version is not specified, "
+ "use the default value v1.0\n");
+ }
+ set_vext_version(env, vext_version);
}
if (cpu->cfg.ext_j) {
ext |= RVJ;
@@ -919,24 +965,13 @@
}
}
- if (cpu->cfg.mmu) {
- riscv_set_feature(env, RISCV_FEATURE_MMU);
- }
-
- if (cpu->cfg.pmp) {
- riscv_set_feature(env, RISCV_FEATURE_PMP);
-
+ if (cpu->cfg.epmp && !cpu->cfg.pmp) {
/*
* Enhanced PMP should only be available
* on harts with PMP support
*/
- if (cpu->cfg.epmp) {
- riscv_set_feature(env, RISCV_FEATURE_EPMP);
- }
- }
-
- if (cpu->cfg.debug) {
- riscv_set_feature(env, RISCV_FEATURE_DEBUG);
+ error_setg(errp, "Invalid configuration: EPMP requires PMP support");
+ return;
}
@@ -1090,6 +1125,7 @@
DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
+ DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
@@ -1099,6 +1135,8 @@
DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
+ DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
+
DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
@@ -1143,12 +1181,16 @@
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
/* These are experimental so mark with 'x-' */
+ DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
/* ePMP 0.9.3 */
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
+ DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
+ DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
+
DEFINE_PROP_END_OF_LIST(),
};
@@ -1210,6 +1252,12 @@
DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
+
+ /*
+ * write_misa() is marked as experimental for now so mark
+ * it with 'x-' and default to 'false'.
+ */
+ DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 7ee22cb..665b4c6 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -81,17 +81,6 @@
#define RVH RV('H')
#define RVJ RV('J')
-/* S extension denotes that Supervisor mode exists, however it is possible
- to have a core that support S mode but does not have an MMU and there
- is currently no bit in misa to indicate whether an MMU exists or not
- so a cpu features bitfield is required, likewise for optional PMP support */
-enum {
- RISCV_FEATURE_MMU,
- RISCV_FEATURE_PMP,
- RISCV_FEATURE_EPMP,
- RISCV_FEATURE_MISA,
- RISCV_FEATURE_DEBUG
-};
/* Privileged specification version */
enum {
@@ -186,8 +175,6 @@
/* 128-bit helpers upper part return value */
target_ulong retxh;
- uint32_t features;
-
#ifdef CONFIG_USER_ONLY
uint32_t elf_flags;
#endif
@@ -447,9 +434,11 @@
bool ext_zkt;
bool ext_ifencei;
bool ext_icsr;
+ bool ext_zicond;
bool ext_zihintpause;
bool ext_smstateen;
bool ext_sstc;
+ bool ext_svadu;
bool ext_svinval;
bool ext_svnapot;
bool ext_svpbmt;
@@ -462,7 +451,10 @@
bool ext_zhinxmin;
bool ext_zve32f;
bool ext_zve64f;
+ bool ext_zve64d;
bool ext_zmmul;
+ bool ext_zvfh;
+ bool ext_zvfhmin;
bool ext_smaia;
bool ext_ssaia;
bool ext_sscofpmf;
@@ -498,6 +490,7 @@
bool pmp;
bool epmp;
bool debug;
+ bool misa_w;
bool short_isa_string;
};
@@ -535,16 +528,6 @@
return (env->misa_ext & ext) != 0;
}
-static inline bool riscv_feature(CPURISCVState *env, int feature)
-{
- return env->features & (1ULL << feature);
-}
-
-static inline void riscv_set_feature(CPURISCVState *env, int feature)
-{
- env->features |= (1ULL << feature);
-}
-
#include "cpu_user.h"
extern const char * const riscv_int_regnames[];
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 8b0d7e2..fca7ef0 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -747,10 +747,12 @@
#define MENVCFG_CBIE (3UL << 4)
#define MENVCFG_CBCFE BIT(6)
#define MENVCFG_CBZE BIT(7)
+#define MENVCFG_HADE (1ULL << 61)
#define MENVCFG_PBMTE (1ULL << 62)
#define MENVCFG_STCE (1ULL << 63)
/* For RV32 */
+#define MENVCFGH_HADE BIT(29)
#define MENVCFGH_PBMTE BIT(30)
#define MENVCFGH_STCE BIT(31)
@@ -763,10 +765,12 @@
#define HENVCFG_CBIE MENVCFG_CBIE
#define HENVCFG_CBCFE MENVCFG_CBCFE
#define HENVCFG_CBZE MENVCFG_CBZE
+#define HENVCFG_HADE MENVCFG_HADE
#define HENVCFG_PBMTE MENVCFG_PBMTE
#define HENVCFG_STCE MENVCFG_STCE
/* For RV32 */
+#define HENVCFGH_HADE MENVCFGH_HADE
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
#define HENVCFGH_STCE MENVCFGH_STCE
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 3a9472a..f88c503 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -51,7 +51,7 @@
*pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
*cs_base = 0;
- if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
+ if (cpu->cfg.ext_zve32f) {
/*
* If env->vl equals to VLMAX, we can use generic vector operation
* expanders (GVEC) to accelerate the vector operations.
@@ -105,7 +105,7 @@
flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
get_field(env->mstatus_hs, MSTATUS_VS));
}
- if (riscv_feature(env, RISCV_FEATURE_DEBUG) && !icount_enabled()) {
+ if (cpu->cfg.debug && !icount_enabled()) {
flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
}
#endif
@@ -706,7 +706,7 @@
pmp_priv_t pmp_priv;
int pmp_index = -1;
- if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
+ if (!riscv_cpu_cfg(env)->pmp) {
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return TRANSLATE_SUCCESS;
}
@@ -796,7 +796,7 @@
mode = PRV_U;
}
- if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
+ if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
*physical = addr;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return TRANSLATE_SUCCESS;
@@ -936,9 +936,17 @@
return TRANSLATE_FAIL;
}
+ bool pbmte = env->menvcfg & MENVCFG_PBMTE;
+ bool hade = env->menvcfg & MENVCFG_HADE;
+
+ if (first_stage && two_stage && riscv_cpu_virt_enabled(env)) {
+ pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
+ hade = hade && (env->henvcfg & HENVCFG_HADE);
+ }
+
if (riscv_cpu_sxl(env) == MXL_RV32) {
ppn = pte >> PTE_PPN_SHIFT;
- } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
+ } else if (pbmte || cpu->cfg.ext_svnapot) {
ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
} else {
ppn = pte >> PTE_PPN_SHIFT;
@@ -950,7 +958,7 @@
if (!(pte & PTE_V)) {
/* Invalid PTE */
return TRANSLATE_FAIL;
- } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
+ } else if (!pbmte && (pte & PTE_PBMT)) {
return TRANSLATE_FAIL;
} else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
/* Inner PTE, continue walking */
@@ -992,6 +1000,10 @@
/* Page table updates need to be atomic with MTTCG enabled */
if (updated_pte != pte) {
+ if (!hade) {
+ return TRANSLATE_FAIL;
+ }
+
/*
* - if accessed or dirty bits need updating, and the PTE is
* in RAM, then we do so atomically with a compare and swap.
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 1b0a0c1..3106f96 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -46,10 +46,8 @@
uint64_t bit)
{
bool virt = riscv_cpu_virt_enabled(env);
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
- if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) {
+ if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
return RISCV_EXCP_NONE;
}
@@ -81,7 +79,7 @@
{
#if !defined(CONFIG_USER_ONLY)
if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
- !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
+ !riscv_cpu_cfg(env)->ext_zfinx) {
return RISCV_EXCP_ILLEGAL_INST;
}
#endif
@@ -90,11 +88,9 @@
static RISCVException vs(CPURISCVState *env, int csrno)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
- if (env->misa_ext & RVV ||
- cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
+ if (cpu->cfg.ext_zve32f) {
#if !defined(CONFIG_USER_ONLY)
if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -108,8 +104,7 @@
static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
int ctr_index;
target_ulong ctr_mask;
int base_csrno = CSR_CYCLE;
@@ -134,6 +129,10 @@
skip_ext_pmu_check:
+ if (env->debugger) {
+ return RISCV_EXCP_NONE;
+ }
+
if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -166,8 +165,7 @@
#if !defined(CONFIG_USER_ONLY)
static RISCVException mctr(CPURISCVState *env, int csrno)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ int pmu_num = riscv_cpu_cfg(env)->pmu_num;
int ctr_index;
int base_csrno = CSR_MHPMCOUNTER3;
@@ -176,7 +174,7 @@
base_csrno += 0x80;
}
ctr_index = csrno - base_csrno;
- if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
+ if (!pmu_num || ctr_index >= pmu_num) {
/* The PMU is not enabled or counter is out of range*/
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -195,8 +193,7 @@
static RISCVException sscofpmf(CPURISCVState *env, int csrno)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
if (!cpu->cfg.ext_sscofpmf) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -222,9 +219,7 @@
static int aia_any(CPURISCVState *env, int csrno)
{
- RISCVCPU *cpu = env_archcpu(env);
-
- if (!cpu->cfg.ext_smaia) {
+ if (!riscv_cpu_cfg(env)->ext_smaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -233,9 +228,7 @@
static int aia_any32(CPURISCVState *env, int csrno)
{
- RISCVCPU *cpu = env_archcpu(env);
-
- if (!cpu->cfg.ext_smaia) {
+ if (!riscv_cpu_cfg(env)->ext_smaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -262,9 +255,7 @@
static int aia_smode(CPURISCVState *env, int csrno)
{
- RISCVCPU *cpu = env_archcpu(env);
-
- if (!cpu->cfg.ext_ssaia) {
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -273,9 +264,7 @@
static int aia_smode32(CPURISCVState *env, int csrno)
{
- RISCVCPU *cpu = env_archcpu(env);
-
- if (!cpu->cfg.ext_ssaia) {
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -321,8 +310,7 @@
static RISCVException mstateen(CPURISCVState *env, int csrno)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
if (!cpu->cfg.ext_smstateen) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -333,20 +321,28 @@
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
if (!cpu->cfg.ext_smstateen) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ RISCVException ret = hmode(env, csrno);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ if (env->debugger) {
+ return RISCV_EXCP_NONE;
+ }
+
if (env->priv < PRV_M) {
if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
return RISCV_EXCP_ILLEGAL_INST;
}
}
- return hmode(env, csrno);
+ return RISCV_EXCP_NONE;
}
static RISCVException hstateen(CPURISCVState *env, int csrno)
@@ -363,13 +359,20 @@
{
bool virt = riscv_cpu_virt_enabled(env);
int index = csrno - CSR_SSTATEEN0;
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
- if (!cpu->cfg.ext_smstateen) {
+ if (!riscv_cpu_cfg(env)->ext_smstateen) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ RISCVException ret = smode(env, csrno);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ if (env->debugger) {
+ return RISCV_EXCP_NONE;
+ }
+
if (env->priv < PRV_M) {
if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -382,7 +385,61 @@
}
}
- return smode(env, csrno);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException sstc(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+ bool hmode_check = false;
+
+ if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
+ hmode_check = true;
+ }
+
+ RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ if (env->debugger) {
+ return RISCV_EXCP_NONE;
+ }
+
+ if (env->priv == PRV_M) {
+ return RISCV_EXCP_NONE;
+ }
+
+ /*
+ * No need of separate function for rv32 as menvcfg stores both menvcfg
+ * menvcfgh for RV32.
+ */
+ if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
+ get_field(env->menvcfg, MENVCFG_STCE))) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (riscv_cpu_virt_enabled(env)) {
+ if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
+ get_field(env->henvcfg, HENVCFG_STCE))) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException sstc_32(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return sstc(env, csrno);
}
/* Checks if PointerMasking registers could be accessed */
@@ -397,9 +454,7 @@
static int aia_hmode(CPURISCVState *env, int csrno)
{
- RISCVCPU *cpu = env_archcpu(env);
-
- if (!cpu->cfg.ext_ssaia) {
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -408,9 +463,7 @@
static int aia_hmode32(CPURISCVState *env, int csrno)
{
- RISCVCPU *cpu = env_archcpu(env);
-
- if (!cpu->cfg.ext_ssaia) {
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -419,7 +472,16 @@
static RISCVException pmp(CPURISCVState *env, int csrno)
{
- if (riscv_feature(env, RISCV_FEATURE_PMP)) {
+ if (riscv_cpu_cfg(env)->pmp) {
+ if (csrno <= CSR_PMPCFG3) {
+ uint32_t reg_index = csrno - CSR_PMPCFG0;
+
+ /* TODO: RV128 restriction check */
+ if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+ }
+
return RISCV_EXCP_NONE;
}
@@ -428,7 +490,7 @@
static RISCVException epmp(CPURISCVState *env, int csrno)
{
- if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ if (riscv_cpu_cfg(env)->epmp) {
return RISCV_EXCP_NONE;
}
@@ -437,7 +499,7 @@
static RISCVException debug(CPURISCVState *env, int csrno)
{
- if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
+ if (riscv_cpu_cfg(env)->debug) {
return RISCV_EXCP_NONE;
}
@@ -447,13 +509,15 @@
static RISCVException seed(CPURISCVState *env, int csrno)
{
- RISCVCPU *cpu = env_archcpu(env);
-
- if (!cpu->cfg.ext_zkr) {
+ if (!riscv_cpu_cfg(env)->ext_zkr) {
return RISCV_EXCP_ILLEGAL_INST;
}
#if !defined(CONFIG_USER_ONLY)
+ if (env->debugger) {
+ return RISCV_EXCP_NONE;
+ }
+
/*
* With a CSR read-write instruction:
* 1) The seed CSR is always available in machine mode as normal.
@@ -572,7 +636,7 @@
static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
- *val = env_archcpu(env)->cfg.vlen >> 3;
+ *val = riscv_cpu_cfg(env)->vlen >> 3;
return RISCV_EXCP_NONE;
}
@@ -627,7 +691,7 @@
* The vstart CSR is defined to have only enough writable bits
* to hold the largest element index, i.e. lg2(VLEN) bits.
*/
- env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
+ env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen));
return RISCV_EXCP_NONE;
}
@@ -916,54 +980,8 @@
return RISCV_EXCP_NONE;
}
-static RISCVException sstc(CPURISCVState *env, int csrno)
-{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
- bool hmode_check = false;
-
- if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
- return RISCV_EXCP_ILLEGAL_INST;
- }
-
- if (env->priv == PRV_M) {
- return RISCV_EXCP_NONE;
- }
-
- /*
- * No need of separate function for rv32 as menvcfg stores both menvcfg
- * menvcfgh for RV32.
- */
- if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
- get_field(env->menvcfg, MENVCFG_STCE))) {
- return RISCV_EXCP_ILLEGAL_INST;
- }
-
- if (riscv_cpu_virt_enabled(env)) {
- if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
- get_field(env->henvcfg, HENVCFG_STCE))) {
- return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
- }
- }
-
- if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
- hmode_check = true;
- }
-
- return hmode_check ? hmode(env, csrno) : smode(env, csrno);
-}
-
-static RISCVException sstc_32(CPURISCVState *env, int csrno)
-{
- if (riscv_cpu_mxl(env) != MXL_RV32) {
- return RISCV_EXCP_ILLEGAL_INST;
- }
-
- return sstc(env, csrno);
-}
-
static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
*val = env->vstimecmp;
@@ -971,7 +989,7 @@
}
static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
*val = env->vstimecmp >> 32;
@@ -979,7 +997,7 @@
}
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -996,7 +1014,7 @@
}
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -1020,7 +1038,7 @@
}
static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
if (riscv_cpu_virt_enabled(env)) {
*val = env->vstimecmp >> 32;
@@ -1032,7 +1050,7 @@
}
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -1055,7 +1073,7 @@
}
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -1152,8 +1170,7 @@
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
target_ulong *val)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
*val = cpu->cfg.mvendorid;
return RISCV_EXCP_NONE;
@@ -1162,8 +1179,7 @@
static RISCVException read_marchid(CPURISCVState *env, int csrno,
target_ulong *val)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
*val = cpu->cfg.marchid;
return RISCV_EXCP_NONE;
@@ -1172,8 +1188,7 @@
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
target_ulong *val)
{
- CPUState *cs = env_cpu(env);
- RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPU *cpu = env_archcpu(env);
*val = cpu->cfg.mimpid;
return RISCV_EXCP_NONE;
@@ -1329,7 +1344,7 @@
static RISCVException write_misa(CPURISCVState *env, int csrno,
target_ulong val)
{
- if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
+ if (!riscv_cpu_cfg(env)->misa_w) {
/* drop write to misa */
return RISCV_EXCP_NONE;
}
@@ -1342,7 +1357,8 @@
/* 'E' excludes all other extensions */
if (val & RVE) {
- /* when we support 'E' we can do "val = RVE;" however
+ /*
+ * when we support 'E' we can do "val = RVE;" however
* for now we just drop writes if 'E' is present.
*/
return RISCV_EXCP_NONE;
@@ -1356,15 +1372,13 @@
/* Mask extensions that are not supported by this hart */
val &= env->misa_ext_mask;
- /* Mask extensions that are not supported by QEMU */
- val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
-
/* 'D' depends on 'F', so clear 'D' if 'F' is not present */
if ((val & RVD) && !(val & RVF)) {
val &= ~RVD;
}
- /* Suppress 'C' if next instruction is not aligned
+ /*
+ * Suppress 'C' if next instruction is not aligned
* TODO: this should check next_pc
*/
if ((val & RVC) && (GETPC() & ~3) != 0) {
@@ -1833,28 +1847,28 @@
}
static RISCVException read_mepc(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
*val = env->mepc;
return RISCV_EXCP_NONE;
}
static RISCVException write_mepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
env->mepc = val;
return RISCV_EXCP_NONE;
}
static RISCVException read_mcause(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
*val = env->mcause;
return RISCV_EXCP_NONE;
}
static RISCVException write_mcause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
env->mcause = val;
return RISCV_EXCP_NONE;
@@ -1876,19 +1890,22 @@
/* Execution environment configuration setup */
static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
*val = env->menvcfg;
return RISCV_EXCP_NONE;
}
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
+ RISCVCPUConfig *cfg = &env_archcpu(env)->cfg;
uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
if (riscv_cpu_mxl(env) == MXL_RV64) {
- mask |= MENVCFG_PBMTE | MENVCFG_STCE;
+ mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
+ (cfg->ext_sstc ? MENVCFG_STCE : 0) |
+ (cfg->ext_svadu ? MENVCFG_HADE : 0);
}
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
@@ -1896,16 +1913,19 @@
}
static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
*val = env->menvcfg >> 32;
return RISCV_EXCP_NONE;
}
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
- uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE;
+ RISCVCPUConfig *cfg = &env_archcpu(env)->cfg;
+ uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
+ (cfg->ext_sstc ? MENVCFG_STCE : 0) |
+ (cfg->ext_svadu ? MENVCFG_HADE : 0);
uint64_t valh = (uint64_t)val << 32;
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
@@ -1914,7 +1934,7 @@
}
static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
RISCVException ret;
@@ -1928,7 +1948,7 @@
}
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
RISCVException ret;
@@ -1943,7 +1963,7 @@
}
static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
RISCVException ret;
@@ -1952,12 +1972,18 @@
return ret;
}
- *val = env->henvcfg;
+ /*
+ * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
+ * henvcfg.stce is read_only 0 when menvcfg.stce = 0
+ * henvcfg.hade is read_only 0 when menvcfg.hade = 0
+ */
+ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
+ env->menvcfg);
return RISCV_EXCP_NONE;
}
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
RISCVException ret;
@@ -1968,7 +1994,7 @@
}
if (riscv_cpu_mxl(env) == MXL_RV64) {
- mask |= HENVCFG_PBMTE | HENVCFG_STCE;
+ mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE);
}
env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
@@ -1977,7 +2003,7 @@
}
static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
RISCVException ret;
@@ -1986,14 +2012,16 @@
return ret;
}
- *val = env->henvcfg >> 32;
+ *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
+ env->menvcfg)) >> 32;
return RISCV_EXCP_NONE;
}
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
- uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
+ uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
+ HENVCFG_HADE);
uint64_t valh = (uint64_t)val << 32;
RISCVException ret;
@@ -2034,13 +2062,13 @@
}
static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val)
{
return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
- target_ulong *val)
+ target_ulong *val)
{
*val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
@@ -2061,7 +2089,7 @@
}
static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -2069,7 +2097,7 @@
}
static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val)
{
return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2106,7 +2134,7 @@
}
static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val)
{
return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2145,7 +2173,7 @@
}
static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val)
{
return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -2624,7 +2652,7 @@
static RISCVException read_satp(CPURISCVState *env, int csrno,
target_ulong *val)
{
- if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+ if (!riscv_cpu_cfg(env)->mmu) {
*val = 0;
return RISCV_EXCP_NONE;
}
@@ -2643,7 +2671,7 @@
{
target_ulong vm, mask;
- if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+ if (!riscv_cpu_cfg(env)->mmu) {
return RISCV_EXCP_NONE;
}
@@ -3338,30 +3366,18 @@
}
static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val)
{
mseccfg_csr_write(env, val);
return RISCV_EXCP_NONE;
}
-static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
-{
- /* TODO: RV128 restriction check */
- if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
- return false;
- }
- return true;
-}
-
static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
target_ulong *val)
{
uint32_t reg_index = csrno - CSR_PMPCFG0;
- if (!check_pmp_reg_index(env, reg_index)) {
- return RISCV_EXCP_ILLEGAL_INST;
- }
- *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
+ *val = pmpcfg_csr_read(env, reg_index);
return RISCV_EXCP_NONE;
}
@@ -3370,10 +3386,7 @@
{
uint32_t reg_index = csrno - CSR_PMPCFG0;
- if (!check_pmp_reg_index(env, reg_index)) {
- return RISCV_EXCP_ILLEGAL_INST;
- }
- pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
+ pmpcfg_csr_write(env, reg_index, val);
return RISCV_EXCP_NONE;
}
@@ -3776,27 +3789,32 @@
RISCVCPU *cpu)
{
/* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
- int read_only = get_field(csrno, 0xC00) == 3;
+ bool read_only = get_field(csrno, 0xC00) == 3;
int csr_min_priv = csr_ops[csrno].min_priv_ver;
- /* ensure the CSR extension is enabled. */
+ /* ensure the CSR extension is enabled */
if (!cpu->cfg.ext_icsr) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ /* privileged spec version check */
if (env->priv_ver < csr_min_priv) {
return RISCV_EXCP_ILLEGAL_INST;
}
- /* check predicate */
- if (!csr_ops[csrno].predicate) {
- return RISCV_EXCP_ILLEGAL_INST;
- }
-
+ /* read / write check */
if (write_mask && read_only) {
return RISCV_EXCP_ILLEGAL_INST;
}
+ /*
+ * The predicate() not only does existence check but also does some
+ * access control check which triggers for example virtual instruction
+ * exception in some cases. When writing read-only CSRs in those cases
+ * illegal instruction exception should be triggered instead of virtual
+ * instruction exception. Hence this comes after the read / write check.
+ */
+ g_assert(csr_ops[csrno].predicate != NULL);
RISCVException ret = csr_ops[csrno].predicate(env, csrno);
if (ret != RISCV_EXCP_NONE) {
return ret;
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index 6e7bbdb..6048541 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -127,40 +127,6 @@
return 0;
}
-/*
- * Convert register index number passed by GDB to the correspond
- * vector CSR number. Vector CSRs are defined after vector registers
- * in dynamic generated riscv-vector.xml, thus the starting register index
- * of vector CSRs is 32.
- * Return 0 if register index number is out of range.
- */
-static int riscv_gdb_vector_csrno(int num_regs)
-{
- /*
- * The order of vector CSRs in the switch case
- * should match with the order defined in csr_ops[].
- */
- switch (num_regs) {
- case 32:
- return CSR_VSTART;
- case 33:
- return CSR_VXSAT;
- case 34:
- return CSR_VXRM;
- case 35:
- return CSR_VCSR;
- case 36:
- return CSR_VL;
- case 37:
- return CSR_VTYPE;
- case 38:
- return CSR_VLENB;
- default:
- /* Unknown register. */
- return 0;
- }
-}
-
static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n)
{
uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
@@ -174,19 +140,6 @@
return cnt;
}
- int csrno = riscv_gdb_vector_csrno(n);
-
- if (!csrno) {
- return 0;
- }
-
- target_ulong val = 0;
- int result = riscv_csrrw_debug(env, csrno, &val, 0, 0);
-
- if (result == RISCV_EXCP_NONE) {
- return gdb_get_regl(buf, val);
- }
-
return 0;
}
@@ -201,19 +154,6 @@
return vlenb;
}
- int csrno = riscv_gdb_vector_csrno(n);
-
- if (!csrno) {
- return 0;
- }
-
- target_ulong val = ldtul_p(mem_buf);
- int result = riscv_csrrw_debug(env, csrno, NULL, val, -1);
-
- if (result == RISCV_EXCP_NONE) {
- return sizeof(target_ulong);
- }
-
return 0;
}
@@ -280,6 +220,10 @@
int bitsize = 16 << env->misa_mxl_max;
int i;
+#if !defined(CONFIG_USER_ONLY)
+ env->debugger = true;
+#endif
+
/* Until gdb knows about 128-bit registers */
if (bitsize > 64) {
bitsize = 64;
@@ -290,6 +234,9 @@
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">");
for (i = 0; i < CSR_TABLE_SIZE; i++) {
+ if (env->priv_ver < csr_ops[i].min_priv_ver) {
+ continue;
+ }
predicate = csr_ops[i].predicate;
if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) {
if (csr_ops[i].name) {
@@ -305,6 +252,11 @@
g_string_append_printf(s, "</feature>");
cpu->dyn_csr_xml = g_string_free(s, false);
+
+#if !defined(CONFIG_USER_ONLY)
+ env->debugger = false;
+#endif
+
return CSR_TABLE_SIZE;
}
@@ -349,21 +301,6 @@
num_regs++;
}
- /* Define vector CSRs */
- const char *vector_csrs[7] = {
- "vstart", "vxsat", "vxrm", "vcsr",
- "vl", "vtype", "vlenb"
- };
-
- for (i = 0; i < 7; i++) {
- g_string_append_printf(s,
- "<reg name=\"%s\" bitsize=\"%d\""
- " regnum=\"%d\" group=\"vector\""
- " type=\"int\"/>",
- vector_csrs[i], TARGET_LONG_BITS, base_reg++);
- num_regs++;
- }
-
g_string_append_printf(s, "</feature>");
cpu->dyn_vreg_xml = g_string_free(s, false);
@@ -382,9 +319,9 @@
32, "riscv-32bit-fpu.xml", 0);
}
if (env->misa_ext & RVV) {
+ int base_reg = cs->gdb_num_regs;
gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector,
- ricsv_gen_dynamic_vector_xml(cs,
- cs->gdb_num_regs),
+ ricsv_gen_dynamic_vector_xml(cs, base_reg),
"riscv-vector.xml", 0);
}
switch (env->misa_mxl_max) {
@@ -403,7 +340,10 @@
g_assert_not_reached();
}
- gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr,
- riscv_gen_dynamic_csr_xml(cs, cs->gdb_num_regs),
- "riscv-csr.xml", 0);
+ if (cpu->cfg.ext_icsr) {
+ int base_reg = cs->gdb_num_regs;
+ gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr,
+ riscv_gen_dynamic_csr_xml(cs, base_reg),
+ "riscv-csr.xml", 0);
+ }
}
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index b7e7613..fb537e9 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -890,3 +890,7 @@
# *** RV32 Zksed Standard Extension ***
sm4ed .. 11000 ..... ..... 000 ..... 0110011 @k_aes
sm4ks .. 11010 ..... ..... 000 ..... 0110011 @k_aes
+
+# *** RV32 Zicond Standard Extension ***
+czero_eqz 0000111 ..... ..... 101 ..... 0110011 @r
+czero_nez 0000111 ..... ..... 111 ..... 0110011 @r
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index bbb5c3a..fc0d0d6 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -40,10 +40,11 @@
switch (s->sew) {
case MO_16:
+ return s->cfg_ptr->ext_zvfh;
case MO_32:
- return has_ext(s, RVF);
+ return s->cfg_ptr->ext_zve32f;
case MO_64:
- return has_ext(s, RVD);
+ return s->cfg_ptr->ext_zve64d;
default:
return false;
}
@@ -57,57 +58,32 @@
switch (s->sew) {
case MO_8:
+ return s->cfg_ptr->ext_zvfh;
case MO_16:
- return has_ext(s, RVF);
+ return s->cfg_ptr->ext_zve32f;
case MO_32:
- return has_ext(s, RVD);
+ return s->cfg_ptr->ext_zve64d;
default:
return false;
}
}
-static bool require_zve32f(DisasContext *s)
+static bool require_scale_rvfmin(DisasContext *s)
{
- /* RVV + Zve32f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
+ if (s->mstatus_fs == 0) {
+ return false;
}
- /* Zve32f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true;
-}
-
-static bool require_scale_zve32f(DisasContext *s)
-{
- /* RVV + Zve32f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
+ switch (s->sew) {
+ case MO_8:
+ return s->cfg_ptr->ext_zvfhmin;
+ case MO_16:
+ return s->cfg_ptr->ext_zve32f;
+ case MO_32:
+ return s->cfg_ptr->ext_zve64d;
+ default:
+ return false;
}
-
- /* Zve32f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
-}
-
-static bool require_zve64f(DisasContext *s)
-{
- /* RVV + Zve64f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
- }
-
- /* Zve64f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true;
-}
-
-static bool require_scale_zve64f(DisasContext *s)
-{
- /* RVV + Zve64f = RVV. */
- if (has_ext(s, RVV)) {
- return true;
- }
-
- /* Zve64f doesn't support FP64. (Section 18.2) */
- return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
}
/* Destination vector register group cannot overlap source mask register. */
@@ -173,9 +149,7 @@
{
TCGv s1, dst;
- if (!require_rvv(s) ||
- !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
- s->cfg_ptr->ext_zve64f)) {
+ if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
return false;
}
@@ -210,9 +184,7 @@
{
TCGv dst;
- if (!require_rvv(s) ||
- !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
- s->cfg_ptr->ext_zve64f)) {
+ if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
return false;
}
@@ -315,13 +287,12 @@
require_nf(vd, nf, s->lmul);
/*
- * All Zve* extensions support all vector load and store instructions,
- * except Zve64* extensions do not support EEW=64 for index values
- * when XLEN=32. (Section 18.2)
+ * V extension supports all vector load and store instructions,
+ * except V extension does not support EEW=64 for index values
+ * when XLEN=32. (Section 18.3)
*/
if (get_xl(s) == MXL_RV32) {
- ret &= (!has_ext(s, RVV) &&
- s->cfg_ptr->ext_zve64f ? eew != MO_64 : true);
+ ret &= (eew != MO_64);
}
return ret;
@@ -2027,8 +1998,7 @@
* are not included for EEW=64 in Zve64*. (Section 18.2)
*/
return opivv_check(s, a) &&
- (!has_ext(s, RVV) &&
- s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+ (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}
static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
@@ -2041,8 +2011,7 @@
* are not included for EEW=64 in Zve64*. (Section 18.2)
*/
return opivx_check(s, a) &&
- (!has_ext(s, RVV) &&
- s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+ (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}
GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
@@ -2259,8 +2228,7 @@
* for EEW=64 in Zve64*. (Section 18.2)
*/
return opivv_check(s, a) &&
- (!has_ext(s, RVV) &&
- s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+ (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}
static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
@@ -2271,8 +2239,7 @@
* for EEW=64 in Zve64*. (Section 18.2)
*/
return opivx_check(s, a) &&
- (!has_ext(s, RVV) &&
- s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+ (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}
GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
@@ -2335,9 +2302,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}
/* OPFVV without GVEC IR */
@@ -2425,9 +2390,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_ss(s, a->rd, a->rs2, a->vm) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_ss(s, a->rd, a->rs2, a->vm);
}
/* OPFVF without GVEC IR */
@@ -2465,9 +2428,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
/* OPFVV with WIDEN */
@@ -2510,9 +2471,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_ds(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_ds(s, a->rd, a->rs2, a->vm);
}
/* OPFVF with WIDEN */
@@ -2544,9 +2503,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}
/* WIDEN OPFVV with WIDEN */
@@ -2589,9 +2546,7 @@
require_scale_rvf(s) &&
(s->sew != MO_8) &&
vext_check_isa_ill(s) &&
- vext_check_dd(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_dd(s, a->rd, a->rs2, a->vm);
}
/* WIDEN OPFVF with WIDEN */
@@ -2668,9 +2623,7 @@
require_rvf(s) &&
vext_check_isa_ill(s) &&
/* OPFV instructions ignore vs1 check */
- vext_check_ss(s, a->rd, a->rs2, a->vm) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_ss(s, a->rd, a->rs2, a->vm);
}
static bool do_opfv(DisasContext *s, arg_rmr *a,
@@ -2735,9 +2688,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_mss(s, a->rd, a->rs1, a->rs2);
}
GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
@@ -2750,9 +2701,7 @@
return require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- vext_check_ms(s, a->rd, a->rs2) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ vext_check_ms(s, a->rd, a->rs2);
}
GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
@@ -2773,9 +2722,7 @@
if (require_rvv(s) &&
require_rvf(s) &&
vext_check_isa_ill(s) &&
- require_align(a->rd, s->lmul) &&
- require_zve32f(s) &&
- require_zve64f(s)) {
+ require_align(a->rd, s->lmul)) {
gen_set_rm(s, RISCV_FRM_DYN);
TCGv_i64 t1;
@@ -2860,18 +2807,14 @@
static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
{
return opfv_widen_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
{
return opfv_widen_check(s, a) &&
- require_scale_rvf(s) &&
- (s->sew != MO_8) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ require_scale_rvfmin(s) &&
+ (s->sew != MO_8);
}
#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -2922,9 +2865,7 @@
require_scale_rvf(s) &&
vext_check_isa_ill(s) &&
/* OPFV widening instructions ignore vs1 check */
- vext_check_ds(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_ds(s, a->rd, a->rs2, a->vm);
}
#define GEN_OPFXV_WIDEN_TRANS(NAME) \
@@ -2979,18 +2920,21 @@
{
return opfv_narrow_check(s, a) &&
require_rvf(s) &&
- (s->sew != MO_64) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ (s->sew != MO_64);
}
static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
{
return opfv_narrow_check(s, a) &&
+ require_scale_rvfmin(s) &&
+ (s->sew != MO_8);
+}
+
+static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_narrow_check(s, a) &&
require_scale_rvf(s) &&
- (s->sew != MO_8) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ (s->sew != MO_8);
}
#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -3030,7 +2974,7 @@
GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
RISCV_FRM_DYN)
/* Reuse the helper function from vfncvt.f.f.w */
-GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
+GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
RISCV_FRM_ROD)
static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
@@ -3039,9 +2983,7 @@
require_scale_rvf(s) &&
vext_check_isa_ill(s) &&
/* OPFV narrowing instructions ignore vs1 check */
- vext_check_sd(s, a->rd, a->rs2, a->vm) &&
- require_scale_zve32f(s) &&
- require_scale_zve64f(s);
+ vext_check_sd(s, a->rd, a->rs2, a->vm);
}
#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
@@ -3115,9 +3057,7 @@
static bool freduction_check(DisasContext *s, arg_rmrr *a)
{
return reduction_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
@@ -3544,9 +3484,7 @@
{
if (require_rvv(s) &&
require_rvf(s) &&
- vext_check_isa_ill(s) &&
- require_zve32f(s) &&
- require_zve64f(s)) {
+ vext_check_isa_ill(s)) {
gen_set_rm(s, RISCV_FRM_DYN);
unsigned int ofs = (8 << s->sew);
@@ -3572,9 +3510,7 @@
{
if (require_rvv(s) &&
require_rvf(s) &&
- vext_check_isa_ill(s) &&
- require_zve32f(s) &&
- require_zve64f(s)) {
+ vext_check_isa_ill(s)) {
gen_set_rm(s, RISCV_FRM_DYN);
/* The instructions ignore LMUL and vector register group. */
@@ -3625,17 +3561,13 @@
static bool fslideup_check(DisasContext *s, arg_rmrr *a)
{
return slideup_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
{
return slidedown_check(s, a) &&
- require_rvf(s) &&
- require_zve32f(s) &&
- require_zve64f(s);
+ require_rvf(s);
}
GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
index 2ad5716..85fc1aa 100644
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -28,15 +28,14 @@
} \
} while (0)
-#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
- if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \
+#define REQUIRE_ZFHMIN(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfhmin) { \
return false; \
} \
} while (0)
-#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \
- if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \
- ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \
+#define REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx) do { \
+ if (!(ctx->cfg_ptr->ext_zfhmin || ctx->cfg_ptr->ext_zhinxmin)) { \
return false; \
} \
} while (0)
@@ -47,7 +46,7 @@
TCGv t0;
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN(ctx);
decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -70,7 +69,7 @@
TCGv t0;
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN(ctx);
decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -401,7 +400,7 @@
static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
{
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
@@ -418,7 +417,7 @@
static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
{
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
REQUIRE_ZDINX_OR_D(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
@@ -436,7 +435,7 @@
static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
{
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
@@ -452,7 +451,7 @@
static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
{
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
REQUIRE_ZDINX_OR_D(ctx);
TCGv_i64 dest = dest_fpr(ctx, a->rd);
@@ -585,7 +584,7 @@
static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
{
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
@@ -605,7 +604,7 @@
static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a)
{
REQUIRE_FPU;
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO);
diff --git a/target/riscv/insn_trans/trans_rvzicond.c.inc b/target/riscv/insn_trans/trans_rvzicond.c.inc
new file mode 100644
index 0000000..6452601
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvzicond.c.inc
@@ -0,0 +1,49 @@
+/*
+ * RISC-V translation routines for the Zicond Standard Extension.
+ *
+ * Copyright (c) 2020-2023 PLCT Lab
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZICOND(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zicond) { \
+ return false; \
+ } \
+} while (0)
+
+static bool trans_czero_eqz(DisasContext *ctx, arg_czero_eqz *a)
+{
+ REQUIRE_ZICOND(ctx);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ tcg_gen_movcond_tl(TCG_COND_EQ, dest, src2, ctx->zero, ctx->zero, src1);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_czero_nez(DisasContext *ctx, arg_czero_nez *a)
+{
+ REQUIRE_ZICOND(ctx);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, dest, src2, ctx->zero, ctx->zero, src1);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc
index be87c34..cf1731b 100644
--- a/target/riscv/insn_trans/trans_xthead.c.inc
+++ b/target/riscv/insn_trans/trans_xthead.c.inc
@@ -980,10 +980,6 @@
static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop,
int shamt)
{
- if (a->rs == a->rd1 || a->rs == a->rd2 || a->rd1 == a->rd2) {
- return false;
- }
-
TCGv data1 = get_gpr(ctx, a->rd1, EXT_NONE);
TCGv data2 = get_gpr(ctx, a->rd2, EXT_NONE);
TCGv addr1 = tcg_temp_new();
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index c6ce318..9c45593 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -27,9 +27,8 @@
static bool pmp_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
- CPURISCVState *env = &cpu->env;
- return riscv_feature(env, RISCV_FEATURE_PMP);
+ return cpu->cfg.pmp;
}
static int pmp_post_load(void *opaque, int version_id)
@@ -226,9 +225,8 @@
static bool debug_needed(void *opaque)
{
RISCVCPU *cpu = opaque;
- CPURISCVState *env = &cpu->env;
- return riscv_feature(env, RISCV_FEATURE_DEBUG);
+ return cpu->cfg.debug;
}
static int debug_post_load(void *opaque, int version_id)
@@ -333,8 +331,8 @@
const VMStateDescription vmstate_riscv_cpu = {
.name = "cpu",
- .version_id = 6,
- .minimum_version_id = 6,
+ .version_id = 7,
+ .minimum_version_id = 7,
.post_load = riscv_cpu_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
@@ -353,7 +351,6 @@
VMSTATE_UINT32(env.misa_ext, RISCVCPU),
VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
- VMSTATE_UINT32(env.features, RISCVCPU),
VMSTATE_UINTTL(env.priv, RISCVCPU),
VMSTATE_UINTTL(env.virt, RISCVCPU),
VMSTATE_UINT64(env.resetvec, RISCVCPU),
diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c
index 236f93b..f36ddfa 100644
--- a/target/riscv/monitor.c
+++ b/target/riscv/monitor.c
@@ -218,7 +218,7 @@
return;
}
- if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+ if (!riscv_cpu_cfg(env)->mmu) {
monitor_printf(mon, "S-mode MMU unavailable\n");
return;
}
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 48f918b..9c0b91c 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -195,7 +195,7 @@
uint64_t mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
- if (riscv_feature(env, RISCV_FEATURE_PMP) &&
+ if (riscv_cpu_cfg(env)->pmp &&
!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
}
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 4bc4113..a08cd95 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -88,7 +88,7 @@
if (pmp_index < MAX_RISCV_PMPS) {
bool locked = true;
- if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ if (riscv_cpu_cfg(env)->epmp) {
/* mseccfg.RLB is set */
if (MSECCFG_RLB_ISSET(env)) {
locked = false;
@@ -239,7 +239,7 @@
{
bool ret;
- if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ if (riscv_cpu_cfg(env)->epmp) {
if (MSECCFG_MMWP_ISSET(env)) {
/*
* The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
@@ -265,7 +265,7 @@
}
}
- if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
+ if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
/*
* Privileged spec v1.10 states if HW doesn't implement any PMP entry
* or no PMP entry matches an M-Mode access, the access succeeds.
@@ -315,7 +315,7 @@
}
if (size == 0) {
- if (riscv_feature(env, RISCV_FEATURE_MMU)) {
+ if (riscv_cpu_cfg(env)->mmu) {
/*
* If size is unknown (0), assume that all bytes
* from addr to the end of the page will be accessed.
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 772f9d7..4a957a5 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -1103,6 +1103,7 @@
#include "insn_trans/trans_rvh.c.inc"
#include "insn_trans/trans_rvv.c.inc"
#include "insn_trans/trans_rvb.c.inc"
+#include "insn_trans/trans_rvzicond.c.inc"
#include "insn_trans/trans_rvzawrs.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_rvk.c.inc"
@@ -1261,7 +1262,7 @@
uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
int len = insn_len(next_insn);
- if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) {
+ if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
}