target/arm: Split {full,core}_a64_user_mem_index
Split get_a64_user_mem_index into two functions: one that returns the
full ARMMMUIdx and one that returns the core mmu_idx.
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-id: 20251008215613.300150-43-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
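[Editor's note: as background for the split, here is a minimal standalone
sketch of the resulting pattern, with illustrative names only (FullIdx,
to_core_idx, etc. are stand-ins, not the QEMU code): a "full" helper that
chooses the complete MMU index, plus a thin wrapper that converts it to
the core index for callers that only need that.]

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins; in QEMU these are ARMMMUIdx and
     * arm_to_core_mmu_idx(), which masks the ARMMMUIdx down to its
     * core index bits. */
    typedef enum { IDX_E10_0, IDX_E10_1 } FullIdx;

    static int to_core_idx(FullIdx idx)
    {
        return (int)idx;    /* stand-in for the real conversion */
    }

    /* "Full" helper: picks the complete index, as
     * full_a64_user_mem_index() does for the unprivileged
     * load/store variants. */
    static FullIdx full_user_mem_index(bool unpriv)
    {
        return unpriv ? IDX_E10_0 : IDX_E10_1;
    }

    /* Thin wrapper for callers that only need the core mmu_idx,
     * mirroring core_a64_user_mem_index(). */
    static int core_user_mem_index(bool unpriv)
    {
        return to_core_idx(full_user_mem_index(unpriv));
    }

    int main(void)
    {
        printf("unpriv -> core idx %d\n", core_user_mem_index(true));
        printf("normal -> core idx %d\n", core_user_mem_index(false));
        return 0;
    }

[As the diff below shows, existing callers switch to the core-index
wrapper, so behaviour is unchanged; the full-index helper is now
available to code that wants the ARMMMUIdx itself.]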
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 43c9bfe..1337e16 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -93,7 +93,7 @@
}

/*
- * Return the core mmu_idx to use for A64 load/store insns which
+ * Return the full arm mmu_idx to use for A64 load/store insns which
* have a "unprivileged load/store" variant. Those insns access
* EL0 if executed from an EL which has control over EL0 (usually
* EL1) but behave like normal loads and stores if executed from
@@ -103,7 +103,7 @@
* normal encoding (in which case we will return the same
* thing as get_mem_index().
*/
-static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
+static ARMMMUIdx full_a64_user_mem_index(DisasContext *s, bool unpriv)
{
/*
* If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
@@ -130,7 +130,13 @@
g_assert_not_reached();
}
}
- return arm_to_core_mmu_idx(useridx);
+ return useridx;
+}
+
+/* Return the core mmu_idx per above. */
+static int core_a64_user_mem_index(DisasContext *s, bool unpriv)
+{
+ return arm_to_core_mmu_idx(full_a64_user_mem_index(s, unpriv));
}

static void set_btype_raw(int val)
@@ -3577,7 +3583,7 @@
if (!a->p) {
tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
}
- memidx = get_a64_user_mem_index(s, a->unpriv);
+ memidx = core_a64_user_mem_index(s, a->unpriv);
*clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
a->w || a->rn != 31,
mop, a->unpriv, memidx);
@@ -3598,7 +3604,7 @@
{
bool iss_sf, iss_valid = !a->w;
TCGv_i64 clean_addr, dirty_addr, tcg_rt;
- int memidx = get_a64_user_mem_index(s, a->unpriv);
+ int memidx = core_a64_user_mem_index(s, a->unpriv);
MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
@@ -3616,7 +3622,7 @@
{
bool iss_sf, iss_valid = !a->w;
TCGv_i64 clean_addr, dirty_addr, tcg_rt;
- int memidx = get_a64_user_mem_index(s, a->unpriv);
+ int memidx = core_a64_user_mem_index(s, a->unpriv);
MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
@@ -4514,7 +4520,7 @@
return false;
}
- memidx = get_a64_user_mem_index(s, a->unpriv);
+ memidx = core_a64_user_mem_index(s, a->unpriv);
/*
* We pass option_a == true, matching our implementation;
@@ -4568,8 +4574,8 @@
return false;
}
- rmemidx = get_a64_user_mem_index(s, runpriv);
- wmemidx = get_a64_user_mem_index(s, wunpriv);
+ rmemidx = core_a64_user_mem_index(s, runpriv);
+ wmemidx = core_a64_user_mem_index(s, wunpriv);
/*
* We pass option_a == true, matching our implementation;