target/arm: Move get_phys_addr to ptw.c

Begin moving all of the page table walking functions
out of helper.c, starting with get_phys_addr().

Create a temporary header file, "ptw.h", in which to
share declarations between the two C files while we
are moving functions.

Move a few declarations to "internals.h", which will
remain used by multiple C files.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220604040607.269301-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
new file mode 100644
index 0000000..318000f
--- /dev/null
+++ b/target/arm/ptw.c
@@ -0,0 +1,297 @@
+/*
+ * ARM page table walking.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "internals.h"
+#include "ptw.h"
+
+
+/**
+ * get_phys_addr - get the physical address for this virtual address
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fault info (@fi)
+ * describes why the translation aborted; when converted to a DFSR/IFSR
+ * fault register value, the following caveats apply:
+ *  * we honour the short vs long DFSR format differences.
+ *  * the WnR bit is never set (the caller must do this).
+ *  * for PMSAv5 based systems we don't bother to return a full FSR format
+ *    value.
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
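+ *
+ * A sketch of a typical call (variable names here are illustrative):
+ *     if (!get_phys_addr(env, addr, access_type, mmu_idx, &phys, &attrs,
+ *                        &prot, &page_size, &fi, &cacheattrs)) {
+ *         ... phys, attrs, prot and page_size are now valid ...
+ *     }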
+ */
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                   target_ulong *page_size,
+                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+{
+    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
+
+    if (mmu_idx != s1_mmu_idx) {
+        /*
+         * Call ourselves recursively to do the stage 1 and then stage 2
+         * translations if mmu_idx is a two-stage regime.
+         */
+        if (arm_feature(env, ARM_FEATURE_EL2)) {
+            hwaddr ipa;
+            int s2_prot;
+            int ret;
+            bool ipa_secure;
+            ARMCacheAttrs cacheattrs2 = {};
+            ARMMMUIdx s2_mmu_idx;
+            bool is_el0;
+
+            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
+                                attrs, prot, page_size, fi, cacheattrs);
+
+            /* If S1 fails or S2 is disabled, return early. */
+            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+                *phys_ptr = ipa;
+                return ret;
+            }
+
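+            /*
+             * attrs->secure currently reflects the security state of the
+             * stage 1 output IPA; remember it before attrs->secure is
+             * updated below to select the space for the stage 2 walk.
+             */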
+            ipa_secure = attrs->secure;
+            if (arm_is_secure_below_el3(env)) {
+                if (ipa_secure) {
+                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+                } else {
+                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+                }
+            } else {
+                assert(!ipa_secure);
+            }
+
+            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
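+            /*
+             * With FEAT_XNX, the stage 2 XN field distinguishes EL0 from
+             * EL1 execute permission, so the stage 2 walk needs to know
+             * whether the stage 1 access was from EL0.
+             */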
+            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
+
+            /* S1 is done. Now do S2 translation. */
+            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
+                                     phys_ptr, attrs, &s2_prot,
+                                     page_size, fi, &cacheattrs2);
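+            /* Record the IPA for a possible stage 2 fault report. */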
+            fi->s2addr = ipa;
+            /* Combine the S1 and S2 perms. */
+            *prot &= s2_prot;
+
+            /* If S2 fails, return early. */
+            if (ret) {
+                return ret;
+            }
+
+            /* Combine the S1 and S2 cache attributes. */
+            if (arm_hcr_el2_eff(env) & HCR_DC) {
+                /*
+                 * HCR.DC forces the first stage attributes to
+                 *  Normal Non-Shareable,
+                 *  Inner Write-Back Read-Allocate Write-Allocate,
+                 *  Outer Write-Back Read-Allocate Write-Allocate.
+                 * Do not overwrite Tagged within attrs.
+                 */
+                if (cacheattrs->attrs != 0xf0) {
+                    cacheattrs->attrs = 0xff;
+                }
+                cacheattrs->shareability = 0;
+            }
+            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
+
+            /* Check if IPA translates to secure or non-secure PA space. */
+            if (arm_is_secure_below_el3(env)) {
+                if (ipa_secure) {
+                    attrs->secure =
+                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
+                } else {
+                    attrs->secure =
+                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
+                          || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
+                }
+            }
+            return 0;
+        } else {
+            /*
+             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+             */
+            mmu_idx = stage_1_mmu_idx(mmu_idx);
+        }
+    }
+
+    /*
+     * The page table entries may downgrade secure to non-secure, but
+     * cannot upgrade a non-secure translation regime's attributes
+     * to secure.
+     */
+    attrs->secure = regime_is_secure(env, mmu_idx);
+    attrs->user = regime_is_user(env, mmu_idx);
+
+    /*
+     * Fast Context Switch Extension. This doesn't exist at all in v8.
+     * In v7 and earlier it affects all stage 1 translations.
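+     * Addresses below 32MB are remapped by adding the per-process
+     * offset held in FCSEIDR.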
+     */
+    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
+        && !arm_feature(env, ARM_FEATURE_V8)) {
+        if (regime_el(env, mmu_idx) == 3) {
+            address += env->cp15.fcseidr_s;
+        } else {
+            address += env->cp15.fcseidr_ns;
+        }
+    }
+
+    if (arm_feature(env, ARM_FEATURE_PMSA)) {
+        bool ret;
+        *page_size = TARGET_PAGE_SIZE;
+
+        if (arm_feature(env, ARM_FEATURE_V8)) {
+            /* PMSAv8 */
+            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
+                                       phys_ptr, attrs, prot, page_size, fi);
+        } else if (arm_feature(env, ARM_FEATURE_V7)) {
+            /* PMSAv7 */
+            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+                                       phys_ptr, prot, page_size, fi);
+        } else {
+            /* Pre-v7 MPU */
+            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+                                       phys_ptr, prot, fi);
+        }
+        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
+                      " mmu_idx %u -> %s (prot %c%c%c)\n",
+                      access_type == MMU_DATA_LOAD ? "reading" :
+                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
+                      (uint32_t)address, mmu_idx,
+                      ret ? "Miss" : "Hit",
+                      *prot & PAGE_READ ? 'r' : '-',
+                      *prot & PAGE_WRITE ? 'w' : '-',
+                      *prot & PAGE_EXEC ? 'x' : '-');
+
+        return ret;
+    }
+
+    /* Definitely a real MMU, not an MPU */
+
+    if (regime_translation_disabled(env, mmu_idx)) {
+        uint64_t hcr;
+        uint8_t memattr;
+
+        /*
+         * MMU disabled. S1 addresses within aa64 translation regimes are
+         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
+         */
+        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+            int r_el = regime_el(env, mmu_idx);
+            if (arm_el_is_aa64(env, r_el)) {
+                int pamax = arm_pamax(env_archcpu(env));
+                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+                int addrtop, tbi;
+
+                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+                if (access_type == MMU_INST_FETCH) {
+                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+                }
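+                /*
+                 * Bit 55 of the VA selects between the TTBR0 (low) and
+                 * TTBR1 (high) halves of the address space, each of
+                 * which has its own TBI setting.
+                 */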
+                tbi = (tbi >> extract64(address, 55, 1)) & 1;
+                addrtop = (tbi ? 55 : 63);
+
+                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+                    fi->type = ARMFault_AddressSize;
+                    fi->level = 0;
+                    fi->stage2 = false;
+                    return 1;
+                }
+
+                /*
+                 * When TBI is disabled, we've just validated that all of the
+                 * bits above PAMax are zero, so logically we only need to
+                 * clear the top byte for TBI. But it's clearer to follow
+                 * the pseudocode set of addrdesc.paddress.
+                 */
+                address = extract64(address, 0, 52);
+            }
+        }
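+        /* MMU disabled: the VA maps flat to the PA, with full permissions. */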
+        *phys_ptr = address;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        *page_size = TARGET_PAGE_SIZE;
+
+        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+        hcr = arm_hcr_el2_eff(env);
+        cacheattrs->shareability = 0;
+        cacheattrs->is_s2_format = false;
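+        /* These memattr values use the stage 1 (MAIR-style) encoding. */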
+        if (hcr & HCR_DC) {
+            if (hcr & HCR_DCT) {
+                memattr = 0xf0; /* Tagged, Normal, WB, RWA */
+            } else {
+                memattr = 0xff; /* Normal, WB, RWA */
+            }
+        } else if (access_type == MMU_INST_FETCH) {
+            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+                memattr = 0xee; /* Normal, WT, RA, NT */
+            } else {
+                memattr = 0x44; /* Normal, NC, No */
+            }
+            cacheattrs->shareability = 2; /* outer shareable */
+        } else {
+            memattr = 0x00; /* Device, nGnRnE */
+        }
+        cacheattrs->attrs = memattr;
+        return 0;
+    }
+
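+    /*
+     * Dispatch on the translation table format: LPAE long-descriptor,
+     * v6 short-descriptor (SCTLR.XP set), or legacy v5 short-descriptor.
+     */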
+    if (regime_using_lpae_format(env, mmu_idx)) {
+        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
+                                  phys_ptr, attrs, prot, page_size,
+                                  fi, cacheattrs);
+    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+        return get_phys_addr_v6(env, address, access_type, mmu_idx,
+                                phys_ptr, attrs, prot, page_size, fi);
+    } else {
+        return get_phys_addr_v5(env, address, access_type, mmu_idx,
+                                phys_ptr, prot, page_size, fi);
+    }
+}