tlb: Add "ifetch" argument to cpu_mmu_index()
This is set to true when the index is for an instruction fetch
translation.
The core get_page_addr_code() sets it, as do the SOFTMMU_CODE_ACCESS
accessors.
All targets ignore it for now, and all other callers pass "false".
This will allow targets that wish to split the mmu index between
instruction and data accesses to do so. A subsequent patch will
do just that for PowerPC.
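
For illustration, a target that keeps separate instruction and data MMU
modes might implement the hook along these lines (a hypothetical sketch
only; CPUFooState, MMU_INST_IDX and MMU_DATA_IDX are made-up names, and
the real PowerPC conversion follows in the next patch):

    /* Hypothetical sketch: the ifetch flag selects a dedicated
     * instruction-fetch MMU mode, while data accesses keep using
     * the data-side mode. */
    static inline int cpu_mmu_index(CPUFooState *env, bool ifetch)
    {
        return ifetch ? MMU_INST_IDX : MMU_DATA_IDX;
    }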
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Message-Id: <1439796853-4410-2-git-send-email-benh@kernel.crashing.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
diff --git a/cputlb.c b/cputlb.c
index 4bc6c24..fbcebe3 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -452,7 +452,7 @@
CPUState *cpu = ENV_GET_CPU(env1);
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- mmu_idx = cpu_mmu_index(env1);
+ mmu_idx = cpu_mmu_index(env1, true);
if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
(addr & TARGET_PAGE_MASK))) {
cpu_ldub_code(env1, addr);
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 26f4794..ceac168 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -363,7 +363,7 @@
#endif /* (NB_MMU_MODES > 12) */
/* these access are slower, they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
@@ -379,7 +379,7 @@
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index 3f1ece3..ef88ffb 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -376,7 +376,7 @@
PS_USER_MODE = 8
};
-static inline int cpu_mmu_index(CPUAlphaState *env)
+static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
{
if (env->pal_mode) {
return MMU_KERNEL_IDX;
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 206feb5..2ba5fb8 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2878,7 +2878,7 @@
ctx.tb = tb;
ctx.pc = pc_start;
- ctx.mem_idx = cpu_mmu_index(env);
+ ctx.mem_idx = cpu_mmu_index(env, false);
ctx.implver = env->implver;
ctx.singlestep_enabled = cs->singlestep_enabled;
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 5abd8ba..b9068c9 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -1678,7 +1678,7 @@
}
/* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
int el = arm_current_el(env);
@@ -1911,7 +1911,7 @@
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
- *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+ *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
diff --git a/target-arm/helper.c b/target-arm/helper.c
index d453120..2c6ec9d 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -6892,7 +6892,7 @@
uint32_t fsr;
MemTxAttrs attrs = {};
- ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+ ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
&attrs, &prot, &page_size, &fsr);
if (ret) {
@@ -7057,7 +7057,7 @@
int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
void *hostaddr[maxidx];
int try, i;
- unsigned mmu_idx = cpu_mmu_index(env);
+ unsigned mmu_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
for (try = 0; try < 2; try++) {
diff --git a/target-cris/cpu.h b/target-cris/cpu.h
index 10c3f46..82df90d 100644
--- a/target-cris/cpu.h
+++ b/target-cris/cpu.h
@@ -233,7 +233,7 @@
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUCRISState *env)
+static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
{
return !!(env->pregs[PR_CCS] & U_FLAG);
}
diff --git a/target-cris/translate.c b/target-cris/translate.c
index aa90d71..d5b54e1 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -1083,7 +1083,7 @@
static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
@@ -1097,7 +1097,7 @@
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
unsigned int size, int sign)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
@@ -1112,7 +1112,7 @@
static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
unsigned int size)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
diff --git a/target-cris/translate_v10.c b/target-cris/translate_v10.c
index 618c234..da0b2ca 100644
--- a/target-cris/translate_v10.c
+++ b/target-cris/translate_v10.c
@@ -96,7 +96,7 @@
static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
unsigned int size)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 74b674d..151f1b9 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1199,7 +1199,7 @@
#define MMU_KSMAP_IDX 0
#define MMU_USER_IDX 1
#define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
{
return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
(!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
diff --git a/target-i386/translate.c b/target-i386/translate.c
index b1a5ad9..d72fa46 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7942,7 +7942,7 @@
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
- dc->mem_index = cpu_mmu_index(env);
+ dc->mem_index = cpu_mmu_index(env, false);
}
dc->cpuid_features = env->features[FEAT_1_EDX];
dc->cpuid_ext_features = env->features[FEAT_1_ECX];
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index 944777d..cc77263 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -34,7 +34,7 @@
#define NB_MMU_MODES 1
#define TARGET_PAGE_BITS 12
-static inline int cpu_mmu_index(CPULM32State *env)
+static inline int cpu_mmu_index(CPULM32State *env, bool ifetch)
{
return 0;
}
diff --git a/target-m68k/cpu.h b/target-m68k/cpu.h
index 9a62f6c..43a9a1c 100644
--- a/target-m68k/cpu.h
+++ b/target-m68k/cpu.h
@@ -223,7 +223,7 @@
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUM68KState *env)
+static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
{
return (env->sr & SR_S) == 0 ? 1 : 0;
}
diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h
index 7e20e59..402124a 100644
--- a/target-microblaze/cpu.h
+++ b/target-microblaze/cpu.h
@@ -309,7 +309,7 @@
#define MMU_USER_IDX 2
/* See NB_MMU_MODES further up the file. */
-static inline int cpu_mmu_index (CPUMBState *env)
+static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
{
/* Are we in nommu mode?. */
if (!(env->sregs[SR_MSR] & MSR_VM))
diff --git a/target-microblaze/mmu.c b/target-microblaze/mmu.c
index 728da13..2ef1dc2 100644
--- a/target-microblaze/mmu.c
+++ b/target-microblaze/mmu.c
@@ -279,7 +279,7 @@
}
hit = mmu_translate(&env->mmu, &lu,
- v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
+ v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
if (hit) {
env->mmu.regs[MMU_R_TLBX] = lu.idx;
} else
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index c33b16c..3de8944 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -418,7 +418,7 @@
CPUState *cs = CPU(dc->cpu);
TCGv t0, t1;
unsigned int sr, to, rn;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
sr = dc->imm & ((1 << 14) - 1);
to = dc->imm & (1 << 14);
@@ -730,7 +730,7 @@
CPUState *cs = CPU(dc->cpu);
TCGv t0;
unsigned int op;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
op = dc->ir & ((1 << 9) - 1);
switch (op) {
@@ -994,7 +994,7 @@
* address and if that succeeds we write into the destination reg.
*/
v = tcg_temp_new();
- tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
+ tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
@@ -1072,7 +1072,7 @@
this compare and the following write to be atomic. For user
emulation we need to add atomicity between threads. */
tval = tcg_temp_new();
- tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
+ tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
MO_TEUL);
tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
write_carryi(dc, 0);
@@ -1123,7 +1123,7 @@
break;
}
}
- tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
+ tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
/* Verify alignment if needed. */
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
@@ -1219,7 +1219,7 @@
static void dec_br(DisasContext *dc)
{
unsigned int dslot, link, abs, mbar;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
dslot = dc->ir & (1 << 20);
abs = dc->ir & (1 << 19);
@@ -1351,7 +1351,7 @@
static void dec_rts(DisasContext *dc)
{
unsigned int b_bit, i_bit, e_bit;
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
i_bit = dc->ir & (1 << 21);
b_bit = dc->ir & (1 << 22);
@@ -1523,7 +1523,7 @@
/* Insns connected to FSL or AXI stream attached devices. */
static void dec_stream(DisasContext *dc)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
TCGv_i32 t_id, t_ctrl;
int ctrl;
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index c91883d..2acc4b3 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -634,7 +634,7 @@
#define MMU_MODE1_SUFFIX _super
#define MMU_MODE2_SUFFIX _user
#define MMU_USER_IDX 2
-static inline int cpu_mmu_index (CPUMIPSState *env)
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
{
return env->hflags & MIPS_HFLAG_KSU;
}
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 809a061..1aa9e3c 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -3629,7 +3629,7 @@
#if !defined(CONFIG_USER_ONLY)
#define MEMOP_IDX(DF) \
TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
- cpu_mmu_index(env));
+ cpu_mmu_index(env, false));
#else
#define MEMOP_IDX(DF)
#endif
@@ -3685,7 +3685,7 @@
target_ulong addr) \
{ \
wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
- int mmu_idx = cpu_mmu_index(env); \
+ int mmu_idx = cpu_mmu_index(env, false); \
int i; \
MEMOP_IDX(DF) \
ensure_writable_pages(env, addr, mmu_idx, GETRA()); \
diff --git a/target-moxie/cpu.h b/target-moxie/cpu.h
index 29572aa..15ca15b 100644
--- a/target-moxie/cpu.h
+++ b/target-moxie/cpu.h
@@ -127,7 +127,7 @@
#define cpu_gen_code cpu_moxie_gen_code
#define cpu_signal_handler cpu_moxie_signal_handler
-static inline int cpu_mmu_index(CPUMoxieState *env)
+static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
{
return 0;
}
diff --git a/target-openrisc/cpu.h b/target-openrisc/cpu.h
index 36c4f20..560210d 100644
--- a/target-openrisc/cpu.h
+++ b/target-openrisc/cpu.h
@@ -403,7 +403,7 @@
*flags = (env->flags & D_FLAG);
}
-static inline int cpu_mmu_index(CPUOpenRISCState *env)
+static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
{
if (!(env->sr & SR_IME)) {
return MMU_NOMMU_IDX;
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index aca1242..473556e 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -1653,7 +1653,7 @@
dc->ppc = pc_start;
dc->pc = pc_start;
dc->flags = cpu->env.cpucfgr;
- dc->mem_idx = cpu_mmu_index(&cpu->env);
+ dc->mem_idx = cpu_mmu_index(&cpu->env, false);
dc->synced_flags = dc->tb_flags = tb->flags;
dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
dc->singlestep_enabled = cs->singlestep_enabled;
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 6f76674..406d308 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1250,7 +1250,7 @@
#define MMU_MODE1_SUFFIX _kernel
#define MMU_MODE2_SUFFIX _hypv
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUPPCState *env)
+static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
{
return env->mmu_idx;
}
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index b650890..9aeb024 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -308,7 +308,7 @@
#define MMU_SECONDARY_IDX 1
#define MMU_HOME_IDX 2
-static inline int cpu_mmu_index (CPUS390XState *env)
+static inline int cpu_mmu_index (CPUS390XState *env, bool ifetch)
{
switch (env->psw.mask & PSW_MASK_ASC) {
case PSW_ASC_PRIMARY:
diff --git a/target-s390x/mem_helper.c b/target-s390x/mem_helper.c
index 84bf198..90399f1 100644
--- a/target-s390x/mem_helper.c
+++ b/target-s390x/mem_helper.c
@@ -69,7 +69,7 @@
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
uint32_t l)
{
- int mmu_idx = cpu_mmu_index(env);
+ int mmu_idx = cpu_mmu_index(env, false);
while (l > 0) {
void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
@@ -92,7 +92,7 @@
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
uint32_t l)
{
- int mmu_idx = cpu_mmu_index(env);
+ int mmu_idx = cpu_mmu_index(env, false);
while (l > 0) {
void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
diff --git a/target-sh4/cpu.h b/target-sh4/cpu.h
index 34bb3d7..1f68b27 100644
--- a/target-sh4/cpu.h
+++ b/target-sh4/cpu.h
@@ -235,7 +235,7 @@
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUSH4State *env)
+static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
{
return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
}
diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h
index 0522b65..72ea171 100644
--- a/target-sparc/cpu.h
+++ b/target-sparc/cpu.h
@@ -642,7 +642,7 @@
}
#endif
-static inline int cpu_mmu_index(CPUSPARCState *env1)
+static inline int cpu_mmu_index(CPUSPARCState *env1, bool ifetch)
{
#if defined(CONFIG_USER_ONLY)
return MMU_USER_IDX;
diff --git a/target-sparc/mmu_helper.c b/target-sparc/mmu_helper.c
index 2a0c6f0..7495406 100644
--- a/target-sparc/mmu_helper.c
+++ b/target-sparc/mmu_helper.c
@@ -849,7 +849,7 @@
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
hwaddr phys_addr;
- int mmu_idx = cpu_mmu_index(env);
+ int mmu_idx = cpu_mmu_index(env, false);
MemoryRegionSection section;
if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 48fc2ab..4690b46 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -5234,7 +5234,7 @@
last_pc = dc->pc;
dc->npc = (target_ulong) tb->cs_base;
dc->cc_op = CC_OP_DYNAMIC;
- dc->mem_idx = cpu_mmu_index(env);
+ dc->mem_idx = cpu_mmu_index(env, false);
dc->def = env->def;
dc->fpu_enabled = tb_fpu_enabled(tb->flags);
dc->address_mask_32bit = tb_am_enabled(tb->flags);
diff --git a/target-tricore/cpu.h b/target-tricore/cpu.h
index 916ee27..42751e8 100644
--- a/target-tricore/cpu.h
+++ b/target-tricore/cpu.h
@@ -350,7 +350,7 @@
#define cpu_signal_handler cpu_tricore_signal_handler
#define cpu_list tricore_cpu_list
-static inline int cpu_mmu_index(CPUTriCoreState *env)
+static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
{
return 0;
}
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index f02bef4..440f30a 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -8287,7 +8287,7 @@
ctx.tb = tb;
ctx.singlestep_enabled = cs->singlestep_enabled;
ctx.bstate = BS_NONE;
- ctx.mem_idx = cpu_mmu_index(env);
+ ctx.mem_idx = cpu_mmu_index(env, false);
tcg_clear_temp_count();
gen_tb_start(tb);
diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h
index 45e31e5..121e528 100644
--- a/target-unicore32/cpu.h
+++ b/target-unicore32/cpu.h
@@ -131,7 +131,7 @@
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index(CPUUniCore32State *env)
+static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
{
return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0;
}
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index 96bfc82..dbd2c9c 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -492,7 +492,7 @@
#define MMU_MODE2_SUFFIX _ring2
#define MMU_MODE3_SUFFIX _ring3
-static inline int cpu_mmu_index(CPUXtensaState *env)
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
{
return xtensa_get_cring(env);
}