cputlb: Move env->vtlb_index to env->tlb_d.vindex
The rest of the tlb victim cache is per-tlb;
the next use index should be as well.
Tested-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
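
For illustration, the net effect of the change can be sketched outside of QEMU
as a small standalone C program. This is only a sketch, not QEMU code:
NB_MMU_MODES and CPU_VTLB_SIZE are given example values here, and
CPUStateSketch / next_victim_slot are hypothetical stand-ins for CPUArchState
and the inline victim-selection logic in tlb_set_page_with_attrs.

    /* Sketch: per-mmu-index round-robin victim cursor, as after this patch. */
    #include <stddef.h>
    #include <stdio.h>

    #define NB_MMU_MODES  4   /* illustrative value */
    #define CPU_VTLB_SIZE 8   /* illustrative value */

    typedef struct {
        /* large_page_addr / large_page_mask elided from the sketch */
        size_t vindex;        /* next victim slot to use, per MMU index */
    } CPUTLBDescSketch;

    typedef struct {
        CPUTLBDescSketch tlb_d[NB_MMU_MODES];
    } CPUStateSketch;

    /* Pick the victim slot for one MMU index, advancing only that cursor. */
    static unsigned next_victim_slot(CPUStateSketch *env, int mmu_idx)
    {
        return env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
    }

    int main(void)
    {
        CPUStateSketch env = { 0 };

        for (int i = 0; i < 3; i++) {
            printf("mmu_idx 0 -> slot %u\n", next_victim_slot(&env, 0));
        }
        /* A different MMU index starts from its own slot 0. */
        printf("mmu_idx 1 -> slot %u\n", next_victim_slot(&env, 1));
        return 0;
    }

With the old shared env->vtlb_index, an eviction in one MMU mode advanced the
cursor seen by every other mode; keeping the cursor in CPUTLBDesc makes each
mode's victim replacement independent, matching the rest of the per-mmu_idx
state.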
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 8060ec9..2cd3886 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -119,6 +119,7 @@
     memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
     env->tlb_d[mmu_idx].large_page_addr = -1;
     env->tlb_d[mmu_idx].large_page_mask = -1;
+    env->tlb_d[mmu_idx].vindex = 0;
 }
 
 /* This is OK because CPU architectures generally permit an
@@ -149,8 +150,6 @@
     qemu_spin_unlock(&env->tlb_c.lock);
 
     cpu_tb_jmp_cache_clear(cpu);
-
-    env->vtlb_index = 0;
 }
 
 static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
@@ -667,7 +666,7 @@
      * different page; otherwise just overwrite the stale data.
      */
     if (!tlb_hit_page_anyprot(te, vaddr_page)) {
-        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
 
         /* Evict the old entry into the victim tlb. */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index df8ae18..181c0db 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -150,6 +150,8 @@
      */
     target_ulong large_page_addr;
     target_ulong large_page_mask;
+    /* The next index to use in the tlb victim table. */
+    size_t vindex;
 } CPUTLBDesc;
 
 /*
@@ -178,8 +180,7 @@
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
     CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
     CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \
-    size_t tlb_flush_count; \
-    target_ulong vtlb_index; \
+    size_t tlb_flush_count;
 
 #else