tcg: Add page_bits and page_mask to TCGContext

Disconnect guest page size from TCG compilation.
While this could be done via exec/target_page.h, we want to cache
these values across multiple memory access operations, so we might
as well initialize them early.
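
As an aside, and not part of the patch itself: the field widths used
below are sufficient because page_bits is a small count, and page_mask
is a negative value that sign-extends when widened. A minimal,
hypothetical check of that assumption:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* For 4 KiB pages: page_bits = 12, page_mask = -4096. */
        uint8_t page_bits = 12;
        int page_mask = -(1 << page_bits);

        /* Widening the int mask sign-extends, which is why the
           backends cast before use, e.g. (uint64_t)s->page_mask. */
        assert((uint64_t)page_mask == 0xfffffffffffff000ull);
        return 0;
    }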

The changes within tcg/ are entirely mechanical:

    sed -i s/TARGET_PAGE_BITS/s->page_bits/g
    sed -i s/TARGET_PAGE_MASK/s->page_mask/g
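
For illustration only, a hypothetical helper (not added by the patch)
showing the computation those rewritten lines feed: the backends shift
the address by the cached page_bits and mask with the cached page_mask:

    #include <stdint.h>

    /* Sketch of the two cached TCGContext fields. */
    struct ctx_sketch { uint8_t page_bits; int page_mask; };

    /* Page-index shift each backend emits for its TLB lookup;
       entry_bits stands in for CPU_TLB_ENTRY_BITS. */
    static uint64_t tlb_index_shift(const struct ctx_sketch *s,
                                    uint64_t addr, int entry_bits)
    {
        return addr >> (s->page_bits - entry_bits);
    }

    /* Comparator mask the backends build, cf. the aarch64 and s390x
       hunks: the page mask plus the low alignment bits (a_mask)
       that must be clear in the address. */
    static uint64_t tlb_compare_mask(const struct ctx_sketch *s, int a_mask)
    {
        return (uint64_t)s->page_mask | (uint64_t)a_mask;
    }
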
Reviewed-by: Anton Johansson <anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 99a9d0e..ca306f6 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -357,6 +357,10 @@
tb_set_page_addr1(tb, -1);
tcg_ctx->gen_tb = tb;
tcg_ctx->addr_type = TCG_TYPE_TL;
+#ifdef CONFIG_SOFTMMU
+ tcg_ctx->page_bits = TARGET_PAGE_BITS;
+ tcg_ctx->page_mask = TARGET_PAGE_MASK;
+#endif
tb_overflow:
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index b9748fd..db57c4d 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -560,6 +560,11 @@
int nb_ops;
TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
+#ifdef CONFIG_SOFTMMU
+ int page_mask;
+ uint8_t page_bits;
+#endif
+
TCGRegSet reserved_regs;
intptr_t current_frame_offset;
intptr_t frame_start;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 462663f..c2a1f09 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1663,7 +1663,7 @@
ldst->oi = oi;
ldst->addrlo_reg = addr_reg;
- mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32
+ mask_type = (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32
? TCG_TYPE_I64 : TCG_TYPE_I32);
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */
@@ -1677,7 +1677,7 @@
/* Extract the TLB index from the address into X0. */
tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
TCG_REG_X0, TCG_REG_X0, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
/* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */
tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0);
@@ -1701,7 +1701,7 @@
TCG_REG_X3, addr_reg, s_mask - a_mask);
x3 = TCG_REG_X3;
}
- compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
+ compare_mask = (uint64_t)s->page_mask | a_mask;
/* Store the page mask part of the address into X3. */
tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_X3, x3, compare_mask);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 3c38e86..20cc1cc 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1424,7 +1424,7 @@
/* Extract the tlb index from the address into R0. */
tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
- SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
+ SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
/*
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
@@ -1468,8 +1468,8 @@
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
addrlo, s_mask - a_mask);
}
- if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
+ if (use_armv7_instructions && s->page_bits <= 16) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
t_addr, TCG_REG_TMP, 0);
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
@@ -1479,10 +1479,10 @@
tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
}
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
- SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+ SHIFT_IMM_LSR(s->page_bits));
tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
0, TCG_REG_R2, TCG_REG_TMP,
- SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ SHIFT_IMM_LSL(s->page_bits));
}
if (s->addr_type != TCG_TYPE_I32) {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 14bddc8..c5d4570 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1933,7 +1933,7 @@
trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
if (TCG_TYPE_PTR == TCG_TYPE_I64) {
hrexw = P_REXW;
- if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) {
+ if (s->page_bits + CPU_TLB_DYN_MAX_BITS > 32) {
tlbtype = TCG_TYPE_I64;
tlbrexw = P_REXW;
}
@@ -1942,7 +1942,7 @@
tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
TLB_MASK_TABLE_OFS(mem_index) +
@@ -1963,7 +1963,7 @@
tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
addrlo, s_mask - a_mask);
}
- tlb_mask = TARGET_PAGE_MASK | a_mask;
+ tlb_mask = s->page_mask | a_mask;
tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
/* cmp 0(TCG_REG_L0), TCG_REG_L1 */
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index e5f9884..0bae922 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -870,7 +870,7 @@
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -894,7 +894,7 @@
tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
}
tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
- a_bits, TARGET_PAGE_BITS - 1);
+ a_bits, s->page_bits - 1);
/* Compare masked address with the TLB entry. */
ldst->label_ptr[0] = s->code_ptr;
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index 209d959..ef146b1 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -1189,10 +1189,10 @@
/* Extract the TLB index from the address into TMP3. */
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
} else {
tcg_out_dsrl(s, TCG_TMP3, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
}
tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
@@ -1214,7 +1214,7 @@
* For unaligned accesses, compare against the end of the access to
* verify that it does not cross a page boundary.
*/
- tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask);
+ tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
if (a_mask < s_mask) {
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask);
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index e2851c5..d4269df 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -2077,10 +2077,10 @@
/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 32) {
tcg_out_shri32(s, TCG_REG_R0, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
} else {
tcg_out_shri64(s, TCG_REG_R0, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
}
tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
@@ -2119,7 +2119,7 @@
a_bits = s_bits;
}
tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
- (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+ (32 - a_bits) & 31, 31 - s->page_bits);
} else {
TCGReg t = addrlo;
@@ -2140,13 +2140,13 @@
/* Mask the address for the requested alignment. */
if (TARGET_LONG_BITS == 32) {
tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
- (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+ (32 - a_bits) & 31, 31 - s->page_bits);
} else if (a_bits == 0) {
- tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
+ tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
} else {
tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
- 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
- tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
+ 64 - s->page_bits, s->page_bits - a_bits);
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
}
}
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index de61edb..ff63349 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -937,7 +937,7 @@
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
@@ -952,7 +952,7 @@
tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI,
addr_adj, addr_reg, s_mask - a_mask);
}
- compare_mask = TARGET_PAGE_MASK | a_mask;
+ compare_mask = s->page_mask | a_mask;
if (compare_mask == sextreg(compare_mask, 0, 12)) {
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
} else {
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 466d8e7..dfaa34c 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -1755,7 +1755,7 @@
ldst->addrlo_reg = addr_reg;
tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ s->page_bits - CPU_TLB_ENTRY_BITS);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
@@ -1768,7 +1768,7 @@
* cross pages using the address of the last byte of the access.
*/
a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
- tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
+ tlb_mask = (uint64_t)s->page_mask | a_mask;
if (a_off == 0) {
tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
} else {
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 6e6c26d..d2d0f60 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1056,7 +1056,7 @@
/* Extract the page index, shifted into place for tlb index. */
tcg_out_arithi(s, TCG_REG_T1, addr_reg,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
+ s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);
/* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
@@ -1068,7 +1068,7 @@
h->base = TCG_REG_T1;
/* Mask out the page offset, except for the required alignment. */
- compare_mask = TARGET_PAGE_MASK | a_mask;
+ compare_mask = s->page_mask | a_mask;
if (check_fit_tl(compare_mask, 13)) {
tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
} else {