target/i386/tcg: Remove SEG_ADDL
The 32-bit truncation that SEG_ADDL performed is now handled by
MMU_*32_IDX. In fact, the introduction of MMU_*32_IDX also applied
correct 32-bit wraparound to 16-bit accesses with a high segment base
(e.g. big real mode or vm86 mode), which never used SEG_ADDL in the
first place.
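For illustration (not part of the patch): a minimal standalone C sketch
of the equivalence this relies on. SEG_ADDL truncated the segmented
address at each PUSH/POP call site; with MMU_*32_IDX the same 32-bit
wraparound is applied centrally when the address is looked up through
the MMU index, so the caller can pass the untruncated sum. The
mmu32_translate() helper below is a hypothetical stand-in for that
masking, not QEMU's actual API.

    #include <stdint.h>
    #include <stdio.h>

    /* The macro being removed: truncate at the call site. */
    #define SEG_ADDL(ssp, sp, sp_mask) \
        ((uint32_t)((ssp) + ((sp) & (sp_mask))))

    /* Hypothetical stand-in for the effect of a MMU_*32_IDX lookup:
     * the virtual address is wrapped to 32 bits before translation. */
    static uint64_t mmu32_translate(uint64_t vaddr)
    {
        return (uint32_t)vaddr;
    }

    int main(void)
    {
        uint64_t ssp = 0xffff0000u;     /* high segment base (e.g. vm86) */
        uint64_t sp = 0x00012344u;      /* stack pointer */
        uint64_t sp_mask = 0xffffffffu; /* 32-bit stack segment */

        /* 0x100002344: the raw sum overflows 32 bits. */
        uint64_t sum = ssp + (sp & sp_mask);

        printf("SEG_ADDL:    %#x\n", SEG_ADDL(ssp, sp, sp_mask)); /* 0x2344 */
        printf("MMU_*32_IDX: %#llx\n",
               (unsigned long long)mmu32_translate(sum));         /* 0x2344 */
        return 0;
    }

Both forms land on the same guest address; under this reading the patch
simply moves the truncation out of the PUSHL/POPL macros into the MMU
index, which also covers the 16-bit PUSHW/POPW paths that never went
through SEG_ADDL.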
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Link: https://lore.kernel.org/r/20240617161210.4639-3-richard.henderson@linaro.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index aee3d19..19d6b41 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -579,10 +579,6 @@
} while (0)
#endif
-/* in 64-bit machines, this can overflow. So this segment addition macro
- * can be used to trim the value to 32-bit whenever needed */
-#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
-
/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
{ \
@@ -593,7 +589,7 @@
#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
{ \
sp -= 4; \
- cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
+ cpu_stl_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
}
#define POPW_RA(ssp, sp, sp_mask, val, ra) \
@@ -604,7 +600,7 @@
#define POPL_RA(ssp, sp, sp_mask, val, ra) \
{ \
- val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
+ val = (uint32_t)cpu_ldl_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
sp += 4; \
}