tcg: Split INDEX_op_qemu_{ld,st}* for guest address size

For 32-bit hosts, we cannot simply rely on TCGContext.addr_bits,
as we need one or two host registers to represent the guest address.

Create the new opcodes and update all users. Since we have not
yet eliminated TARGET_LONG_BITS, only one of the two opcodes will
ever be used, so we can get away with treating them the same in
the backends.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
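---
Note: as a minimal standalone illustration of the split (not QEMU code;
pick_qemu_ld_opc() and the enum values below are hypothetical stand-ins),
the front end now has to choose between an a32 and an a64 variant of each
load/store opcode based on the guest address size:

    /* Hypothetical sketch: on a 32-bit host, a 64-bit guest address
       occupies two host registers, so the opcode itself must encode
       which address size applies. */
    typedef enum {
        QEMU_LD_A32_I64,  /* guest address fits in 32 bits */
        QEMU_LD_A64_I64,  /* guest address needs 64 bits */
    } ToyOpcode;

    static ToyOpcode pick_qemu_ld_opc(int guest_addr_bits)
    {
        return guest_addr_bits <= 32 ? QEMU_LD_A32_I64 : QEMU_LD_A64_I64;
    }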
diff --git a/tcg/optimize.c b/tcg/optimize.c
index da400b9..bf975a3 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2184,15 +2184,22 @@
         CASE_OP_32_64_VEC(orc):
             done = fold_orc(&ctx, op);
             break;
-        case INDEX_op_qemu_ld_i32:
-        case INDEX_op_qemu_ld_i64:
-        case INDEX_op_qemu_ld_i128:
+        case INDEX_op_qemu_ld_a32_i32:
+        case INDEX_op_qemu_ld_a64_i32:
+        case INDEX_op_qemu_ld_a32_i64:
+        case INDEX_op_qemu_ld_a64_i64:
+        case INDEX_op_qemu_ld_a32_i128:
+        case INDEX_op_qemu_ld_a64_i128:
             done = fold_qemu_ld(&ctx, op);
             break;
-        case INDEX_op_qemu_st_i32:
-        case INDEX_op_qemu_st8_i32:
-        case INDEX_op_qemu_st_i64:
-        case INDEX_op_qemu_st_i128:
+        case INDEX_op_qemu_st8_a32_i32:
+        case INDEX_op_qemu_st8_a64_i32:
+        case INDEX_op_qemu_st_a32_i32:
+        case INDEX_op_qemu_st_a64_i32:
+        case INDEX_op_qemu_st_a32_i64:
+        case INDEX_op_qemu_st_a64_i64:
+        case INDEX_op_qemu_st_a32_i128:
+        case INDEX_op_qemu_st_a64_i128:
             done = fold_qemu_st(&ctx, op);
             break;
         CASE_OP_32_64(rem):
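
For context, "treating them the same in the backends" means both
address-size variants of a given opcode can fall through to one emitter,
since a given build only ever produces one variant while TARGET_LONG_BITS
remains a compile-time constant. A minimal standalone sketch of that
dispatch shape (ToyOp and emit_one_ld() are hypothetical, not the actual
tcg_out_op code):

    typedef enum { QEMU_LD_A32_I32, QEMU_LD_A64_I32 } ToyOp;

    static void emit_one_ld(void)
    {
        /* emit the host-specific load sequence */
    }

    static void dispatch(ToyOp opc)
    {
        switch (opc) {
        case QEMU_LD_A32_I32:  /* 32-bit guest address */
        case QEMU_LD_A64_I32:  /* 64-bit guest address */
            /* Both variants share one emitter: only one of the two
               is ever generated in a given build. */
            emit_one_ld();
            break;
        }
    }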