Merge tag 'pull-9p-20240329' of https://github.com/cschoenebeck/qemu into staging
Changes for 9p tests only:
* Fix 9p tests for riscv.
* Re-enable 9p 'local' tests for running in CI pipelines.
# -----BEGIN PGP SIGNATURE-----
#
# iQJLBAABCgA1FiEEltjREM96+AhPiFkBNMK1h2Wkc5UFAmYGf9oXHHFlbXVfb3Nz
# QGNydWRlYnl0ZS5jb20ACgkQNMK1h2Wkc5Xy6RAApJ+UCRRf5fbZ6DRKm8ZVVwXa
# YVtwDYy1fEbljN2kud1WeRiw+pTOa/8W4h/QzgR+q0SN3RKhGvGvmKf+X1N+/Z0H
# YpOG1fDmgnyh20VNIwZi2WSoX0pS8DQrlWgGCmsWdOjLmflq4qUJSQ/p/Z2Z0OFJ
# V90w5CtPpFq1X8LgPMKHXe2U0orBlPU3zguw6LPYFPp4LY3p3me9TkufcnuIW3Xg
# Mxp+ZYvkKER9vfhgH0Yz0MecQGkfIEKdP3KNQCgwvynPTbDzXCEj0iaASD5+8dP4
# u8AEoBQccONRm9+iQn1Fk1nhTDTjmhdrD0yfbwqJzbOy4k0W/wFpOR2l+J1QqvFX
# 3LdmalzJ6ZHaT5Kl7QDJj6lBNfVZ9QUS7WKiVnDM5ifoqlfzTgFEr6RXdqSgc/oy
# ax+zF0PhTDckg7/kRCXh+60/kMXG1L2PmlbCOccuk9Z0P9T0GrFhWlvs9Kq+URPh
# r/amaV7+p2XmK/v4sF+IcgZaXwD8ppLFjFVie1/Ol/6kUakzO5Co0WRgCDDW8HkK
# aJz9OBmJnS8Fzf0WwwKMzoNKwrmjiaF9DXIlMYc2carZ4OJNS5ZVpG6Lh/MD8keg
# otzoQpUhfyvu5BZH2sSMrVOJy1VtjP89pcF++zQ5T2RgODCVb0WOgPPS3q1NQowm
# hoxlKvVGZfK/gKOd1+E=
# =xodL
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 29 Mar 2024 08:46:18 GMT
# gpg: using RSA key 96D8D110CF7AF8084F88590134C2B58765A47395
# gpg: issuer "qemu_oss@crudebyte.com"
# gpg: Good signature from "Christian Schoenebeck <qemu_oss@crudebyte.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: ECAB 1A45 4014 1413 BA38 4926 30DB 47C3 A012 D5F4
# Subkey fingerprint: 96D8 D110 CF7A F808 4F88 5901 34C2 B587 65A4 7395
* tag 'pull-9p-20240329' of https://github.com/cschoenebeck/qemu:
qtest/virtio-9p-test.c: remove g_test_slow() gate
qtest/virtio-9p-test.c: create/remove temp dirs after each test
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index c1f57e8..83cc14f 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -634,7 +634,7 @@
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
- vaddr pc = log_pc(cpu, tb);
+ vaddr pc = cpu->cc->get_pc(cpu);
if (qemu_log_in_addr_range(pc)) {
qemu_log("cpu_io_recompile: rewound execution of TB to %016"
VADDR_PRIx "\n", pc);
diff --git a/disas/disas-mon.c b/disas/disas-mon.c
index 48ac492..5d6d9aa 100644
--- a/disas/disas-mon.c
+++ b/disas/disas-mon.c
@@ -34,6 +34,7 @@
disas_initialize_debug_target(&s, cpu);
s.info.fprintf_func = disas_gstring_printf;
s.info.stream = (FILE *)ds; /* abuse this slot */
+ s.info.show_opcodes = true;
if (is_physical) {
s.info.read_memory_func = physical_read_memory;
diff --git a/disas/disas.c b/disas/disas.c
index 17170d2..7e3b0bb 100644
--- a/disas/disas.c
+++ b/disas/disas.c
@@ -211,6 +211,7 @@
s.info.stream = out;
s.info.buffer_vma = code;
s.info.buffer_length = size;
+ s.info.show_opcodes = true;
if (s.info.cap_arch >= 0 && cap_disas_target(&s.info, code, size)) {
return;
diff --git a/ebpf/ebpf_rss.c b/ebpf/ebpf_rss.c
index 2e506f9..d102f3d 100644
--- a/ebpf/ebpf_rss.c
+++ b/ebpf/ebpf_rss.c
@@ -185,13 +185,18 @@
uint16_t *indirections_table,
size_t len)
{
+ char *cursor = ctx->mmap_indirections_table;
+
if (!ebpf_rss_is_loaded(ctx) || indirections_table == NULL ||
len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
return false;
}
- memcpy(ctx->mmap_indirections_table, indirections_table,
- sizeof(*indirections_table) * len);
+ for (size_t i = 0; i < len; i++) {
+ *(uint16_t *)cursor = indirections_table[i];
+ cursor += 8;
+ }
+
return true;
}
diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
index 2e5f58b..2134a18 100644
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -833,6 +833,7 @@
if (offload || gso_type == VIRTIO_NET_HDR_GSO_NONE) {
if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ pkt->virt_hdr.flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
pkt->payload_len);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 9959f19..a6ff000 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3426,7 +3426,7 @@
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc;
assert(n->vhost_started);
- if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
+ if (!n->multiqueue && idx == 2) {
/* Must guard against invalid features and bogus queue index
* from being set by malicious guest, or penetrated through
* buggy migration stream.
@@ -3458,7 +3458,7 @@
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc;
assert(n->vhost_started);
- if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
+ if (!n->multiqueue && idx == 2) {
/* Must guard against invalid features and bogus queue index
* from being set by malicious guest, or penetrated through
* buggy migration stream.
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index c37aba3..c6a5361 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -4,9 +4,6 @@
#include "net/net.h"
#include "hw/virtio/vhost-backend.h"
-#define VHOST_NET_INIT_FAILED \
- "vhost-net requested but could not be initialized"
-
struct vhost_net;
typedef struct vhost_net VHostNetState;
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 4505fd7..be3b9a6 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -1354,7 +1354,7 @@
if (h_len != t_len) {
int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
- | (reserved_va || (shmflg & SHM_REMAP)
+ | (reserved_va || mapped || (shmflg & SHM_REMAP)
? MAP_FIXED : MAP_FIXED_NOREPLACE);
test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
diff --git a/linux-user/strace.c b/linux-user/strace.c
index 8d13e55..b4d1098 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -657,7 +657,6 @@
}
#endif
-#ifdef TARGET_NR_semctl
static void
print_semctl(CPUArchState *cpu_env, const struct syscallname *name,
abi_long arg1, abi_long arg2, abi_long arg3,
@@ -668,7 +667,6 @@
print_ipc_cmd(arg3);
qemu_log(",0x" TARGET_ABI_FMT_lx ")", arg4);
}
-#endif
static void
print_shmat(CPUArchState *cpu_env, const struct syscallname *name,
@@ -698,14 +696,12 @@
{
switch(arg1) {
case IPCOP_semctl:
- qemu_log("semctl(" TARGET_ABI_FMT_ld "," TARGET_ABI_FMT_ld ",",
- arg1, arg2);
- print_ipc_cmd(arg3);
- qemu_log(",0x" TARGET_ABI_FMT_lx ")", arg4);
+ print_semctl(cpu_env, &(const struct syscallname){ .name = "semctl" },
+ arg2, arg3, arg4, arg5, 0, 0);
break;
case IPCOP_shmat:
print_shmat(cpu_env, &(const struct syscallname){ .name = "shmat" },
- arg1, arg4, arg2, 0, 0, 0);
+ arg2, arg5, arg3, 0, 0, 0);
break;
default:
qemu_log(("%s("
diff --git a/net/tap-win32.c b/net/tap-win32.c
index 7b8b4be..7edbd71 100644
--- a/net/tap-win32.c
+++ b/net/tap-win32.c
@@ -707,70 +707,16 @@
}
}
-static bool tap_has_ufo(NetClientState *nc)
-{
- return false;
-}
-
-static bool tap_has_vnet_hdr(NetClientState *nc)
-{
- return false;
-}
-
-int tap_probe_vnet_hdr_len(int fd, int len)
-{
- return 0;
-}
-
-void tap_fd_set_vnet_hdr_len(int fd, int len)
-{
-}
-
-int tap_fd_set_vnet_le(int fd, int is_le)
-{
- return -EINVAL;
-}
-
-int tap_fd_set_vnet_be(int fd, int is_be)
-{
- return -EINVAL;
-}
-
-static void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr)
-{
-}
-
-static void tap_set_offload(NetClientState *nc, int csum, int tso4,
- int tso6, int ecn, int ufo, int uso4, int uso6)
-{
-}
-
struct vhost_net *tap_get_vhost_net(NetClientState *nc)
{
return NULL;
}
-static bool tap_has_vnet_hdr_len(NetClientState *nc, int len)
-{
- return false;
-}
-
-static void tap_set_vnet_hdr_len(NetClientState *nc, int len)
-{
- abort();
-}
-
static NetClientInfo net_tap_win32_info = {
.type = NET_CLIENT_DRIVER_TAP,
.size = sizeof(TAPState),
.receive = tap_receive,
.cleanup = tap_cleanup,
- .has_ufo = tap_has_ufo,
- .has_vnet_hdr = tap_has_vnet_hdr,
- .has_vnet_hdr_len = tap_has_vnet_hdr_len,
- .using_vnet_hdr = tap_using_vnet_hdr,
- .set_offload = tap_set_offload,
- .set_vnet_hdr_len = tap_set_vnet_hdr_len,
};
static int tap_win32_init(NetClientState *peer, const char *model,
diff --git a/net/tap.c b/net/tap.c
index c698b70..baaa2f7 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -743,11 +743,7 @@
if (vhostfdname) {
vhostfd = monitor_fd_param(monitor_cur(), vhostfdname, &err);
if (vhostfd == -1) {
- if (tap->has_vhostforce && tap->vhostforce) {
- error_propagate(errp, err);
- } else {
- warn_report_err(err);
- }
+ error_propagate(errp, err);
goto failed;
}
if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) {
@@ -758,13 +754,8 @@
} else {
vhostfd = open("/dev/vhost-net", O_RDWR);
if (vhostfd < 0) {
- if (tap->has_vhostforce && tap->vhostforce) {
- error_setg_errno(errp, errno,
- "tap: open vhost char device failed");
- } else {
- warn_report("tap: open vhost char device failed: %s",
- strerror(errno));
- }
+ error_setg_errno(errp, errno,
+ "tap: open vhost char device failed");
goto failed;
}
if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) {
@@ -777,11 +768,8 @@
s->vhost_net = vhost_net_init(&options);
if (!s->vhost_net) {
- if (tap->has_vhostforce && tap->vhostforce) {
- error_setg(errp, VHOST_NET_INIT_FAILED);
- } else {
- warn_report(VHOST_NET_INIT_FAILED);
- }
+ error_setg(errp,
+ "vhost-net requested but could not be initialized");
goto failed;
}
} else if (vhostfdname) {
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index a92dc35..a072d0b 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -285,14 +285,20 @@
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
+static inline uint64_t gva_offset_mask(target_ulong psw)
+{
+ return (psw & PSW_W
+ ? MAKE_64BIT_MASK(0, 62)
+ : MAKE_64BIT_MASK(0, 32));
+}
+
static inline target_ulong hppa_form_gva_psw(target_ulong psw, uint64_t spc,
target_ulong off)
{
#ifdef CONFIG_USER_ONLY
return off;
#else
- off &= psw & PSW_W ? MAKE_64BIT_MASK(0, 62) : MAKE_64BIT_MASK(0, 32);
- return spc | off;
+ return spc | (off & gva_offset_mask(psw));
#endif
}
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
index 1bdbcd8..5900fd7 100644
--- a/target/hppa/helper.h
+++ b/target/hppa/helper.h
@@ -86,12 +86,10 @@
#ifndef CONFIG_USER_ONLY
DEF_HELPER_1(halt, noreturn, env)
DEF_HELPER_1(reset, noreturn, env)
-DEF_HELPER_1(getshadowregs, void, env)
DEF_HELPER_1(rfi, void, env)
DEF_HELPER_1(rfi_r, void, env)
DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tl)
-DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tl, env, tl)
DEF_HELPER_FLAGS_3(itlba_pa11, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_FLAGS_3(itlbp_pa11, TCG_CALL_NO_RWG, void, env, tl, tl)
diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode
index f58455d..71074a6 100644
--- a/target/hppa/insns.decode
+++ b/target/hppa/insns.decode
@@ -57,11 +57,16 @@
%neg_to_m 0:1 !function=neg_to_m
%a_to_m 2:1 !function=neg_to_m
%cmpbid_c 13:2 !function=cmpbid_c
+%d_5 5:1 !function=pa20_d
+%d_11 11:1 !function=pa20_d
+%d_13 13:1 !function=pa20_d
####
# Argument set definitions
####
+&empty
+
# All insns that need to form a virtual address should use this set.
&ldst t b x disp sp m scale size
@@ -84,15 +89,16 @@
# Format definitions
####
-@rr_cf_d ...... r:5 ..... cf:4 ...... d:1 t:5 &rr_cf_d
+@rr_cf_d ...... r:5 ..... cf:4 ...... . t:5 &rr_cf_d d=%d_5
@rrr ...... r2:5 r1:5 .... ....... t:5 &rrr
@rrr_cf ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf
-@rrr_cf_d ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d
+@rrr_cf_d ...... r2:5 r1:5 cf:4 ...... . t:5 &rrr_cf_d d=%d_5
@rrr_sh ...... r2:5 r1:5 ........ sh:2 . t:5 &rrr_sh
-@rrr_cf_d_sh ...... r2:5 r1:5 cf:4 .... sh:2 d:1 t:5 &rrr_cf_d_sh
-@rrr_cf_d_sh0 ...... r2:5 r1:5 cf:4 ...... d:1 t:5 &rrr_cf_d_sh sh=0
+@rrr_cf_d_sh ...... r2:5 r1:5 cf:4 .... sh:2 . t:5 &rrr_cf_d_sh d=%d_5
+@rrr_cf_d_sh0 ...... r2:5 r1:5 cf:4 ...... . t:5 &rrr_cf_d_sh d=%d_5 sh=0
@rri_cf ...... r:5 t:5 cf:4 . ........... &rri_cf i=%lowsign_11
-@rri_cf_d ...... r:5 t:5 cf:4 d:1 ........... &rri_cf_d i=%lowsign_11
+@rri_cf_d ...... r:5 t:5 cf:4 . ........... \
+ &rri_cf_d d=%d_11 i=%lowsign_11
@rrb_cf ...... r2:5 r1:5 c:3 ........... n:1 . \
&rrb_c_f disp=%assemble_12
@@ -368,8 +374,10 @@
# Conditional Branches
####
-bb_sar 110000 00000 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12
-bb_imm 110001 p:5 r:5 c:1 1 d:1 ........... n:1 . disp=%assemble_12
+bb_sar 110000 00000 r:5 c:1 1 . ........... n:1 . \
+ disp=%assemble_12 d=%d_13
+bb_imm 110001 p:5 r:5 c:1 1 . ........... n:1 . \
+ disp=%assemble_12 d=%d_13
movb 110010 ..... ..... ... ........... . . @rrb_cf f=0
movbi 110011 ..... ..... ... ........... . . @rib_cf f=0
@@ -628,4 +636,18 @@
xmpyu 001110 ..... ..... 010 .0111 .00 t:5 r1=%ra64 r2=%rb64
# diag
-diag 000101 i:26
+{
+ [
+ diag_btlb 000101 00 0000 0000 0000 0001 0000 0000
+ diag_cout 000101 00 0000 0000 0000 0001 0000 0001
+
+ # For 32-bit PA-7300LC (PCX-L2)
+ diag_getshadowregs_pa1 000101 00 0000 0000 0001 1010 0000 0000
+ diag_putshadowregs_pa1 000101 00 0000 0000 0001 1010 0100 0000
+
+ # For 64-bit PA8700 (PCX-W2)
+ diag_getshadowregs_pa2 000101 00 0111 1000 0001 1000 0100 0000
+ diag_putshadowregs_pa2 000101 00 0111 0000 0001 1000 0100 0000
+ ]
+ diag_unimp 000101 i:26
+}
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index efe638b..90437a9 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -28,7 +28,7 @@
static void eval_interrupt(HPPACPU *cpu)
{
CPUState *cs = CPU(cpu);
- if (cpu->env.cr[CR_EIRR] & cpu->env.cr[CR_EIEM]) {
+ if (cpu->env.cr[CR_EIRR]) {
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
} else {
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
@@ -89,14 +89,6 @@
bql_unlock();
}
-void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val)
-{
- env->cr[CR_EIEM] = val;
- bql_lock();
- eval_interrupt(env_archcpu(env));
- bql_unlock();
-}
-
void hppa_cpu_do_interrupt(CPUState *cs)
{
HPPACPU *cpu = HPPA_CPU(cs);
@@ -280,7 +272,9 @@
}
/* If interrupts are requested and enabled, raise them. */
- if ((env->psw & PSW_I) && (interrupt_request & CPU_INTERRUPT_HARD)) {
+ if ((interrupt_request & CPU_INTERRUPT_HARD)
+ && (env->psw & PSW_I)
+ && (env->cr[CR_EIRR] & env->cr[CR_EIEM])) {
cs->exception_index = EXCP_EXT_INTERRUPT;
hppa_cpu_do_interrupt(cs);
return true;
diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c
index 4a31748..208e51c 100644
--- a/target/hppa/sys_helper.c
+++ b/target/hppa/sys_helper.c
@@ -95,7 +95,7 @@
cpu_hppa_put_psw(env, env->cr[CR_IPSW]);
}
-void HELPER(getshadowregs)(CPUHPPAState *env)
+static void getshadowregs(CPUHPPAState *env)
{
env->gr[1] = env->shadow[0];
env->gr[8] = env->shadow[1];
@@ -108,7 +108,7 @@
void HELPER(rfi_r)(CPUHPPAState *env)
{
- helper_getshadowregs(env);
+ getshadowregs(env);
helper_rfi(env);
}
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 19594f9..8a1a8bc 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -200,6 +200,14 @@
return val ? val : 4; /* 0 == "*<<" */
}
+/*
+ * In many places pa1.x did not decode the bit that later became
+ * the pa2.0 D bit. Suppress D unless the cpu is pa2.0.
+ */
+static int pa20_d(DisasContext *ctx, int val)
+{
+ return ctx->is_pa20 & val;
+}
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"
@@ -586,17 +594,10 @@
return true;
}
-static uint64_t gva_offset_mask(DisasContext *ctx)
-{
- return (ctx->tb_flags & PSW_W
- ? MAKE_64BIT_MASK(0, 62)
- : MAKE_64BIT_MASK(0, 32));
-}
-
static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
uint64_t ival, TCGv_i64 vval)
{
- uint64_t mask = gva_offset_mask(ctx);
+ uint64_t mask = gva_offset_mask(ctx->tb_flags);
if (ival != -1) {
tcg_gen_movi_i64(dest, ival & mask);
@@ -700,19 +701,13 @@
return c == 4 || c == 5;
}
-/* Need extensions from TCGv_i32 to TCGv_i64. */
-static bool cond_need_ext(DisasContext *ctx, bool d)
-{
- return !(ctx->is_pa20 && d);
-}
-
/*
* Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
* the Parisc 1.1 Architecture Reference Manual for details.
*/
static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
- TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
+ TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
DisasCond cond;
TCGv_i64 tmp;
@@ -722,7 +717,7 @@
cond = cond_make_f();
break;
case 1: /* = / <> (Z / !Z) */
- if (cond_need_ext(ctx, d)) {
+ if (!d) {
tmp = tcg_temp_new_i64();
tcg_gen_ext32u_i64(tmp, res);
res = tmp;
@@ -732,7 +727,7 @@
case 2: /* < / >= (N ^ V / !(N ^ V) */
tmp = tcg_temp_new_i64();
tcg_gen_xor_i64(tmp, res, sv);
- if (cond_need_ext(ctx, d)) {
+ if (!d) {
tcg_gen_ext32s_i64(tmp, tmp);
}
cond = cond_make_0_tmp(TCG_COND_LT, tmp);
@@ -749,7 +744,7 @@
*/
tmp = tcg_temp_new_i64();
tcg_gen_eqv_i64(tmp, res, sv);
- if (cond_need_ext(ctx, d)) {
+ if (!d) {
tcg_gen_sextract_i64(tmp, tmp, 31, 1);
tcg_gen_and_i64(tmp, tmp, res);
tcg_gen_ext32u_i64(tmp, tmp);
@@ -759,21 +754,19 @@
}
cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
break;
- case 4: /* NUV / UV (!C / C) */
- /* Only bit 0 of cb_msb is ever set. */
- cond = cond_make_0(TCG_COND_EQ, cb_msb);
+ case 4: /* NUV / UV (!UV / UV) */
+ cond = cond_make_0(TCG_COND_EQ, uv);
break;
- case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
+ case 5: /* ZNV / VNZ (!UV | Z / UV & !Z) */
tmp = tcg_temp_new_i64();
- tcg_gen_neg_i64(tmp, cb_msb);
- tcg_gen_and_i64(tmp, tmp, res);
- if (cond_need_ext(ctx, d)) {
+ tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
+ if (!d) {
tcg_gen_ext32u_i64(tmp, tmp);
}
cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
break;
case 6: /* SV / NSV (V / !V) */
- if (cond_need_ext(ctx, d)) {
+ if (!d) {
tmp = tcg_temp_new_i64();
tcg_gen_ext32s_i64(tmp, sv);
sv = tmp;
@@ -834,7 +827,7 @@
if (cf & 1) {
tc = tcg_invert_cond(tc);
}
- if (cond_need_ext(ctx, d)) {
+ if (!d) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -911,7 +904,7 @@
g_assert_not_reached();
}
- if (cond_need_ext(ctx, d)) {
+ if (!d) {
TCGv_i64 tmp = tcg_temp_new_i64();
if (ext_uns) {
@@ -943,83 +936,50 @@
return do_log_cond(ctx, c * 2 + f, d, res);
}
-/* Similar, but for unit conditions. */
-
-static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
- TCGv_i64 in1, TCGv_i64 in2)
+/* Similar, but for unit zero conditions. */
+static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
- DisasCond cond;
- TCGv_i64 tmp, cb = NULL;
+ TCGv_i64 tmp;
uint64_t d_repl = d ? 0x0000000100000001ull : 1;
-
- if (cf & 8) {
- /* Since we want to test lots of carry-out bits all at once, do not
- * do our normal thing and compute carry-in of bit B+1 since that
- * leaves us with carry bits spread across two words.
- */
- cb = tcg_temp_new_i64();
- tmp = tcg_temp_new_i64();
- tcg_gen_or_i64(cb, in1, in2);
- tcg_gen_and_i64(tmp, in1, in2);
- tcg_gen_andc_i64(cb, cb, res);
- tcg_gen_or_i64(cb, cb, tmp);
- }
+ uint64_t ones = 0, sgns = 0;
switch (cf >> 1) {
- case 0: /* never / TR */
- case 1: /* undefined */
- case 5: /* undefined */
- cond = cond_make_f();
+ case 1: /* SBW / NBW */
+ if (d) {
+ ones = d_repl;
+ sgns = d_repl << 31;
+ }
break;
-
case 2: /* SBZ / NBZ */
- /* See hasless(v,1) from
- * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
- */
- tmp = tcg_temp_new_i64();
- tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
- tcg_gen_andc_i64(tmp, tmp, res);
- tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
- cond = cond_make_0(TCG_COND_NE, tmp);
+ ones = d_repl * 0x01010101u;
+ sgns = ones << 7;
break;
-
case 3: /* SHZ / NHZ */
- tmp = tcg_temp_new_i64();
- tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
- tcg_gen_andc_i64(tmp, tmp, res);
- tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
- cond = cond_make_0(TCG_COND_NE, tmp);
+ ones = d_repl * 0x00010001u;
+ sgns = ones << 15;
break;
-
- case 4: /* SDC / NDC */
- tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
- cond = cond_make_0(TCG_COND_NE, cb);
- break;
-
- case 6: /* SBC / NBC */
- tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
- cond = cond_make_0(TCG_COND_NE, cb);
- break;
-
- case 7: /* SHC / NHC */
- tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
- cond = cond_make_0(TCG_COND_NE, cb);
- break;
-
- default:
- g_assert_not_reached();
}
- if (cf & 1) {
- cond.c = tcg_invert_cond(cond.c);
+ if (ones == 0) {
+ /* Undefined, or 0/1 (never/always). */
+ return cf & 1 ? cond_make_t() : cond_make_f();
}
- return cond;
+ /*
+ * See hasless(v,1) from
+ * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
+ */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_subi_i64(tmp, res, ones);
+ tcg_gen_andc_i64(tmp, tmp, res);
+ tcg_gen_andi_i64(tmp, tmp, sgns);
+
+ return cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, tmp);
}
static TCGv_i64 get_carry(DisasContext *ctx, bool d,
TCGv_i64 cb, TCGv_i64 cb_msb)
{
- if (cond_need_ext(ctx, d)) {
+ if (!d) {
TCGv_i64 t = tcg_temp_new_i64();
tcg_gen_extract_i64(t, cb, 32, 1);
return t;
@@ -1034,7 +994,8 @@
/* Compute signed overflow for addition. */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
- TCGv_i64 in1, TCGv_i64 in2)
+ TCGv_i64 in1, TCGv_i64 in2,
+ TCGv_i64 orig_in1, int shift, bool d)
{
TCGv_i64 sv = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
@@ -1043,9 +1004,49 @@
tcg_gen_xor_i64(tmp, in1, in2);
tcg_gen_andc_i64(sv, sv, tmp);
+ switch (shift) {
+ case 0:
+ break;
+ case 1:
+ /* Shift left by one and compare the sign. */
+ tcg_gen_add_i64(tmp, orig_in1, orig_in1);
+ tcg_gen_xor_i64(tmp, tmp, orig_in1);
+ /* Incorporate into the overflow. */
+ tcg_gen_or_i64(sv, sv, tmp);
+ break;
+ default:
+ {
+ int sign_bit = d ? 63 : 31;
+
+ /* Compare the sign against all lower bits. */
+ tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
+ tcg_gen_xor_i64(tmp, tmp, orig_in1);
+ /*
+ * If one of the bits shifting into or through the sign
+ * differs, then we have overflow.
+ */
+ tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
+ tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
+ tcg_constant_i64(-1), sv);
+ }
+ }
return sv;
}
+/* Compute unsigned overflow for addition. */
+static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
+ TCGv_i64 in1, int shift, bool d)
+{
+ if (shift == 0) {
+ return get_carry(ctx, d, cb, cb_msb);
+ } else {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
+ tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
+ return tmp;
+ }
+}
+
/* Compute signed overflow for subtraction. */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
TCGv_i64 in1, TCGv_i64 in2)
@@ -1060,19 +1061,19 @@
return sv;
}
-static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
+static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
TCGv_i64 in2, unsigned shift, bool is_l,
bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
- TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
+ TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
unsigned c = cf >> 1;
DisasCond cond;
dest = tcg_temp_new_i64();
cb = NULL;
cb_msb = NULL;
- cb_cond = NULL;
+ in1 = orig_in1;
if (shift) {
tmp = tcg_temp_new_i64();
tcg_gen_shli_i64(tmp, in1, shift);
@@ -1090,9 +1091,6 @@
}
tcg_gen_xor_i64(cb, in1, in2);
tcg_gen_xor_i64(cb, cb, dest);
- if (cond_need_cb(c)) {
- cb_cond = get_carry(ctx, d, cb, cb_msb);
- }
} else {
tcg_gen_add_i64(dest, in1, in2);
if (is_c) {
@@ -1103,15 +1101,23 @@
/* Compute signed overflow if required. */
sv = NULL;
if (is_tsv || cond_need_sv(c)) {
- sv = do_add_sv(ctx, dest, in1, in2);
+ sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
if (is_tsv) {
- /* ??? Need to include overflow from shift. */
+ if (!d) {
+ tcg_gen_ext32s_i64(sv, sv);
+ }
gen_helper_tsv(tcg_env, sv);
}
}
+ /* Compute unsigned overflow if required. */
+ uv = NULL;
+ if (cond_need_cb(c)) {
+ uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
+ }
+
/* Emit any conditional trap before any writeback. */
- cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
+ cond = do_cond(ctx, cf, d, dest, uv, sv);
if (is_tc) {
tmp = tcg_temp_new_i64();
tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
@@ -1196,6 +1202,9 @@
if (is_tsv || cond_need_sv(c)) {
sv = do_sub_sv(ctx, dest, in1, in2);
if (is_tsv) {
+ if (!d) {
+ tcg_gen_ext32s_i64(sv, sv);
+ }
gen_helper_tsv(tcg_env, sv);
}
}
@@ -1310,34 +1319,86 @@
return nullify_end(ctx);
}
-static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
- TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
- void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
+static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
+ TCGv_i64 in2, unsigned cf, bool d,
+ bool is_tc, bool is_add)
{
- TCGv_i64 dest;
+ TCGv_i64 dest = tcg_temp_new_i64();
+ uint64_t test_cb = 0;
DisasCond cond;
- if (cf == 0) {
- dest = dest_gpr(ctx, rt);
- fn(dest, in1, in2);
- save_gpr(ctx, rt, dest);
- cond_free(&ctx->null_cond);
- } else {
- dest = tcg_temp_new_i64();
- fn(dest, in1, in2);
-
- cond = do_unit_cond(cf, d, dest, in1, in2);
-
- if (is_tc) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
- gen_helper_tcond(tcg_env, tmp);
+ /* Select which carry-out bits to test. */
+ switch (cf >> 1) {
+ case 4: /* NDC / SDC -- 4-bit carries */
+ test_cb = dup_const(MO_8, 0x88);
+ break;
+ case 5: /* NWC / SWC -- 32-bit carries */
+ if (d) {
+ test_cb = dup_const(MO_32, INT32_MIN);
+ } else {
+ cf &= 1; /* undefined -- map to never/always */
}
- save_gpr(ctx, rt, dest);
-
- cond_free(&ctx->null_cond);
- ctx->null_cond = cond;
+ break;
+ case 6: /* NBC / SBC -- 8-bit carries */
+ test_cb = dup_const(MO_8, INT8_MIN);
+ break;
+ case 7: /* NHC / SHC -- 16-bit carries */
+ test_cb = dup_const(MO_16, INT16_MIN);
+ break;
}
+ if (!d) {
+ test_cb = (uint32_t)test_cb;
+ }
+
+ if (!test_cb) {
+ /* No need to compute carries if we don't need to test them. */
+ if (is_add) {
+ tcg_gen_add_i64(dest, in1, in2);
+ } else {
+ tcg_gen_sub_i64(dest, in1, in2);
+ }
+ cond = do_unit_zero_cond(cf, d, dest);
+ } else {
+ TCGv_i64 cb = tcg_temp_new_i64();
+
+ if (d) {
+ TCGv_i64 cb_msb = tcg_temp_new_i64();
+ if (is_add) {
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
+ tcg_gen_xor_i64(cb, in1, in2);
+ } else {
+ /* See do_sub, !is_b. */
+ TCGv_i64 one = tcg_constant_i64(1);
+ tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
+ tcg_gen_eqv_i64(cb, in1, in2);
+ }
+ tcg_gen_xor_i64(cb, cb, dest);
+ tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
+ } else {
+ if (is_add) {
+ tcg_gen_add_i64(dest, in1, in2);
+ tcg_gen_xor_i64(cb, in1, in2);
+ } else {
+ tcg_gen_sub_i64(dest, in1, in2);
+ tcg_gen_eqv_i64(cb, in1, in2);
+ }
+ tcg_gen_xor_i64(cb, cb, dest);
+ tcg_gen_shri_i64(cb, cb, 1);
+ }
+
+ tcg_gen_andi_i64(cb, cb, test_cb);
+ cond = cond_make_0_tmp(cf & 1 ? TCG_COND_EQ : TCG_COND_NE, cb);
+ }
+
+ if (is_tc) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
+ gen_helper_tcond(tcg_env, tmp);
+ }
+ save_gpr(ctx, rt, dest);
+
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
}
#ifndef CONFIG_USER_ONLY
@@ -1403,7 +1464,8 @@
*pofs = ofs;
*pgva = addr = tcg_temp_new_i64();
- tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
+ tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
+ gva_offset_mask(ctx->tb_flags));
#ifndef CONFIG_USER_ONLY
if (!is_phys) {
tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
@@ -2055,11 +2117,9 @@
nullify_over(ctx);
tmp = dest_gpr(ctx, rt);
if (translator_io_start(&ctx->base)) {
- gen_helper_read_interval_timer(tmp);
ctx->base.is_jmp = DISAS_IAQ_N_STALE;
- } else {
- gen_helper_read_interval_timer(tmp);
}
+ gen_helper_read_interval_timer(tmp);
save_gpr(ctx, rt, tmp);
return nullify_end(ctx);
case 26:
@@ -2135,13 +2195,16 @@
switch (ctl) {
case CR_IT:
+ if (translator_io_start(&ctx->base)) {
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ }
gen_helper_write_interval_timer(tcg_env, reg);
break;
case CR_EIRR:
+ /* Helper modifies interrupt lines and is therefore IO. */
+ translator_io_start(&ctx->base);
gen_helper_write_eirr(tcg_env, reg);
- break;
- case CR_EIEM:
- gen_helper_write_eiem(tcg_env, reg);
+ /* Exit to re-evaluate interrupts in the main loop. */
ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
break;
@@ -2167,6 +2230,10 @@
#endif
break;
+ case CR_EIEM:
+ /* Exit to re-evaluate interrupts in the main loop. */
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
+ /* FALLTHRU */
default:
tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
break;
@@ -2318,14 +2385,37 @@
#endif
}
-static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
+static bool do_getshadowregs(DisasContext *ctx)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
-#ifndef CONFIG_USER_ONLY
nullify_over(ctx);
- gen_helper_getshadowregs(tcg_env);
+ tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
+ tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
+ tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
+ tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
+ tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
+ tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
+ tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
return nullify_end(ctx);
-#endif
+}
+
+static bool do_putshadowregs(DisasContext *ctx)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+ tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
+ tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
+ tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
+ tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
+ tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
+ tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
+ tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
+ return nullify_end(ctx);
+}
+
+static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
+{
+ return do_getshadowregs(ctx);
}
static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
@@ -2722,14 +2812,24 @@
static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
{
- TCGv_i64 tcg_r1, tcg_r2;
+ TCGv_i64 tcg_r1, tcg_r2, dest;
if (a->cf) {
nullify_over(ctx);
}
+
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
- do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
+ dest = dest_gpr(ctx, a->t);
+
+ tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
+ save_gpr(ctx, a->t, dest);
+
+ cond_free(&ctx->null_cond);
+ if (a->cf) {
+ ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
+ }
+
return nullify_end(ctx);
}
@@ -2737,14 +2837,34 @@
{
TCGv_i64 tcg_r1, tcg_r2, tmp;
- if (a->cf) {
- nullify_over(ctx);
+ if (a->cf == 0) {
+ tcg_r2 = load_gpr(ctx, a->r2);
+ tmp = dest_gpr(ctx, a->t);
+
+ if (a->r1 == 0) {
+ /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
+ tcg_gen_not_i64(tmp, tcg_r2);
+ } else {
+ /*
+ * Recall that r1 - r2 == r1 + ~r2 + 1.
+ * Thus r1 + ~r2 == r1 - r2 - 1,
+ * which does not require an extra temporary.
+ */
+ tcg_r1 = load_gpr(ctx, a->r1);
+ tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
+ tcg_gen_subi_i64(tmp, tmp, 1);
+ }
+ save_gpr(ctx, a->t, tmp);
+ cond_free(&ctx->null_cond);
+ return true;
}
+
+ nullify_over(ctx);
tcg_r1 = load_gpr(ctx, a->r1);
tcg_r2 = load_gpr(ctx, a->r2);
tmp = tcg_temp_new_i64();
tcg_gen_not_i64(tmp, tcg_r2);
- do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
+ do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
return nullify_end(ctx);
}
@@ -2765,14 +2885,14 @@
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
+ tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
if (!is_i) {
tcg_gen_not_i64(tmp, tmp);
}
tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
tcg_gen_muli_i64(tmp, tmp, 6);
- do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
- is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
+ do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
+ a->cf, a->d, false, is_i);
return nullify_end(ctx);
}
@@ -2789,7 +2909,6 @@
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
TCGv_i64 dest, add1, add2, addc, in1, in2;
- TCGv_i64 cout;
nullify_over(ctx);
@@ -2826,19 +2945,23 @@
tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
- /* Write back PSW[V] for the division step. */
- cout = get_psw_carry(ctx, false);
- tcg_gen_neg_i64(cpu_psw_v, cout);
+ /*
+ * Write back PSW[V] for the division step.
+ * Shift cb{8} from where it lives in bit 32 to bit 31,
+ * so that it overlaps r2{32} in bit 31.
+ */
+ tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
/* Install the new nullification. */
if (a->cf) {
- TCGv_i64 sv = NULL;
+ TCGv_i64 sv = NULL, uv = NULL;
if (cond_need_sv(a->cf >> 1)) {
- /* ??? The lshift is supposed to contribute to overflow. */
- sv = do_add_sv(ctx, dest, add1, add2);
+ sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
+ } else if (cond_need_cb(a->cf >> 1)) {
+ uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
}
- ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
+ ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
}
return nullify_end(ctx);
@@ -3365,7 +3488,7 @@
tcg_gen_add_i64(dest, in1, in2);
}
if (cond_need_sv(c)) {
- sv = do_add_sv(ctx, dest, in1, in2);
+ sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
}
cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
@@ -3394,12 +3517,12 @@
tmp = tcg_temp_new_i64();
tcg_r = load_gpr(ctx, a->r);
- if (cond_need_ext(ctx, a->d)) {
+ if (a->d) {
+ tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
+ } else {
/* Force shift into [32,63] */
tcg_gen_ori_i64(tmp, cpu_sar, 32);
tcg_gen_shl_i64(tmp, tcg_r, tmp);
- } else {
- tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
}
cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
@@ -3416,7 +3539,7 @@
tmp = tcg_temp_new_i64();
tcg_r = load_gpr(ctx, a->r);
- p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
+ p = a->p | (a->d ? 0 : 32);
tcg_gen_shli_i64(tmp, tcg_r, p);
cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
@@ -3817,7 +3940,7 @@
load_spr(ctx, new_spc, a->sp);
if (a->l) {
copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
- tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
+ tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
}
if (a->n && use_nullify_skip(ctx)) {
copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
@@ -3825,6 +3948,7 @@
copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
tcg_gen_mov_i64(cpu_iasq_f, new_spc);
tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
+ nullify_set(ctx, 0);
} else {
copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
if (ctx->iaoq_b == -1) {
@@ -3880,7 +4004,7 @@
}
/* No change for non-gateway pages or for priv decrease. */
if (type >= 4 && type - 4 < ctx->privilege) {
- dest = deposit32(dest, 0, 2, type - 4);
+ dest = deposit64(dest, 0, 2, type - 4);
}
} else {
dest &= -4; /* priv = 0 */
@@ -4463,23 +4587,51 @@
return nullify_end(ctx);
}
-static bool trans_diag(DisasContext *ctx, arg_diag *a)
+/* Emulate PDC BTLB, called by SeaBIOS-hppa */
+static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
- if (a->i == 0x100) {
- /* emulate PDC BTLB, called by SeaBIOS-hppa */
- nullify_over(ctx);
- gen_helper_diag_btlb(tcg_env);
- return nullify_end(ctx);
- }
- if (a->i == 0x101) {
- /* print char in %r26 to first serial console, used by SeaBIOS-hppa */
- nullify_over(ctx);
- gen_helper_diag_console_output(tcg_env);
- return nullify_end(ctx);
- }
+ nullify_over(ctx);
+ gen_helper_diag_btlb(tcg_env);
+ return nullify_end(ctx);
#endif
+}
+
+/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
+static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ nullify_over(ctx);
+ gen_helper_diag_console_output(tcg_env);
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
+{
+ return !ctx->is_pa20 && do_getshadowregs(ctx);
+}
+
+static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a)
+{
+ return ctx->is_pa20 && do_getshadowregs(ctx);
+}
+
+static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
+{
+ return !ctx->is_pa20 && do_putshadowregs(ctx);
+}
+
+static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a)
+{
+ return ctx->is_pa20 && do_putshadowregs(ctx);
+}
+
+static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
return true;
}
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 752cc5c..275db77 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2376,7 +2376,7 @@
* will not reduce the number of input sign repetitions.
*/
sign = (s_mask & -s_mask) >> 1;
- if (!(z_mask & sign)) {
+ if (sign && !(z_mask & sign)) {
ctx->s_mask = s_mask;
}
break;
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index ea3e232..0efd565 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -10,6 +10,7 @@
# Base architecture tests
AARCH64_TESTS=fcvt pcalign-a64 lse2-fault
+AARCH64_TESTS += test-2248
fcvt: LDFLAGS+=-lm
diff --git a/tests/tcg/aarch64/test-2248.c b/tests/tcg/aarch64/test-2248.c
new file mode 100644
index 0000000..aac2e17
--- /dev/null
+++ b/tests/tcg/aarch64/test-2248.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* See https://gitlab.com/qemu-project/qemu/-/issues/2248 */
+
+#include <assert.h>
+
+__attribute__((noinline))
+long test(long x, long y, long sh)
+{
+ long r;
+ asm("cmp %1, %2\n\t"
+ "cset x12, lt\n\t"
+ "and w11, w12, #0xff\n\t"
+ "cmp w11, #0\n\t"
+ "csetm x14, ne\n\t"
+ "lsr x13, x14, %3\n\t"
+ "sxtb %0, w13"
+ : "=r"(r)
+ : "r"(x), "r"(y), "r"(sh)
+ : "x11", "x12", "x13", "x14");
+ return r;
+}
+
+int main()
+{
+ long r = test(0, 1, 2);
+ assert(r == -1);
+ return 0;
+}
diff --git a/tests/tcg/multiarch/linux/linux-shmat-null.c b/tests/tcg/multiarch/linux/linux-shmat-null.c
new file mode 100644
index 0000000..94eaaec
--- /dev/null
+++ b/tests/tcg/multiarch/linux/linux-shmat-null.c
@@ -0,0 +1,38 @@
+/*
+ * Test shmat(NULL).
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+int main(void)
+{
+ int shmid;
+ char *p;
+ int err;
+
+ /* Create, attach and initialize shared memory. */
+ shmid = shmget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
+ assert(shmid != -1);
+ p = shmat(shmid, NULL, 0);
+ assert(p != (void *)-1);
+ *p = 42;
+
+ /* Reattach, check that the value is still there. */
+ err = shmdt(p);
+ assert(err == 0);
+ p = shmat(shmid, NULL, 0);
+ assert(p != (void *)-1);
+ assert(*p == 42);
+
+ /* Detach. */
+ err = shmdt(p);
+ assert(err == 0);
+ err = shmctl(shmid, IPC_RMID, NULL);
+ assert(err == 0);
+
+ return EXIT_SUCCESS;
+}