Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180926' into staging
Queued tcg patches
# gpg: Signature made Wed 26 Sep 2018 19:27:22 BST
# gpg: using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F
* remotes/rth/tags/pull-tcg-20180926:
tcg/i386: fix vector operations on 32-bit hosts
qht-bench: add -p flag to precompute hash values
qht: constify arguments to some internal functions
qht: constify qht_statistics_init
qht: constify qht_lookup
qht: fix comment in qht_bucket_remove_entry
qht: drop ht argument from qht iterators
test-qht: speed up + test qht_resize
test-qht: test deletion of the last entry in a bucket
test-qht: test removal of non-existent entries
test-qht: test qht_iter_remove
qht: add qht_iter_remove
qht: remove unused map param from qht_remove__locked
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/Makefile b/Makefile
index fe623e4..f42e176 100644
--- a/Makefile
+++ b/Makefile
@@ -978,7 +978,7 @@
qemu-doc.html qemu-doc.info qemu-doc.pdf qemu-doc.txt: \
qemu-img.texi qemu-nbd.texi qemu-options.texi qemu-option-trace.texi \
- qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
+ qemu-deprecated.texi qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
qemu-monitor-info.texi docs/qemu-block-drivers.texi \
docs/qemu-cpu-models.texi
diff --git a/block/blkreplay.c b/block/blkreplay.c
old mode 100755
new mode 100644
diff --git a/block/vmdk.c b/block/vmdk.c
index a9d0084..2c9e86d 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -1698,6 +1698,27 @@
vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov)
{
+ if (bytes == 0) {
+ /* The caller signals EOF by passing bytes == 0.
+ * When we receive it, we align EOF to a sector boundary. */
+ BDRVVmdkState *s = bs->opaque;
+ int i, ret;
+ int64_t length;
+
+ for (i = 0; i < s->num_extents; i++) {
+ length = bdrv_getlength(s->extents[i].file->bs);
+ if (length < 0) {
+ return length;
+ }
+ length = QEMU_ALIGN_UP(length, BDRV_SECTOR_SIZE);
+ ret = bdrv_truncate(s->extents[i].file, length,
+ PREALLOC_MODE_OFF, NULL);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ return 0;
+ }
return vmdk_co_pwritev(bs, offset, bytes, qiov, 0);
}
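A quick illustration of the rounding above (values are illustrative, not taken from the patch): with BDRV_SECTOR_SIZE of 512 bytes, an extent file that is currently 1000 bytes long is grown via bdrv_truncate() to QEMU_ALIGN_UP(1000, 512) == 1024 bytes, so each extent ends exactly on a sector boundary.

    /* sketch: QEMU_ALIGN_UP is assumed to behave like this round-up helper */
    static inline int64_t align_up_sketch(int64_t n, int64_t m)
    {
        return ((n + m - 1) / m) * m;   /* align_up_sketch(1000, 512) == 1024 */
    }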
diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt
index d7c7dcd..70cfb9c 100644
--- a/docs/COLO-FT.txt
+++ b/docs/COLO-FT.txt
@@ -104,7 +104,7 @@
COLO Proxy:
Delivers packets to Primary and Secondary, and then compares the responses from
both sides. Then decides whether to start a checkpoint according to some rules.
-Please refer to docs/colo-proxy.txt for more informations.
+Please refer to docs/colo-proxy.txt for more information.
Note:
HeartBeat has not been implemented yet, so you need to trigger failover process
diff --git a/docs/interop/vhost-user.txt b/docs/interop/vhost-user.txt
index f59667f..c219471 100644
--- a/docs/interop/vhost-user.txt
+++ b/docs/interop/vhost-user.txt
@@ -666,12 +666,12 @@
Equivalent ioctl: VHOST_SET_VRING_ENDIAN
Master payload: vring state description
- Set the endianess of a VQ for legacy devices. Little-endian is indicated
+ Set the endianness of a VQ for legacy devices. Little-endian is indicated
with state.num set to 0 and big-endian is indicated with state.num set
to 1. Other values are invalid.
This request should be sent only when VHOST_USER_PROTOCOL_F_CROSS_ENDIAN
has been negotiated.
- Backends that negotiated this feature should handle both endianesses
+ Backends that negotiated this feature should handle both endiannesses
and expect this message once (per VQ) during device configuration
(ie. before the master starts the VQ).
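A minimal sketch of how a backend might apply this request (the types and helper below are hypothetical; only the 0/1 encoding comes from the text above):

    #include <stdbool.h>

    struct vq_cfg { bool big_endian; };   /* hypothetical per-VQ state */

    /* returns 0 on success, -1 for the invalid values mentioned above */
    static int set_vring_endian_sketch(struct vq_cfg *vq, unsigned int num)
    {
        if (num == 0) {
            vq->big_endian = false;   /* little-endian */
        } else if (num == 1) {
            vq->big_endian = true;    /* big-endian */
        } else {
            return -1;
        }
        return 0;
    }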
diff --git a/docs/replay.txt b/docs/replay.txt
index 2e21e9c..3497585 100644
--- a/docs/replay.txt
+++ b/docs/replay.txt
@@ -320,7 +320,7 @@
async event id from the following list:
- REPLAY_ASYNC_EVENT_BH. Bottom-half callback. This event synchronizes
callbacks that affect virtual machine state, but are normally called
- asyncronously.
+ asynchronously.
Argument: 8-byte operation id.
- REPLAY_ASYNC_EVENT_INPUT. Input device event. Contains
parameters of keyboard and mouse input operations
diff --git a/hmp.c b/hmp.c
index 3a9f797..61ef120 100644
--- a/hmp.c
+++ b/hmp.c
@@ -271,6 +271,19 @@
info->xbzrle_cache->overflow);
}
+ if (info->has_compression) {
+ monitor_printf(mon, "compression pages: %" PRIu64 " pages\n",
+ info->compression->pages);
+ monitor_printf(mon, "compression busy: %" PRIu64 "\n",
+ info->compression->busy);
+ monitor_printf(mon, "compression busy rate: %0.2f\n",
+ info->compression->busy_rate);
+ monitor_printf(mon, "compressed size: %" PRIu64 "\n",
+ info->compression->compressed_size);
+ monitor_printf(mon, "compression rate: %0.2f\n",
+ info->compression->compression_rate);
+ }
+
if (info->has_cpu_throttle_percentage) {
monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n",
info->cpu_throttle_percentage);
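For reference, sample "info migrate" output produced by the compression block added above (the numbers are made up):

    compression pages: 123456 pages
    compression busy: 78
    compression busy rate: 0.01
    compressed size: 409600
    compression rate: 2.71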
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 6b68e12..1987557 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -324,6 +324,9 @@
{
MachineState *ms = MACHINE(obj);
+ warn_report("enforce-config-section is deprecated, please use "
+ "-global migration.send-configuration=on|off instead");
+
ms->enforce_config_section = value;
}
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 36b788a..046d8f1 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -643,7 +643,7 @@
* the string depends on the property type. Legacy properties are only
* needed for "info qtree".
*
- * Do not use this is new code! QOM Properties added through this interface
+ * Do not use this in new code! QOM Properties added through this interface
* will be given names in the "legacy" namespace.
*/
static void qdev_property_add_legacy(DeviceState *dev, Property *prop,
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index f1fd0f8..a24d0dd 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -51,8 +51,9 @@
* Devices are constructed in two stages,
* 1) object instantiation via object_initialize() and
* 2) device realization via #DeviceState:realized property.
- * The former may not fail (it might assert or exit), the latter may return
- * error information to the caller and must be re-entrant.
+ * The former may not fail (and must not abort or exit, since it is called
+ * during device introspection already), and the latter may return error
+ * information to the caller and must be re-entrant.
* Trivial field initializations should go into #TypeInfo.instance_init.
* Operations depending on @props static properties should go into @realize.
* After successful realization, setting static properties will fail.
diff --git a/linux-user/Makefile.objs b/linux-user/Makefile.objs
index b5dfb71..769b8d8 100644
--- a/linux-user/Makefile.objs
+++ b/linux-user/Makefile.objs
@@ -1,7 +1,7 @@
obj-y = main.o syscall.o strace.o mmap.o signal.o \
elfload.o linuxload.o uaccess.o uname.o \
safe-syscall.o $(TARGET_ABI_DIR)/signal.o \
- $(TARGET_ABI_DIR)/cpu_loop.o exit.o
+ $(TARGET_ABI_DIR)/cpu_loop.o exit.o fd-trans.o
obj-$(TARGET_HAS_BFLT) += flatload.o
obj-$(TARGET_I386) += vm86.o
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index e97c4cd..10bca65 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1439,7 +1439,10 @@
#define QMAGIC 0314
/* Necessary parameters */
-#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
+#define TARGET_ELF_EXEC_PAGESIZE \
+ (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
+ TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
+#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
#define TARGET_ELF_PAGESTART(_v) ((_v) & \
~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
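Illustration of how these macros decompose an address (hypothetical values, assuming TARGET_ELF_EXEC_PAGESIZE evaluates to 4096 here):

    /* with vaddr = 0x40001234 and a 4 KiB ELF page size:
     *   TARGET_ELF_PAGESTART(vaddr)           == 0x40001000
     *   TARGET_ELF_PAGEOFFSET(vaddr)          == 0x234
     *   TARGET_ELF_PAGELENGTH(filesz + 0x234) == (filesz + 0x234) rounded up
     *                                            to a multiple of 0x1000
     * so the target_mmap() in the next hunk starts on a page boundary and
     * covers the whole file-backed part of the segment. */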
@@ -2281,7 +2284,7 @@
for (i = 0; i < ehdr->e_phnum; i++) {
struct elf_phdr *eppnt = phdr + i;
if (eppnt->p_type == PT_LOAD) {
- abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
+ abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
int elf_prot = 0;
if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
@@ -2291,8 +2294,9 @@
vaddr = load_bias + eppnt->p_vaddr;
vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
+ vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
- error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
+ error = target_mmap(vaddr_ps, vaddr_len,
elf_prot, MAP_PRIVATE | MAP_FIXED,
image_fd, eppnt->p_offset - vaddr_po);
if (error == -1) {
diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c
new file mode 100644
index 0000000..216b9f0
--- /dev/null
+++ b/linux-user/fd-trans.c
@@ -0,0 +1,1409 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include <sys/signalfd.h>
+#include <linux/unistd.h>
+#include <linux/audit.h>
+#ifdef CONFIG_INOTIFY
+#include <sys/inotify.h>
+#endif
+#include <linux/netlink.h>
+#ifdef CONFIG_RTNETLINK
+#include <linux/rtnetlink.h>
+#include <linux/if_bridge.h>
+#endif
+#include "qemu.h"
+#include "fd-trans.h"
+
+enum {
+ QEMU_IFLA_BR_UNSPEC,
+ QEMU_IFLA_BR_FORWARD_DELAY,
+ QEMU_IFLA_BR_HELLO_TIME,
+ QEMU_IFLA_BR_MAX_AGE,
+ QEMU_IFLA_BR_AGEING_TIME,
+ QEMU_IFLA_BR_STP_STATE,
+ QEMU_IFLA_BR_PRIORITY,
+ QEMU_IFLA_BR_VLAN_FILTERING,
+ QEMU_IFLA_BR_VLAN_PROTOCOL,
+ QEMU_IFLA_BR_GROUP_FWD_MASK,
+ QEMU_IFLA_BR_ROOT_ID,
+ QEMU_IFLA_BR_BRIDGE_ID,
+ QEMU_IFLA_BR_ROOT_PORT,
+ QEMU_IFLA_BR_ROOT_PATH_COST,
+ QEMU_IFLA_BR_TOPOLOGY_CHANGE,
+ QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+ QEMU_IFLA_BR_HELLO_TIMER,
+ QEMU_IFLA_BR_TCN_TIMER,
+ QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
+ QEMU_IFLA_BR_GC_TIMER,
+ QEMU_IFLA_BR_GROUP_ADDR,
+ QEMU_IFLA_BR_FDB_FLUSH,
+ QEMU_IFLA_BR_MCAST_ROUTER,
+ QEMU_IFLA_BR_MCAST_SNOOPING,
+ QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
+ QEMU_IFLA_BR_MCAST_QUERIER,
+ QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
+ QEMU_IFLA_BR_MCAST_HASH_MAX,
+ QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
+ QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+ QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
+ QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
+ QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
+ QEMU_IFLA_BR_MCAST_QUERY_INTVL,
+ QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
+ QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
+ QEMU_IFLA_BR_NF_CALL_IPTABLES,
+ QEMU_IFLA_BR_NF_CALL_IP6TABLES,
+ QEMU_IFLA_BR_NF_CALL_ARPTABLES,
+ QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
+ QEMU_IFLA_BR_PAD,
+ QEMU_IFLA_BR_VLAN_STATS_ENABLED,
+ QEMU_IFLA_BR_MCAST_STATS_ENABLED,
+ QEMU_IFLA_BR_MCAST_IGMP_VERSION,
+ QEMU_IFLA_BR_MCAST_MLD_VERSION,
+ QEMU___IFLA_BR_MAX,
+};
+
+enum {
+ QEMU_IFLA_UNSPEC,
+ QEMU_IFLA_ADDRESS,
+ QEMU_IFLA_BROADCAST,
+ QEMU_IFLA_IFNAME,
+ QEMU_IFLA_MTU,
+ QEMU_IFLA_LINK,
+ QEMU_IFLA_QDISC,
+ QEMU_IFLA_STATS,
+ QEMU_IFLA_COST,
+ QEMU_IFLA_PRIORITY,
+ QEMU_IFLA_MASTER,
+ QEMU_IFLA_WIRELESS,
+ QEMU_IFLA_PROTINFO,
+ QEMU_IFLA_TXQLEN,
+ QEMU_IFLA_MAP,
+ QEMU_IFLA_WEIGHT,
+ QEMU_IFLA_OPERSTATE,
+ QEMU_IFLA_LINKMODE,
+ QEMU_IFLA_LINKINFO,
+ QEMU_IFLA_NET_NS_PID,
+ QEMU_IFLA_IFALIAS,
+ QEMU_IFLA_NUM_VF,
+ QEMU_IFLA_VFINFO_LIST,
+ QEMU_IFLA_STATS64,
+ QEMU_IFLA_VF_PORTS,
+ QEMU_IFLA_PORT_SELF,
+ QEMU_IFLA_AF_SPEC,
+ QEMU_IFLA_GROUP,
+ QEMU_IFLA_NET_NS_FD,
+ QEMU_IFLA_EXT_MASK,
+ QEMU_IFLA_PROMISCUITY,
+ QEMU_IFLA_NUM_TX_QUEUES,
+ QEMU_IFLA_NUM_RX_QUEUES,
+ QEMU_IFLA_CARRIER,
+ QEMU_IFLA_PHYS_PORT_ID,
+ QEMU_IFLA_CARRIER_CHANGES,
+ QEMU_IFLA_PHYS_SWITCH_ID,
+ QEMU_IFLA_LINK_NETNSID,
+ QEMU_IFLA_PHYS_PORT_NAME,
+ QEMU_IFLA_PROTO_DOWN,
+ QEMU_IFLA_GSO_MAX_SEGS,
+ QEMU_IFLA_GSO_MAX_SIZE,
+ QEMU_IFLA_PAD,
+ QEMU_IFLA_XDP,
+ QEMU_IFLA_EVENT,
+ QEMU_IFLA_NEW_NETNSID,
+ QEMU_IFLA_IF_NETNSID,
+ QEMU_IFLA_CARRIER_UP_COUNT,
+ QEMU_IFLA_CARRIER_DOWN_COUNT,
+ QEMU_IFLA_NEW_IFINDEX,
+ QEMU___IFLA_MAX
+};
+
+enum {
+ QEMU_IFLA_BRPORT_UNSPEC,
+ QEMU_IFLA_BRPORT_STATE,
+ QEMU_IFLA_BRPORT_PRIORITY,
+ QEMU_IFLA_BRPORT_COST,
+ QEMU_IFLA_BRPORT_MODE,
+ QEMU_IFLA_BRPORT_GUARD,
+ QEMU_IFLA_BRPORT_PROTECT,
+ QEMU_IFLA_BRPORT_FAST_LEAVE,
+ QEMU_IFLA_BRPORT_LEARNING,
+ QEMU_IFLA_BRPORT_UNICAST_FLOOD,
+ QEMU_IFLA_BRPORT_PROXYARP,
+ QEMU_IFLA_BRPORT_LEARNING_SYNC,
+ QEMU_IFLA_BRPORT_PROXYARP_WIFI,
+ QEMU_IFLA_BRPORT_ROOT_ID,
+ QEMU_IFLA_BRPORT_BRIDGE_ID,
+ QEMU_IFLA_BRPORT_DESIGNATED_PORT,
+ QEMU_IFLA_BRPORT_DESIGNATED_COST,
+ QEMU_IFLA_BRPORT_ID,
+ QEMU_IFLA_BRPORT_NO,
+ QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+ QEMU_IFLA_BRPORT_CONFIG_PENDING,
+ QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
+ QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
+ QEMU_IFLA_BRPORT_HOLD_TIMER,
+ QEMU_IFLA_BRPORT_FLUSH,
+ QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
+ QEMU_IFLA_BRPORT_PAD,
+ QEMU_IFLA_BRPORT_MCAST_FLOOD,
+ QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
+ QEMU_IFLA_BRPORT_VLAN_TUNNEL,
+ QEMU_IFLA_BRPORT_BCAST_FLOOD,
+ QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
+ QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
+ QEMU___IFLA_BRPORT_MAX
+};
+
+enum {
+ QEMU_IFLA_TUN_UNSPEC,
+ QEMU_IFLA_TUN_OWNER,
+ QEMU_IFLA_TUN_GROUP,
+ QEMU_IFLA_TUN_TYPE,
+ QEMU_IFLA_TUN_PI,
+ QEMU_IFLA_TUN_VNET_HDR,
+ QEMU_IFLA_TUN_PERSIST,
+ QEMU_IFLA_TUN_MULTI_QUEUE,
+ QEMU_IFLA_TUN_NUM_QUEUES,
+ QEMU_IFLA_TUN_NUM_DISABLED_QUEUES,
+ QEMU___IFLA_TUN_MAX,
+};
+
+enum {
+ QEMU_IFLA_INFO_UNSPEC,
+ QEMU_IFLA_INFO_KIND,
+ QEMU_IFLA_INFO_DATA,
+ QEMU_IFLA_INFO_XSTATS,
+ QEMU_IFLA_INFO_SLAVE_KIND,
+ QEMU_IFLA_INFO_SLAVE_DATA,
+ QEMU___IFLA_INFO_MAX,
+};
+
+enum {
+ QEMU_IFLA_INET_UNSPEC,
+ QEMU_IFLA_INET_CONF,
+ QEMU___IFLA_INET_MAX,
+};
+
+enum {
+ QEMU_IFLA_INET6_UNSPEC,
+ QEMU_IFLA_INET6_FLAGS,
+ QEMU_IFLA_INET6_CONF,
+ QEMU_IFLA_INET6_STATS,
+ QEMU_IFLA_INET6_MCAST,
+ QEMU_IFLA_INET6_CACHEINFO,
+ QEMU_IFLA_INET6_ICMP6STATS,
+ QEMU_IFLA_INET6_TOKEN,
+ QEMU_IFLA_INET6_ADDR_GEN_MODE,
+ QEMU___IFLA_INET6_MAX
+};
+
+enum {
+ QEMU_IFLA_XDP_UNSPEC,
+ QEMU_IFLA_XDP_FD,
+ QEMU_IFLA_XDP_ATTACHED,
+ QEMU_IFLA_XDP_FLAGS,
+ QEMU_IFLA_XDP_PROG_ID,
+ QEMU___IFLA_XDP_MAX,
+};
+
+enum {
+ QEMU_RTA_UNSPEC,
+ QEMU_RTA_DST,
+ QEMU_RTA_SRC,
+ QEMU_RTA_IIF,
+ QEMU_RTA_OIF,
+ QEMU_RTA_GATEWAY,
+ QEMU_RTA_PRIORITY,
+ QEMU_RTA_PREFSRC,
+ QEMU_RTA_METRICS,
+ QEMU_RTA_MULTIPATH,
+ QEMU_RTA_PROTOINFO, /* no longer used */
+ QEMU_RTA_FLOW,
+ QEMU_RTA_CACHEINFO,
+ QEMU_RTA_SESSION, /* no longer used */
+ QEMU_RTA_MP_ALGO, /* no longer used */
+ QEMU_RTA_TABLE,
+ QEMU_RTA_MARK,
+ QEMU_RTA_MFC_STATS,
+ QEMU_RTA_VIA,
+ QEMU_RTA_NEWDST,
+ QEMU_RTA_PREF,
+ QEMU_RTA_ENCAP_TYPE,
+ QEMU_RTA_ENCAP,
+ QEMU_RTA_EXPIRES,
+ QEMU_RTA_PAD,
+ QEMU_RTA_UID,
+ QEMU_RTA_TTL_PROPAGATE,
+ QEMU_RTA_IP_PROTO,
+ QEMU_RTA_SPORT,
+ QEMU_RTA_DPORT,
+ QEMU___RTA_MAX
+};
+
+TargetFdTrans **target_fd_trans;
+unsigned int target_fd_max;
+
+static void tswap_nlmsghdr(struct nlmsghdr *nlh)
+{
+ nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
+ nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
+ nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
+ nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
+ nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
+}
+
+static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
+ size_t len,
+ abi_long (*host_to_target_nlmsg)
+ (struct nlmsghdr *))
+{
+ uint32_t nlmsg_len;
+ abi_long ret;
+
+ while (len > sizeof(struct nlmsghdr)) {
+
+ nlmsg_len = nlh->nlmsg_len;
+ if (nlmsg_len < sizeof(struct nlmsghdr) ||
+ nlmsg_len > len) {
+ break;
+ }
+
+ switch (nlh->nlmsg_type) {
+ case NLMSG_DONE:
+ tswap_nlmsghdr(nlh);
+ return 0;
+ case NLMSG_NOOP:
+ break;
+ case NLMSG_ERROR:
+ {
+ struct nlmsgerr *e = NLMSG_DATA(nlh);
+ e->error = tswap32(e->error);
+ tswap_nlmsghdr(&e->msg);
+ tswap_nlmsghdr(nlh);
+ return 0;
+ }
+ default:
+ ret = host_to_target_nlmsg(nlh);
+ if (ret < 0) {
+ tswap_nlmsghdr(nlh);
+ return ret;
+ }
+ break;
+ }
+ tswap_nlmsghdr(nlh);
+ len -= NLMSG_ALIGN(nlmsg_len);
+ nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
+ }
+ return 0;
+}
+
+static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
+ size_t len,
+ abi_long (*target_to_host_nlmsg)
+ (struct nlmsghdr *))
+{
+ int ret;
+
+ while (len > sizeof(struct nlmsghdr)) {
+ if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
+ tswap32(nlh->nlmsg_len) > len) {
+ break;
+ }
+ tswap_nlmsghdr(nlh);
+ switch (nlh->nlmsg_type) {
+ case NLMSG_DONE:
+ return 0;
+ case NLMSG_NOOP:
+ break;
+ case NLMSG_ERROR:
+ {
+ struct nlmsgerr *e = NLMSG_DATA(nlh);
+ e->error = tswap32(e->error);
+ tswap_nlmsghdr(&e->msg);
+ return 0;
+ }
+ default:
+ ret = target_to_host_nlmsg(nlh);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ len -= NLMSG_ALIGN(nlh->nlmsg_len);
+ nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
+ }
+ return 0;
+}
+
+#ifdef CONFIG_RTNETLINK
+static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
+ size_t len, void *context,
+ abi_long (*host_to_target_nlattr)
+ (struct nlattr *,
+ void *context))
+{
+ unsigned short nla_len;
+ abi_long ret;
+
+ while (len > sizeof(struct nlattr)) {
+ nla_len = nlattr->nla_len;
+ if (nla_len < sizeof(struct nlattr) ||
+ nla_len > len) {
+ break;
+ }
+ ret = host_to_target_nlattr(nlattr, context);
+ nlattr->nla_len = tswap16(nlattr->nla_len);
+ nlattr->nla_type = tswap16(nlattr->nla_type);
+ if (ret < 0) {
+ return ret;
+ }
+ len -= NLA_ALIGN(nla_len);
+ nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
+ }
+ return 0;
+}
+
+static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
+ size_t len,
+ abi_long (*host_to_target_rtattr)
+ (struct rtattr *))
+{
+ unsigned short rta_len;
+ abi_long ret;
+
+ while (len > sizeof(struct rtattr)) {
+ rta_len = rtattr->rta_len;
+ if (rta_len < sizeof(struct rtattr) ||
+ rta_len > len) {
+ break;
+ }
+ ret = host_to_target_rtattr(rtattr);
+ rtattr->rta_len = tswap16(rtattr->rta_len);
+ rtattr->rta_type = tswap16(rtattr->rta_type);
+ if (ret < 0) {
+ return ret;
+ }
+ len -= RTA_ALIGN(rta_len);
+ rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
+ }
+ return 0;
+}
+
+#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
+
+static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint16_t *u16;
+ uint32_t *u32;
+ uint64_t *u64;
+
+ switch (nlattr->nla_type) {
+ /* no data */
+ case QEMU_IFLA_BR_FDB_FLUSH:
+ break;
+ /* binary */
+ case QEMU_IFLA_BR_GROUP_ADDR:
+ break;
+ /* uint8_t */
+ case QEMU_IFLA_BR_VLAN_FILTERING:
+ case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
+ case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
+ case QEMU_IFLA_BR_MCAST_ROUTER:
+ case QEMU_IFLA_BR_MCAST_SNOOPING:
+ case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
+ case QEMU_IFLA_BR_MCAST_QUERIER:
+ case QEMU_IFLA_BR_NF_CALL_IPTABLES:
+ case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
+ case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
+ case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
+ case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
+ case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
+ case QEMU_IFLA_BR_MCAST_MLD_VERSION:
+ break;
+ /* uint16_t */
+ case QEMU_IFLA_BR_PRIORITY:
+ case QEMU_IFLA_BR_VLAN_PROTOCOL:
+ case QEMU_IFLA_BR_GROUP_FWD_MASK:
+ case QEMU_IFLA_BR_ROOT_PORT:
+ case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
+ u16 = NLA_DATA(nlattr);
+ *u16 = tswap16(*u16);
+ break;
+ /* uint32_t */
+ case QEMU_IFLA_BR_FORWARD_DELAY:
+ case QEMU_IFLA_BR_HELLO_TIME:
+ case QEMU_IFLA_BR_MAX_AGE:
+ case QEMU_IFLA_BR_AGEING_TIME:
+ case QEMU_IFLA_BR_STP_STATE:
+ case QEMU_IFLA_BR_ROOT_PATH_COST:
+ case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
+ case QEMU_IFLA_BR_MCAST_HASH_MAX:
+ case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
+ case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
+ u32 = NLA_DATA(nlattr);
+ *u32 = tswap32(*u32);
+ break;
+ /* uint64_t */
+ case QEMU_IFLA_BR_HELLO_TIMER:
+ case QEMU_IFLA_BR_TCN_TIMER:
+ case QEMU_IFLA_BR_GC_TIMER:
+ case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
+ case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
+ case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
+ case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
+ case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
+ case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
+ case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
+ u64 = NLA_DATA(nlattr);
+ *u64 = tswap64(*u64);
+ break;
+ /* ifla_bridge_id: uint8_t[] */
+ case QEMU_IFLA_BR_ROOT_ID:
+ case QEMU_IFLA_BR_BRIDGE_ID:
+ break;
+ default:
+ gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint16_t *u16;
+ uint32_t *u32;
+ uint64_t *u64;
+
+ switch (nlattr->nla_type) {
+ /* uint8_t */
+ case QEMU_IFLA_BRPORT_STATE:
+ case QEMU_IFLA_BRPORT_MODE:
+ case QEMU_IFLA_BRPORT_GUARD:
+ case QEMU_IFLA_BRPORT_PROTECT:
+ case QEMU_IFLA_BRPORT_FAST_LEAVE:
+ case QEMU_IFLA_BRPORT_LEARNING:
+ case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
+ case QEMU_IFLA_BRPORT_PROXYARP:
+ case QEMU_IFLA_BRPORT_LEARNING_SYNC:
+ case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
+ case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
+ case QEMU_IFLA_BRPORT_CONFIG_PENDING:
+ case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
+ case QEMU_IFLA_BRPORT_MCAST_FLOOD:
+ case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
+ case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
+ case QEMU_IFLA_BRPORT_BCAST_FLOOD:
+ case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
+ break;
+ /* uint16_t */
+ case QEMU_IFLA_BRPORT_PRIORITY:
+ case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
+ case QEMU_IFLA_BRPORT_DESIGNATED_COST:
+ case QEMU_IFLA_BRPORT_ID:
+ case QEMU_IFLA_BRPORT_NO:
+ case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
+ u16 = NLA_DATA(nlattr);
+ *u16 = tswap16(*u16);
+ break;
+ /* uint32_t */
+ case QEMU_IFLA_BRPORT_COST:
+ u32 = NLA_DATA(nlattr);
+ *u32 = tswap32(*u32);
+ break;
+ /* uint64_t */
+ case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
+ case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
+ case QEMU_IFLA_BRPORT_HOLD_TIMER:
+ u64 = NLA_DATA(nlattr);
+ *u64 = tswap64(*u64);
+ break;
+ /* ifla_bridge_id: uint8_t[] */
+ case QEMU_IFLA_BRPORT_ROOT_ID:
+ case QEMU_IFLA_BRPORT_BRIDGE_ID:
+ break;
+ default:
+ gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_tun_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint32_t *u32;
+
+ switch (nlattr->nla_type) {
+ /* uint8_t */
+ case QEMU_IFLA_TUN_TYPE:
+ case QEMU_IFLA_TUN_PI:
+ case QEMU_IFLA_TUN_VNET_HDR:
+ case QEMU_IFLA_TUN_PERSIST:
+ case QEMU_IFLA_TUN_MULTI_QUEUE:
+ break;
+ /* uint32_t */
+ case QEMU_IFLA_TUN_NUM_QUEUES:
+ case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES:
+ case QEMU_IFLA_TUN_OWNER:
+ case QEMU_IFLA_TUN_GROUP:
+ u32 = NLA_DATA(nlattr);
+ *u32 = tswap32(*u32);
+ break;
+ default:
+ gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+struct linkinfo_context {
+ int len;
+ char *name;
+ int slave_len;
+ char *slave_name;
+};
+
+static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ struct linkinfo_context *li_context = context;
+
+ switch (nlattr->nla_type) {
+ /* string */
+ case QEMU_IFLA_INFO_KIND:
+ li_context->name = NLA_DATA(nlattr);
+ li_context->len = nlattr->nla_len - NLA_HDRLEN;
+ break;
+ case QEMU_IFLA_INFO_SLAVE_KIND:
+ li_context->slave_name = NLA_DATA(nlattr);
+ li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
+ break;
+ /* stats */
+ case QEMU_IFLA_INFO_XSTATS:
+ /* FIXME: only used by CAN */
+ break;
+ /* nested */
+ case QEMU_IFLA_INFO_DATA:
+ if (strncmp(li_context->name, "bridge",
+ li_context->len) == 0) {
+ return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
+ nlattr->nla_len,
+ NULL,
+ host_to_target_data_bridge_nlattr);
+ } else if (strncmp(li_context->name, "tun",
+ li_context->len) == 0) {
+ return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
+ nlattr->nla_len,
+ NULL,
+ host_to_target_data_tun_nlattr);
+ } else {
+ gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
+ }
+ break;
+ case QEMU_IFLA_INFO_SLAVE_DATA:
+ if (strncmp(li_context->slave_name, "bridge",
+ li_context->slave_len) == 0) {
+ return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
+ nlattr->nla_len,
+ NULL,
+ host_to_target_slave_data_bridge_nlattr);
+ } else {
+ gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
+ li_context->slave_name);
+ }
+ break;
+ default:
+ gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
+ break;
+ }
+
+ return 0;
+}
+
+static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint32_t *u32;
+ int i;
+
+ switch (nlattr->nla_type) {
+ case QEMU_IFLA_INET_CONF:
+ u32 = NLA_DATA(nlattr);
+ for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
+ i++) {
+ u32[i] = tswap32(u32[i]);
+ }
+ break;
+ default:
+ gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint32_t *u32;
+ uint64_t *u64;
+ struct ifla_cacheinfo *ci;
+ int i;
+
+ switch (nlattr->nla_type) {
+ /* binaries */
+ case QEMU_IFLA_INET6_TOKEN:
+ break;
+ /* uint8_t */
+ case QEMU_IFLA_INET6_ADDR_GEN_MODE:
+ break;
+ /* uint32_t */
+ case QEMU_IFLA_INET6_FLAGS:
+ u32 = NLA_DATA(nlattr);
+ *u32 = tswap32(*u32);
+ break;
+ /* uint32_t[] */
+ case QEMU_IFLA_INET6_CONF:
+ u32 = NLA_DATA(nlattr);
+ for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
+ i++) {
+ u32[i] = tswap32(u32[i]);
+ }
+ break;
+ /* ifla_cacheinfo */
+ case QEMU_IFLA_INET6_CACHEINFO:
+ ci = NLA_DATA(nlattr);
+ ci->max_reasm_len = tswap32(ci->max_reasm_len);
+ ci->tstamp = tswap32(ci->tstamp);
+ ci->reachable_time = tswap32(ci->reachable_time);
+ ci->retrans_time = tswap32(ci->retrans_time);
+ break;
+ /* uint64_t[] */
+ case QEMU_IFLA_INET6_STATS:
+ case QEMU_IFLA_INET6_ICMP6STATS:
+ u64 = NLA_DATA(nlattr);
+ for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
+ i++) {
+ u64[i] = tswap64(u64[i]);
+ }
+ break;
+ default:
+ gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ switch (nlattr->nla_type) {
+ case AF_INET:
+ return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_inet_nlattr);
+ case AF_INET6:
+ return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_inet6_nlattr);
+ default:
+ gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint32_t *u32;
+
+ switch (nlattr->nla_type) {
+ /* uint8_t */
+ case QEMU_IFLA_XDP_ATTACHED:
+ break;
+ /* uint32_t */
+ case QEMU_IFLA_XDP_PROG_ID:
+ u32 = NLA_DATA(nlattr);
+ *u32 = tswap32(*u32);
+ break;
+ default:
+ gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
+{
+ uint32_t *u32;
+ struct rtnl_link_stats *st;
+ struct rtnl_link_stats64 *st64;
+ struct rtnl_link_ifmap *map;
+ struct linkinfo_context li_context;
+
+ switch (rtattr->rta_type) {
+ /* binary stream */
+ case QEMU_IFLA_ADDRESS:
+ case QEMU_IFLA_BROADCAST:
+ /* string */
+ case QEMU_IFLA_IFNAME:
+ case QEMU_IFLA_QDISC:
+ break;
+ /* uint8_t */
+ case QEMU_IFLA_OPERSTATE:
+ case QEMU_IFLA_LINKMODE:
+ case QEMU_IFLA_CARRIER:
+ case QEMU_IFLA_PROTO_DOWN:
+ break;
+ /* uint32_t */
+ case QEMU_IFLA_MTU:
+ case QEMU_IFLA_LINK:
+ case QEMU_IFLA_WEIGHT:
+ case QEMU_IFLA_TXQLEN:
+ case QEMU_IFLA_CARRIER_CHANGES:
+ case QEMU_IFLA_NUM_RX_QUEUES:
+ case QEMU_IFLA_NUM_TX_QUEUES:
+ case QEMU_IFLA_PROMISCUITY:
+ case QEMU_IFLA_EXT_MASK:
+ case QEMU_IFLA_LINK_NETNSID:
+ case QEMU_IFLA_GROUP:
+ case QEMU_IFLA_MASTER:
+ case QEMU_IFLA_NUM_VF:
+ case QEMU_IFLA_GSO_MAX_SEGS:
+ case QEMU_IFLA_GSO_MAX_SIZE:
+ case QEMU_IFLA_CARRIER_UP_COUNT:
+ case QEMU_IFLA_CARRIER_DOWN_COUNT:
+ u32 = RTA_DATA(rtattr);
+ *u32 = tswap32(*u32);
+ break;
+ /* struct rtnl_link_stats */
+ case QEMU_IFLA_STATS:
+ st = RTA_DATA(rtattr);
+ st->rx_packets = tswap32(st->rx_packets);
+ st->tx_packets = tswap32(st->tx_packets);
+ st->rx_bytes = tswap32(st->rx_bytes);
+ st->tx_bytes = tswap32(st->tx_bytes);
+ st->rx_errors = tswap32(st->rx_errors);
+ st->tx_errors = tswap32(st->tx_errors);
+ st->rx_dropped = tswap32(st->rx_dropped);
+ st->tx_dropped = tswap32(st->tx_dropped);
+ st->multicast = tswap32(st->multicast);
+ st->collisions = tswap32(st->collisions);
+
+ /* detailed rx_errors: */
+ st->rx_length_errors = tswap32(st->rx_length_errors);
+ st->rx_over_errors = tswap32(st->rx_over_errors);
+ st->rx_crc_errors = tswap32(st->rx_crc_errors);
+ st->rx_frame_errors = tswap32(st->rx_frame_errors);
+ st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
+ st->rx_missed_errors = tswap32(st->rx_missed_errors);
+
+ /* detailed tx_errors */
+ st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
+ st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
+ st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
+ st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
+ st->tx_window_errors = tswap32(st->tx_window_errors);
+
+ /* for cslip etc */
+ st->rx_compressed = tswap32(st->rx_compressed);
+ st->tx_compressed = tswap32(st->tx_compressed);
+ break;
+ /* struct rtnl_link_stats64 */
+ case QEMU_IFLA_STATS64:
+ st64 = RTA_DATA(rtattr);
+ st64->rx_packets = tswap64(st64->rx_packets);
+ st64->tx_packets = tswap64(st64->tx_packets);
+ st64->rx_bytes = tswap64(st64->rx_bytes);
+ st64->tx_bytes = tswap64(st64->tx_bytes);
+ st64->rx_errors = tswap64(st64->rx_errors);
+ st64->tx_errors = tswap64(st64->tx_errors);
+ st64->rx_dropped = tswap64(st64->rx_dropped);
+ st64->tx_dropped = tswap64(st64->tx_dropped);
+ st64->multicast = tswap64(st64->multicast);
+ st64->collisions = tswap64(st64->collisions);
+
+ /* detailed rx_errors: */
+ st64->rx_length_errors = tswap64(st64->rx_length_errors);
+ st64->rx_over_errors = tswap64(st64->rx_over_errors);
+ st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
+ st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
+ st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
+ st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
+
+ /* detailed tx_errors */
+ st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
+ st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
+ st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
+ st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
+ st64->tx_window_errors = tswap64(st64->tx_window_errors);
+
+ /* for cslip etc */
+ st64->rx_compressed = tswap64(st64->rx_compressed);
+ st64->tx_compressed = tswap64(st64->tx_compressed);
+ break;
+ /* struct rtnl_link_ifmap */
+ case QEMU_IFLA_MAP:
+ map = RTA_DATA(rtattr);
+ map->mem_start = tswap64(map->mem_start);
+ map->mem_end = tswap64(map->mem_end);
+ map->base_addr = tswap64(map->base_addr);
+ map->irq = tswap16(map->irq);
+ break;
+ /* nested */
+ case QEMU_IFLA_LINKINFO:
+ memset(&li_context, 0, sizeof(li_context));
+ return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ &li_context,
+ host_to_target_data_linkinfo_nlattr);
+ case QEMU_IFLA_AF_SPEC:
+ return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ NULL,
+ host_to_target_data_spec_nlattr);
+ case QEMU_IFLA_XDP:
+ return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ NULL,
+ host_to_target_data_xdp_nlattr);
+ default:
+ gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
+{
+ uint32_t *u32;
+ struct ifa_cacheinfo *ci;
+
+ switch (rtattr->rta_type) {
+ /* binary: depends on family type */
+ case IFA_ADDRESS:
+ case IFA_LOCAL:
+ break;
+ /* string */
+ case IFA_LABEL:
+ break;
+ /* u32 */
+ case IFA_FLAGS:
+ case IFA_BROADCAST:
+ u32 = RTA_DATA(rtattr);
+ *u32 = tswap32(*u32);
+ break;
+ /* struct ifa_cacheinfo */
+ case IFA_CACHEINFO:
+ ci = RTA_DATA(rtattr);
+ ci->ifa_prefered = tswap32(ci->ifa_prefered);
+ ci->ifa_valid = tswap32(ci->ifa_valid);
+ ci->cstamp = tswap32(ci->cstamp);
+ ci->tstamp = tswap32(ci->tstamp);
+ break;
+ default:
+ gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
+{
+ uint32_t *u32;
+ struct rta_cacheinfo *ci;
+
+ switch (rtattr->rta_type) {
+ /* binary: depends on family type */
+ case QEMU_RTA_GATEWAY:
+ case QEMU_RTA_DST:
+ case QEMU_RTA_PREFSRC:
+ break;
+ /* u8 */
+ case QEMU_RTA_PREF:
+ break;
+ /* u32 */
+ case QEMU_RTA_PRIORITY:
+ case QEMU_RTA_TABLE:
+ case QEMU_RTA_OIF:
+ u32 = RTA_DATA(rtattr);
+ *u32 = tswap32(*u32);
+ break;
+ /* struct rta_cacheinfo */
+ case QEMU_RTA_CACHEINFO:
+ ci = RTA_DATA(rtattr);
+ ci->rta_clntref = tswap32(ci->rta_clntref);
+ ci->rta_lastuse = tswap32(ci->rta_lastuse);
+ ci->rta_expires = tswap32(ci->rta_expires);
+ ci->rta_error = tswap32(ci->rta_error);
+ ci->rta_used = tswap32(ci->rta_used);
+#if defined(RTNETLINK_HAVE_PEERINFO)
+ ci->rta_id = tswap32(ci->rta_id);
+ ci->rta_ts = tswap32(ci->rta_ts);
+ ci->rta_tsage = tswap32(ci->rta_tsage);
+#endif
+ break;
+ default:
+ gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ return host_to_target_for_each_rtattr(rtattr, rtattr_len,
+ host_to_target_data_link_rtattr);
+}
+
+static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ return host_to_target_for_each_rtattr(rtattr, rtattr_len,
+ host_to_target_data_addr_rtattr);
+}
+
+static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ return host_to_target_for_each_rtattr(rtattr, rtattr_len,
+ host_to_target_data_route_rtattr);
+}
+
+static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
+{
+ uint32_t nlmsg_len;
+ struct ifinfomsg *ifi;
+ struct ifaddrmsg *ifa;
+ struct rtmsg *rtm;
+
+ nlmsg_len = nlh->nlmsg_len;
+ switch (nlh->nlmsg_type) {
+ case RTM_NEWLINK:
+ case RTM_DELLINK:
+ case RTM_GETLINK:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
+ ifi = NLMSG_DATA(nlh);
+ ifi->ifi_type = tswap16(ifi->ifi_type);
+ ifi->ifi_index = tswap32(ifi->ifi_index);
+ ifi->ifi_flags = tswap32(ifi->ifi_flags);
+ ifi->ifi_change = tswap32(ifi->ifi_change);
+ host_to_target_link_rtattr(IFLA_RTA(ifi),
+ nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
+ }
+ break;
+ case RTM_NEWADDR:
+ case RTM_DELADDR:
+ case RTM_GETADDR:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
+ ifa = NLMSG_DATA(nlh);
+ ifa->ifa_index = tswap32(ifa->ifa_index);
+ host_to_target_addr_rtattr(IFA_RTA(ifa),
+ nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
+ }
+ break;
+ case RTM_NEWROUTE:
+ case RTM_DELROUTE:
+ case RTM_GETROUTE:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
+ rtm = NLMSG_DATA(nlh);
+ rtm->rtm_flags = tswap32(rtm->rtm_flags);
+ host_to_target_route_rtattr(RTM_RTA(rtm),
+ nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
+ }
+ break;
+ default:
+ return -TARGET_EINVAL;
+ }
+ return 0;
+}
+
+static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
+ size_t len)
+{
+ return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
+}
+
+static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
+ size_t len,
+ abi_long (*target_to_host_rtattr)
+ (struct rtattr *))
+{
+ abi_long ret;
+
+ while (len >= sizeof(struct rtattr)) {
+ if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
+ tswap16(rtattr->rta_len) > len) {
+ break;
+ }
+ rtattr->rta_len = tswap16(rtattr->rta_len);
+ rtattr->rta_type = tswap16(rtattr->rta_type);
+ ret = target_to_host_rtattr(rtattr);
+ if (ret < 0) {
+ return ret;
+ }
+ len -= RTA_ALIGN(rtattr->rta_len);
+ rtattr = (struct rtattr *)(((char *)rtattr) +
+ RTA_ALIGN(rtattr->rta_len));
+ }
+ return 0;
+}
+
+static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
+{
+ switch (rtattr->rta_type) {
+ default:
+ gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
+{
+ switch (rtattr->rta_type) {
+ /* binary: depends on family type */
+ case IFA_LOCAL:
+ case IFA_ADDRESS:
+ break;
+ default:
+ gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
+{
+ uint32_t *u32;
+ switch (rtattr->rta_type) {
+ /* binary: depends on family type */
+ case QEMU_RTA_DST:
+ case QEMU_RTA_SRC:
+ case QEMU_RTA_GATEWAY:
+ break;
+ /* u32 */
+ case QEMU_RTA_PRIORITY:
+ case QEMU_RTA_OIF:
+ u32 = RTA_DATA(rtattr);
+ *u32 = tswap32(*u32);
+ break;
+ default:
+ gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
+ break;
+ }
+ return 0;
+}
+
+static void target_to_host_link_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ target_to_host_for_each_rtattr(rtattr, rtattr_len,
+ target_to_host_data_link_rtattr);
+}
+
+static void target_to_host_addr_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ target_to_host_for_each_rtattr(rtattr, rtattr_len,
+ target_to_host_data_addr_rtattr);
+}
+
+static void target_to_host_route_rtattr(struct rtattr *rtattr,
+ uint32_t rtattr_len)
+{
+ target_to_host_for_each_rtattr(rtattr, rtattr_len,
+ target_to_host_data_route_rtattr);
+}
+
+static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
+{
+ struct ifinfomsg *ifi;
+ struct ifaddrmsg *ifa;
+ struct rtmsg *rtm;
+
+ switch (nlh->nlmsg_type) {
+ case RTM_GETLINK:
+ break;
+ case RTM_NEWLINK:
+ case RTM_DELLINK:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
+ ifi = NLMSG_DATA(nlh);
+ ifi->ifi_type = tswap16(ifi->ifi_type);
+ ifi->ifi_index = tswap32(ifi->ifi_index);
+ ifi->ifi_flags = tswap32(ifi->ifi_flags);
+ ifi->ifi_change = tswap32(ifi->ifi_change);
+ target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
+ NLMSG_LENGTH(sizeof(*ifi)));
+ }
+ break;
+ case RTM_GETADDR:
+ case RTM_NEWADDR:
+ case RTM_DELADDR:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
+ ifa = NLMSG_DATA(nlh);
+ ifa->ifa_index = tswap32(ifa->ifa_index);
+ target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
+ NLMSG_LENGTH(sizeof(*ifa)));
+ }
+ break;
+ case RTM_GETROUTE:
+ break;
+ case RTM_NEWROUTE:
+ case RTM_DELROUTE:
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
+ rtm = NLMSG_DATA(nlh);
+ rtm->rtm_flags = tswap32(rtm->rtm_flags);
+ target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
+ NLMSG_LENGTH(sizeof(*rtm)));
+ }
+ break;
+ default:
+ return -TARGET_EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
+{
+ return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
+}
+#endif /* CONFIG_RTNETLINK */
+
+static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
+{
+ switch (nlh->nlmsg_type) {
+ default:
+ gemu_log("Unknown host audit message type %d\n",
+ nlh->nlmsg_type);
+ return -TARGET_EINVAL;
+ }
+ return 0;
+}
+
+static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
+ size_t len)
+{
+ return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
+}
+
+static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
+{
+ switch (nlh->nlmsg_type) {
+ case AUDIT_USER:
+ case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
+ case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
+ break;
+ default:
+ gemu_log("Unknown target audit message type %d\n",
+ nlh->nlmsg_type);
+ return -TARGET_EINVAL;
+ }
+
+ return 0;
+}
+
+static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
+{
+ return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
+}
+
+static abi_long packet_target_to_host_sockaddr(void *host_addr,
+ abi_ulong target_addr,
+ socklen_t len)
+{
+ struct sockaddr *addr = host_addr;
+ struct target_sockaddr *target_saddr;
+
+ target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
+ if (!target_saddr) {
+ return -TARGET_EFAULT;
+ }
+
+ memcpy(addr, target_saddr, len);
+ addr->sa_family = tswap16(target_saddr->sa_family);
+ /* spkt_protocol is big-endian */
+
+ unlock_user(target_saddr, target_addr, 0);
+ return 0;
+}
+
+TargetFdTrans target_packet_trans = {
+ .target_to_host_addr = packet_target_to_host_sockaddr,
+};
+
+#ifdef CONFIG_RTNETLINK
+static abi_long netlink_route_target_to_host(void *buf, size_t len)
+{
+ abi_long ret;
+
+ ret = target_to_host_nlmsg_route(buf, len);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return len;
+}
+
+static abi_long netlink_route_host_to_target(void *buf, size_t len)
+{
+ abi_long ret;
+
+ ret = host_to_target_nlmsg_route(buf, len);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return len;
+}
+
+TargetFdTrans target_netlink_route_trans = {
+ .target_to_host_data = netlink_route_target_to_host,
+ .host_to_target_data = netlink_route_host_to_target,
+};
+#endif /* CONFIG_RTNETLINK */
+
+static abi_long netlink_audit_target_to_host(void *buf, size_t len)
+{
+ abi_long ret;
+
+ ret = target_to_host_nlmsg_audit(buf, len);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return len;
+}
+
+static abi_long netlink_audit_host_to_target(void *buf, size_t len)
+{
+ abi_long ret;
+
+ ret = host_to_target_nlmsg_audit(buf, len);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return len;
+}
+
+TargetFdTrans target_netlink_audit_trans = {
+ .target_to_host_data = netlink_audit_target_to_host,
+ .host_to_target_data = netlink_audit_host_to_target,
+};
+
+/* signalfd siginfo conversion */
+
+static void
+host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
+ const struct signalfd_siginfo *info)
+{
+ int sig = host_to_target_signal(info->ssi_signo);
+
+ /* linux/signalfd.h defines an ssi_addr_lsb field that is not defined
+ * in sys/signalfd.h but is used by some kernels
+ */
+
+#ifdef BUS_MCEERR_AO
+ if (tinfo->ssi_signo == SIGBUS &&
+ (tinfo->ssi_code == BUS_MCEERR_AR ||
+ tinfo->ssi_code == BUS_MCEERR_AO)) {
+ uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
+ uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
+ *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
+ }
+#endif
+
+ tinfo->ssi_signo = tswap32(sig);
+ tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
+ tinfo->ssi_code = tswap32(info->ssi_code);
+ tinfo->ssi_pid = tswap32(info->ssi_pid);
+ tinfo->ssi_uid = tswap32(info->ssi_uid);
+ tinfo->ssi_fd = tswap32(info->ssi_fd);
+ tinfo->ssi_tid = tswap32(info->ssi_tid);
+ tinfo->ssi_band = tswap32(info->ssi_band);
+ tinfo->ssi_overrun = tswap32(info->ssi_overrun);
+ tinfo->ssi_trapno = tswap32(info->ssi_trapno);
+ tinfo->ssi_status = tswap32(info->ssi_status);
+ tinfo->ssi_int = tswap32(info->ssi_int);
+ tinfo->ssi_ptr = tswap64(info->ssi_ptr);
+ tinfo->ssi_utime = tswap64(info->ssi_utime);
+ tinfo->ssi_stime = tswap64(info->ssi_stime);
+ tinfo->ssi_addr = tswap64(info->ssi_addr);
+}
+
+static abi_long host_to_target_data_signalfd(void *buf, size_t len)
+{
+ int i;
+
+ for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
+ host_to_target_signalfd_siginfo(buf + i, buf + i);
+ }
+
+ return len;
+}
+
+TargetFdTrans target_signalfd_trans = {
+ .host_to_target_data = host_to_target_data_signalfd,
+};
+
+static abi_long swap_data_eventfd(void *buf, size_t len)
+{
+ uint64_t *counter = buf;
+ int i;
+
+ if (len < sizeof(uint64_t)) {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i += sizeof(uint64_t)) {
+ *counter = tswap64(*counter);
+ counter++;
+ }
+
+ return len;
+}
+
+TargetFdTrans target_eventfd_trans = {
+ .host_to_target_data = swap_data_eventfd,
+ .target_to_host_data = swap_data_eventfd,
+};
+
+#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
+ (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
+ defined(__NR_inotify_init1))
+static abi_long host_to_target_data_inotify(void *buf, size_t len)
+{
+ struct inotify_event *ev;
+ int i;
+ uint32_t name_len;
+
+ for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
+ ev = (struct inotify_event *)((char *)buf + i);
+ name_len = ev->len;
+
+ ev->wd = tswap32(ev->wd);
+ ev->mask = tswap32(ev->mask);
+ ev->cookie = tswap32(ev->cookie);
+ ev->len = tswap32(name_len);
+ }
+
+ return len;
+}
+
+TargetFdTrans target_inotify_trans = {
+ .host_to_target_data = host_to_target_data_inotify,
+};
+#endif
diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h
new file mode 100644
index 0000000..a3fcdaa
--- /dev/null
+++ b/linux-user/fd-trans.h
@@ -0,0 +1,97 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef FD_TRANS_H
+#define FD_TRANS_H
+
+typedef abi_long (*TargetFdDataFunc)(void *, size_t);
+typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
+typedef struct TargetFdTrans {
+ TargetFdDataFunc host_to_target_data;
+ TargetFdDataFunc target_to_host_data;
+ TargetFdAddrFunc target_to_host_addr;
+} TargetFdTrans;
+
+extern TargetFdTrans **target_fd_trans;
+
+extern unsigned int target_fd_max;
+
+static inline TargetFdDataFunc fd_trans_target_to_host_data(int fd)
+{
+ if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
+ return target_fd_trans[fd]->target_to_host_data;
+ }
+ return NULL;
+}
+
+static inline TargetFdDataFunc fd_trans_host_to_target_data(int fd)
+{
+ if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
+ return target_fd_trans[fd]->host_to_target_data;
+ }
+ return NULL;
+}
+
+static inline TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
+{
+ if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
+ return target_fd_trans[fd]->target_to_host_addr;
+ }
+ return NULL;
+}
+
+static inline void fd_trans_register(int fd, TargetFdTrans *trans)
+{
+ unsigned int oldmax;
+
+ if (fd >= target_fd_max) {
+ oldmax = target_fd_max;
+ target_fd_max = ((fd >> 6) + 1) << 6; /* grow in slices of 64 entries */
+ target_fd_trans = g_renew(TargetFdTrans *,
+ target_fd_trans, target_fd_max);
+ memset((void *)(target_fd_trans + oldmax), 0,
+ (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
+ }
+ target_fd_trans[fd] = trans;
+}
+
+static inline void fd_trans_unregister(int fd)
+{
+ if (fd >= 0 && fd < target_fd_max) {
+ target_fd_trans[fd] = NULL;
+ }
+}
+
+static inline void fd_trans_dup(int oldfd, int newfd)
+{
+ fd_trans_unregister(newfd);
+ if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
+ fd_trans_register(newfd, target_fd_trans[oldfd]);
+ }
+}
+
+extern TargetFdTrans target_packet_trans;
+#ifdef CONFIG_RTNETLINK
+extern TargetFdTrans target_netlink_route_trans;
+#endif
+extern TargetFdTrans target_netlink_audit_trans;
+extern TargetFdTrans target_signalfd_trans;
+extern TargetFdTrans target_eventfd_trans;
+#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
+ (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
+ defined(__NR_inotify_init1))
+extern TargetFdTrans target_inotify_trans;
+#endif
+#endif
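A minimal usage sketch of the interface above (the call site is hypothetical and not part of this patch; target_signalfd_trans and the fd_trans_* helpers are the ones declared above):

    /* after the emulated signalfd4() syscall succeeds, attach the translator
     * so subsequent reads on the fd get their siginfo byte-swapped for the
     * target; drop it again when the fd is closed */
    static int attach_signalfd_translator_sketch(int host_fd)
    {
        fd_trans_register(host_fd, &target_signalfd_trans);
        return host_fd;
    }

    static void on_close_sketch(int host_fd)
    {
        fd_trans_unregister(host_fd);
    }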
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 850b72a..ae3c0df 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -102,15 +102,11 @@
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
-#ifdef CONFIG_RTNETLINK
-#include <linux/rtnetlink.h>
-#include <linux/if_bridge.h>
-#endif
-#include <linux/audit.h>
#include "linux_loop.h"
#include "uname.h"
#include "qemu.h"
+#include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
@@ -360,298 +356,6 @@
{ 0, 0, 0, 0 }
};
-enum {
- QEMU_IFLA_BR_UNSPEC,
- QEMU_IFLA_BR_FORWARD_DELAY,
- QEMU_IFLA_BR_HELLO_TIME,
- QEMU_IFLA_BR_MAX_AGE,
- QEMU_IFLA_BR_AGEING_TIME,
- QEMU_IFLA_BR_STP_STATE,
- QEMU_IFLA_BR_PRIORITY,
- QEMU_IFLA_BR_VLAN_FILTERING,
- QEMU_IFLA_BR_VLAN_PROTOCOL,
- QEMU_IFLA_BR_GROUP_FWD_MASK,
- QEMU_IFLA_BR_ROOT_ID,
- QEMU_IFLA_BR_BRIDGE_ID,
- QEMU_IFLA_BR_ROOT_PORT,
- QEMU_IFLA_BR_ROOT_PATH_COST,
- QEMU_IFLA_BR_TOPOLOGY_CHANGE,
- QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
- QEMU_IFLA_BR_HELLO_TIMER,
- QEMU_IFLA_BR_TCN_TIMER,
- QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
- QEMU_IFLA_BR_GC_TIMER,
- QEMU_IFLA_BR_GROUP_ADDR,
- QEMU_IFLA_BR_FDB_FLUSH,
- QEMU_IFLA_BR_MCAST_ROUTER,
- QEMU_IFLA_BR_MCAST_SNOOPING,
- QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
- QEMU_IFLA_BR_MCAST_QUERIER,
- QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
- QEMU_IFLA_BR_MCAST_HASH_MAX,
- QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
- QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
- QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
- QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
- QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
- QEMU_IFLA_BR_MCAST_QUERY_INTVL,
- QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
- QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
- QEMU_IFLA_BR_NF_CALL_IPTABLES,
- QEMU_IFLA_BR_NF_CALL_IP6TABLES,
- QEMU_IFLA_BR_NF_CALL_ARPTABLES,
- QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
- QEMU_IFLA_BR_PAD,
- QEMU_IFLA_BR_VLAN_STATS_ENABLED,
- QEMU_IFLA_BR_MCAST_STATS_ENABLED,
- QEMU_IFLA_BR_MCAST_IGMP_VERSION,
- QEMU_IFLA_BR_MCAST_MLD_VERSION,
- QEMU___IFLA_BR_MAX,
-};
-
-enum {
- QEMU_IFLA_UNSPEC,
- QEMU_IFLA_ADDRESS,
- QEMU_IFLA_BROADCAST,
- QEMU_IFLA_IFNAME,
- QEMU_IFLA_MTU,
- QEMU_IFLA_LINK,
- QEMU_IFLA_QDISC,
- QEMU_IFLA_STATS,
- QEMU_IFLA_COST,
- QEMU_IFLA_PRIORITY,
- QEMU_IFLA_MASTER,
- QEMU_IFLA_WIRELESS,
- QEMU_IFLA_PROTINFO,
- QEMU_IFLA_TXQLEN,
- QEMU_IFLA_MAP,
- QEMU_IFLA_WEIGHT,
- QEMU_IFLA_OPERSTATE,
- QEMU_IFLA_LINKMODE,
- QEMU_IFLA_LINKINFO,
- QEMU_IFLA_NET_NS_PID,
- QEMU_IFLA_IFALIAS,
- QEMU_IFLA_NUM_VF,
- QEMU_IFLA_VFINFO_LIST,
- QEMU_IFLA_STATS64,
- QEMU_IFLA_VF_PORTS,
- QEMU_IFLA_PORT_SELF,
- QEMU_IFLA_AF_SPEC,
- QEMU_IFLA_GROUP,
- QEMU_IFLA_NET_NS_FD,
- QEMU_IFLA_EXT_MASK,
- QEMU_IFLA_PROMISCUITY,
- QEMU_IFLA_NUM_TX_QUEUES,
- QEMU_IFLA_NUM_RX_QUEUES,
- QEMU_IFLA_CARRIER,
- QEMU_IFLA_PHYS_PORT_ID,
- QEMU_IFLA_CARRIER_CHANGES,
- QEMU_IFLA_PHYS_SWITCH_ID,
- QEMU_IFLA_LINK_NETNSID,
- QEMU_IFLA_PHYS_PORT_NAME,
- QEMU_IFLA_PROTO_DOWN,
- QEMU_IFLA_GSO_MAX_SEGS,
- QEMU_IFLA_GSO_MAX_SIZE,
- QEMU_IFLA_PAD,
- QEMU_IFLA_XDP,
- QEMU_IFLA_EVENT,
- QEMU_IFLA_NEW_NETNSID,
- QEMU_IFLA_IF_NETNSID,
- QEMU_IFLA_CARRIER_UP_COUNT,
- QEMU_IFLA_CARRIER_DOWN_COUNT,
- QEMU_IFLA_NEW_IFINDEX,
- QEMU___IFLA_MAX
-};
-
-enum {
- QEMU_IFLA_BRPORT_UNSPEC,
- QEMU_IFLA_BRPORT_STATE,
- QEMU_IFLA_BRPORT_PRIORITY,
- QEMU_IFLA_BRPORT_COST,
- QEMU_IFLA_BRPORT_MODE,
- QEMU_IFLA_BRPORT_GUARD,
- QEMU_IFLA_BRPORT_PROTECT,
- QEMU_IFLA_BRPORT_FAST_LEAVE,
- QEMU_IFLA_BRPORT_LEARNING,
- QEMU_IFLA_BRPORT_UNICAST_FLOOD,
- QEMU_IFLA_BRPORT_PROXYARP,
- QEMU_IFLA_BRPORT_LEARNING_SYNC,
- QEMU_IFLA_BRPORT_PROXYARP_WIFI,
- QEMU_IFLA_BRPORT_ROOT_ID,
- QEMU_IFLA_BRPORT_BRIDGE_ID,
- QEMU_IFLA_BRPORT_DESIGNATED_PORT,
- QEMU_IFLA_BRPORT_DESIGNATED_COST,
- QEMU_IFLA_BRPORT_ID,
- QEMU_IFLA_BRPORT_NO,
- QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
- QEMU_IFLA_BRPORT_CONFIG_PENDING,
- QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
- QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
- QEMU_IFLA_BRPORT_HOLD_TIMER,
- QEMU_IFLA_BRPORT_FLUSH,
- QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
- QEMU_IFLA_BRPORT_PAD,
- QEMU_IFLA_BRPORT_MCAST_FLOOD,
- QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
- QEMU_IFLA_BRPORT_VLAN_TUNNEL,
- QEMU_IFLA_BRPORT_BCAST_FLOOD,
- QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
- QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
- QEMU___IFLA_BRPORT_MAX
-};
-
-enum {
- QEMU_IFLA_TUN_UNSPEC,
- QEMU_IFLA_TUN_OWNER,
- QEMU_IFLA_TUN_GROUP,
- QEMU_IFLA_TUN_TYPE,
- QEMU_IFLA_TUN_PI,
- QEMU_IFLA_TUN_VNET_HDR,
- QEMU_IFLA_TUN_PERSIST,
- QEMU_IFLA_TUN_MULTI_QUEUE,
- QEMU_IFLA_TUN_NUM_QUEUES,
- QEMU_IFLA_TUN_NUM_DISABLED_QUEUES,
- QEMU___IFLA_TUN_MAX,
-};
-
-enum {
- QEMU_IFLA_INFO_UNSPEC,
- QEMU_IFLA_INFO_KIND,
- QEMU_IFLA_INFO_DATA,
- QEMU_IFLA_INFO_XSTATS,
- QEMU_IFLA_INFO_SLAVE_KIND,
- QEMU_IFLA_INFO_SLAVE_DATA,
- QEMU___IFLA_INFO_MAX,
-};
-
-enum {
- QEMU_IFLA_INET_UNSPEC,
- QEMU_IFLA_INET_CONF,
- QEMU___IFLA_INET_MAX,
-};
-
-enum {
- QEMU_IFLA_INET6_UNSPEC,
- QEMU_IFLA_INET6_FLAGS,
- QEMU_IFLA_INET6_CONF,
- QEMU_IFLA_INET6_STATS,
- QEMU_IFLA_INET6_MCAST,
- QEMU_IFLA_INET6_CACHEINFO,
- QEMU_IFLA_INET6_ICMP6STATS,
- QEMU_IFLA_INET6_TOKEN,
- QEMU_IFLA_INET6_ADDR_GEN_MODE,
- QEMU___IFLA_INET6_MAX
-};
-
-enum {
- QEMU_IFLA_XDP_UNSPEC,
- QEMU_IFLA_XDP_FD,
- QEMU_IFLA_XDP_ATTACHED,
- QEMU_IFLA_XDP_FLAGS,
- QEMU_IFLA_XDP_PROG_ID,
- QEMU___IFLA_XDP_MAX,
-};
-
-enum {
- QEMU_RTA_UNSPEC,
- QEMU_RTA_DST,
- QEMU_RTA_SRC,
- QEMU_RTA_IIF,
- QEMU_RTA_OIF,
- QEMU_RTA_GATEWAY,
- QEMU_RTA_PRIORITY,
- QEMU_RTA_PREFSRC,
- QEMU_RTA_METRICS,
- QEMU_RTA_MULTIPATH,
- QEMU_RTA_PROTOINFO, /* no longer used */
- QEMU_RTA_FLOW,
- QEMU_RTA_CACHEINFO,
- QEMU_RTA_SESSION, /* no longer used */
- QEMU_RTA_MP_ALGO, /* no longer used */
- QEMU_RTA_TABLE,
- QEMU_RTA_MARK,
- QEMU_RTA_MFC_STATS,
- QEMU_RTA_VIA,
- QEMU_RTA_NEWDST,
- QEMU_RTA_PREF,
- QEMU_RTA_ENCAP_TYPE,
- QEMU_RTA_ENCAP,
- QEMU_RTA_EXPIRES,
- QEMU_RTA_PAD,
- QEMU_RTA_UID,
- QEMU_RTA_TTL_PROPAGATE,
- QEMU_RTA_IP_PROTO,
- QEMU_RTA_SPORT,
- QEMU_RTA_DPORT,
- QEMU___RTA_MAX
-};
-
-typedef abi_long (*TargetFdDataFunc)(void *, size_t);
-typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
-typedef struct TargetFdTrans {
- TargetFdDataFunc host_to_target_data;
- TargetFdDataFunc target_to_host_data;
- TargetFdAddrFunc target_to_host_addr;
-} TargetFdTrans;
-
-static TargetFdTrans **target_fd_trans;
-
-static unsigned int target_fd_max;
-
-static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
-{
- if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
- return target_fd_trans[fd]->target_to_host_data;
- }
- return NULL;
-}
-
-static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
-{
- if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
- return target_fd_trans[fd]->host_to_target_data;
- }
- return NULL;
-}
-
-static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
-{
- if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
- return target_fd_trans[fd]->target_to_host_addr;
- }
- return NULL;
-}
-
-static void fd_trans_register(int fd, TargetFdTrans *trans)
-{
- unsigned int oldmax;
-
- if (fd >= target_fd_max) {
- oldmax = target_fd_max;
- target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
- target_fd_trans = g_renew(TargetFdTrans *,
- target_fd_trans, target_fd_max);
- memset((void *)(target_fd_trans + oldmax), 0,
- (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
- }
- target_fd_trans[fd] = trans;
-}
-
-static void fd_trans_unregister(int fd)
-{
- if (fd >= 0 && fd < target_fd_max) {
- target_fd_trans[fd] = NULL;
- }
-}
-
-static void fd_trans_dup(int oldfd, int newfd)
-{
- fd_trans_unregister(newfd);
- if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
- fd_trans_register(newfd, target_fd_trans[oldfd]);
- }
-}
-
static int sys_getcwd1(char *buf, size_t size)
{
if (getcwd(buf, size) == NULL) {
@@ -2076,968 +1780,6 @@
return 0;
}
-static void tswap_nlmsghdr(struct nlmsghdr *nlh)
-{
- nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
- nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
- nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
- nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
- nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
-}
-
-static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
- size_t len,
- abi_long (*host_to_target_nlmsg)
- (struct nlmsghdr *))
-{
- uint32_t nlmsg_len;
- abi_long ret;
-
- while (len > sizeof(struct nlmsghdr)) {
-
- nlmsg_len = nlh->nlmsg_len;
- if (nlmsg_len < sizeof(struct nlmsghdr) ||
- nlmsg_len > len) {
- break;
- }
-
- switch (nlh->nlmsg_type) {
- case NLMSG_DONE:
- tswap_nlmsghdr(nlh);
- return 0;
- case NLMSG_NOOP:
- break;
- case NLMSG_ERROR:
- {
- struct nlmsgerr *e = NLMSG_DATA(nlh);
- e->error = tswap32(e->error);
- tswap_nlmsghdr(&e->msg);
- tswap_nlmsghdr(nlh);
- return 0;
- }
- default:
- ret = host_to_target_nlmsg(nlh);
- if (ret < 0) {
- tswap_nlmsghdr(nlh);
- return ret;
- }
- break;
- }
- tswap_nlmsghdr(nlh);
- len -= NLMSG_ALIGN(nlmsg_len);
- nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
- }
- return 0;
-}
-
-static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
- size_t len,
- abi_long (*target_to_host_nlmsg)
- (struct nlmsghdr *))
-{
- int ret;
-
- while (len > sizeof(struct nlmsghdr)) {
- if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
- tswap32(nlh->nlmsg_len) > len) {
- break;
- }
- tswap_nlmsghdr(nlh);
- switch (nlh->nlmsg_type) {
- case NLMSG_DONE:
- return 0;
- case NLMSG_NOOP:
- break;
- case NLMSG_ERROR:
- {
- struct nlmsgerr *e = NLMSG_DATA(nlh);
- e->error = tswap32(e->error);
- tswap_nlmsghdr(&e->msg);
- return 0;
- }
- default:
- ret = target_to_host_nlmsg(nlh);
- if (ret < 0) {
- return ret;
- }
- }
- len -= NLMSG_ALIGN(nlh->nlmsg_len);
- nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
- }
- return 0;
-}
-
-#ifdef CONFIG_RTNETLINK
-static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
- size_t len, void *context,
- abi_long (*host_to_target_nlattr)
- (struct nlattr *,
- void *context))
-{
- unsigned short nla_len;
- abi_long ret;
-
- while (len > sizeof(struct nlattr)) {
- nla_len = nlattr->nla_len;
- if (nla_len < sizeof(struct nlattr) ||
- nla_len > len) {
- break;
- }
- ret = host_to_target_nlattr(nlattr, context);
- nlattr->nla_len = tswap16(nlattr->nla_len);
- nlattr->nla_type = tswap16(nlattr->nla_type);
- if (ret < 0) {
- return ret;
- }
- len -= NLA_ALIGN(nla_len);
- nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
- }
- return 0;
-}
-
-static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
- size_t len,
- abi_long (*host_to_target_rtattr)
- (struct rtattr *))
-{
- unsigned short rta_len;
- abi_long ret;
-
- while (len > sizeof(struct rtattr)) {
- rta_len = rtattr->rta_len;
- if (rta_len < sizeof(struct rtattr) ||
- rta_len > len) {
- break;
- }
- ret = host_to_target_rtattr(rtattr);
- rtattr->rta_len = tswap16(rtattr->rta_len);
- rtattr->rta_type = tswap16(rtattr->rta_type);
- if (ret < 0) {
- return ret;
- }
- len -= RTA_ALIGN(rta_len);
- rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
- }
- return 0;
-}
-
-#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
-
-static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
- void *context)
-{
- uint16_t *u16;
- uint32_t *u32;
- uint64_t *u64;
-
- switch (nlattr->nla_type) {
- /* no data */
- case QEMU_IFLA_BR_FDB_FLUSH:
- break;
- /* binary */
- case QEMU_IFLA_BR_GROUP_ADDR:
- break;
- /* uint8_t */
- case QEMU_IFLA_BR_VLAN_FILTERING:
- case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
- case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
- case QEMU_IFLA_BR_MCAST_ROUTER:
- case QEMU_IFLA_BR_MCAST_SNOOPING:
- case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
- case QEMU_IFLA_BR_MCAST_QUERIER:
- case QEMU_IFLA_BR_NF_CALL_IPTABLES:
- case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
- case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
- case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
- case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
- case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
- case QEMU_IFLA_BR_MCAST_MLD_VERSION:
- break;
- /* uint16_t */
- case QEMU_IFLA_BR_PRIORITY:
- case QEMU_IFLA_BR_VLAN_PROTOCOL:
- case QEMU_IFLA_BR_GROUP_FWD_MASK:
- case QEMU_IFLA_BR_ROOT_PORT:
- case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
- u16 = NLA_DATA(nlattr);
- *u16 = tswap16(*u16);
- break;
- /* uint32_t */
- case QEMU_IFLA_BR_FORWARD_DELAY:
- case QEMU_IFLA_BR_HELLO_TIME:
- case QEMU_IFLA_BR_MAX_AGE:
- case QEMU_IFLA_BR_AGEING_TIME:
- case QEMU_IFLA_BR_STP_STATE:
- case QEMU_IFLA_BR_ROOT_PATH_COST:
- case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
- case QEMU_IFLA_BR_MCAST_HASH_MAX:
- case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
- case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
- u32 = NLA_DATA(nlattr);
- *u32 = tswap32(*u32);
- break;
- /* uint64_t */
- case QEMU_IFLA_BR_HELLO_TIMER:
- case QEMU_IFLA_BR_TCN_TIMER:
- case QEMU_IFLA_BR_GC_TIMER:
- case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
- case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
- case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
- case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
- case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
- case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
- case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
- u64 = NLA_DATA(nlattr);
- *u64 = tswap64(*u64);
- break;
- /* ifla_bridge_id: uin8_t[] */
- case QEMU_IFLA_BR_ROOT_ID:
- case QEMU_IFLA_BR_BRIDGE_ID:
- break;
- default:
- gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
- break;
- }
- return 0;
-}
-
-static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
- void *context)
-{
- uint16_t *u16;
- uint32_t *u32;
- uint64_t *u64;
-
- switch (nlattr->nla_type) {
- /* uint8_t */
- case QEMU_IFLA_BRPORT_STATE:
- case QEMU_IFLA_BRPORT_MODE:
- case QEMU_IFLA_BRPORT_GUARD:
- case QEMU_IFLA_BRPORT_PROTECT:
- case QEMU_IFLA_BRPORT_FAST_LEAVE:
- case QEMU_IFLA_BRPORT_LEARNING:
- case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
- case QEMU_IFLA_BRPORT_PROXYARP:
- case QEMU_IFLA_BRPORT_LEARNING_SYNC:
- case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
- case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
- case QEMU_IFLA_BRPORT_CONFIG_PENDING:
- case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
- case QEMU_IFLA_BRPORT_MCAST_FLOOD:
- case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
- case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
- case QEMU_IFLA_BRPORT_BCAST_FLOOD:
- case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
- break;
- /* uint16_t */
- case QEMU_IFLA_BRPORT_PRIORITY:
- case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
- case QEMU_IFLA_BRPORT_DESIGNATED_COST:
- case QEMU_IFLA_BRPORT_ID:
- case QEMU_IFLA_BRPORT_NO:
- case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
- u16 = NLA_DATA(nlattr);
- *u16 = tswap16(*u16);
- break;
- /* uin32_t */
- case QEMU_IFLA_BRPORT_COST:
- u32 = NLA_DATA(nlattr);
- *u32 = tswap32(*u32);
- break;
- /* uint64_t */
- case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
- case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
- case QEMU_IFLA_BRPORT_HOLD_TIMER:
- u64 = NLA_DATA(nlattr);
- *u64 = tswap64(*u64);
- break;
- /* ifla_bridge_id: uint8_t[] */
- case QEMU_IFLA_BRPORT_ROOT_ID:
- case QEMU_IFLA_BRPORT_BRIDGE_ID:
- break;
- default:
- gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
- break;
- }
- return 0;
-}
-
-static abi_long host_to_target_data_tun_nlattr(struct nlattr *nlattr,
- void *context)
-{
- uint32_t *u32;
-
- switch (nlattr->nla_type) {
- /* uint8_t */
- case QEMU_IFLA_TUN_TYPE:
- case QEMU_IFLA_TUN_PI:
- case QEMU_IFLA_TUN_VNET_HDR:
- case QEMU_IFLA_TUN_PERSIST:
- case QEMU_IFLA_TUN_MULTI_QUEUE:
- break;
- /* uint32_t */
- case QEMU_IFLA_TUN_NUM_QUEUES:
- case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES:
- case QEMU_IFLA_TUN_OWNER:
- case QEMU_IFLA_TUN_GROUP:
- u32 = NLA_DATA(nlattr);
- *u32 = tswap32(*u32);
- break;
- default:
- gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr->nla_type);
- break;
- }
- return 0;
-}
-
-struct linkinfo_context {
- int len;
- char *name;
- int slave_len;
- char *slave_name;
-};
-
-static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
- void *context)
-{
- struct linkinfo_context *li_context = context;
-
- switch (nlattr->nla_type) {
- /* string */
- case QEMU_IFLA_INFO_KIND:
- li_context->name = NLA_DATA(nlattr);
- li_context->len = nlattr->nla_len - NLA_HDRLEN;
- break;
- case QEMU_IFLA_INFO_SLAVE_KIND:
- li_context->slave_name = NLA_DATA(nlattr);
- li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
- break;
- /* stats */
- case QEMU_IFLA_INFO_XSTATS:
- /* FIXME: only used by CAN */
- break;
- /* nested */
- case QEMU_IFLA_INFO_DATA:
- if (strncmp(li_context->name, "bridge",
- li_context->len) == 0) {
- return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
- nlattr->nla_len,
- NULL,
- host_to_target_data_bridge_nlattr);
- } else if (strncmp(li_context->name, "tun",
- li_context->len) == 0) {
- return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
- nlattr->nla_len,
- NULL,
- host_to_target_data_tun_nlattr);
- } else {
- gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
- }
- break;
- case QEMU_IFLA_INFO_SLAVE_DATA:
- if (strncmp(li_context->slave_name, "bridge",
- li_context->slave_len) == 0) {
- return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
- nlattr->nla_len,
- NULL,
- host_to_target_slave_data_bridge_nlattr);
- } else {
- gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
- li_context->slave_name);
- }
- break;
- default:
- gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
- break;
- }
-
- return 0;
-}
-
-static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
- void *context)
-{
- uint32_t *u32;
- int i;
-
- switch (nlattr->nla_type) {
- case QEMU_IFLA_INET_CONF:
- u32 = NLA_DATA(nlattr);
- for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
- i++) {
- u32[i] = tswap32(u32[i]);
- }
- break;
- default:
- gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
- }
- return 0;
-}
-
-static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
- void *context)
-{
- uint32_t *u32;
- uint64_t *u64;
- struct ifla_cacheinfo *ci;
- int i;
-
- switch (nlattr->nla_type) {
- /* binaries */
- case QEMU_IFLA_INET6_TOKEN:
- break;
- /* uint8_t */
- case QEMU_IFLA_INET6_ADDR_GEN_MODE:
- break;
- /* uint32_t */
- case QEMU_IFLA_INET6_FLAGS:
- u32 = NLA_DATA(nlattr);
- *u32 = tswap32(*u32);
- break;
- /* uint32_t[] */
- case QEMU_IFLA_INET6_CONF:
- u32 = NLA_DATA(nlattr);
- for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
- i++) {
- u32[i] = tswap32(u32[i]);
- }
- break;
- /* ifla_cacheinfo */
- case QEMU_IFLA_INET6_CACHEINFO:
- ci = NLA_DATA(nlattr);
- ci->max_reasm_len = tswap32(ci->max_reasm_len);
- ci->tstamp = tswap32(ci->tstamp);
- ci->reachable_time = tswap32(ci->reachable_time);
- ci->retrans_time = tswap32(ci->retrans_time);
- break;
- /* uint64_t[] */
- case QEMU_IFLA_INET6_STATS:
- case QEMU_IFLA_INET6_ICMP6STATS:
- u64 = NLA_DATA(nlattr);
- for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
- i++) {
- u64[i] = tswap64(u64[i]);
- }
- break;
- default:
- gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
- }
- return 0;
-}
-
-static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
- void *context)
-{
- switch (nlattr->nla_type) {
- case AF_INET:
- return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
- NULL,
- host_to_target_data_inet_nlattr);
- case AF_INET6:
- return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
- NULL,
- host_to_target_data_inet6_nlattr);
- default:
- gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
- break;
- }
- return 0;
-}
-
-static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
- void *context)
-{
- uint32_t *u32;
-
- switch (nlattr->nla_type) {
- /* uint8_t */
- case QEMU_IFLA_XDP_ATTACHED:
- break;
- /* uint32_t */
- case QEMU_IFLA_XDP_PROG_ID:
- u32 = NLA_DATA(nlattr);
- *u32 = tswap32(*u32);
- break;
- default:
- gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
- break;
- }
- return 0;
-}
-
-static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
-{
- uint32_t *u32;
- struct rtnl_link_stats *st;
- struct rtnl_link_stats64 *st64;
- struct rtnl_link_ifmap *map;
- struct linkinfo_context li_context;
-
- switch (rtattr->rta_type) {
- /* binary stream */
- case QEMU_IFLA_ADDRESS:
- case QEMU_IFLA_BROADCAST:
- /* string */
- case QEMU_IFLA_IFNAME:
- case QEMU_IFLA_QDISC:
- break;
- /* uin8_t */
- case QEMU_IFLA_OPERSTATE:
- case QEMU_IFLA_LINKMODE:
- case QEMU_IFLA_CARRIER:
- case QEMU_IFLA_PROTO_DOWN:
- break;
- /* uint32_t */
- case QEMU_IFLA_MTU:
- case QEMU_IFLA_LINK:
- case QEMU_IFLA_WEIGHT:
- case QEMU_IFLA_TXQLEN:
- case QEMU_IFLA_CARRIER_CHANGES:
- case QEMU_IFLA_NUM_RX_QUEUES:
- case QEMU_IFLA_NUM_TX_QUEUES:
- case QEMU_IFLA_PROMISCUITY:
- case QEMU_IFLA_EXT_MASK:
- case QEMU_IFLA_LINK_NETNSID:
- case QEMU_IFLA_GROUP:
- case QEMU_IFLA_MASTER:
- case QEMU_IFLA_NUM_VF:
- case QEMU_IFLA_GSO_MAX_SEGS:
- case QEMU_IFLA_GSO_MAX_SIZE:
- case QEMU_IFLA_CARRIER_UP_COUNT:
- case QEMU_IFLA_CARRIER_DOWN_COUNT:
- u32 = RTA_DATA(rtattr);
- *u32 = tswap32(*u32);
- break;
- /* struct rtnl_link_stats */
- case QEMU_IFLA_STATS:
- st = RTA_DATA(rtattr);
- st->rx_packets = tswap32(st->rx_packets);
- st->tx_packets = tswap32(st->tx_packets);
- st->rx_bytes = tswap32(st->rx_bytes);
- st->tx_bytes = tswap32(st->tx_bytes);
- st->rx_errors = tswap32(st->rx_errors);
- st->tx_errors = tswap32(st->tx_errors);
- st->rx_dropped = tswap32(st->rx_dropped);
- st->tx_dropped = tswap32(st->tx_dropped);
- st->multicast = tswap32(st->multicast);
- st->collisions = tswap32(st->collisions);
-
- /* detailed rx_errors: */
- st->rx_length_errors = tswap32(st->rx_length_errors);
- st->rx_over_errors = tswap32(st->rx_over_errors);
- st->rx_crc_errors = tswap32(st->rx_crc_errors);
- st->rx_frame_errors = tswap32(st->rx_frame_errors);
- st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
- st->rx_missed_errors = tswap32(st->rx_missed_errors);
-
- /* detailed tx_errors */
- st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
- st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
- st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
- st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
- st->tx_window_errors = tswap32(st->tx_window_errors);
-
- /* for cslip etc */
- st->rx_compressed = tswap32(st->rx_compressed);
- st->tx_compressed = tswap32(st->tx_compressed);
- break;
- /* struct rtnl_link_stats64 */
- case QEMU_IFLA_STATS64:
- st64 = RTA_DATA(rtattr);
- st64->rx_packets = tswap64(st64->rx_packets);
- st64->tx_packets = tswap64(st64->tx_packets);
- st64->rx_bytes = tswap64(st64->rx_bytes);
- st64->tx_bytes = tswap64(st64->tx_bytes);
- st64->rx_errors = tswap64(st64->rx_errors);
- st64->tx_errors = tswap64(st64->tx_errors);
- st64->rx_dropped = tswap64(st64->rx_dropped);
- st64->tx_dropped = tswap64(st64->tx_dropped);
- st64->multicast = tswap64(st64->multicast);
- st64->collisions = tswap64(st64->collisions);
-
- /* detailed rx_errors: */
- st64->rx_length_errors = tswap64(st64->rx_length_errors);
- st64->rx_over_errors = tswap64(st64->rx_over_errors);
- st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
- st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
- st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
- st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
-
- /* detailed tx_errors */
- st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
- st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
- st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
- st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
- st64->tx_window_errors = tswap64(st64->tx_window_errors);
-
- /* for cslip etc */
- st64->rx_compressed = tswap64(st64->rx_compressed);
- st64->tx_compressed = tswap64(st64->tx_compressed);
- break;
- /* struct rtnl_link_ifmap */
- case QEMU_IFLA_MAP:
- map = RTA_DATA(rtattr);
- map->mem_start = tswap64(map->mem_start);
- map->mem_end = tswap64(map->mem_end);
- map->base_addr = tswap64(map->base_addr);
- map->irq = tswap16(map->irq);
- break;
- /* nested */
- case QEMU_IFLA_LINKINFO:
- memset(&li_context, 0, sizeof(li_context));
- return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
- &li_context,
- host_to_target_data_linkinfo_nlattr);
- case QEMU_IFLA_AF_SPEC:
- return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
- NULL,
- host_to_target_data_spec_nlattr);
- case QEMU_IFLA_XDP:
- return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
- NULL,
- host_to_target_data_xdp_nlattr);
- default:
- gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
- break;
- }
- return 0;
-}
-
-static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
-{
- uint32_t *u32;
- struct ifa_cacheinfo *ci;
-
- switch (rtattr->rta_type) {
- /* binary: depends on family type */
- case IFA_ADDRESS:
- case IFA_LOCAL:
- break;
- /* string */
- case IFA_LABEL:
- break;
- /* u32 */
- case IFA_FLAGS:
- case IFA_BROADCAST:
- u32 = RTA_DATA(rtattr);
- *u32 = tswap32(*u32);
- break;
- /* struct ifa_cacheinfo */
- case IFA_CACHEINFO:
- ci = RTA_DATA(rtattr);
- ci->ifa_prefered = tswap32(ci->ifa_prefered);
- ci->ifa_valid = tswap32(ci->ifa_valid);
- ci->cstamp = tswap32(ci->cstamp);
- ci->tstamp = tswap32(ci->tstamp);
- break;
- default:
- gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
- break;
- }
- return 0;
-}
-
-static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
-{
- uint32_t *u32;
- struct rta_cacheinfo *ci;
-
- switch (rtattr->rta_type) {
- /* binary: depends on family type */
- case QEMU_RTA_GATEWAY:
- case QEMU_RTA_DST:
- case QEMU_RTA_PREFSRC:
- break;
- /* u8 */
- case QEMU_RTA_PREF:
- break;
- /* u32 */
- case QEMU_RTA_PRIORITY:
- case QEMU_RTA_TABLE:
- case QEMU_RTA_OIF:
- u32 = RTA_DATA(rtattr);
- *u32 = tswap32(*u32);
- break;
- /* struct rta_cacheinfo */
- case QEMU_RTA_CACHEINFO:
- ci = RTA_DATA(rtattr);
- ci->rta_clntref = tswap32(ci->rta_clntref);
- ci->rta_lastuse = tswap32(ci->rta_lastuse);
- ci->rta_expires = tswap32(ci->rta_expires);
- ci->rta_error = tswap32(ci->rta_error);
- ci->rta_used = tswap32(ci->rta_used);
-#if defined(RTNETLINK_HAVE_PEERINFO)
- ci->rta_id = tswap32(ci->rta_id);
- ci->rta_ts = tswap32(ci->rta_ts);
- ci->rta_tsage = tswap32(ci->rta_tsage);
-#endif
- break;
- default:
- gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
- break;
- }
- return 0;
-}
-
-static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
- uint32_t rtattr_len)
-{
- return host_to_target_for_each_rtattr(rtattr, rtattr_len,
- host_to_target_data_link_rtattr);
-}
-
-static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
- uint32_t rtattr_len)
-{
- return host_to_target_for_each_rtattr(rtattr, rtattr_len,
- host_to_target_data_addr_rtattr);
-}
-
-static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
- uint32_t rtattr_len)
-{
- return host_to_target_for_each_rtattr(rtattr, rtattr_len,
- host_to_target_data_route_rtattr);
-}
-
-static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
-{
- uint32_t nlmsg_len;
- struct ifinfomsg *ifi;
- struct ifaddrmsg *ifa;
- struct rtmsg *rtm;
-
- nlmsg_len = nlh->nlmsg_len;
- switch (nlh->nlmsg_type) {
- case RTM_NEWLINK:
- case RTM_DELLINK:
- case RTM_GETLINK:
- if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
- ifi = NLMSG_DATA(nlh);
- ifi->ifi_type = tswap16(ifi->ifi_type);
- ifi->ifi_index = tswap32(ifi->ifi_index);
- ifi->ifi_flags = tswap32(ifi->ifi_flags);
- ifi->ifi_change = tswap32(ifi->ifi_change);
- host_to_target_link_rtattr(IFLA_RTA(ifi),
- nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
- }
- break;
- case RTM_NEWADDR:
- case RTM_DELADDR:
- case RTM_GETADDR:
- if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
- ifa = NLMSG_DATA(nlh);
- ifa->ifa_index = tswap32(ifa->ifa_index);
- host_to_target_addr_rtattr(IFA_RTA(ifa),
- nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
- }
- break;
- case RTM_NEWROUTE:
- case RTM_DELROUTE:
- case RTM_GETROUTE:
- if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
- rtm = NLMSG_DATA(nlh);
- rtm->rtm_flags = tswap32(rtm->rtm_flags);
- host_to_target_route_rtattr(RTM_RTA(rtm),
- nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
- }
- break;
- default:
- return -TARGET_EINVAL;
- }
- return 0;
-}
-
-static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
- size_t len)
-{
- return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
-}
-
-static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
- size_t len,
- abi_long (*target_to_host_rtattr)
- (struct rtattr *))
-{
- abi_long ret;
-
- while (len >= sizeof(struct rtattr)) {
- if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
- tswap16(rtattr->rta_len) > len) {
- break;
- }
- rtattr->rta_len = tswap16(rtattr->rta_len);
- rtattr->rta_type = tswap16(rtattr->rta_type);
- ret = target_to_host_rtattr(rtattr);
- if (ret < 0) {
- return ret;
- }
- len -= RTA_ALIGN(rtattr->rta_len);
- rtattr = (struct rtattr *)(((char *)rtattr) +
- RTA_ALIGN(rtattr->rta_len));
- }
- return 0;
-}
-
-static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
-{
- switch (rtattr->rta_type) {
- default:
- gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
- break;
- }
- return 0;
-}
-
-static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
-{
- switch (rtattr->rta_type) {
- /* binary: depends on family type */
- case IFA_LOCAL:
- case IFA_ADDRESS:
- break;
- default:
- gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
- break;
- }
- return 0;
-}
-
-static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
-{
- uint32_t *u32;
- switch (rtattr->rta_type) {
- /* binary: depends on family type */
- case QEMU_RTA_DST:
- case QEMU_RTA_SRC:
- case QEMU_RTA_GATEWAY:
- break;
- /* u32 */
- case QEMU_RTA_PRIORITY:
- case QEMU_RTA_OIF:
- u32 = RTA_DATA(rtattr);
- *u32 = tswap32(*u32);
- break;
- default:
- gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
- break;
- }
- return 0;
-}
-
-static void target_to_host_link_rtattr(struct rtattr *rtattr,
- uint32_t rtattr_len)
-{
- target_to_host_for_each_rtattr(rtattr, rtattr_len,
- target_to_host_data_link_rtattr);
-}
-
-static void target_to_host_addr_rtattr(struct rtattr *rtattr,
- uint32_t rtattr_len)
-{
- target_to_host_for_each_rtattr(rtattr, rtattr_len,
- target_to_host_data_addr_rtattr);
-}
-
-static void target_to_host_route_rtattr(struct rtattr *rtattr,
- uint32_t rtattr_len)
-{
- target_to_host_for_each_rtattr(rtattr, rtattr_len,
- target_to_host_data_route_rtattr);
-}
-
-static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
-{
- struct ifinfomsg *ifi;
- struct ifaddrmsg *ifa;
- struct rtmsg *rtm;
-
- switch (nlh->nlmsg_type) {
- case RTM_GETLINK:
- break;
- case RTM_NEWLINK:
- case RTM_DELLINK:
- if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
- ifi = NLMSG_DATA(nlh);
- ifi->ifi_type = tswap16(ifi->ifi_type);
- ifi->ifi_index = tswap32(ifi->ifi_index);
- ifi->ifi_flags = tswap32(ifi->ifi_flags);
- ifi->ifi_change = tswap32(ifi->ifi_change);
- target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
- NLMSG_LENGTH(sizeof(*ifi)));
- }
- break;
- case RTM_GETADDR:
- case RTM_NEWADDR:
- case RTM_DELADDR:
- if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
- ifa = NLMSG_DATA(nlh);
- ifa->ifa_index = tswap32(ifa->ifa_index);
- target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
- NLMSG_LENGTH(sizeof(*ifa)));
- }
- break;
- case RTM_GETROUTE:
- break;
- case RTM_NEWROUTE:
- case RTM_DELROUTE:
- if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
- rtm = NLMSG_DATA(nlh);
- rtm->rtm_flags = tswap32(rtm->rtm_flags);
- target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
- NLMSG_LENGTH(sizeof(*rtm)));
- }
- break;
- default:
- return -TARGET_EOPNOTSUPP;
- }
- return 0;
-}
-
-static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
-{
- return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
-}
-#endif /* CONFIG_RTNETLINK */
-
-static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
-{
- switch (nlh->nlmsg_type) {
- default:
- gemu_log("Unknown host audit message type %d\n",
- nlh->nlmsg_type);
- return -TARGET_EINVAL;
- }
- return 0;
-}
-
-static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
- size_t len)
-{
- return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
-}
-
-static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
-{
- switch (nlh->nlmsg_type) {
- case AUDIT_USER:
- case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
- case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
- break;
- default:
- gemu_log("Unknown target audit message type %d\n",
- nlh->nlmsg_type);
- return -TARGET_EINVAL;
- }
-
- return 0;
-}
-
-static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
-{
- return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
-}
-
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
abi_ulong optval_addr, socklen_t optlen)
@@ -3290,6 +2032,24 @@
unlock_user (dev_ifname, optval_addr, 0);
return ret;
}
+ case TARGET_SO_LINGER:
+ {
+ struct linger lg;
+ struct target_linger *tlg;
+
+ if (optlen != sizeof(struct target_linger)) {
+ return -TARGET_EINVAL;
+ }
+ if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+ __get_user(lg.l_onoff, &tlg->l_onoff);
+ __get_user(lg.l_linger, &tlg->l_linger);
+ ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
+ &lg, sizeof(lg)));
+ unlock_user_struct(tlg, optval_addr, 0);
+ return ret;
+ }
/* Options with 'int' argument. */
case TARGET_SO_DEBUG:
optname = SO_DEBUG;
@@ -3381,7 +2141,6 @@
level = SOL_SOCKET;
switch (optname) {
/* These don't just return a single integer */
- case TARGET_SO_LINGER:
case TARGET_SO_RCVTIMEO:
case TARGET_SO_SNDTIMEO:
case TARGET_SO_PEERNAME:
@@ -3419,6 +2178,39 @@
}
break;
}
+ case TARGET_SO_LINGER:
+ {
+ struct linger lg;
+ socklen_t lglen;
+ struct target_linger *tlg;
+
+ if (get_user_u32(len, optlen)) {
+ return -TARGET_EFAULT;
+ }
+ if (len < 0) {
+ return -TARGET_EINVAL;
+ }
+
+ lglen = sizeof(lg);
+ ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
+ &lg, &lglen));
+ if (ret < 0) {
+ return ret;
+ }
+ if (len > lglen) {
+ len = lglen;
+ }
+ if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ __put_user(lg.l_onoff, &tlg->l_onoff);
+ __put_user(lg.l_linger, &tlg->l_linger);
+ unlock_user_struct(tlg, optval_addr, 1);
+ if (put_user_u32(len, optlen)) {
+ return -TARGET_EFAULT;
+ }
+ break;
+ }
/* Options with 'int' argument. */
case TARGET_SO_DEBUG:
optname = SO_DEBUG;
@@ -3733,90 +2525,6 @@
return fd;
}
-static abi_long packet_target_to_host_sockaddr(void *host_addr,
- abi_ulong target_addr,
- socklen_t len)
-{
- struct sockaddr *addr = host_addr;
- struct target_sockaddr *target_saddr;
-
- target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
- if (!target_saddr) {
- return -TARGET_EFAULT;
- }
-
- memcpy(addr, target_saddr, len);
- addr->sa_family = tswap16(target_saddr->sa_family);
- /* spkt_protocol is big-endian */
-
- unlock_user(target_saddr, target_addr, 0);
- return 0;
-}
-
-static TargetFdTrans target_packet_trans = {
- .target_to_host_addr = packet_target_to_host_sockaddr,
-};
-
-#ifdef CONFIG_RTNETLINK
-static abi_long netlink_route_target_to_host(void *buf, size_t len)
-{
- abi_long ret;
-
- ret = target_to_host_nlmsg_route(buf, len);
- if (ret < 0) {
- return ret;
- }
-
- return len;
-}
-
-static abi_long netlink_route_host_to_target(void *buf, size_t len)
-{
- abi_long ret;
-
- ret = host_to_target_nlmsg_route(buf, len);
- if (ret < 0) {
- return ret;
- }
-
- return len;
-}
-
-static TargetFdTrans target_netlink_route_trans = {
- .target_to_host_data = netlink_route_target_to_host,
- .host_to_target_data = netlink_route_host_to_target,
-};
-#endif /* CONFIG_RTNETLINK */
-
-static abi_long netlink_audit_target_to_host(void *buf, size_t len)
-{
- abi_long ret;
-
- ret = target_to_host_nlmsg_audit(buf, len);
- if (ret < 0) {
- return ret;
- }
-
- return len;
-}
-
-static abi_long netlink_audit_host_to_target(void *buf, size_t len)
-{
- abi_long ret;
-
- ret = host_to_target_nlmsg_audit(buf, len);
- if (ret < 0) {
- return ret;
- }
-
- return len;
-}
-
-static TargetFdTrans target_netlink_audit_trans = {
- .target_to_host_data = netlink_audit_target_to_host,
- .host_to_target_data = netlink_audit_host_to_target,
-};
-
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
@@ -7596,61 +6304,6 @@
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
-/* signalfd siginfo conversion */
-
-static void
-host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
- const struct signalfd_siginfo *info)
-{
- int sig = host_to_target_signal(info->ssi_signo);
-
- /* linux/signalfd.h defines a ssi_addr_lsb
- * not defined in sys/signalfd.h but used by some kernels
- */
-
-#ifdef BUS_MCEERR_AO
- if (tinfo->ssi_signo == SIGBUS &&
- (tinfo->ssi_code == BUS_MCEERR_AR ||
- tinfo->ssi_code == BUS_MCEERR_AO)) {
- uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
- uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
- *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
- }
-#endif
-
- tinfo->ssi_signo = tswap32(sig);
- tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
- tinfo->ssi_code = tswap32(info->ssi_code);
- tinfo->ssi_pid = tswap32(info->ssi_pid);
- tinfo->ssi_uid = tswap32(info->ssi_uid);
- tinfo->ssi_fd = tswap32(info->ssi_fd);
- tinfo->ssi_tid = tswap32(info->ssi_tid);
- tinfo->ssi_band = tswap32(info->ssi_band);
- tinfo->ssi_overrun = tswap32(info->ssi_overrun);
- tinfo->ssi_trapno = tswap32(info->ssi_trapno);
- tinfo->ssi_status = tswap32(info->ssi_status);
- tinfo->ssi_int = tswap32(info->ssi_int);
- tinfo->ssi_ptr = tswap64(info->ssi_ptr);
- tinfo->ssi_utime = tswap64(info->ssi_utime);
- tinfo->ssi_stime = tswap64(info->ssi_stime);
- tinfo->ssi_addr = tswap64(info->ssi_addr);
-}
-
-static abi_long host_to_target_data_signalfd(void *buf, size_t len)
-{
- int i;
-
- for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
- host_to_target_signalfd_siginfo(buf + i, buf + i);
- }
-
- return len;
-}
-
-static TargetFdTrans target_signalfd_trans = {
- .host_to_target_data = host_to_target_data_signalfd,
-};
-
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
int host_flags;
@@ -7976,55 +6629,6 @@
return timerid;
}
-static abi_long swap_data_eventfd(void *buf, size_t len)
-{
- uint64_t *counter = buf;
- int i;
-
- if (len < sizeof(uint64_t)) {
- return -EINVAL;
- }
-
- for (i = 0; i < len; i += sizeof(uint64_t)) {
- *counter = tswap64(*counter);
- counter++;
- }
-
- return len;
-}
-
-static TargetFdTrans target_eventfd_trans = {
- .host_to_target_data = swap_data_eventfd,
- .target_to_host_data = swap_data_eventfd,
-};
-
-#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
- (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
- defined(__NR_inotify_init1))
-static abi_long host_to_target_data_inotify(void *buf, size_t len)
-{
- struct inotify_event *ev;
- int i;
- uint32_t name_len;
-
- for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
- ev = (struct inotify_event *)((char *)buf + i);
- name_len = ev->len;
-
- ev->wd = tswap32(ev->wd);
- ev->mask = tswap32(ev->mask);
- ev->cookie = tswap32(ev->cookie);
- ev->len = tswap32(name_len);
- }
-
- return len;
-}
-
-static TargetFdTrans target_inotify_trans = {
- .host_to_target_data = host_to_target_data_inotify,
-};
-#endif
-
static int target_to_host_cpu_mask(unsigned long *host_mask,
size_t host_size,
abi_ulong target_addr,
@@ -8168,6 +6772,9 @@
}
return ret;
case TARGET_NR_write:
+ if (arg2 == 0 && arg3 == 0) {
+ return get_errno(safe_write(arg1, 0, 0));
+ }
if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
return -TARGET_EFAULT;
if (fd_trans_target_to_host_data(arg1)) {
@@ -9272,7 +7879,21 @@
rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
unlock_user_struct(target_rlim, arg2, 0);
- return get_errno(setrlimit(resource, &rlim));
+ /*
+ * If we just passed through resource limit settings for memory then
+ * they would also apply to QEMU's own allocations, and QEMU will
+ * crash or hang or die if its allocations fail. Ideally we would
+ * track the guest allocations in QEMU and apply the limits ourselves.
+ * For now, just tell the guest the call succeeded but don't actually
+ * limit anything.
+ */
+ if (resource != RLIMIT_AS &&
+ resource != RLIMIT_DATA &&
+ resource != RLIMIT_STACK) {
+ return get_errno(setrlimit(resource, &rlim));
+ } else {
+ return 0;
+ }
}
#endif
#ifdef TARGET_NR_getrlimit
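For context on the setrlimit hunk above: a guest that tries to cap its own
memory now sees success without any limit being enforced on the host process.
A minimal guest-side illustration (hypothetical test program, not part of this
patch):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl = { .rlim_cur = 64 << 20, .rlim_max = 64 << 20 };

    /* Under qemu-user this now reports success for RLIMIT_AS, RLIMIT_DATA
     * and RLIMIT_STACK, but nothing is actually limited. */
    if (setrlimit(RLIMIT_AS, &rl) != 0) {
        perror("setrlimit");
        return 1;
    }
    printf("setrlimit(RLIMIT_AS) succeeded\n");
    return 0;
}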
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 40bb60e..18d434d 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -203,6 +203,11 @@
uint32_t imr_sourceaddr;
};
+struct target_linger {
+ abi_int l_onoff; /* Linger active */
+ abi_int l_linger; /* How long to linger for */
+};
+
struct target_timeval {
abi_long tv_sec;
abi_long tv_usec;
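With struct target_linger and the TARGET_SO_LINGER cases added above, an
emulated program can now set and read back SO_LINGER; qemu-user converts the
values between target and host layouts. A minimal guest-side sketch
(hypothetical example, not part of this patch):

#include <string.h>
#include <sys/socket.h>

/* Enable a lingering close on a connected socket. */
static int enable_linger(int fd, int seconds)
{
    struct linger lg;

    memset(&lg, 0, sizeof(lg));
    lg.l_onoff = 1;
    lg.l_linger = seconds;
    return setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
}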
diff --git a/migration/migration.c b/migration/migration.c
index 05d0a72..d6ae879 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -758,6 +758,18 @@
info->xbzrle_cache->overflow = xbzrle_counters.overflow;
}
+ if (migrate_use_compression()) {
+ info->has_compression = true;
+ info->compression = g_malloc0(sizeof(*info->compression));
+ info->compression->pages = compression_counters.pages;
+ info->compression->busy = compression_counters.busy;
+ info->compression->busy_rate = compression_counters.busy_rate;
+ info->compression->compressed_size =
+ compression_counters.compressed_size;
+ info->compression->compression_rate =
+ compression_counters.compression_rate;
+ }
+
if (cpu_throttle_active()) {
info->has_cpu_throttle_percentage = true;
info->cpu_throttle_percentage = cpu_throttle_get_percentage();
@@ -2268,7 +2280,10 @@
*/
if (postcopy_pause_return_path_thread(ms)) {
/* Reload rp, reset the rest */
- rp = ms->rp_state.from_dst_file;
+ if (rp != ms->rp_state.from_dst_file) {
+ qemu_fclose(rp);
+ rp = ms->rp_state.from_dst_file;
+ }
ms->rp_state.error = false;
goto retry;
}
diff --git a/migration/ram.c b/migration/ram.c
index f6fd8e5..bc38d98 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -301,10 +301,19 @@
uint64_t num_dirty_pages_period;
/* xbzrle misses since the beginning of the period */
uint64_t xbzrle_cache_miss_prev;
- /* number of iterations at the beginning of period */
- uint64_t iterations_prev;
- /* Iterations since start */
- uint64_t iterations;
+
+ /* compression statistics since the beginning of the period */
+ /* count of times that no free thread was available to compress data */
+ uint64_t compress_thread_busy_prev;
+ /* number of bytes produced by compression */
+ uint64_t compressed_size_prev;
+ /* number of compressed pages */
+ uint64_t compress_pages_prev;
+
+ /* total handled target pages at the beginning of period */
+ uint64_t target_page_count_prev;
+ /* total handled target pages since start */
+ uint64_t target_page_count;
/* number of dirty bits in the bitmap */
uint64_t migration_dirty_pages;
/* protects modification of the bitmap */
@@ -338,6 +347,8 @@
};
typedef struct PageSearchStatus PageSearchStatus;
+CompressionStats compression_counters;
+
struct CompressParam {
bool done;
bool quit;
@@ -420,28 +431,14 @@
return NULL;
}
-static inline void terminate_compression_threads(void)
-{
- int idx, thread_count;
-
- thread_count = migrate_compress_threads();
-
- for (idx = 0; idx < thread_count; idx++) {
- qemu_mutex_lock(&comp_param[idx].mutex);
- comp_param[idx].quit = true;
- qemu_cond_signal(&comp_param[idx].cond);
- qemu_mutex_unlock(&comp_param[idx].mutex);
- }
-}
-
static void compress_threads_save_cleanup(void)
{
int i, thread_count;
- if (!migrate_use_compression()) {
+ if (!migrate_use_compression() || !comp_param) {
return;
}
- terminate_compression_threads();
+
thread_count = migrate_compress_threads();
for (i = 0; i < thread_count; i++) {
/*
@@ -451,6 +448,12 @@
if (!comp_param[i].file) {
break;
}
+
+ qemu_mutex_lock(&comp_param[i].mutex);
+ comp_param[i].quit = true;
+ qemu_cond_signal(&comp_param[i].cond);
+ qemu_mutex_unlock(&comp_param[i].mutex);
+
qemu_thread_join(compress_threads + i);
qemu_mutex_destroy(&comp_param[i].mutex);
qemu_cond_destroy(&comp_param[i].cond);
@@ -648,8 +651,8 @@
return -1;
}
- be32_to_cpus(&msg.magic);
- be32_to_cpus(&msg.version);
+ msg.magic = be32_to_cpu(msg.magic);
+ msg.version = be32_to_cpu(msg.version);
if (msg.magic != MULTIFD_MAGIC) {
error_setg(errp, "multifd: received packet magic %x "
@@ -734,7 +737,7 @@
RAMBlock *block;
int i;
- be32_to_cpus(&packet->magic);
+ packet->magic = be32_to_cpu(packet->magic);
if (packet->magic != MULTIFD_MAGIC) {
error_setg(errp, "multifd: received packet "
"magic %x and expected magic %x",
@@ -742,7 +745,7 @@
return -1;
}
- be32_to_cpus(&packet->version);
+ packet->version = be32_to_cpu(packet->version);
if (packet->version != MULTIFD_VERSION) {
error_setg(errp, "multifd: received packet "
"version %d and expected version %d",
@@ -752,7 +755,7 @@
p->flags = be32_to_cpu(packet->flags);
- be32_to_cpus(&packet->size);
+ packet->size = be32_to_cpu(packet->size);
if (packet->size > migrate_multifd_page_count()) {
error_setg(errp, "multifd: received packet "
"with size %d and expected maximum size %d",
@@ -1592,21 +1595,42 @@
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
- uint64_t iter_count = rs->iterations - rs->iterations_prev;
+ uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
+ double compressed_size;
/* calculate period counters */
ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
/ (end_time - rs->time_last_bitmap_sync);
- if (!iter_count) {
+ if (!page_count) {
return;
}
if (migrate_use_xbzrle()) {
xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
- rs->xbzrle_cache_miss_prev) / iter_count;
+ rs->xbzrle_cache_miss_prev) / page_count;
rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
}
+
+ if (migrate_use_compression()) {
+ compression_counters.busy_rate = (double)(compression_counters.busy -
+ rs->compress_thread_busy_prev) / page_count;
+ rs->compress_thread_busy_prev = compression_counters.busy;
+
+ compressed_size = compression_counters.compressed_size -
+ rs->compressed_size_prev;
+ if (compressed_size) {
+ double uncompressed_size = (compression_counters.pages -
+ rs->compress_pages_prev) * TARGET_PAGE_SIZE;
+
+ /* Compression-Ratio = Uncompressed-size / Compressed-size */
+ compression_counters.compression_rate =
+ uncompressed_size / compressed_size;
+
+ rs->compress_pages_prev = compression_counters.pages;
+ rs->compressed_size_prev = compression_counters.compressed_size;
+ }
+ }
}
static void migration_bitmap_sync(RAMState *rs)
@@ -1662,7 +1686,7 @@
migration_update_rates(rs, end_time);
- rs->iterations_prev = rs->iterations;
+ rs->target_page_count_prev = rs->target_page_count;
/* reset period counters */
rs->time_last_bitmap_sync = end_time;
@@ -1888,17 +1912,25 @@
static void
update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
{
+ ram_counters.transferred += bytes_xmit;
+
if (param->zero_page) {
ram_counters.duplicate++;
+ return;
}
- ram_counters.transferred += bytes_xmit;
+
+ /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
+ compression_counters.compressed_size += bytes_xmit - 8;
+ compression_counters.pages++;
}
+static bool save_page_use_compression(RAMState *rs);
+
static void flush_compressed_data(RAMState *rs)
{
int idx, len, thread_count;
- if (!migrate_use_compression()) {
+ if (!save_page_use_compression(rs)) {
return;
}
thread_count = migrate_compress_threads();
@@ -1996,17 +2028,22 @@
pss->page = 0;
pss->block = QLIST_NEXT_RCU(pss->block, next);
if (!pss->block) {
+ /*
+ * If memory migration starts over, we will meet a dirtied page
+ * which may still exist in the compression threads' ring, so we
+ * should flush the compressed data to make sure the new page
+ * is not overwritten by the old one in the destination.
+ *
+ * Also, if xbzrle is on, stop using data compression at this
+ * point. In theory, xbzrle can do better than compression.
+ */
+ flush_compressed_data(rs);
+
/* Hit the end of the list */
pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
/* Flag that we've looped */
pss->complete_round = true;
rs->ram_bulk_stage = false;
- if (migrate_use_xbzrle()) {
- /* If xbzrle is on, stop using the data compression at this
- * point. In theory, xbzrle can do better than compression.
- */
- flush_compressed_data(rs);
- }
}
/* Didn't find anything this time, but try again on the new block */
*again = true;
@@ -2259,6 +2296,7 @@
return true;
}
+ compression_counters.busy++;
return false;
}
@@ -2372,7 +2410,8 @@
*
* Called within an RCU critical section.
*
- * Returns the number of pages written where zero means no dirty pages
+ * Returns the number of pages written where zero means no dirty pages,
+ * or negative on error
*
* @rs: current RAM state
* @last_stage: if we are at the completion stage
@@ -3196,7 +3235,13 @@
done = 1;
break;
}
- rs->iterations++;
+
+ if (pages < 0) {
+ qemu_file_set_error(f, pages);
+ break;
+ }
+
+ rs->target_page_count += pages;
/* we want to check in the 1st loop, just in case it was the 1st time
and we had to sync the dirty bitmap.
@@ -3212,7 +3257,6 @@
}
i++;
}
- flush_compressed_data(rs);
rcu_read_unlock();
/*
@@ -3238,7 +3282,7 @@
/**
* ram_save_complete: function called to send the remaining amount of ram
*
- * Returns zero to indicate success
+ * Returns zero to indicate success or negative on error
*
* Called with iothread lock
*
@@ -3249,6 +3293,7 @@
{
RAMState **temp = opaque;
RAMState *rs = *temp;
+ int ret = 0;
rcu_read_lock();
@@ -3269,6 +3314,10 @@
if (pages == 0) {
break;
}
+ if (pages < 0) {
+ ret = pages;
+ break;
+ }
}
flush_compressed_data(rs);
@@ -3280,7 +3329,7 @@
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
- return 0;
+ return ret;
}
static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
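As a worked example of the period statistics computed in
migration_update_rates() above (illustrative numbers, not from a real run):
if 10000 target pages were handled in the period, the compression threads
were found busy 2500 times, and 2000 pages of 4 KiB were compressed down to
2 MiB, then busy_rate = 2500 / 10000 = 0.25 and compression_rate =
(2000 * 4096) / (2 * 1024 * 1024) ~= 3.9.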
diff --git a/migration/ram.h b/migration/ram.h
index 457bf54..a139066 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -36,6 +36,7 @@
extern MigrationStats ram_counters;
extern XBZRLECacheStats xbzrle_counters;
+extern CompressionStats compression_counters;
int xbzrle_cache_resize(int64_t new_size, Error **errp);
uint64_t ram_bytes_remaining(void);
diff --git a/migration/rdma.c b/migration/rdma.c
index ae07515..9b2e7e1 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -4012,7 +4012,7 @@
void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
int ret;
- RDMAContext *rdma, *rdma_return_path;
+ RDMAContext *rdma, *rdma_return_path = NULL;
Error *local_err = NULL;
trace_rdma_start_incoming_migration();
diff --git a/migration/savevm.c b/migration/savevm.c
index 13e51f0..2d10e45 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1679,6 +1679,7 @@
qemu_loadvm_state_cleanup();
rcu_unregister_thread();
+ mis->have_listen_thread = false;
return NULL;
}
@@ -2078,7 +2079,9 @@
/* Find savevm section */
se = find_se(idstr, instance_id);
if (se == NULL) {
- error_report("Unknown savevm section or instance '%s' %d",
+ error_report("Unknown savevm section or instance '%s' %d. "
+ "Make sure that your current VM setup matches your "
+ "saved VM setup, including any hotplugged devices",
idstr, instance_id);
return -EINVAL;
}
@@ -2330,11 +2333,13 @@
if (migrate_get_current()->send_configuration) {
if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
error_report("Configuration section missing");
+ qemu_loadvm_state_cleanup();
return -EINVAL;
}
ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
if (ret) {
+ qemu_loadvm_state_cleanup();
return ret;
}
}
diff --git a/net/net.c b/net/net.c
index 2a31339..cdcd5cf 100644
--- a/net/net.c
+++ b/net/net.c
@@ -984,6 +984,10 @@
/* missing optional values have been initialized to "all bits zero" */
name = net->has_id ? net->id : net->name;
+ if (net->has_name) {
+ warn_report("The 'name' parameter is deprecated, use 'id' instead");
+ }
+
/* Map the old options to the new flat type */
switch (opts->type) {
case NET_LEGACY_OPTIONS_TYPE_NONE:
diff --git a/net/slirp.c b/net/slirp.c
index c18060f..c93b64d 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -404,6 +404,8 @@
monitor_printf(mon, "unrecognized (hub-id, stackname) pair\n");
return NULL;
}
+ warn_report("Using 'hub-id' is deprecated, specify the netdev id "
+ "directly instead");
} else {
nc = qemu_find_netdev(name);
if (!nc) {
diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img
old mode 100755
new mode 100644
Binary files differ
diff --git a/pc-bios/palcode-clipper b/pc-bios/palcode-clipper
old mode 100755
new mode 100644
Binary files differ
diff --git a/pc-bios/u-boot-sam460-20100605.bin b/pc-bios/u-boot-sam460-20100605.bin
old mode 100755
new mode 100644
Binary files differ
diff --git a/pc-bios/u-boot.e500 b/pc-bios/u-boot.e500
old mode 100755
new mode 100644
Binary files differ
diff --git a/qapi/migration.json b/qapi/migration.json
index f62d3f9..6e8c212 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -76,6 +76,27 @@
'overflow': 'int' } }
##
+# @CompressionStats:
+#
+# Detailed migration compression statistics
+#
+# @pages: number of pages compressed and transferred to the target VM
+#
+# @busy: count of times that no free thread was available to compress data
+#
+# @busy-rate: fraction of pages for which no free compression thread was available
+#
+# @compressed-size: total number of bytes produced by compression
+#
+# @compression-rate: average compression ratio (uncompressed size / compressed size)
+#
+# Since: 3.1
+##
+{ 'struct': 'CompressionStats',
+ 'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
+ 'compressed-size': 'int', 'compression-rate': 'number' } }
+
+##
# @MigrationStatus:
#
# An enumeration of migration status.
@@ -172,6 +193,8 @@
# only present when the postcopy-blocktime migration capability
# is enabled. (Since 3.0)
#
+# @compression: migration compression statistics, only returned if compression
+# feature is on and status is 'active' or 'completed' (Since 3.1)
#
# Since: 0.14.0
##
@@ -186,7 +209,8 @@
'*cpu-throttle-percentage': 'int',
'*error-desc': 'str',
'*postcopy-blocktime' : 'uint32',
- '*postcopy-vcpu-blocktime': ['uint32']} }
+ '*postcopy-vcpu-blocktime': ['uint32'],
+ '*compression': 'CompressionStats'} }
##
# @query-migrate:
diff --git a/qemu-deprecated.texi b/qemu-deprecated.texi
index 2283fc5..16ff946 100644
--- a/qemu-deprecated.texi
+++ b/qemu-deprecated.texi
@@ -35,6 +35,11 @@
@section System emulator command line arguments
+@subsection -machine enforce-config-section=on|off (since 3.1)
+
+The @option{enforce-config-section} parameter is replaced by the
+@option{-global migration.send-configuration=@var{on|off}} option.
+
@subsection -no-kvm (since 1.3.0)
The ``-no-kvm'' argument is now a synonym for setting
@@ -83,6 +88,11 @@
devices and will only accept regular files (S_IFREG). The correct driver
for these file types is 'host_cdrom' or 'host_device' as appropriate.
+@subsection -net ...,name=@var{name} (since 3.1)
+
+The @option{name} parameter of the @option{-net} option is a synonym
+for the @option{id} parameter, which should now be used instead.
+
@section QEMU Machine Protocol (QMP) commands
@subsection block-dirty-bitmap-add "autoload" parameter (since 2.12.0)
@@ -99,6 +109,13 @@
The ``arch'' output member of the ``query-cpus-fast'' command is
replaced by the ``target'' output member.
+@section System emulator human monitor commands
+
+@subsection The hub_id parameter of 'hostfwd_add' / 'hostfwd_remove' (since 3.1)
+
+The @option{[hub_id name]} parameter tuple of the 'hostfwd_add' and
+'hostfwd_remove' HMP commands has been replaced by @option{netdev_id}.
+
@section System emulator devices
@subsection ivshmem (since 2.6.0)
diff --git a/qemu-seccomp.c b/qemu-seccomp.c
index 4729eb1..1baa5c6 100644
--- a/qemu-seccomp.c
+++ b/qemu-seccomp.c
@@ -282,7 +282,24 @@
static void seccomp_register(void)
{
- qemu_add_opts(&qemu_sandbox_opts);
+ bool add = false;
+
+ /* FIXME: use seccomp_api_get() >= 2 check when released */
+
+#if defined(SECCOMP_FILTER_FLAG_TSYNC)
+ int check;
+
+ /* Probe host TSYNC capability: with a NULL filter the call fails
+  * with EFAULT only when the kernel supports the flag. */
+ check = qemu_seccomp(SECCOMP_SET_MODE_FILTER,
+ SECCOMP_FILTER_FLAG_TSYNC, NULL);
+ if (check < 0 && errno == EFAULT) {
+ add = true;
+ }
+#endif
+
+ if (add) {
+ qemu_add_opts(&qemu_sandbox_opts);
+ }
}
opts_init(seccomp_register);
#endif
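The qemu-seccomp.c hunk above relies on a NULL filter pointer being rejected
with EFAULT only when the kernel understands SECCOMP_FILTER_FLAG_TSYNC. A
standalone sketch of that probe (illustrative only; assumes Linux headers that
provide __NR_seccomp and the seccomp constants):

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>

static bool host_supports_tsync(void)
{
    /* A kernel that knows the TSYNC flag gets as far as copying the NULL
     * filter and fails with EFAULT; an older kernel rejects the call
     * earlier with EINVAL or ENOSYS. */
    int rc = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
                     SECCOMP_FILTER_FLAG_TSYNC, NULL);
    return rc < 0 && errno == EFAULT;
}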
diff --git a/replay/replay-char.c b/replay/replay-char.c
old mode 100755
new mode 100644
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 87c81d1..d0c0a92 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -255,12 +255,8 @@
gcov-files-pci-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c
check-qtest-pci-y += tests/megasas-test$(EXESUF)
gcov-files-pci-y += hw/scsi/megasas.c
-check-qtest-$(CONFIG_VMXNET3_PCI) += tests/vmxnet3-test$(EXESUF)
-gcov-files-$(CONFIG_VMXNET3_PCI) += hw/net/vmxnet3.c
-check-qtest-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
-check-qtest-$(CONFIG_WDT_IB700) += tests/wdt_ib700-test$(EXESUF)
-gcov-files-$(CONFIG_WDT_IB700) += hw/watchdog/watchdog.c hw/watchdog/wdt_ib700.c
+check-qtest-i386-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
check-qtest-i386-y += tests/fdc-test$(EXESUF)
gcov-files-i386-y = hw/block/fdc.c
check-qtest-i386-y += tests/ide-test$(EXESUF)
@@ -277,9 +273,13 @@
check-qtest-i386-y += tests/i440fx-test$(EXESUF)
check-qtest-i386-y += tests/fw_cfg-test$(EXESUF)
check-qtest-i386-y += tests/drive_del-test$(EXESUF)
+check-qtest-i386-$(CONFIG_WDT_IB700) += tests/wdt_ib700-test$(EXESUF)
+gcov-files-i386-$(CONFIG_WDT_IB700) += hw/watchdog/watchdog.c hw/watchdog/wdt_ib700.c
check-qtest-i386-y += tests/tco-test$(EXESUF)
check-qtest-i386-y += $(check-qtest-pci-y)
gcov-files-i386-y += $(gcov-files-pci-y)
+check-qtest-i386-$(CONFIG_VMXNET3_PCI) += tests/vmxnet3-test$(EXESUF)
+gcov-files-i386-$(CONFIG_VMXNET3_PCI) += hw/net/vmxnet3.c
gcov-files-i386-y += hw/net/net_rx_pkt.c
gcov-files-i386-y += hw/net/net_tx_pkt.c
check-qtest-i386-$(CONFIG_PVPANIC) += tests/pvpanic-test$(EXESUF)
@@ -332,8 +332,15 @@
check-qtest-microblaze-y = tests/boot-serial-test$(EXESUF)
+check-qtest-mips-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
+check-qtest-mips64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
+check-qtest-mips64el-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
check-qtest-moxie-y = tests/boot-serial-test$(EXESUF)
+check-qtest-ppc-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
check-qtest-ppc-y += tests/boot-order-test$(EXESUF)
check-qtest-ppc-y += tests/prom-env-test$(EXESUF)
check-qtest-ppc-y += tests/drive_del-test$(EXESUF)
@@ -366,11 +373,16 @@
gcov-files-ppc64-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c
check-qtest-ppc64-y += tests/cpu-plug-test$(EXESUF)
+check-qtest-sh4-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
+check-qtest-sh4eb-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
+
check-qtest-sparc-y = tests/prom-env-test$(EXESUF)
check-qtest-sparc-y += tests/m48t59-test$(EXESUF)
gcov-files-sparc-y = hw/timer/m48t59.c
check-qtest-sparc-y += tests/boot-serial-test$(EXESUF)
+check-qtest-sparc64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
check-qtest-sparc64-y += tests/prom-env-test$(EXESUF)
check-qtest-sparc64-y += tests/boot-serial-test$(EXESUF)
diff --git a/tests/migration-test.c b/tests/migration-test.c
index 0e687b7..20f38f1 100644
--- a/tests/migration-test.c
+++ b/tests/migration-test.c
@@ -21,11 +21,13 @@
#include "chardev/char.h"
#include "sysemu/sysemu.h"
+#include "migration/migration-test.h"
+
/* TODO actually test the results and get rid of this */
#define qtest_qmp_discard_response(...) qobject_unref(qtest_qmp(__VA_ARGS__))
-const unsigned start_address = 1024 * 1024;
-const unsigned end_address = 100 * 1024 * 1024;
+unsigned start_address;
+unsigned end_address;
bool got_stop;
static bool uffd_feature_thread_id;
@@ -80,10 +82,10 @@
static const char *tmpfs;
-/* A simple PC boot sector that modifies memory (1-100MB) quickly
- * outputting a 'B' every so often if it's still running.
+/* The boot file modifies the memory area in [start_address, end_address)
+ * repeatedly. It outputs a 'B' at a fixed rate while it's still running.
*/
-#include "tests/migration/x86-a-b-bootblock.h"
+#include "tests/migration/i386/a-b-bootblock.h"
static void init_bootfile_x86(const char *bootpath)
{
@@ -270,11 +272,11 @@
static void check_guests_ram(QTestState *who)
{
/* Our ASM test will have been incrementing one byte from each page from
- * 1MB to <100MB in order.
- * This gives us a constraint that any page's byte should be equal or less
- * than the previous pages byte (mod 256); and they should all be equal
- * except for one transition at the point where we meet the incrementer.
- * (We're running this with the guest stopped).
+ * start_address to < end_address in order. This gives us a constraint
+ * that any page's byte should be equal or less than the previous pages
+ * byte (mod 256); and they should all be equal except for one transition
+ * at the point where we meet the incrementer. (We're running this with
+ * the guest stopped).
*/
unsigned address;
uint8_t first_byte;
@@ -285,7 +287,8 @@
qtest_memread(who, start_address, &first_byte, 1);
last_byte = first_byte;
- for (address = start_address + 4096; address < end_address; address += 4096)
+ for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address;
+ address += TEST_MEM_PAGE_SIZE)
{
uint8_t b;
qtest_memread(who, address, &b, 1);
@@ -437,12 +440,14 @@
" -drive file=%s,format=raw"
" -incoming %s",
accel, tmpfs, bootpath, uri);
+ start_address = X86_TEST_MEM_START;
+ end_address = X86_TEST_MEM_END;
} else if (strcmp(arch, "ppc64") == 0) {
- cmd_src = g_strdup_printf("-machine accel=%s -m 256M"
+ cmd_src = g_strdup_printf("-machine accel=%s -m 256M -nodefaults"
" -name source,debug-threads=on"
" -serial file:%s/src_serial"
- " -prom-env '"
- "boot-command=hex .\" _\" begin %x %x "
+ " -prom-env 'use-nvramrc?=true' -prom-env "
+ "'nvramrc=hex .\" _\" begin %x %x "
"do i c@ 1 + i c! 1000 +loop .\" B\" 0 "
"until'", accel, tmpfs, end_address,
start_address);
@@ -451,6 +456,9 @@
" -serial file:%s/dest_serial"
" -incoming %s",
accel, tmpfs, uri);
+
+ start_address = PPC_TEST_MEM_START;
+ end_address = PPC_TEST_MEM_END;
} else {
g_assert_not_reached();
}
diff --git a/tests/migration/Makefile b/tests/migration/Makefile
new file mode 100644
index 0000000..dc3b551
--- /dev/null
+++ b/tests/migration/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+
+TARGET_LIST = i386
+
+SRC_PATH = ../..
+
+override define __note
+/* This file is automatically generated from the assembly file in
+ * tests/migration/$@. Edit that file and then run "make all"
+ * inside tests/migration to update, and then remember to send both
+ * the header and the assembler differences in your patch submission.
+ */
+endef
+export __note
+
+find-arch-cross-cc = $(lastword $(shell grep -h "CROSS_CC_GUEST=" $(wildcard $(SRC_PATH)/$(patsubst i386,*86*,$(1))-softmmu/config-target.mak) /dev/null))
+parse-cross-prefix = $(subst gcc,,$(patsubst cc,gcc,$(patsubst CROSS_CC_GUEST="%",%,$(call find-arch-cross-cc,$(1)))))
+gen-cross-prefix = $(patsubst %-,CROSS_PREFIX=%-,$(call parse-cross-prefix,$(1)))
+
+.PHONY: all $(TARGET_LIST)
+
+all: $(TARGET_LIST)
+
+$(TARGET_LIST):
+ $(MAKE) -C $@ $(call gen-cross-prefix,$@)
+
+clean:
+ for target in $(TARGET_LIST); do \
+ $(MAKE) -C $$target clean; \
+ done
diff --git a/tests/migration/i386/Makefile b/tests/migration/i386/Makefile
new file mode 100644
index 0000000..5c03241
--- /dev/null
+++ b/tests/migration/i386/Makefile
@@ -0,0 +1,22 @@
+# To specify cross compiler prefix, use CROSS_PREFIX=
+# $ make CROSS_PREFIX=x86_64-linux-gnu-
+
+.PHONY: all clean
+all: a-b-bootblock.h
+
+a-b-bootblock.h: x86.bootsect
+ echo "$$__note" > header.tmp
+ xxd -i $< | sed -e 's/.*int.*//' >> header.tmp
+ mv header.tmp $@
+
+x86.bootsect: x86.boot
+ dd if=$< of=$@ bs=256 count=2 skip=124
+
+x86.boot: x86.o
+ $(CROSS_PREFIX)objcopy -O binary $< $@
+
+x86.o: a-b-bootblock.S
+ $(CROSS_PREFIX)gcc -m32 -march=i486 -c $< -o $@
+
+clean:
+ @rm -rf *.boot *.o *.bootsect
diff --git a/tests/migration/x86-a-b-bootblock.s b/tests/migration/i386/a-b-bootblock.S
similarity index 93%
rename from tests/migration/x86-a-b-bootblock.s
rename to tests/migration/i386/a-b-bootblock.S
index b164264..3f97f28 100644
--- a/tests/migration/x86-a-b-bootblock.s
+++ b/tests/migration/i386/a-b-bootblock.S
@@ -3,10 +3,6 @@
# range.
# Outputs an initial 'A' on serial followed by repeated 'B's
#
-# run tests/migration/rebuild-x86-bootblock.sh
-# to regenerate the hex, and remember to include both the .h and .s
-# in any patches.
-#
# Copyright (c) 2016 Red Hat, Inc. and/or its affiliates
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
diff --git a/tests/migration/x86-a-b-bootblock.h b/tests/migration/i386/a-b-bootblock.h
similarity index 91%
rename from tests/migration/x86-a-b-bootblock.h
rename to tests/migration/i386/a-b-bootblock.h
index 78a151f..7d459d4 100644
--- a/tests/migration/x86-a-b-bootblock.h
+++ b/tests/migration/i386/a-b-bootblock.h
@@ -1,7 +1,7 @@
-/* This file is automatically generated from
- * tests/migration/x86-a-b-bootblock.s, edit that and then run
- * tests/migration/rebuild-x86-bootblock.sh to update,
- * and then remember to send both in your patch submission.
+/* This file is automatically generated from the assembly file in
+ * tests/migration/i386. Edit that file and then run "make all"
+ * inside tests/migration to update, and then remember to send both
+ * the header and the assembler differences in your patch submission.
*/
unsigned char x86_bootsect[] = {
0xfa, 0x0f, 0x01, 0x16, 0x74, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00,
diff --git a/tests/migration/migration-test.h b/tests/migration/migration-test.h
new file mode 100644
index 0000000..c4c0c52
--- /dev/null
+++ b/tests/migration/migration-test.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef _TEST_MIGRATION_H_
+#define _TEST_MIGRATION_H_
+
+/* Common */
+#define TEST_MEM_PAGE_SIZE 4096
+
+/* x86 */
+#define X86_TEST_MEM_START (1 * 1024 * 1024)
+#define X86_TEST_MEM_END (100 * 1024 * 1024)
+
+/* PPC */
+#define PPC_TEST_MEM_START (1 * 1024 * 1024)
+#define PPC_TEST_MEM_END (100 * 1024 * 1024)
+
+#endif /* _TEST_MIGRATION_H_ */
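
The constants above bound the guest RAM window that the migration tests keep dirtying; the real workload is the hand-written a-b-bootblock assembly renamed above, which prints an initial 'A' on serial followed by repeated 'B's. As a rough, hypothetical C sketch only (not the actual bootblock), one pass over that window using nothing but the constants from this header could look like:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TEST_MEM_PAGE_SIZE 4096
    #define X86_TEST_MEM_START (1 * 1024 * 1024)
    #define X86_TEST_MEM_END   (100 * 1024 * 1024)

    /* Touch one byte per page so every page in the window becomes dirty. */
    static void dirty_pass(uint8_t *ram)
    {
        for (size_t addr = X86_TEST_MEM_START; addr < X86_TEST_MEM_END;
             addr += TEST_MEM_PAGE_SIZE) {
            ram[addr]++;
        }
    }

    int main(void)
    {
        /* Host-side stand-in for guest RAM, just to exercise the loop. */
        uint8_t *ram = calloc(X86_TEST_MEM_END, 1);

        if (!ram) {
            return 1;
        }
        dirty_pass(ram);
        printf("dirtied %d pages\n",
               (X86_TEST_MEM_END - X86_TEST_MEM_START) / TEST_MEM_PAGE_SIZE);
        free(ram);
        return 0;
    }

The sketch only illustrates the page-stride arithmetic; the real guest code works directly on guest physical memory and reports progress on the serial port.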
diff --git a/tests/migration/rebuild-x86-bootblock.sh b/tests/migration/rebuild-x86-bootblock.sh
deleted file mode 100755
index 86cec5d..0000000
--- a/tests/migration/rebuild-x86-bootblock.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
-# This work is licensed under the terms of the GNU GPL, version 2 or later.
-# See the COPYING file in the top-level directory.
-#
-# Author: dgilbert@redhat.com
-
-ASMFILE=$PWD/tests/migration/x86-a-b-bootblock.s
-HEADER=$PWD/tests/migration/x86-a-b-bootblock.h
-
-if [ ! -e "$ASMFILE" ]
-then
- echo "Couldn't find $ASMFILE" >&2
- exit 1
-fi
-
-ASM_WORK_DIR=$(mktemp -d --tmpdir X86BB.XXXXXX)
-cd "$ASM_WORK_DIR" &&
-as --32 -march=i486 "$ASMFILE" -o x86.o &&
-objcopy -O binary x86.o x86.boot &&
-dd if=x86.boot of=x86.bootsect bs=256 count=2 skip=124 &&
-xxd -i x86.bootsect |
-sed -e 's/.*int.*//' > x86.hex &&
-cat - x86.hex <<HERE > "$HEADER"
-/* This file is automatically generated from
- * tests/migration/x86-a-b-bootblock.s, edit that and then run
- * tests/migration/rebuild-x86-bootblock.sh to update,
- * and then remember to send both in your patch submission.
- */
-HERE
-
-rm x86.hex x86.bootsect x86.boot x86.o
-cd .. && rmdir "$ASM_WORK_DIR"
diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py
index 7e58d9e..cafbc6b 100755
--- a/tests/vm/basevm.py
+++ b/tests/vm/basevm.py
@@ -65,6 +65,7 @@
self._stdout = self._devnull
self._args = [ \
"-nodefaults", "-m", "4G",
+ "-cpu", "max",
"-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22",
"-device", "virtio-net-pci,netdev=vnet",
"-vnc", "127.0.0.1:0,to=20",
@@ -72,11 +73,9 @@
if vcpus:
self._args += ["-smp", str(vcpus)]
if os.access("/dev/kvm", os.R_OK | os.W_OK):
- self._args += ["-cpu", "host"]
self._args += ["-enable-kvm"]
else:
logging.info("KVM not available, not using -enable-kvm")
- self._args += ["-cpu", "max"]
self._data_args = []
def _download_with_cache(self, url, sha256sum=None):
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 131ba6b..621b302 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -211,6 +211,7 @@
AioHandler *node;
bool is_new = false;
bool deleted = false;
+ int poll_disable_change;
qemu_lockcnt_lock(&ctx->list_lock);
@@ -244,11 +245,9 @@
QLIST_REMOVE(node, node);
deleted = true;
}
-
- if (!node->io_poll) {
- ctx->poll_disable_cnt--;
- }
+ poll_disable_change = -!node->io_poll;
} else {
+ poll_disable_change = !io_poll - (node && !node->io_poll);
if (node == NULL) {
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
@@ -257,10 +256,6 @@
g_source_add_poll(&ctx->source, &node->pfd);
is_new = true;
-
- ctx->poll_disable_cnt += !io_poll;
- } else {
- ctx->poll_disable_cnt += !io_poll - !node->io_poll;
}
/* Update handler with latest information */
@@ -274,6 +269,15 @@
node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
}
+ /* No need to order poll_disable_cnt writes against other updates;
+ * the counter is only used to avoid wasting time and latency on
+ * iterated polling when the system call will be ultimately necessary.
+ * Changing handlers is a rare event, and a little wasted polling until
+ * the aio_notify below is not an issue.
+ */
+ atomic_set(&ctx->poll_disable_cnt,
+ atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
+
aio_epoll_update(ctx, node, is_new);
qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
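
For reference, the arithmetic behind poll_disable_change in the hunks above: poll_disable_cnt counts registered handlers that have no io_poll callback, and every add/update/remove is folded into one signed delta applied with a single atomic update. A minimal standalone sketch (not QEMU code; "struct handler" and "has_io_poll" are made-up stand-ins for AioHandler and node->io_poll):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct handler { bool has_io_poll; };

    /* delta for removing an existing handler */
    static int delta_remove(const struct handler *old)
    {
        return -!old->has_io_poll;          /* -1 if it was counted, else 0 */
    }

    /* delta for adding a new handler or updating an existing one;
     * old == NULL means "freshly allocated node" */
    static int delta_add_or_update(const struct handler *old, bool new_has_io_poll)
    {
        return !new_has_io_poll - (old && !old->has_io_poll);
    }

    int main(void)
    {
        struct handler polled = { .has_io_poll = true };
        struct handler unpolled = { .has_io_poll = false };

        assert(delta_add_or_update(NULL, false) == 1);      /* new, no io_poll */
        assert(delta_add_or_update(NULL, true) == 0);       /* new, polled */
        assert(delta_add_or_update(&unpolled, true) == -1); /* gains io_poll */
        assert(delta_add_or_update(&polled, false) == 1);   /* loses io_poll */
        assert(delta_remove(&unpolled) == -1);
        assert(delta_remove(&polled) == 0);

        printf("poll_disable_cnt delta arithmetic checks out\n");
        return 0;
    }

Computing the delta up front is what allows the counter to be written with one unordered atomic_set, as the comment in the hunk above explains.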
@@ -486,7 +490,7 @@
npfd++;
}
-static bool run_poll_handlers_once(AioContext *ctx)
+static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
{
bool progress = false;
AioHandler *node;
@@ -494,9 +498,11 @@
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_poll &&
aio_node_check(ctx, node->is_external) &&
- node->io_poll(node->opaque) &&
- node->opaque != &ctx->notifier) {
- progress = true;
+ node->io_poll(node->opaque)) {
+ *timeout = 0;
+ if (node->opaque != &ctx->notifier) {
+ progress = true;
+ }
}
/* Caller handles freeing deleted nodes. Don't do it here. */
@@ -518,31 +524,38 @@
*
* Returns: true if progress was made, false otherwise
*/
-static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
+static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
{
bool progress;
- int64_t end_time;
+ int64_t start_time, elapsed_time;
assert(ctx->notify_me);
assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
- assert(ctx->poll_disable_cnt == 0);
- trace_run_poll_handlers_begin(ctx, max_ns);
+ trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
- end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
-
+ start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
do {
- progress = run_poll_handlers_once(ctx);
- } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
+ progress = run_poll_handlers_once(ctx, timeout);
+ elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
+ } while (!progress && elapsed_time < max_ns
+ && !atomic_read(&ctx->poll_disable_cnt));
- trace_run_poll_handlers_end(ctx, progress);
+ /* If time has passed with no successful polling, adjust *timeout to
+ * keep the same ending time.
+ */
+ if (*timeout != -1) {
+ *timeout -= MIN(*timeout, elapsed_time);
+ }
+ trace_run_poll_handlers_end(ctx, progress, *timeout);
return progress;
}
/* try_poll_mode:
* @ctx: the AioContext
- * @blocking: busy polling is only attempted when blocking is true
+ * @timeout: timeout for blocking wait, computed by the caller and updated if
+ * polling succeeds.
*
* ctx->notify_me must be non-zero so this function can detect aio_notify().
*
@@ -550,19 +563,16 @@
*
* Returns: true if progress was made, false otherwise
*/
-static bool try_poll_mode(AioContext *ctx, bool blocking)
+static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
- if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
- /* See qemu_soonest_timeout() uint64_t hack */
- int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
- (uint64_t)ctx->poll_ns);
+ /* See qemu_soonest_timeout() uint64_t hack */
+ int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);
- if (max_ns) {
- poll_set_started(ctx, true);
+ if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
+ poll_set_started(ctx, true);
- if (run_poll_handlers(ctx, max_ns)) {
- return true;
- }
+ if (run_poll_handlers(ctx, max_ns, timeout)) {
+ return true;
}
}
@@ -571,7 +581,7 @@
/* Even if we don't run busy polling, try polling once in case it can make
* progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
*/
- return run_poll_handlers_once(ctx);
+ return run_poll_handlers_once(ctx, timeout);
}
bool aio_poll(AioContext *ctx, bool blocking)
@@ -601,8 +611,14 @@
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
- progress = try_poll_mode(ctx, blocking);
- if (!progress) {
+ timeout = blocking ? aio_compute_timeout(ctx) : 0;
+ progress = try_poll_mode(ctx, &timeout);
+ assert(!(timeout && progress));
+
+ /* If polling is allowed, non-blocking aio_poll does not need the
+ * system call---a single round of run_poll_handlers_once suffices.
+ */
+ if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
assert(npfd == 0);
/* fill pollfds */
@@ -616,8 +632,6 @@
}
}
- timeout = blocking ? aio_compute_timeout(ctx) : 0;
-
/* wait until next event */
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
AioHandler epoll_handler;
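
To make the new control flow easier to follow, here is a minimal, self-contained sketch (assumptions: this is not QEMU code, and poll_fn/never_ready/run_poll are invented names) of what run_poll_handlers and aio_poll now do with the shared timeout: busy-poll for at most max_ns, force the timeout to 0 as soon as polling finds work, charge the time spent polling against the remaining timeout, and only fall back to the blocking system call when a non-zero timeout is left.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* Hypothetical poll callback: returns true when work was found. */
    typedef bool (*poll_fn)(void *opaque);

    static bool run_poll(poll_fn poll, void *opaque, int64_t max_ns,
                         int64_t *timeout)
    {
        int64_t start = now_ns();
        int64_t elapsed;
        bool progress;

        do {
            progress = poll(opaque);
            if (progress) {
                *timeout = 0;   /* success: the blocking wait can be skipped */
            }
            elapsed = now_ns() - start;
        } while (!progress && elapsed < max_ns);

        /* Keep the same overall deadline: subtract the time spent polling,
         * unless the caller asked for an infinite wait (-1). */
        if (*timeout != -1) {
            *timeout -= *timeout < elapsed ? *timeout : elapsed;
        }
        return progress;
    }

    static bool never_ready(void *opaque)
    {
        (void)opaque;
        return false;
    }

    int main(void)
    {
        int64_t timeout = 10 * 1000 * 1000;   /* 10 ms budget for blocking wait */
        bool progress = run_poll(never_ready, NULL, 2 * 1000 * 1000, &timeout);

        /* About 2 ms were burned polling, so roughly 8 ms remain for ppoll(). */
        printf("progress=%d, remaining timeout=%" PRId64 " ns\n",
               progress, timeout);
        return 0;
    }

In the patch itself the same idea is split across run_poll_handlers_once (which zeroes *timeout when an io_poll callback succeeds), run_poll_handlers (which subtracts the elapsed polling time), and aio_poll (which skips the pollfd path when the timeout ended up at 0 and polling is not disabled).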
diff --git a/util/memfd.c b/util/memfd.c
index d248a53..6287946 100644
--- a/util/memfd.c
+++ b/util/memfd.c
@@ -187,6 +187,7 @@
int fd;
void *ptr;
+ fd = -1;
ptr = qemu_memfd_alloc("test", 4096, 0, &fd, NULL);
memfd_check = ptr ? MEMFD_OK : MEMFD_KO;
qemu_memfd_free(ptr, 4096, fd);
diff --git a/util/trace-events b/util/trace-events
index 4822434..79569b7 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -1,8 +1,8 @@
# See docs/devel/tracing.txt for syntax documentation.
# util/aio-posix.c
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
+run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ns %"PRId64 " timeout %"PRId64
+run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
diff --git a/vl.c b/vl.c
index 694bb67..cc55fe0 100644
--- a/vl.c
+++ b/vl.c
@@ -3917,8 +3917,8 @@
}
#ifdef CONFIG_SECCOMP
- if (qemu_opts_foreach(qemu_find_opts("sandbox"),
- parse_sandbox, NULL, NULL)) {
+ olist = qemu_find_opts_err("sandbox", NULL);
+ if (olist && qemu_opts_foreach(olist, parse_sandbox, NULL, NULL)) {
exit(1);
}
#endif
@@ -4530,6 +4530,7 @@
if (load_snapshot(loadvm, &local_err) < 0) {
error_report_err(local_err);
autostart = 0;
+ exit(1);
}
}