Merge tag 'qga-pull-2023-03-22' of github.com:kostyanf14/qemu into staging
qga-pull-2023-03-22
# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEEwsLBCepDxjwUI+uE711egWG6hOcFAmQbUgoACgkQ711egWG6
# hOe8pw/9Gc3rySQ0mEt45kb5ESpRbwvpkYHEmA4aYmhDhuVHjuWpw1MgL980GTGe
# X2/y1ApcS6GXme4Bb3DbLu1U+N/B9l2yHDYmAMpvtljydcOc0F5KRnwhYFtB6P2L
# nSmOorIteaQmZ/DYnGF3u7KZ08dIOqujZ6QUTDAdR2q7+P/9kW4kuF3/XJHXVA8Q
# f+DQ8dYLSDw/eLPcwM18IdUV3xhTZgvSnADiQ4L3NEexLcrq7ZFYv1S66Q5+dQTK
# xijFSDVWR8+Q6PVBOBz5bP+hrYc+rmjAblk+DT+LkPruNOuBY1y09RPmaKJnvBjo
# hsj7BmcJ3dVPRmADy7gQWaE2F8A1GR4OU79JSCm46BHUMDGm1363gwhvPSeLeQQ9
# 5pqKyRImU3cMF3Re06ZsOX5D02jWz7VSGKWT/JEHnWrX9U5hurnNl20pgiAbKpkv
# k10IUfEufTfQLjz3oNY/At1XFtqg8xVGRS3bhwWoFBrWiUEwVYGEg1AwrtSQ25Tw
# +7j54A3DSvJie5nxYVJAnpZMNQxUVaBkF5PWJ1fRy23UbZZwPT2MSKZDbQPAvl9g
# 7K/rZVDHnBxTA/hvMeoGuxCY/kpkAV0WfiKAi1zzNGdKvxDKlyFJAD07KtfyN5oF
# QH2HQwTu1/gma+hWzSuJi2rhhcEXwojYemLfLMzCK4OBuQj8dLE=
# =uTF5
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 22 Mar 2023 19:07:54 GMT
# gpg: using RSA key C2C2C109EA43C63C1423EB84EF5D5E8161BA84E7
# gpg: Good signature from "Kostiantyn Kostiuk (Upstream PR sign) <kkostiuk@redhat.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: C2C2 C109 EA43 C63C 1423 EB84 EF5D 5E81 61BA 84E7
* tag 'qga-pull-2023-03-22' of github.com:kostyanf14/qemu:
qga/vss-win32: fix warning for clang++-15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml
index 44b8275..ba6f551 100644
--- a/.gitlab-ci.d/buildtest.yml
+++ b/.gitlab-ci.d/buildtest.yml
@@ -9,8 +9,7 @@
- job: amd64-alpine-container
variables:
IMAGE: alpine
- TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
- microblazeel-softmmu mips64el-softmmu
+ TARGETS: avr-softmmu loongarch64-softmmu mips64-softmmu mipsel-softmmu
MAKE_CHECK_ARGS: check-build
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
@@ -72,8 +71,8 @@
variables:
IMAGE: debian-amd64
CONFIGURE_ARGS: --with-coroutine=sigaltstack
- TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
- riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
+ TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
+ sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build
check-system-debian:
diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml
index d3a31a2..61b8ac8 100644
--- a/.gitlab-ci.d/crossbuilds.yml
+++ b/.gitlab-ci.d/crossbuilds.yml
@@ -1,13 +1,6 @@
include:
- local: '/.gitlab-ci.d/crossbuild-template.yml'
-cross-armel-system:
- extends: .cross_system_build_job
- needs:
- job: armel-debian-cross-container
- variables:
- IMAGE: debian-armel-cross
-
cross-armel-user:
extends: .cross_user_build_job
needs:
@@ -15,13 +8,6 @@
variables:
IMAGE: debian-armel-cross
-cross-armhf-system:
- extends: .cross_system_build_job
- needs:
- job: armhf-debian-cross-container
- variables:
- IMAGE: debian-armhf-cross
-
cross-armhf-user:
extends: .cross_user_build_job
needs:
@@ -43,16 +29,6 @@
variables:
IMAGE: debian-arm64-cross
-cross-i386-system:
- extends:
- - .cross_system_build_job
- - .cross_test_artifacts
- needs:
- job: i386-fedora-cross-container
- variables:
- IMAGE: fedora-i386-cross
- MAKE_CHECK_ARGS: check-qtest
-
cross-i386-user:
extends:
- .cross_user_build_job
diff --git a/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml b/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
index 068b0c4..367424d 100644
--- a/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
+++ b/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
@@ -1,4 +1,9 @@
+# All centos-stream-8 jobs should run successfully in an environment
+# setup by the scripts/ci/setup/stream/8/build-environment.yml task
+# "Installation of extra packages to build QEMU"
+
centos-stream-8-x86_64:
+ extends: .custom_runner_template
allow_failure: true
needs: []
stage: build
@@ -8,15 +13,6 @@
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
- artifacts:
- name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
- when: on_failure
- expire_in: 7 days
- paths:
- - build/tests/results/latest/results.xml
- - build/tests/results/latest/test-results
- reports:
- junit: build/tests/results/latest/results.xml
before_script:
- JOBS=$(expr $(nproc) + 1)
script:
@@ -25,6 +21,4 @@
- ../scripts/ci/org.centos/stream/8/x86_64/configure
|| { cat config.log meson-logs/meson-log.txt; exit 1; }
- make -j"$JOBS"
- - make NINJA=":" check
- || { cat meson-logs/testlog.txt; exit 1; } ;
- - ../scripts/ci/org.centos/stream/8/x86_64/test-avocado
+ - make NINJA=":" check check-avocado
diff --git a/.gitlab-ci.d/edk2.yml b/.gitlab-ci.d/edk2.yml
deleted file mode 100644
index 314e101..0000000
--- a/.gitlab-ci.d/edk2.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-# All jobs needing docker-edk2 must use the same rules it uses.
-.edk2_job_rules:
- rules:
- # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
- - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
- when: never
-
- # In forks, if QEMU_CI=1 is set, then create manual job
- # if any of the files affecting the build are touched
- - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
- changes:
- - .gitlab-ci.d/edk2.yml
- - .gitlab-ci.d/edk2/Dockerfile
- - roms/edk2/*
- when: manual
-
- # In forks, if QEMU_CI=1 is set, then create manual job
- # if the branch/tag starts with 'edk2'
- - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^edk2/'
- when: manual
-
- # In forks, if QEMU_CI=1 is set, then create manual job
- # if last commit msg contains 'EDK2' (case insensitive)
- - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /edk2/i'
- when: manual
-
- # Run if any files affecting the build output are touched
- - changes:
- - .gitlab-ci.d/edk2.yml
- - .gitlab-ci.d/edk2/Dockerfile
- - roms/edk2/*
- when: on_success
-
- # Run if the branch/tag starts with 'edk2'
- - if: '$CI_COMMIT_REF_NAME =~ /^edk2/'
- when: on_success
-
- # Run if last commit msg contains 'EDK2' (case insensitive)
- - if: '$CI_COMMIT_MESSAGE =~ /edk2/i'
- when: on_success
-
-docker-edk2:
- extends: .edk2_job_rules
- stage: containers
- image: docker:19.03.1
- services:
- - docker:19.03.1-dind
- variables:
- GIT_DEPTH: 3
- IMAGE_TAG: $CI_REGISTRY_IMAGE:edk2-cross-build
- # We don't use TLS
- DOCKER_HOST: tcp://docker:2375
- DOCKER_TLS_CERTDIR: ""
- before_script:
- - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- script:
- - docker pull $IMAGE_TAG || true
- - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- --tag $IMAGE_TAG .gitlab-ci.d/edk2
- - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
- - docker push $IMAGE_TAG
-
-build-edk2:
- extends: .edk2_job_rules
- stage: build
- needs: ['docker-edk2']
- artifacts:
- paths: # 'artifacts.zip' will contains the following files:
- - pc-bios/edk2*bz2
- - pc-bios/edk2-licenses.txt
- - edk2-stdout.log
- - edk2-stderr.log
- image: $CI_REGISTRY_IMAGE:edk2-cross-build
- variables:
- GIT_DEPTH: 3
- script: # Clone the required submodules and build EDK2
- - git submodule update --init roms/edk2
- - git -C roms/edk2 submodule update --init --
- ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
- BaseTools/Source/C/BrotliCompress/brotli
- CryptoPkg/Library/OpensslLib/openssl
- MdeModulePkg/Library/BrotliCustomDecompressLib/brotli
- - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
- - echo "=== Using ${JOBS} simultaneous jobs ==="
- - make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2
diff --git a/.gitlab-ci.d/edk2/Dockerfile b/.gitlab-ci.d/edk2/Dockerfile
deleted file mode 100644
index bbe50ff..0000000
--- a/.gitlab-ci.d/edk2/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Docker image to cross-compile EDK2 firmware binaries
-#
-FROM ubuntu:18.04
-
-MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
-
-# Install packages required to build EDK2
-RUN apt update \
- && \
- \
- DEBIAN_FRONTEND=noninteractive \
- apt install --assume-yes --no-install-recommends \
- build-essential \
- ca-certificates \
- dos2unix \
- gcc-aarch64-linux-gnu \
- gcc-arm-linux-gnueabi \
- git \
- iasl \
- make \
- nasm \
- python3 \
- uuid-dev \
- && \
- \
- rm -rf /var/lib/apt/lists/*
diff --git a/.gitlab-ci.d/qemu-project.yml b/.gitlab-ci.d/qemu-project.yml
index 691d9bf..a7ed447 100644
--- a/.gitlab-ci.d/qemu-project.yml
+++ b/.gitlab-ci.d/qemu-project.yml
@@ -4,7 +4,6 @@
include:
- local: '/.gitlab-ci.d/base.yml'
- local: '/.gitlab-ci.d/stages.yml'
- - local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml'
- local: '/.gitlab-ci.d/crossbuilds.yml'
diff --git a/.mailmap b/.mailmap
index fad2aff..7677047 100644
--- a/.mailmap
+++ b/.mailmap
@@ -56,6 +56,7 @@
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
+Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 5340de0..9b56ccd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -136,6 +136,8 @@
F: docs/devel/tcg*
F: include/exec/cpu*.h
F: include/exec/exec-all.h
+F: include/exec/tb-flush.h
+F: include/exec/target_long.h
F: include/exec/helper*.h
F: include/sysemu/cpus.h
F: include/sysemu/tcg.h
@@ -255,9 +257,9 @@
F: tests/tcg/mips/
NiosII TCG CPUs
-M: Chris Wulff <crwulff@gmail.com>
-M: Marek Vasut <marex@denx.de>
-S: Maintained
+R: Chris Wulff <crwulff@gmail.com>
+R: Marek Vasut <marex@denx.de>
+S: Orphan
F: target/nios2/
F: hw/nios2/
F: disas/nios2.c
@@ -369,6 +371,7 @@
F: target/xtensa/
F: hw/xtensa/
F: tests/tcg/xtensa/
+F: tests/tcg/xtensaeb/
F: disas/xtensa.c
F: include/hw/xtensa/xtensa-isa.h
F: configs/devices/xtensa*/default.mak
@@ -443,6 +446,15 @@
F: target/i386/sev*
F: scripts/kvm/vmxcap
+Xen emulation on X86 KVM CPUs
+M: David Woodhouse <dwmw2@infradead.org>
+M: Paul Durrant <paul@xen.org>
+S: Supported
+F: include/sysemu/kvm_xen.h
+F: target/i386/kvm/xen*
+F: hw/i386/kvm/xen*
+F: tests/avocado/xen_guest.py
+
Guest CPU Cores (other accelerators)
------------------------------------
Overall
@@ -2225,14 +2237,27 @@
e1000x
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
+R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: hw/net/e1000x*
e1000e
M: Dmitry Fleytman <dmitry.fleytman@gmail.com>
+R: Akihiko Odaki <akihiko.odaki@daynix.com>
S: Maintained
F: hw/net/e1000e*
F: tests/qtest/fuzz-e1000e-test.c
+F: tests/qtest/e1000e-test.c
+F: tests/qtest/libqos/e1000e.*
+
+igb
+M: Akihiko Odaki <akihiko.odaki@daynix.com>
+S: Maintained
+F: docs/system/devices/igb.rst
+F: hw/net/igb*
+F: tests/avocado/igb.py
+F: tests/qtest/igb-test.c
+F: tests/qtest/libqos/igb.c
eepro100
M: Stefan Weil <sw@weilnetz.de>
@@ -2642,7 +2667,6 @@
T: git https://gitlab.com/vsementsov/qemu.git block
Compute Express Link
-M: Ben Widawsky <ben.widawsky@intel.com>
M: Jonathan Cameron <jonathan.cameron@huawei.com>
R: Fan Ni <fan.ni@samsung.com>
S: Supported
@@ -2742,9 +2766,11 @@
F: docs/system/gdb.rst
F: gdbstub/*
F: include/exec/gdbstub.h
+F: include/gdbstub/*
F: gdb-xml/
F: tests/tcg/multiarch/gdbstub/
F: scripts/feature_to_c.sh
+F: scripts/probe-gdb-support.py
Memory API
M: Paolo Bonzini <pbonzini@redhat.com>
@@ -2876,9 +2902,11 @@
Cryptodev Backends
M: Gonglei <arei.gonglei@huawei.com>
+M: zhenwei pi <pizhenwei@bytedance.com>
S: Maintained
F: include/sysemu/cryptodev*.h
F: backends/cryptodev*.c
+F: qapi/cryptodev.json
Python library
M: John Snow <jsnow@redhat.com>
@@ -3316,8 +3344,6 @@
F: roms/edk2-*
F: tests/data/uefi-boot-images/
F: tests/uefi-test-tools/
-F: .gitlab-ci.d/edk2.yml
-F: .gitlab-ci.d/edk2/
VT-d Emulation
M: Michael S. Tsirkin <mst@redhat.com>
@@ -3337,7 +3363,7 @@
Clock framework
M: Luc Michel <luc@lmichel.fr>
-R: Damien Hedde <damien.hedde@greensocs.com>
+R: Damien Hedde <damien.hedde@dahe.fr>
S: Maintained
F: include/hw/clock.h
F: include/hw/qdev-clock.h
diff --git a/VERSION b/VERSION
index d182ea1..2b20514 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-7.2.50
+7.2.91
diff --git a/accel/accel-softmmu.c b/accel/accel-softmmu.c
index f9cdafb..9c804ba 100644
--- a/accel/accel-softmmu.c
+++ b/accel/accel-softmmu.c
@@ -27,7 +27,7 @@
#include "qemu/accel.h"
#include "hw/boards.h"
#include "sysemu/cpus.h"
-
+#include "qemu/error-report.h"
#include "accel-softmmu.h"
int accel_init_machine(AccelState *accel, MachineState *ms)
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index fbf4fe3..457eafa 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -86,6 +86,13 @@
return !kvm_enabled() || kvm_cpu_check_are_resettable();
}
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+static int kvm_update_guest_debug_ops(CPUState *cpu)
+{
+ return kvm_update_guest_debug(cpu, 0);
+}
+#endif
+
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -99,6 +106,7 @@
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
#ifdef KVM_CAP_SET_GUEST_DEBUG
+ ops->update_guest_debug = kvm_update_guest_debug_ops;
ops->supports_guest_debug = kvm_supports_guest_debug;
ops->insert_breakpoint = kvm_insert_breakpoint;
ops->remove_breakpoint = kvm_remove_breakpoint;
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index 96af23d..813695b 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
+#include "exec/tb-flush.h"
#include "exec/exec-all.h"
void tb_flush(CPUState *cpu)
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index c7bc8c6..e7962c9 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -21,6 +21,7 @@
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"
+#include "qemu/plugin.h"
bool tcg_allowed;
@@ -65,6 +66,8 @@
{
/* Undo the setting in cpu_tb_exec. */
cpu->can_do_io = 1;
+ /* Undo any setting in generated code. */
+ qemu_plugin_disable_mem_helpers(cpu);
siglongjmp(cpu->jmp_env, 1);
}
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 56aaf58..c815f2d 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -459,6 +459,7 @@
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->can_do_io = 1;
+ qemu_plugin_disable_mem_helpers(cpu);
/*
* TODO: Delay swapping back to the read-write region of the TB
* until we actually need to modify the TB. The read-only copy,
@@ -526,7 +527,6 @@
if (cc->tcg_ops->cpu_exec_exit) {
cc->tcg_ops->cpu_exec_exit(cpu);
}
- QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
}
void cpu_exec_step_atomic(CPUState *cpu)
@@ -580,7 +580,6 @@
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
- qemu_plugin_disable_mem_helpers(cpu);
}
/*
@@ -1004,7 +1003,6 @@
cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
- QEMU_PLUGIN_ASSERT(cpu->plugin_mem_cbs == NULL);
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(sc, cpu);
@@ -1029,7 +1027,6 @@
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
- qemu_plugin_disable_mem_helpers(cpu);
assert_no_pages_locked();
}
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index c42a436..5efb8db 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -44,6 +44,7 @@
*/
#include "qemu/osdep.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index efefa08..7246c1c 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -22,6 +22,7 @@
#include "exec/cputlb.h"
#include "exec/log.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index a5bea8f..74deb18 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -47,6 +47,7 @@
#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
+#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index e85e4ae..00221e2 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -12,6 +12,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/error.h"
+#include "hw/xen/xen_native.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen_pt.h"
#include "chardev/char.h"
@@ -29,73 +30,15 @@
xenforeignmemory_handle *xen_fmem;
xendevicemodel_handle *xen_dmod;
-static int store_dev_info(int domid, Chardev *cs, const char *string)
+static void xenstore_record_dm_state(const char *state)
{
- struct xs_handle *xs = NULL;
- char *path = NULL;
- char *newpath = NULL;
- char *pts = NULL;
- int ret = -1;
-
- /* Only continue if we're talking to a pty. */
- if (!CHARDEV_IS_PTY(cs)) {
- return 0;
- }
- pts = cs->filename + 4;
+ struct xs_handle *xs;
+ char path[50];
/* We now have everything we need to set the xenstore entry. */
xs = xs_open(0);
if (xs == NULL) {
fprintf(stderr, "Could not contact XenStore\n");
- goto out;
- }
-
- path = xs_get_domain_path(xs, domid);
- if (path == NULL) {
- fprintf(stderr, "xs_get_domain_path() error\n");
- goto out;
- }
- newpath = realloc(path, (strlen(path) + strlen(string) +
- strlen("/tty") + 1));
- if (newpath == NULL) {
- fprintf(stderr, "realloc error\n");
- goto out;
- }
- path = newpath;
-
- strcat(path, string);
- strcat(path, "/tty");
- if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
- fprintf(stderr, "xs_write for '%s' fail", string);
- goto out;
- }
- ret = 0;
-
-out:
- free(path);
- xs_close(xs);
-
- return ret;
-}
-
-void xenstore_store_pv_console_info(int i, Chardev *chr)
-{
- if (i == 0) {
- store_dev_info(xen_domid, chr, "/console");
- } else {
- char buf[32];
- snprintf(buf, sizeof(buf), "/device/console/%d", i);
- store_dev_info(xen_domid, chr, buf);
- }
-}
-
-
-static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
-{
- char path[50];
-
- if (xs == NULL) {
- error_report("xenstore connection not initialized");
exit(1);
}
@@ -109,6 +52,8 @@
error_report("error recording dm state");
exit(1);
}
+
+ xs_close(xs);
}
@@ -117,7 +62,7 @@
{
if (running) {
/* record state running */
- xenstore_record_dm_state(xenstore, "running");
+ xenstore_record_dm_state("running");
}
}
diff --git a/audio/audio_int.h b/audio/audio_int.h
index d51d63f..e57ff50 100644
--- a/audio/audio_int.h
+++ b/audio/audio_int.h
@@ -143,7 +143,7 @@
void *(*init) (Audiodev *);
void (*fini) (void *);
#ifdef CONFIG_GIO
- void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager);
+ void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p);
#endif
struct audio_pcm_ops *pcm_ops;
int can_be_default;
diff --git a/audio/dbusaudio.c b/audio/dbusaudio.c
index 722df03..fece74f 100644
--- a/audio/dbusaudio.c
+++ b/audio/dbusaudio.c
@@ -43,6 +43,7 @@
typedef struct DBusAudio {
GDBusObjectManagerServer *server;
+ bool p2p;
GDBusObjectSkeleton *audio;
QemuDBusDisplay1Audio *iface;
GHashTable *out_listeners;
@@ -448,7 +449,8 @@
bool out)
{
DBusAudio *da = s->drv_opaque;
- const char *sender = g_dbus_method_invocation_get_sender(invocation);
+ const char *sender =
+ da->p2p ? "p2p" : g_dbus_method_invocation_get_sender(invocation);
g_autoptr(GDBusConnection) listener_conn = NULL;
g_autoptr(GError) err = NULL;
g_autoptr(GSocket) socket = NULL;
@@ -591,7 +593,7 @@
}
static void
-dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server)
+dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p)
{
DBusAudio *da = s->drv_opaque;
@@ -599,6 +601,7 @@
g_assert(!da->server);
da->server = g_object_ref(server);
+ da->p2p = p2p;
da->audio = g_dbus_object_skeleton_new(DBUS_DISPLAY1_AUDIO_PATH);
da->iface = qemu_dbus_display1_audio_skeleton_new();
diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c
index cda6ca3..39d0455 100644
--- a/backends/cryptodev-builtin.c
+++ b/backends/cryptodev-builtin.c
@@ -59,6 +59,19 @@
CryptoDevBackendBuiltinSession *sessions[MAX_NUM_SESSIONS];
};
+static void cryptodev_builtin_init_akcipher(CryptoDevBackend *backend)
+{
+ QCryptoAkCipherOptions opts;
+
+ opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
+ opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
+ if (qcrypto_akcipher_supports(&opts)) {
+ backend->conf.crypto_services |=
+ (1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER);
+ backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
+ }
+}
+
static void cryptodev_builtin_init(
CryptoDevBackend *backend, Error **errp)
{
@@ -72,21 +85,18 @@
return;
}
- cc = cryptodev_backend_new_client(
- "cryptodev-builtin", NULL);
+ cc = cryptodev_backend_new_client();
cc->info_str = g_strdup_printf("cryptodev-builtin0");
cc->queue_index = 0;
- cc->type = CRYPTODEV_BACKEND_TYPE_BUILTIN;
+ cc->type = QCRYPTODEV_BACKEND_TYPE_BUILTIN;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
- 1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
- 1u << VIRTIO_CRYPTO_SERVICE_HASH |
- 1u << VIRTIO_CRYPTO_SERVICE_MAC |
- 1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
+ 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
- backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
/*
* Set the Maximum length of crypto request.
* Why this value? Just avoid to overflow when
@@ -95,6 +105,7 @@
backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo);
backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
+ cryptodev_builtin_init_akcipher(backend);
cryptodev_backend_set_ready(backend, true);
}
@@ -528,17 +539,14 @@
static int cryptodev_builtin_operation(
CryptoDevBackend *backend,
- CryptoDevBackendOpInfo *op_info,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque)
+ CryptoDevBackendOpInfo *op_info)
{
CryptoDevBackendBuiltin *builtin =
CRYPTODEV_BACKEND_BUILTIN(backend);
CryptoDevBackendBuiltinSession *sess;
CryptoDevBackendSymOpInfo *sym_op_info;
CryptoDevBackendAsymOpInfo *asym_op_info;
- enum CryptoDevBackendAlgType algtype = op_info->algtype;
+ QCryptodevBackendAlgType algtype = op_info->algtype;
int status = -VIRTIO_CRYPTO_ERR;
Error *local_error = NULL;
@@ -550,11 +558,11 @@
}
sess = builtin->sessions[op_info->session_id];
- if (algtype == CRYPTODEV_BACKEND_ALG_SYM) {
+ if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
sym_op_info = op_info->u.sym_op_info;
status = cryptodev_builtin_sym_operation(sess, sym_op_info,
&local_error);
- } else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) {
+ } else if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
asym_op_info = op_info->u.asym_op_info;
status = cryptodev_builtin_asym_operation(sess, op_info->op_code,
asym_op_info, &local_error);
@@ -563,8 +571,8 @@
if (local_error) {
error_report_err(local_error);
}
- if (cb) {
- cb(opaque, status);
+ if (op_info->cb) {
+ op_info->cb(op_info->opaque, status);
}
return 0;
}
diff --git a/backends/cryptodev-hmp-cmds.c b/backends/cryptodev-hmp-cmds.c
new file mode 100644
index 0000000..4f7220b
--- /dev/null
+++ b/backends/cryptodev-hmp-cmds.c
@@ -0,0 +1,54 @@
+/*
+ * HMP commands related to cryptodev
+ *
+ * Copyright (c) 2023 Bytedance.Inc
+ *
+ * Authors:
+ * zhenwei pi<pizhenwei@bytedance.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "monitor/hmp.h"
+#include "monitor/monitor.h"
+#include "qapi/qapi-commands-cryptodev.h"
+#include "qapi/qmp/qdict.h"
+
+
+void hmp_info_cryptodev(Monitor *mon, const QDict *qdict)
+{
+ QCryptodevInfoList *il;
+ QCryptodevBackendServiceTypeList *sl;
+ QCryptodevBackendClientList *cl;
+
+ for (il = qmp_query_cryptodev(NULL); il; il = il->next) {
+ g_autofree char *services = NULL;
+ QCryptodevInfo *info = il->value;
+ char *tmp_services;
+
+ /* build a string like 'service=[akcipher|mac|hash|cipher]' */
+ for (sl = info->service; sl; sl = sl->next) {
+ const char *service = QCryptodevBackendServiceType_str(sl->value);
+
+ if (!services) {
+ services = g_strdup(service);
+ } else {
+ tmp_services = g_strjoin("|", services, service, NULL);
+ g_free(services);
+ services = tmp_services;
+ }
+ }
+ monitor_printf(mon, "%s: service=[%s]\n", info->id, services);
+
+ for (cl = info->client; cl; cl = cl->next) {
+ QCryptodevBackendClient *client = cl->value;
+ monitor_printf(mon, " queue %" PRIu32 ": type=%s\n",
+ client->queue,
+ QCryptodevBackendType_str(client->type));
+ }
+ }
+
+ qapi_free_QCryptodevInfoList(il);
+}
diff --git a/backends/cryptodev-lkcf.c b/backends/cryptodev-lkcf.c
index 133bd70..45aba1f 100644
--- a/backends/cryptodev-lkcf.c
+++ b/backends/cryptodev-lkcf.c
@@ -223,14 +223,14 @@
return;
}
- cc = cryptodev_backend_new_client("cryptodev-lkcf", NULL);
+ cc = cryptodev_backend_new_client();
cc->info_str = g_strdup_printf("cryptodev-lkcf0");
cc->queue_index = 0;
- cc->type = CRYPTODEV_BACKEND_TYPE_LKCF;
+ cc->type = QCRYPTODEV_BACKEND_TYPE_LKCF;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
- 1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
+ 1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER;
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
lkcf->running = true;
@@ -469,15 +469,12 @@
static int cryptodev_lkcf_operation(
CryptoDevBackend *backend,
- CryptoDevBackendOpInfo *op_info,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque)
+ CryptoDevBackendOpInfo *op_info)
{
CryptoDevBackendLKCF *lkcf =
CRYPTODEV_BACKEND_LKCF(backend);
CryptoDevBackendLKCFSession *sess;
- enum CryptoDevBackendAlgType algtype = op_info->algtype;
+ QCryptodevBackendAlgType algtype = op_info->algtype;
CryptoDevLKCFTask *task;
if (op_info->session_id >= MAX_SESSIONS ||
@@ -488,15 +485,15 @@
}
sess = lkcf->sess[op_info->session_id];
- if (algtype != CRYPTODEV_BACKEND_ALG_ASYM) {
+ if (algtype != QCRYPTODEV_BACKEND_ALG_ASYM) {
error_report("algtype not supported: %u", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
task = g_new0(CryptoDevLKCFTask, 1);
task->op_info = op_info;
- task->cb = cb;
- task->opaque = opaque;
+ task->cb = op_info->cb;
+ task->opaque = op_info->opaque;
task->sess = sess;
task->lkcf = lkcf;
task->status = -VIRTIO_CRYPTO_ERR;
diff --git a/backends/cryptodev-vhost-user.c b/backends/cryptodev-vhost-user.c
index ab3028e..b1d9eb7 100644
--- a/backends/cryptodev-vhost-user.c
+++ b/backends/cryptodev-vhost-user.c
@@ -67,7 +67,7 @@
{
CryptoDevBackendVhostUser *s =
CRYPTODEV_BACKEND_VHOST_USER(b);
- assert(cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER);
+ assert(cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER);
assert(queue < MAX_CRYPTO_QUEUE_NUM);
return s->vhost_crypto[queue];
@@ -198,12 +198,11 @@
s->opened = true;
for (i = 0; i < queues; i++) {
- cc = cryptodev_backend_new_client(
- "cryptodev-vhost-user", NULL);
+ cc = cryptodev_backend_new_client();
cc->info_str = g_strdup_printf("cryptodev-vhost-user%zu to %s ",
i, chr->label);
cc->queue_index = i;
- cc->type = CRYPTODEV_BACKEND_TYPE_VHOST_USER;
+ cc->type = QCRYPTODEV_BACKEND_TYPE_VHOST_USER;
backend->conf.peers.ccs[i] = cc;
@@ -222,9 +221,9 @@
cryptodev_vhost_user_event, NULL, s, NULL, true);
backend->conf.crypto_services =
- 1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
- 1u << VIRTIO_CRYPTO_SERVICE_HASH |
- 1u << VIRTIO_CRYPTO_SERVICE_MAC;
+ 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_HASH |
+ 1u << QCRYPTODEV_BACKEND_SERVICE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c
index 74ea0ad..9352373 100644
--- a/backends/cryptodev-vhost.c
+++ b/backends/cryptodev-vhost.c
@@ -127,7 +127,7 @@
switch (cc->type) {
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
- case CRYPTODEV_BACKEND_TYPE_VHOST_USER:
+ case QCRYPTODEV_BACKEND_TYPE_VHOST_USER:
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
break;
#endif
@@ -195,7 +195,7 @@
* because vhost user doesn't interrupt masking/unmasking
* properly.
*/
- if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
+ if (cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER) {
dev->use_guest_notifier_mask = false;
}
}
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
index 54ee8c8..94ca393 100644
--- a/backends/cryptodev.c
+++ b/backends/cryptodev.c
@@ -23,29 +23,92 @@
#include "qemu/osdep.h"
#include "sysemu/cryptodev.h"
+#include "sysemu/stats.h"
#include "qapi/error.h"
+#include "qapi/qapi-commands-cryptodev.h"
+#include "qapi/qapi-types-stats.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
#include "qom/object_interfaces.h"
#include "hw/virtio/virtio-crypto.h"
+#define SYM_ENCRYPT_OPS_STR "sym-encrypt-ops"
+#define SYM_DECRYPT_OPS_STR "sym-decrypt-ops"
+#define SYM_ENCRYPT_BYTES_STR "sym-encrypt-bytes"
+#define SYM_DECRYPT_BYTES_STR "sym-decrypt-bytes"
+
+#define ASYM_ENCRYPT_OPS_STR "asym-encrypt-ops"
+#define ASYM_DECRYPT_OPS_STR "asym-decrypt-ops"
+#define ASYM_SIGN_OPS_STR "asym-sign-ops"
+#define ASYM_VERIFY_OPS_STR "asym-verify-ops"
+#define ASYM_ENCRYPT_BYTES_STR "asym-encrypt-bytes"
+#define ASYM_DECRYPT_BYTES_STR "asym-decrypt-bytes"
+#define ASYM_SIGN_BYTES_STR "asym-sign-bytes"
+#define ASYM_VERIFY_BYTES_STR "asym-verify-bytes"
+
+typedef struct StatsArgs {
+ union StatsResultsType {
+ StatsResultList **stats;
+ StatsSchemaList **schema;
+ } result;
+ strList *names;
+ Error **errp;
+} StatsArgs;
static QTAILQ_HEAD(, CryptoDevBackendClient) crypto_clients;
+static int qmp_query_cryptodev_foreach(Object *obj, void *data)
+{
+ CryptoDevBackend *backend;
+ QCryptodevInfoList **infolist = data;
+ uint32_t services, i;
-CryptoDevBackendClient *
-cryptodev_backend_new_client(const char *model,
- const char *name)
+ if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) {
+ return 0;
+ }
+
+ QCryptodevInfo *info = g_new0(QCryptodevInfo, 1);
+ info->id = g_strdup(object_get_canonical_path_component(obj));
+
+ backend = CRYPTODEV_BACKEND(obj);
+ services = backend->conf.crypto_services;
+ for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE__MAX; i++) {
+ if (services & (1 << i)) {
+ QAPI_LIST_PREPEND(info->service, i);
+ }
+ }
+
+ for (i = 0; i < backend->conf.peers.queues; i++) {
+ CryptoDevBackendClient *cc = backend->conf.peers.ccs[i];
+ QCryptodevBackendClient *client = g_new0(QCryptodevBackendClient, 1);
+
+ client->queue = cc->queue_index;
+ client->type = cc->type;
+ QAPI_LIST_PREPEND(info->client, client);
+ }
+
+ QAPI_LIST_PREPEND(*infolist, info);
+
+ return 0;
+}
+
+QCryptodevInfoList *qmp_query_cryptodev(Error **errp)
+{
+ QCryptodevInfoList *list = NULL;
+ Object *objs = container_get(object_get_root(), "/objects");
+
+ object_child_foreach(objs, qmp_query_cryptodev_foreach, &list);
+
+ return list;
+}
+
+CryptoDevBackendClient *cryptodev_backend_new_client(void)
{
CryptoDevBackendClient *cc;
cc = g_new0(CryptoDevBackendClient, 1);
- cc->model = g_strdup(model);
- if (name) {
- cc->name = g_strdup(name);
- }
-
QTAILQ_INSERT_TAIL(&crypto_clients, cc, next);
return cc;
@@ -55,8 +118,6 @@
CryptoDevBackendClient *cc)
{
QTAILQ_REMOVE(&crypto_clients, cc, next);
- g_free(cc->name);
- g_free(cc->model);
g_free(cc->info_str);
g_free(cc);
}
@@ -71,6 +132,9 @@
if (bc->cleanup) {
bc->cleanup(backend, errp);
}
+
+ g_free(backend->sym_stat);
+ g_free(backend->asym_stat);
}
int cryptodev_backend_create_session(
@@ -107,38 +171,111 @@
static int cryptodev_backend_operation(
CryptoDevBackend *backend,
- CryptoDevBackendOpInfo *op_info,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque)
+ CryptoDevBackendOpInfo *op_info)
{
CryptoDevBackendClass *bc =
CRYPTODEV_BACKEND_GET_CLASS(backend);
if (bc->do_op) {
- return bc->do_op(backend, op_info, queue_index, cb, opaque);
+ return bc->do_op(backend, op_info);
}
return -VIRTIO_CRYPTO_NOTSUPP;
}
-int cryptodev_backend_crypto_operation(
- CryptoDevBackend *backend,
- void *opaque1,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb, void *opaque2)
+static int cryptodev_backend_account(CryptoDevBackend *backend,
+ CryptoDevBackendOpInfo *op_info)
{
- VirtIOCryptoReq *req = opaque1;
- CryptoDevBackendOpInfo *op_info = &req->op_info;
- enum CryptoDevBackendAlgType algtype = req->flags;
+ enum QCryptodevBackendAlgType algtype = op_info->algtype;
+ int len;
- if ((algtype != CRYPTODEV_BACKEND_ALG_SYM)
- && (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) {
+ if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
+ CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info;
+ len = asym_op_info->src_len;
+ switch (op_info->op_code) {
+ case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
+ CryptodevAsymStatIncEncrypt(backend, len);
+ break;
+ case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
+ CryptodevAsymStatIncDecrypt(backend, len);
+ break;
+ case VIRTIO_CRYPTO_AKCIPHER_SIGN:
+ CryptodevAsymStatIncSign(backend, len);
+ break;
+ case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
+ CryptodevAsymStatIncVerify(backend, len);
+ break;
+ default:
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+ } else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
+ CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info;
+ len = sym_op_info->src_len;
+ switch (op_info->op_code) {
+ case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+ CryptodevSymStatIncEncrypt(backend, len);
+ break;
+ case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+ CryptodevSymStatIncDecrypt(backend, len);
+ break;
+ default:
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+ } else {
error_report("Unsupported cryptodev alg type: %" PRIu32 "", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
- return cryptodev_backend_operation(backend, op_info, queue_index,
- cb, opaque2);
+ return len;
+}
+
+static void cryptodev_backend_throttle_timer_cb(void *opaque)
+{
+ CryptoDevBackend *backend = (CryptoDevBackend *)opaque;
+ CryptoDevBackendOpInfo *op_info, *tmpop;
+ int ret;
+
+ QTAILQ_FOREACH_SAFE(op_info, &backend->opinfos, next, tmpop) {
+ QTAILQ_REMOVE(&backend->opinfos, op_info, next);
+ ret = cryptodev_backend_account(backend, op_info);
+ if (ret < 0) {
+ op_info->cb(op_info->opaque, ret);
+ continue;
+ }
+
+ throttle_account(&backend->ts, true, ret);
+ cryptodev_backend_operation(backend, op_info);
+ if (throttle_enabled(&backend->tc) &&
+ throttle_schedule_timer(&backend->ts, &backend->tt, true)) {
+ break;
+ }
+ }
+}
+
+int cryptodev_backend_crypto_operation(
+ CryptoDevBackend *backend,
+ CryptoDevBackendOpInfo *op_info)
+{
+ int ret;
+
+ if (!throttle_enabled(&backend->tc)) {
+ goto do_account;
+ }
+
+ if (throttle_schedule_timer(&backend->ts, &backend->tt, true) ||
+ !QTAILQ_EMPTY(&backend->opinfos)) {
+ QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next);
+ return 0;
+ }
+
+do_account:
+ ret = cryptodev_backend_account(backend, op_info);
+ if (ret < 0) {
+ return ret;
+ }
+
+ throttle_account(&backend->ts, true, ret);
+
+ return cryptodev_backend_operation(backend, op_info);
}
static void
@@ -169,15 +306,111 @@
backend->conf.peers.queues = value;
}
+static void cryptodev_backend_set_throttle(CryptoDevBackend *backend, int field,
+ uint64_t value, Error **errp)
+{
+ uint64_t orig = backend->tc.buckets[field].avg;
+ bool enabled = throttle_enabled(&backend->tc);
+
+ if (orig == value) {
+ return;
+ }
+
+ backend->tc.buckets[field].avg = value;
+ if (!throttle_enabled(&backend->tc)) {
+ throttle_timers_destroy(&backend->tt);
+ cryptodev_backend_throttle_timer_cb(backend); /* drain opinfos */
+ return;
+ }
+
+ if (!throttle_is_valid(&backend->tc, errp)) {
+ backend->tc.buckets[field].avg = orig; /* revert change */
+ return;
+ }
+
+ if (!enabled) {
+ throttle_init(&backend->ts);
+ throttle_timers_init(&backend->tt, qemu_get_aio_context(),
+ QEMU_CLOCK_REALTIME,
+ cryptodev_backend_throttle_timer_cb, /* FIXME */
+ cryptodev_backend_throttle_timer_cb, backend);
+ }
+
+ throttle_config(&backend->ts, QEMU_CLOCK_REALTIME, &backend->tc);
+}
+
+static void cryptodev_backend_get_bps(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ uint64_t value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
+static void cryptodev_backend_set_bps(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ uint64_t value;
+
+ if (!visit_type_uint64(v, name, &value, errp)) {
+ return;
+ }
+
+ cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
+}
+
+static void cryptodev_backend_get_ops(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ uint64_t value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
+static void cryptodev_backend_set_ops(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ uint64_t value;
+
+ if (!visit_type_uint64(v, name, &value, errp)) {
+ return;
+ }
+
+ cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
+}
+
static void
cryptodev_backend_complete(UserCreatable *uc, Error **errp)
{
CryptoDevBackend *backend = CRYPTODEV_BACKEND(uc);
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_GET_CLASS(uc);
+ uint32_t services;
+ uint64_t value;
+
+ QTAILQ_INIT(&backend->opinfos);
+ value = backend->tc.buckets[THROTTLE_OPS_TOTAL].avg;
+ cryptodev_backend_set_throttle(backend, THROTTLE_OPS_TOTAL, value, errp);
+ value = backend->tc.buckets[THROTTLE_BPS_TOTAL].avg;
+ cryptodev_backend_set_throttle(backend, THROTTLE_BPS_TOTAL, value, errp);
if (bc->init) {
bc->init(backend, errp);
}
+
+ services = backend->conf.crypto_services;
+ if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) {
+ backend->sym_stat = g_new0(CryptodevBackendSymStat, 1);
+ }
+
+ if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) {
+ backend->asym_stat = g_new0(CryptodevBackendAsymStat, 1);
+ }
}
void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used)
@@ -208,8 +441,12 @@
static void cryptodev_backend_instance_init(Object *obj)
{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+
/* Initialize devices' queues property to 1 */
object_property_set_int(obj, "queues", 1, NULL);
+
+ throttle_config_init(&backend->tc);
}
static void cryptodev_backend_finalize(Object *obj)
@@ -217,6 +454,137 @@
CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
cryptodev_backend_cleanup(backend, NULL);
+ if (throttle_enabled(&backend->tc)) {
+ throttle_timers_destroy(&backend->tt);
+ }
+}
+
+static StatsList *cryptodev_backend_stats_add(const char *name, int64_t *val,
+ StatsList *stats_list)
+{
+ Stats *stats = g_new0(Stats, 1);
+
+ stats->name = g_strdup(name);
+ stats->value = g_new0(StatsValue, 1);
+ stats->value->type = QTYPE_QNUM;
+ stats->value->u.scalar = *val;
+
+ QAPI_LIST_PREPEND(stats_list, stats);
+ return stats_list;
+}
+
+static int cryptodev_backend_stats_query(Object *obj, void *data)
+{
+ StatsArgs *stats_args = data;
+ StatsResultList **stats_results = stats_args->result.stats;
+ StatsList *stats_list = NULL;
+ StatsResult *entry;
+ CryptoDevBackend *backend;
+ CryptodevBackendSymStat *sym_stat;
+ CryptodevBackendAsymStat *asym_stat;
+
+ if (!object_dynamic_cast(obj, TYPE_CRYPTODEV_BACKEND)) {
+ return 0;
+ }
+
+ backend = CRYPTODEV_BACKEND(obj);
+ sym_stat = backend->sym_stat;
+ if (sym_stat) {
+ stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_OPS_STR,
+ &sym_stat->encrypt_ops, stats_list);
+ stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_OPS_STR,
+ &sym_stat->decrypt_ops, stats_list);
+ stats_list = cryptodev_backend_stats_add(SYM_ENCRYPT_BYTES_STR,
+ &sym_stat->encrypt_bytes, stats_list);
+ stats_list = cryptodev_backend_stats_add(SYM_DECRYPT_BYTES_STR,
+ &sym_stat->decrypt_bytes, stats_list);
+ }
+
+ asym_stat = backend->asym_stat;
+ if (asym_stat) {
+ stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_OPS_STR,
+ &asym_stat->encrypt_ops, stats_list);
+ stats_list = cryptodev_backend_stats_add(ASYM_DECRYPT_OPS_STR,
+ &asym_stat->decrypt_ops, stats_list);
+ stats_list = cryptodev_backend_stats_add(ASYM_SIGN_OPS_STR,
+ &asym_stat->sign_ops, stats_list);
+ stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_OPS_STR,
+ &asym_stat->verify_ops, stats_list);
+ stats_list = cryptodev_backend_stats_add(ASYM_ENCRYPT_BYTES_STR,
+ &asym_stat->encrypt_bytes, stats_list);
+ stats_list = cryptodev_backend_stats_add(ASYM_DECRYPT_BYTES_STR,
+ &asym_stat->decrypt_bytes, stats_list);
+ stats_list = cryptodev_backend_stats_add(ASYM_SIGN_BYTES_STR,
+ &asym_stat->sign_bytes, stats_list);
+ stats_list = cryptodev_backend_stats_add(ASYM_VERIFY_BYTES_STR,
+ &asym_stat->verify_bytes, stats_list);
+ }
+
+ entry = g_new0(StatsResult, 1);
+ entry->provider = STATS_PROVIDER_CRYPTODEV;
+ entry->qom_path = g_strdup(object_get_canonical_path(obj));
+ entry->stats = stats_list;
+ QAPI_LIST_PREPEND(*stats_results, entry);
+
+ return 0;
+}
+
+static void cryptodev_backend_stats_cb(StatsResultList **result,
+ StatsTarget target,
+ strList *names, strList *targets,
+ Error **errp)
+{
+ switch (target) {
+ case STATS_TARGET_CRYPTODEV:
+ {
+ Object *objs = container_get(object_get_root(), "/objects");
+ StatsArgs stats_args;
+ stats_args.result.stats = result;
+ stats_args.names = names;
+ stats_args.errp = errp;
+
+ object_child_foreach(objs, cryptodev_backend_stats_query, &stats_args);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static StatsSchemaValueList *cryptodev_backend_schemas_add(const char *name,
+ StatsSchemaValueList *list)
+{
+ StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
+
+ schema_entry->value = g_new0(StatsSchemaValue, 1);
+ schema_entry->value->type = STATS_TYPE_CUMULATIVE;
+ schema_entry->value->name = g_strdup(name);
+ schema_entry->next = list;
+
+ return schema_entry;
+}
+
+static void cryptodev_backend_schemas_cb(StatsSchemaList **result,
+ Error **errp)
+{
+ StatsSchemaValueList *stats_list = NULL;
+ const char *sym_stats[] = { SYM_ENCRYPT_OPS_STR, SYM_DECRYPT_OPS_STR,
+ SYM_ENCRYPT_BYTES_STR, SYM_DECRYPT_BYTES_STR };
+ const char *asym_stats[] = { ASYM_ENCRYPT_OPS_STR, ASYM_DECRYPT_OPS_STR,
+ ASYM_SIGN_OPS_STR, ASYM_VERIFY_OPS_STR,
+ ASYM_ENCRYPT_BYTES_STR, ASYM_DECRYPT_BYTES_STR,
+ ASYM_SIGN_BYTES_STR, ASYM_VERIFY_BYTES_STR };
+
+ for (int i = 0; i < ARRAY_SIZE(sym_stats); i++) {
+ stats_list = cryptodev_backend_schemas_add(sym_stats[i], stats_list);
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(asym_stats); i++) {
+ stats_list = cryptodev_backend_schemas_add(asym_stats[i], stats_list);
+ }
+
+ add_stats_schema(result, STATS_PROVIDER_CRYPTODEV, STATS_TARGET_CRYPTODEV,
+ stats_list);
}
static void
@@ -232,6 +600,17 @@
cryptodev_backend_get_queues,
cryptodev_backend_set_queues,
NULL, NULL);
+ object_class_property_add(oc, "throttle-bps", "uint64",
+ cryptodev_backend_get_bps,
+ cryptodev_backend_set_bps,
+ NULL, NULL);
+ object_class_property_add(oc, "throttle-ops", "uint64",
+ cryptodev_backend_get_ops,
+ cryptodev_backend_set_ops,
+ NULL, NULL);
+
+ add_stats_callbacks(STATS_PROVIDER_CRYPTODEV, cryptodev_backend_stats_cb,
+ cryptodev_backend_schemas_cb);
}
static const TypeInfo cryptodev_backend_info = {
diff --git a/backends/meson.build b/backends/meson.build
index 954e658..b369e0a 100644
--- a/backends/meson.build
+++ b/backends/meson.build
@@ -1,5 +1,6 @@
softmmu_ss.add([files(
'cryptodev-builtin.c',
+ 'cryptodev-hmp-cmds.c',
'cryptodev.c',
'hostmem-ram.c',
'hostmem.c',
diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c
index d18144b..402a2d6 100644
--- a/backends/tpm/tpm_emulator.c
+++ b/backends/tpm/tpm_emulator.c
@@ -573,13 +573,13 @@
goto err_exit;
}
- closesocket(fds[1]);
+ close(fds[1]);
return 0;
err_exit:
- closesocket(fds[0]);
- closesocket(fds[1]);
+ close(fds[0]);
+ close(fds[1]);
return -1;
}
diff --git a/block/export/fuse.c b/block/export/fuse.c
index e5fc4af..06fa410 100644
--- a/block/export/fuse.c
+++ b/block/export/fuse.c
@@ -673,7 +673,16 @@
do {
int size = MIN(length, BDRV_REQUEST_MAX_BYTES);
- ret = blk_pdiscard(exp->common.blk, offset, size);
+ ret = blk_pwrite_zeroes(exp->common.blk, offset, size,
+ BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK);
+ if (ret == -ENOTSUP) {
+ /*
+ * fallocate() specifies to return EOPNOTSUPP for unsupported
+ * operations
+ */
+ ret = -EOPNOTSUPP;
+ }
+
offset += size;
length -= size;
} while (ret == 0 && length > 0);
diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
index 6aa5f1b..2846083 100644
--- a/block/monitor/block-hmp-cmds.c
+++ b/block/monitor/block-hmp-cmds.c
@@ -48,6 +48,7 @@
#include "qemu/option.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
+#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#include "monitor/hmp.h"
diff --git a/block/qed.c b/block/qed.c
index ed94bb6..0705a7b 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -594,7 +594,6 @@
qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
}
- BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
return qoc.ret;
}
diff --git a/bsd-user/freebsd/os-syscall.c b/bsd-user/freebsd/os-syscall.c
index 179a20c..c8f998e 100644
--- a/bsd-user/freebsd/os-syscall.c
+++ b/bsd-user/freebsd/os-syscall.c
@@ -38,6 +38,8 @@
#include <sys/sysctl.h>
#include <utime.h>
+#include "include/gdbstub/syscalls.h"
+
#include "qemu.h"
#include "signal-common.h"
#include "user/syscall-trace.h"
diff --git a/bsd-user/main.c b/bsd-user/main.c
index 41290e1..89f225d 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -44,6 +44,7 @@
#include "trace/control.h"
#include "crypto/init.h"
#include "qemu/guest-random.h"
+#include "gdbstub/user.h"
#include "host-os.h"
#include "target_arch_cpu.h"
diff --git a/bsd-user/signal.c b/bsd-user/signal.c
index 58a5386..f4e078e 100644
--- a/bsd-user/signal.c
+++ b/bsd-user/signal.c
@@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu.h"
+#include "gdbstub/user.h"
#include "signal-common.h"
#include "trace.h"
#include "hw/core/tcg-cpu-ops.h"
diff --git a/chardev/baum.c b/chardev/baum.c
index 0a0d126..a1d9784 100644
--- a/chardev/baum.c
+++ b/chardev/baum.c
@@ -44,39 +44,39 @@
#define ESC 0x1B
-#define BAUM_REQ_DisplayData 0x01
-#define BAUM_REQ_GetVersionNumber 0x05
-#define BAUM_REQ_GetKeys 0x08
-#define BAUM_REQ_SetMode 0x12
-#define BAUM_REQ_SetProtocol 0x15
-#define BAUM_REQ_GetDeviceIdentity 0x84
-#define BAUM_REQ_GetSerialNumber 0x8A
+#define BAUM_REQ_DisplayData 0x01
+#define BAUM_REQ_GetVersionNumber 0x05
+#define BAUM_REQ_GetKeys 0x08
+#define BAUM_REQ_SetMode 0x12
+#define BAUM_REQ_SetProtocol 0x15
+#define BAUM_REQ_GetDeviceIdentity 0x84
+#define BAUM_REQ_GetSerialNumber 0x8A
-#define BAUM_RSP_CellCount 0x01
-#define BAUM_RSP_VersionNumber 0x05
-#define BAUM_RSP_ModeSetting 0x11
-#define BAUM_RSP_CommunicationChannel 0x16
-#define BAUM_RSP_PowerdownSignal 0x17
-#define BAUM_RSP_HorizontalSensors 0x20
-#define BAUM_RSP_VerticalSensors 0x21
-#define BAUM_RSP_RoutingKeys 0x22
-#define BAUM_RSP_Switches 0x23
-#define BAUM_RSP_TopKeys 0x24
-#define BAUM_RSP_HorizontalSensor 0x25
-#define BAUM_RSP_VerticalSensor 0x26
-#define BAUM_RSP_RoutingKey 0x27
-#define BAUM_RSP_FrontKeys6 0x28
-#define BAUM_RSP_BackKeys6 0x29
-#define BAUM_RSP_CommandKeys 0x2B
-#define BAUM_RSP_FrontKeys10 0x2C
-#define BAUM_RSP_BackKeys10 0x2D
-#define BAUM_RSP_EntryKeys 0x33
-#define BAUM_RSP_JoyStick 0x34
-#define BAUM_RSP_ErrorCode 0x40
-#define BAUM_RSP_InfoBlock 0x42
-#define BAUM_RSP_DeviceIdentity 0x84
-#define BAUM_RSP_SerialNumber 0x8A
-#define BAUM_RSP_BluetoothName 0x8C
+#define BAUM_RSP_CellCount 0x01
+#define BAUM_RSP_VersionNumber 0x05
+#define BAUM_RSP_ModeSetting 0x11
+#define BAUM_RSP_CommunicationChannel 0x16
+#define BAUM_RSP_PowerdownSignal 0x17
+#define BAUM_RSP_HorizontalSensors 0x20
+#define BAUM_RSP_VerticalSensors 0x21
+#define BAUM_RSP_RoutingKeys 0x22
+#define BAUM_RSP_Switches 0x23
+#define BAUM_RSP_TopKeys 0x24
+#define BAUM_RSP_HorizontalSensor 0x25
+#define BAUM_RSP_VerticalSensor 0x26
+#define BAUM_RSP_RoutingKey 0x27
+#define BAUM_RSP_FrontKeys6 0x28
+#define BAUM_RSP_BackKeys6 0x29
+#define BAUM_RSP_CommandKeys 0x2B
+#define BAUM_RSP_FrontKeys10 0x2C
+#define BAUM_RSP_BackKeys10 0x2D
+#define BAUM_RSP_EntryKeys 0x33
+#define BAUM_RSP_JoyStick 0x34
+#define BAUM_RSP_ErrorCode 0x40
+#define BAUM_RSP_InfoBlock 0x42
+#define BAUM_RSP_DeviceIdentity 0x84
+#define BAUM_RSP_SerialNumber 0x8A
+#define BAUM_RSP_BluetoothName 0x8C
#define BAUM_TL1 0x01
#define BAUM_TL2 0x02
diff --git a/chardev/char.c b/chardev/char.c
index 11eab77..e693906 100644
--- a/chardev/char.c
+++ b/chardev/char.c
@@ -1175,12 +1175,10 @@
if (!s) {
error_setg(errp, "protocol '%s' is invalid", protocol);
- close(fd);
return false;
}
if (qemu_chr_add_client(s, fd) < 0) {
error_setg(errp, "failed to add client");
- close(fd);
return false;
}
return true;
diff --git a/configure b/configure
index 219ff13..05bed4f 100755
--- a/configure
+++ b/configure
@@ -230,6 +230,7 @@
safe_stack=""
use_containers="yes"
gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb")
+gdb_arches=""
if test -e "$source_path/.git"
then
@@ -2262,6 +2263,7 @@
# tests might fail. Prefer to keep the relevant files in their own
# directory and symlink the directory instead.
LINKS="Makefile"
+LINKS="$LINKS docs/config"
LINKS="$LINKS pc-bios/optionrom/Makefile"
LINKS="$LINKS pc-bios/s390-ccw/Makefile"
LINKS="$LINKS pc-bios/vof/Makefile"
@@ -2395,6 +2397,7 @@
gdb_version=$($gdb_bin --version | head -n 1)
if version_ge ${gdb_version##* } 9.1; then
echo "HAVE_GDB_BIN=$gdb_bin" >> $config_host_mak
+ gdb_arches=$("$source_path/scripts/probe-gdb-support.py" $gdb_bin)
else
gdb_bin=""
fi
@@ -2519,6 +2522,12 @@
write_target_makefile "build-tcg-tests-$target" >> "$config_target_mak"
echo "BUILD_STATIC=$build_static" >> "$config_target_mak"
echo "QEMU=$PWD/$qemu" >> "$config_target_mak"
+
+ # will GDB work with these binaries?
+ if test "${gdb_arches#*$arch}" != "$gdb_arches"; then
+ echo "HOST_GDB_SUPPORTS_ARCH=y" >> "$config_target_mak"
+ fi
+
echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> Makefile.prereqs
tcg_tests_targets="$tcg_tests_targets $target"
fi
diff --git a/contrib/elf2dmp/addrspace.c b/contrib/elf2dmp/addrspace.c
index 53ded17..0b04cba 100644
--- a/contrib/elf2dmp/addrspace.c
+++ b/contrib/elf2dmp/addrspace.c
@@ -11,6 +11,7 @@
static struct pa_block *pa_space_find_block(struct pa_space *ps, uint64_t pa)
{
size_t i;
+
for (i = 0; i < ps->block_nr; i++) {
if (ps->block[i].paddr <= pa &&
pa <= ps->block[i].paddr + ps->block[i].size) {
diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c
index d77b8f9..89f0c69 100644
--- a/contrib/elf2dmp/main.c
+++ b/contrib/elf2dmp/main.c
@@ -17,6 +17,7 @@
#define SYM_URL_BASE "https://msdl.microsoft.com/download/symbols/"
#define PDB_NAME "ntkrnlmp.pdb"
+#define PE_NAME "ntoskrnl.exe"
#define INITIAL_MXCSR 0x1f80
@@ -282,14 +283,16 @@
};
for (i = 0; i < ps->block_nr; i++) {
- h.PhysicalMemoryBlock.NumberOfPages += ps->block[i].size / ELF2DMP_PAGE_SIZE;
+ h.PhysicalMemoryBlock.NumberOfPages +=
+ ps->block[i].size / ELF2DMP_PAGE_SIZE;
h.PhysicalMemoryBlock.Run[i] = (WinDumpPhyMemRun64) {
.BasePage = ps->block[i].paddr / ELF2DMP_PAGE_SIZE,
.PageCount = ps->block[i].size / ELF2DMP_PAGE_SIZE,
};
}
- h.RequiredDumpSpace += h.PhysicalMemoryBlock.NumberOfPages << ELF2DMP_PAGE_BITS;
+ h.RequiredDumpSpace +=
+ h.PhysicalMemoryBlock.NumberOfPages << ELF2DMP_PAGE_BITS;
*hdr = h;
@@ -299,7 +302,8 @@
static int fill_context(KDDEBUGGER_DATA64 *kdbg,
struct va_space *vs, QEMU_Elf *qe)
{
- int i;
+ int i;
+
for (i = 0; i < qe->state_nr; i++) {
uint64_t Prcb;
uint64_t Context;
@@ -330,6 +334,45 @@
return 0;
}
+static int pe_get_data_dir_entry(uint64_t base, void *start_addr, int idx,
+ void *entry, size_t size, struct va_space *vs)
+{
+ const char e_magic[2] = "MZ";
+ const char Signature[4] = "PE\0\0";
+ IMAGE_DOS_HEADER *dos_hdr = start_addr;
+ IMAGE_NT_HEADERS64 nt_hdrs;
+ IMAGE_FILE_HEADER *file_hdr = &nt_hdrs.FileHeader;
+ IMAGE_OPTIONAL_HEADER64 *opt_hdr = &nt_hdrs.OptionalHeader;
+ IMAGE_DATA_DIRECTORY *data_dir = nt_hdrs.OptionalHeader.DataDirectory;
+
+ QEMU_BUILD_BUG_ON(sizeof(*dos_hdr) >= ELF2DMP_PAGE_SIZE);
+
+ if (memcmp(&dos_hdr->e_magic, e_magic, sizeof(e_magic))) {
+ return 1;
+ }
+
+ if (va_space_rw(vs, base + dos_hdr->e_lfanew,
+ &nt_hdrs, sizeof(nt_hdrs), 0)) {
+ return 1;
+ }
+
+ if (memcmp(&nt_hdrs.Signature, Signature, sizeof(Signature)) ||
+ file_hdr->Machine != 0x8664 || opt_hdr->Magic != 0x020b) {
+ return 1;
+ }
+
+ if (va_space_rw(vs,
+ base + data_dir[idx].VirtualAddress,
+ entry, size, 0)) {
+ return 1;
+ }
+
+ printf("Data directory entry #%d: RVA = 0x%08"PRIx32"\n", idx,
+ (uint32_t)data_dir[idx].VirtualAddress);
+
+ return 0;
+}
+
static int write_dump(struct pa_space *ps,
WinDumpHeader64 *hdr, const char *name)
{
@@ -363,45 +406,38 @@
return fclose(dmp_file);
}
+static bool pe_check_export_name(uint64_t base, void *start_addr,
+ struct va_space *vs)
+{
+ IMAGE_EXPORT_DIRECTORY export_dir;
+ const char *pe_name;
+
+ if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_EXPORT_DIRECTORY,
+ &export_dir, sizeof(export_dir), vs)) {
+ return false;
+ }
+
+ pe_name = va_space_resolve(vs, base + export_dir.Name);
+ if (!pe_name) {
+ return false;
+ }
+
+ return !strcmp(pe_name, PE_NAME);
+}
+
static int pe_get_pdb_symstore_hash(uint64_t base, void *start_addr,
char *hash, struct va_space *vs)
{
- const char e_magic[2] = "MZ";
- const char Signature[4] = "PE\0\0";
const char sign_rsds[4] = "RSDS";
- IMAGE_DOS_HEADER *dos_hdr = start_addr;
- IMAGE_NT_HEADERS64 nt_hdrs;
- IMAGE_FILE_HEADER *file_hdr = &nt_hdrs.FileHeader;
- IMAGE_OPTIONAL_HEADER64 *opt_hdr = &nt_hdrs.OptionalHeader;
- IMAGE_DATA_DIRECTORY *data_dir = nt_hdrs.OptionalHeader.DataDirectory;
IMAGE_DEBUG_DIRECTORY debug_dir;
OMFSignatureRSDS rsds;
char *pdb_name;
size_t pdb_name_sz;
size_t i;
- QEMU_BUILD_BUG_ON(sizeof(*dos_hdr) >= ELF2DMP_PAGE_SIZE);
-
- if (memcmp(&dos_hdr->e_magic, e_magic, sizeof(e_magic))) {
- return 1;
- }
-
- if (va_space_rw(vs, base + dos_hdr->e_lfanew,
- &nt_hdrs, sizeof(nt_hdrs), 0)) {
- return 1;
- }
-
- if (memcmp(&nt_hdrs.Signature, Signature, sizeof(Signature)) ||
- file_hdr->Machine != 0x8664 || opt_hdr->Magic != 0x020b) {
- return 1;
- }
-
- printf("Debug Directory RVA = 0x%08"PRIx32"\n",
- (uint32_t)data_dir[IMAGE_FILE_DEBUG_DIRECTORY].VirtualAddress);
-
- if (va_space_rw(vs,
- base + data_dir[IMAGE_FILE_DEBUG_DIRECTORY].VirtualAddress,
- &debug_dir, sizeof(debug_dir), 0)) {
+ if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_DEBUG_DIRECTORY,
+ &debug_dir, sizeof(debug_dir), vs)) {
+ eprintf("Failed to get Debug Directory\n");
return 1;
}
@@ -473,6 +509,7 @@
uint64_t KdDebuggerDataBlock;
KDDEBUGGER_DATA64 *kdbg;
uint64_t KdVersionBlock;
+ bool kernel_found = false;
if (argc != 3) {
eprintf("usage:\n\t%s elf_file dmp_file\n", argv[0]);
@@ -520,11 +557,14 @@
}
if (*(uint16_t *)nt_start_addr == 0x5a4d) { /* MZ */
- break;
+ if (pe_check_export_name(KernBase, nt_start_addr, &vs)) {
+ kernel_found = true;
+ break;
+ }
}
}
- if (!nt_start_addr) {
+ if (!kernel_found) {
eprintf("Failed to find NT kernel image\n");
err = 1;
goto out_ps;
diff --git a/contrib/elf2dmp/pe.h b/contrib/elf2dmp/pe.h
index c2a4a6b..71126af 100644
--- a/contrib/elf2dmp/pe.h
+++ b/contrib/elf2dmp/pe.h
@@ -33,75 +33,90 @@
} __attribute__ ((packed)) IMAGE_DOS_HEADER;
typedef struct IMAGE_FILE_HEADER {
- uint16_t Machine;
- uint16_t NumberOfSections;
- uint32_t TimeDateStamp;
- uint32_t PointerToSymbolTable;
- uint32_t NumberOfSymbols;
- uint16_t SizeOfOptionalHeader;
- uint16_t Characteristics;
+ uint16_t Machine;
+ uint16_t NumberOfSections;
+ uint32_t TimeDateStamp;
+ uint32_t PointerToSymbolTable;
+ uint32_t NumberOfSymbols;
+ uint16_t SizeOfOptionalHeader;
+ uint16_t Characteristics;
} __attribute__ ((packed)) IMAGE_FILE_HEADER;
typedef struct IMAGE_DATA_DIRECTORY {
- uint32_t VirtualAddress;
- uint32_t Size;
+ uint32_t VirtualAddress;
+ uint32_t Size;
} __attribute__ ((packed)) IMAGE_DATA_DIRECTORY;
#define IMAGE_NUMBEROF_DIRECTORY_ENTRIES 16
typedef struct IMAGE_OPTIONAL_HEADER64 {
- uint16_t Magic; /* 0x20b */
- uint8_t MajorLinkerVersion;
- uint8_t MinorLinkerVersion;
- uint32_t SizeOfCode;
- uint32_t SizeOfInitializedData;
- uint32_t SizeOfUninitializedData;
- uint32_t AddressOfEntryPoint;
- uint32_t BaseOfCode;
- uint64_t ImageBase;
- uint32_t SectionAlignment;
- uint32_t FileAlignment;
- uint16_t MajorOperatingSystemVersion;
- uint16_t MinorOperatingSystemVersion;
- uint16_t MajorImageVersion;
- uint16_t MinorImageVersion;
- uint16_t MajorSubsystemVersion;
- uint16_t MinorSubsystemVersion;
- uint32_t Win32VersionValue;
- uint32_t SizeOfImage;
- uint32_t SizeOfHeaders;
- uint32_t CheckSum;
- uint16_t Subsystem;
- uint16_t DllCharacteristics;
- uint64_t SizeOfStackReserve;
- uint64_t SizeOfStackCommit;
- uint64_t SizeOfHeapReserve;
- uint64_t SizeOfHeapCommit;
- uint32_t LoaderFlags;
- uint32_t NumberOfRvaAndSizes;
- IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES];
+ uint16_t Magic; /* 0x20b */
+ uint8_t MajorLinkerVersion;
+ uint8_t MinorLinkerVersion;
+ uint32_t SizeOfCode;
+ uint32_t SizeOfInitializedData;
+ uint32_t SizeOfUninitializedData;
+ uint32_t AddressOfEntryPoint;
+ uint32_t BaseOfCode;
+ uint64_t ImageBase;
+ uint32_t SectionAlignment;
+ uint32_t FileAlignment;
+ uint16_t MajorOperatingSystemVersion;
+ uint16_t MinorOperatingSystemVersion;
+ uint16_t MajorImageVersion;
+ uint16_t MinorImageVersion;
+ uint16_t MajorSubsystemVersion;
+ uint16_t MinorSubsystemVersion;
+ uint32_t Win32VersionValue;
+ uint32_t SizeOfImage;
+ uint32_t SizeOfHeaders;
+ uint32_t CheckSum;
+ uint16_t Subsystem;
+ uint16_t DllCharacteristics;
+ uint64_t SizeOfStackReserve;
+ uint64_t SizeOfStackCommit;
+ uint64_t SizeOfHeapReserve;
+ uint64_t SizeOfHeapCommit;
+ uint32_t LoaderFlags;
+ uint32_t NumberOfRvaAndSizes;
+ IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES];
} __attribute__ ((packed)) IMAGE_OPTIONAL_HEADER64;
typedef struct IMAGE_NT_HEADERS64 {
- uint32_t Signature;
- IMAGE_FILE_HEADER FileHeader;
- IMAGE_OPTIONAL_HEADER64 OptionalHeader;
+ uint32_t Signature;
+ IMAGE_FILE_HEADER FileHeader;
+ IMAGE_OPTIONAL_HEADER64 OptionalHeader;
} __attribute__ ((packed)) IMAGE_NT_HEADERS64;
+typedef struct IMAGE_EXPORT_DIRECTORY {
+ uint32_t Characteristics;
+ uint32_t TimeDateStamp;
+ uint16_t MajorVersion;
+ uint16_t MinorVersion;
+ uint32_t Name;
+ uint32_t Base;
+ uint32_t NumberOfFunctions;
+ uint32_t NumberOfNames;
+ uint32_t AddressOfFunctions;
+ uint32_t AddressOfNames;
+ uint32_t AddressOfNameOrdinals;
+} __attribute__ ((packed)) IMAGE_EXPORT_DIRECTORY;
+
typedef struct IMAGE_DEBUG_DIRECTORY {
- uint32_t Characteristics;
- uint32_t TimeDateStamp;
- uint16_t MajorVersion;
- uint16_t MinorVersion;
- uint32_t Type;
- uint32_t SizeOfData;
- uint32_t AddressOfRawData;
- uint32_t PointerToRawData;
+ uint32_t Characteristics;
+ uint32_t TimeDateStamp;
+ uint16_t MajorVersion;
+ uint16_t MinorVersion;
+ uint32_t Type;
+ uint32_t SizeOfData;
+ uint32_t AddressOfRawData;
+ uint32_t PointerToRawData;
} __attribute__ ((packed)) IMAGE_DEBUG_DIRECTORY;
#define IMAGE_DEBUG_TYPE_CODEVIEW 2
#endif
+#define IMAGE_FILE_EXPORT_DIRECTORY 0
#define IMAGE_FILE_DEBUG_DIRECTORY 6
typedef struct guid_t {
diff --git a/contrib/gitdm/domain-map b/contrib/gitdm/domain-map
index 3727918..3e31a06 100644
--- a/contrib/gitdm/domain-map
+++ b/contrib/gitdm/domain-map
@@ -4,7 +4,12 @@
# This maps email domains to nice easy to read company names
#
+linux.alibaba.com Alibaba
+amazon.com Amazon
+amazon.co.uk Amazon
+amazon.de Amazon
amd.com AMD
+aspeedtech.com ASPEED Technology Inc.
baidu.com Baidu
bytedance.com ByteDance
cmss.chinamobile.com China Mobile
@@ -12,6 +17,7 @@
crudebyte.com Crudebyte
chinatelecom.cn China Telecom
eldorado.org.br Instituto de Pesquisas Eldorado
+fb.com Facebook
fujitsu.com Fujitsu
google.com Google
greensocs.com GreenSocs
@@ -31,15 +37,18 @@
proxmox.com Proxmox
quicinc.com Qualcomm Innovation Center
redhat.com Red Hat
+rev.ng rev.ng Labs
rt-rk.com RT-RK
samsung.com Samsung
siemens.com Siemens
sifive.com SiFive
suse.com SUSE
suse.de SUSE
+syrmia.com SYRMIA
+ventanamicro.com Ventana Micro Systems
virtuozzo.com Virtuozzo
+vrull.eu VRULL
wdc.com Western Digital
windriver.com Wind River
-xilinx.com Xilinx
yadro.com YADRO
yandex-team.ru Yandex
diff --git a/contrib/gitdm/group-map-alibaba b/contrib/gitdm/group-map-alibaba
new file mode 100644
index 0000000..4c34446
--- /dev/null
+++ b/contrib/gitdm/group-map-alibaba
@@ -0,0 +1,7 @@
+#
+# Alibaba contributors including its subsidiaries
+#
+
+# c-sky.com, now part of T-Head, a wholly-owned entity of Alibaba Group
+ren_guo@c-sky.com
+zhiwei_liu@c-sky.com
diff --git a/contrib/gitdm/group-map-amd b/contrib/gitdm/group-map-amd
new file mode 100644
index 0000000..bda4239
--- /dev/null
+++ b/contrib/gitdm/group-map-amd
@@ -0,0 +1,8 @@
+# AMD acquired Xilinx and contributors have been slowly updating emails
+
+edgar.iglesias@xilinx.com
+fnu.vikram@xilinx.com
+francisco.iglesias@xilinx.com
+sai.pavan.boddu@xilinx.com
+stefano.stabellini@xilinx.com
+tong.ho@xilinx.com
diff --git a/contrib/gitdm/group-map-facebook b/contrib/gitdm/group-map-facebook
new file mode 100644
index 0000000..38589f8
--- /dev/null
+++ b/contrib/gitdm/group-map-facebook
@@ -0,0 +1,5 @@
+#
+# Some Facebook contributors also occasionally use personal email addresses.
+#
+
+peter@pjd.dev
diff --git a/contrib/gitdm/group-map-ibm b/contrib/gitdm/group-map-ibm
index da62fa3..24d8dc1 100644
--- a/contrib/gitdm/group-map-ibm
+++ b/contrib/gitdm/group-map-ibm
@@ -12,3 +12,4 @@
joel@jms.id.au
sjitindarsingh@gmail.com
tommusta@gmail.com
+idan.horowitz@gmail.com
diff --git a/contrib/gitdm/group-map-individuals b/contrib/gitdm/group-map-individuals
index 53883cc..d7116f5 100644
--- a/contrib/gitdm/group-map-individuals
+++ b/contrib/gitdm/group-map-individuals
@@ -37,3 +37,8 @@
paul@nowt.org
git@xen0n.name
simon@simonsafar.com
+research_trasio@irq.a4lg.com
+shentey@gmail.com
+bmeng@tinylab.org
+strahinja.p.jankovic@gmail.com
+Jason@zx2c4.com
diff --git a/cpu.c b/cpu.c
index 2e9f931..849bac0 100644
--- a/cpu.c
+++ b/cpu.c
@@ -31,16 +31,18 @@
#include "hw/core/sysemu-cpu-ops.h"
#include "exec/address-spaces.h"
#endif
+#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
-#include "sysemu/kvm.h"
#include "exec/replay-core.h"
#include "exec/cpu-common.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "exec/translate-all.h"
#include "exec/log.h"
#include "hw/core/accel-cpu.h"
#include "trace/trace-root.h"
#include "qemu/accel.h"
+#include "qemu/plugin.h"
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
@@ -325,9 +327,14 @@
{
if (cpu->singlestep_enabled != enabled) {
cpu->singlestep_enabled = enabled;
- if (kvm_enabled()) {
- kvm_update_guest_debug(cpu, 0);
+
+#if !defined(CONFIG_USER_ONLY)
+ const AccelOpsClass *ops = cpus_get_accel();
+ if (ops->update_guest_debug) {
+ ops->update_guest_debug(cpu);
}
+#endif
+
trace_breakpoint_singlestep(cpu->cpu_index, enabled);
}
}
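
The cpu.c hunk above replaces a hard-coded kvm_update_guest_debug() call with an optional per-accelerator hook looked up through cpus_get_accel(). The sketch below only illustrates that general "optional callback in an ops table" pattern; the names in it (DebugOps, current_ops, and so on) are invented for the example and are not QEMU's AccelOpsClass API.

/* Illustration of the optional ops-table callback pattern; hypothetical names. */
#include <stdio.h>

typedef struct DebugOps {
    /* NULL when the accelerator has no guest-debug support */
    void (*update_guest_debug)(int cpu_index);
} DebugOps;

static void kvm_like_update(int cpu_index)
{
    printf("accel: refresh debug state for cpu %d\n", cpu_index);
}

static const DebugOps kvm_like_ops = { .update_guest_debug = kvm_like_update };
static const DebugOps tcg_like_ops = { 0 }; /* hook not implemented */

static const DebugOps *current_ops = &kvm_like_ops;

static void set_singlestep(int cpu_index, int enabled)
{
    /* Call the hook only if the current accelerator provides one. */
    if (current_ops->update_guest_debug) {
        current_ops->update_guest_debug(cpu_index);
    }
    printf("cpu %d singlestep=%d\n", cpu_index, enabled);
}

int main(void)
{
    set_singlestep(0, 1);
    current_ops = &tcg_like_ops;
    set_singlestep(0, 0);
    return 0;
}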
diff --git a/crypto/afalg.c b/crypto/afalg.c
index 10046bb..348301e 100644
--- a/crypto/afalg.c
+++ b/crypto/afalg.c
@@ -59,7 +59,7 @@
if (bind(sbind, (const struct sockaddr *)&salg, sizeof(salg)) != 0) {
error_setg_errno(errp, errno, "Failed to bind socket");
- closesocket(sbind);
+ close(sbind);
return -1;
}
@@ -105,11 +105,11 @@
}
if (afalg->tfmfd != -1) {
- closesocket(afalg->tfmfd);
+ close(afalg->tfmfd);
}
if (afalg->opfd != -1) {
- closesocket(afalg->opfd);
+ close(afalg->opfd);
}
g_free(afalg);
diff --git a/disas/riscv.c b/disas/riscv.c
index 54455aa..d6b0fbe 100644
--- a/disas/riscv.c
+++ b/disas/riscv.c
@@ -1014,6 +1014,7 @@
#define rv_fmt_rd_offset "O\t0,o"
#define rv_fmt_rd_rs1_rs2 "O\t0,1,2"
#define rv_fmt_frd_rs1 "O\t3,1"
+#define rv_fmt_frd_frs1 "O\t3,4"
#define rv_fmt_rd_frs1 "O\t0,4"
#define rv_fmt_rd_frs1_frs2 "O\t0,4,5"
#define rv_fmt_frd_frs1_frs2 "O\t3,4,5"
@@ -1580,15 +1581,15 @@
{ "snez", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 },
{ "sltz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
{ "sgtz", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 },
- { "fmv.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fabs.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fneg.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fmv.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fabs.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fneg.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fmv.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fabs.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "fneg.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
+ { "fmv.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fabs.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fneg.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fmv.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fabs.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fneg.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fmv.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fabs.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
+ { "fneg.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
{ "beqz", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 },
{ "bnez", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 },
{ "blez", rv_codec_sb, rv_fmt_rs2_offset, NULL, 0, 0, 0 },
@@ -1647,7 +1648,7 @@
{ "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
{ "ctzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
{ "cpopw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
- { "slli.uw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 },
+ { "slli.uw", rv_codec_i_sh6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 },
{ "add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "rolw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
{ "rorw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
@@ -2617,10 +2618,10 @@
switch (((inst >> 12) & 0b111)) {
case 0: op = rv_op_addiw; break;
case 1:
- switch (((inst >> 25) & 0b1111111)) {
+ switch (((inst >> 26) & 0b111111)) {
case 0: op = rv_op_slliw; break;
- case 4: op = rv_op_slli_uw; break;
- case 48:
+ case 2: op = rv_op_slli_uw; break;
+ case 24:
switch ((inst >> 20) & 0b11111) {
case 0b00000: op = rv_op_clzw; break;
case 0b00001: op = rv_op_ctzw; break;
diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst
index 20b97c3..89cae5a 100644
--- a/docs/about/build-platforms.rst
+++ b/docs/about/build-platforms.rst
@@ -67,7 +67,8 @@
Linux OS, macOS, FreeBSD, NetBSD, OpenBSD
-----------------------------------------
-The project aims to support the most recent major version at all times. Support
+The project aims to support the most recent major version at all times for
+up to five years after its initial release. Support
for the previous major version will be dropped 2 years after the new major
version is released or when the vendor itself drops support, whichever comes
first. In this context, third-party efforts to extend the lifetime of a distro
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index 15084f7..33b9422 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -196,6 +196,26 @@
completes. The little endian variants of MIPS (both 32 and 64 bit) are
still a supported host architecture.
+System emulation on 32-bit x86 hosts (since 8.0)
+''''''''''''''''''''''''''''''''''''''''''''''''
+
+Support for 32-bit x86 host deployments is increasingly uncommon in mainstream
+OS distributions given the widespread availability of 64-bit x86 hardware.
+The QEMU project no longer considers 32-bit x86 support for system emulation to
+be an effective use of its limited resources, and thus intends to discontinue
+it. Since all recent x86 hardware from the past >10 years is capable of the
+64-bit x86 extensions, a corresponding 64-bit OS should be used instead.
+
+System emulation on 32-bit arm hosts (since 8.0)
+''''''''''''''''''''''''''''''''''''''''''''''''
+
+Since QEMU needs a capable host machine for running full system emulation, and
+all recent powerful arm hosts support 64-bit, the QEMU project deprecates
+support for running any system emulation on 32-bit arm hosts. Use 64-bit arm
+hosts for system emulation instead. (Note: "user" mode emulation continues to
+be supported on 32-bit arm hosts.)
+
+
QEMU API (QAPI) events
----------------------
diff --git a/docs/config/mach-virt-graphical.cfg b/docs/config/mach-virt-graphical.cfg
index d6d31b1..eba76eb 100644
--- a/docs/config/mach-virt-graphical.cfg
+++ b/docs/config/mach-virt-graphical.cfg
@@ -56,9 +56,11 @@
[machine]
type = "virt"
- accel = "kvm"
gic-version = "host"
+[accel]
+ accel = "kvm"
+
[memory]
size = "1024"
diff --git a/docs/config/mach-virt-serial.cfg b/docs/config/mach-virt-serial.cfg
index 18a7c83..324b054 100644
--- a/docs/config/mach-virt-serial.cfg
+++ b/docs/config/mach-virt-serial.cfg
@@ -62,9 +62,11 @@
[machine]
type = "virt"
- accel = "kvm"
gic-version = "host"
+[accel]
+ accel = "kvm"
+
[memory]
size = "1024"
diff --git a/docs/config/q35-emulated.cfg b/docs/config/q35-emulated.cfg
index 99ac918..c8806e6 100644
--- a/docs/config/q35-emulated.cfg
+++ b/docs/config/q35-emulated.cfg
@@ -61,6 +61,8 @@
[machine]
type = "q35"
+
+[accel]
accel = "kvm"
[memory]
diff --git a/docs/config/q35-virtio-graphical.cfg b/docs/config/q35-virtio-graphical.cfg
index 4207f11..148b5d2 100644
--- a/docs/config/q35-virtio-graphical.cfg
+++ b/docs/config/q35-virtio-graphical.cfg
@@ -55,6 +55,8 @@
[machine]
type = "q35"
+
+[accel]
accel = "kvm"
[memory]
diff --git a/docs/config/q35-virtio-serial.cfg b/docs/config/q35-virtio-serial.cfg
index d2830ae..0232913 100644
--- a/docs/config/q35-virtio-serial.cfg
+++ b/docs/config/q35-virtio-serial.cfg
@@ -60,6 +60,8 @@
[machine]
type = "q35"
+
+[accel]
accel = "kvm"
[memory]
diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst
index 7957310..81ec26b 100644
--- a/docs/devel/atomics.rst
+++ b/docs/devel/atomics.rst
@@ -27,7 +27,8 @@
- weak atomic access and manual memory barriers: ``qatomic_read()``,
``qatomic_set()``, ``smp_rmb()``, ``smp_wmb()``, ``smp_mb()``,
- ``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``;
+ ``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``,
+ ``smp_mb__before_rmw()``, ``smp_mb__after_rmw()``;
- sequentially consistent atomic access: everything else.
@@ -468,13 +469,19 @@
In QEMU, the second kind is named ``atomic_OP_fetch``.
- different atomic read-modify-write operations in Linux imply
- a different set of memory barriers; in QEMU, all of them enforce
- sequential consistency.
+ a different set of memory barriers. In QEMU, all of them enforce
+ sequential consistency: there is a single order in which the
+ program sees them happen.
-- in QEMU, ``qatomic_read()`` and ``qatomic_set()`` do not participate in
- the total ordering enforced by sequentially-consistent operations.
- This is because QEMU uses the C11 memory model. The following example
- is correct in Linux but not in QEMU:
+- however, according to the C11 memory model that QEMU uses, this order
+ does not propagate to other memory accesses on either side of the
+ read-modify-write operation. As far as those are concerned, the
+  operation consists of just a load-acquire followed by a store-release.
+ Stores that precede the RMW operation, and loads that follow it, can
+ still be reordered and will happen *in the middle* of the read-modify-write
+ operation!
+
+ Therefore, the following example is correct in Linux but not in QEMU:
+----------------------------------+--------------------------------+
| Linux (correct) | QEMU (incorrect) |
@@ -488,9 +495,24 @@
because the read of ``y`` can be moved (by either the processor or the
compiler) before the write of ``x``.
- Fixing this requires an ``smp_mb()`` memory barrier between the write
- of ``x`` and the read of ``y``. In the common case where only one thread
- writes ``x``, it is also possible to write it like this:
+ Fixing this requires a full memory barrier between the write of ``x`` and
+ the read of ``y``. QEMU provides ``smp_mb__before_rmw()`` and
+ ``smp_mb__after_rmw()``; they act both as an optimization,
+ avoiding the memory barrier on processors where it is unnecessary,
+ and as a clarification of this corner case of the C11 memory model:
+
+ +--------------------------------+
+ | QEMU (correct) |
+ +================================+
+ | :: |
+ | |
+ | a = qatomic_fetch_add(&x, 2);|
+ | smp_mb__after_rmw(); |
+ | b = qatomic_read(&y); |
+ +--------------------------------+
+
+ In the common case where only one thread writes ``x``, it is also possible
+ to write it like this:
+--------------------------------+
| QEMU (correct) |
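
As a plain C11 counterpart to the corrected QEMU example above (standard <stdatomic.h> code, not QEMU's qatomic API), the same pattern can be written with an explicit sequentially consistent fence after the read-modify-write; that fence plays the role of smp_mb__after_rmw() and keeps the later relaxed load from moving before the RMW in the Dekker-style pairing the text describes.

/* C11 analogue of the corrected example; not QEMU code. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;

int main(void)
{
    int a, b;

    a = atomic_fetch_add(&x, 2);
    /* Counterpart of smp_mb__after_rmw(): orders the RMW before the load
     * of y, which matters when another thread runs the mirrored sequence. */
    atomic_thread_fence(memory_order_seq_cst);
    b = atomic_load_explicit(&y, memory_order_relaxed);

    printf("a=%d b=%d\n", a, b);
    return 0;
}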
diff --git a/docs/devel/vfio-migration.rst b/docs/devel/vfio-migration.rst
index c214c73..1b68ccf 100644
--- a/docs/devel/vfio-migration.rst
+++ b/docs/devel/vfio-migration.rst
@@ -59,22 +59,37 @@
----------------------------------
A ``log_global_start`` and ``log_global_stop`` memory listener callback informs
-the VFIO IOMMU module to start and stop dirty page tracking. A ``log_sync``
-memory listener callback marks those system memory pages as dirty which are
-used for DMA by the VFIO device. The dirty pages bitmap is queried per
-container. All pages pinned by the vendor driver through external APIs have to
-be marked as dirty during migration. When there are CPU writes, CPU dirty page
-tracking can identify dirtied pages, but any page pinned by the vendor driver
-can also be written by the device. There is currently no device or IOMMU
-support for dirty page tracking in hardware.
+the VFIO dirty tracking module to start and stop dirty page tracking. A
+``log_sync`` memory listener callback queries the dirty page bitmap from the
+dirty tracking module and marks system memory pages which were DMA-ed by the
+VFIO device as dirty. The dirty page bitmap is queried per container.
+
+Currently there are two ways dirty page tracking can be done:
+(1) Device dirty tracking:
+In this method the device is responsible for logging and reporting its DMAs.
+This method can be used only if the device is capable of tracking its DMAs.
+Discovering device capability, starting and stopping dirty tracking, and
+syncing the dirty bitmaps from the device are done using the DMA logging uAPI.
+More info about the uAPI can be found in the comments of the
+``vfio_device_feature_dma_logging_control`` and
+``vfio_device_feature_dma_logging_report`` structures in the header file
+linux-headers/linux/vfio.h.
+
+(2) VFIO IOMMU module:
+In this method dirty tracking is done by the IOMMU. However, there is currently
+no IOMMU support for dirty page tracking. For this reason, all pages are
+perpetually marked dirty, unless the device driver pins pages through external
+APIs, in which case only those pinned pages are perpetually marked dirty.
+
+If the above two methods are not supported, all pages are perpetually marked
+dirty by QEMU.
By default, dirty pages are tracked during pre-copy as well as stop-and-copy
-phase. So, a page pinned by the vendor driver will be copied to the destination
-in both phases. Copying dirty pages in pre-copy phase helps QEMU to predict if
-it can achieve its downtime tolerances. If QEMU during pre-copy phase keeps
-finding dirty pages continuously, then it understands that even in stop-and-copy
-phase, it is likely to find dirty pages and can predict the downtime
-accordingly.
+phase. So, a page marked as dirty will be copied to the destination in both
+phases. Copying dirty pages in pre-copy phase helps QEMU to predict if it can
+achieve its downtime tolerances. If QEMU during pre-copy phase keeps finding
+dirty pages continuously, then it understands that even in stop-and-copy phase,
+it is likely to find dirty pages and can predict the downtime accordingly.
QEMU also provides a per device opt-out option ``pre-copy-dirty-page-tracking``
which disables querying the dirty bitmap during pre-copy phase. If it is set to
@@ -89,7 +104,8 @@
that range and QEMU reports corresponding guest physical pages dirty. During
stop-and-copy phase, an IOMMU notifier is used to get a callback for mapped
pages and then dirty pages bitmap is fetched from VFIO IOMMU modules for those
-mapped ranges.
+mapped ranges. If device dirty tracking is enabled with vIOMMU, live migration
+will be blocked.
Flow of state changes during Live migration
===========================================
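
To put the fallback order described above into code form, the sketch below uses entirely made-up helper names (it is neither the VFIO uAPI nor QEMU's implementation); it only illustrates how a log_global_start/log_sync pair might choose between device DMA logging, IOMMU tracking, and the mark-everything-dirty fallback.

/* Hypothetical sketch of the dirty-tracking fallback order; invented names. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITMAP_WORDS 4

static bool device_supports_dma_logging;   /* discovered from the device */
static bool iommu_supports_dirty_tracking; /* currently always false */

/* log_global_start(): pick the best available tracking method. */
static void dirty_tracking_start(void)
{
    if (device_supports_dma_logging) {
        printf("start device DMA logging\n");
    } else if (iommu_supports_dirty_tracking) {
        printf("start IOMMU dirty tracking\n");
    } else {
        printf("no tracking: report all pages dirty\n");
    }
}

/* log_sync(): fill the dirty bitmap for one container. */
static void dirty_bitmap_sync(uint64_t *bitmap, size_t words)
{
    if (device_supports_dma_logging) {
        /* query the device's DMA log for the covered IOVA ranges */
        memset(bitmap, 0, words * sizeof(*bitmap));
        bitmap[0] = 0x1; /* pretend only the first page was DMA-ed */
    } else {
        /* conservative fallback: every page is considered dirty */
        memset(bitmap, 0xff, words * sizeof(*bitmap));
    }
}

int main(void)
{
    uint64_t bitmap[BITMAP_WORDS];

    dirty_tracking_start();
    dirty_bitmap_sync(bitmap, BITMAP_WORDS);
    printf("bitmap[0] = 0x%016" PRIx64 "\n", bitmap[0]);
    return 0;
}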
diff --git a/docs/meson.build b/docs/meson.build
index bb72c10..f220800 100644
--- a/docs/meson.build
+++ b/docs/meson.build
@@ -7,7 +7,7 @@
SPHINX_ARGS = ['env', 'CONFDIR=' + qemu_confdir, sphinx_build, '-q']
# If we're making warnings fatal, apply this to Sphinx runs as well
if get_option('werror')
- SPHINX_ARGS += [ '-W' ]
+ SPHINX_ARGS += [ '-W', '-Dkerneldoc_werror=1' ]
endif
# This is a bit awkward but works: create a trivial document and
diff --git a/docs/sphinx/kerneldoc.py b/docs/sphinx/kerneldoc.py
index bf44215..72c403a 100644
--- a/docs/sphinx/kerneldoc.py
+++ b/docs/sphinx/kerneldoc.py
@@ -74,6 +74,10 @@
# Sphinx versions
cmd += ['-sphinx-version', sphinx.__version__]
+ # Pass through the warnings-as-errors flag
+ if env.config.kerneldoc_werror:
+ cmd += ['-Werror']
+
filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
export_file_patterns = []
@@ -167,6 +171,7 @@
app.add_config_value('kerneldoc_bin', None, 'env')
app.add_config_value('kerneldoc_srctree', None, 'env')
app.add_config_value('kerneldoc_verbosity', 1, 'env')
+ app.add_config_value('kerneldoc_werror', 0, 'env')
app.add_directive('kernel-doc', KernelDocDirective)
diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
index 00c4440..f4524b6 100644
--- a/docs/system/arm/cpu-features.rst
+++ b/docs/system/arm/cpu-features.rst
@@ -177,39 +177,32 @@
enabled, and disabled in the same way as other CPU features. Below is
the list of KVM VCPU features and their descriptions.
- kvm-no-adjvtime By default kvm-no-adjvtime is disabled. This
- means that by default the virtual time
- adjustment is enabled (vtime is not *not*
- adjusted).
+``kvm-no-adjvtime``
+ By default kvm-no-adjvtime is disabled. This means that by default
+ the virtual time adjustment is enabled (vtime is not *not* adjusted).
- When virtual time adjustment is enabled each
- time the VM transitions back to running state
- the VCPU's virtual counter is updated to ensure
- stopped time is not counted. This avoids time
- jumps surprising guest OSes and applications,
- as long as they use the virtual counter for
- timekeeping. However it has the side effect of
- the virtual and physical counters diverging.
- All timekeeping based on the virtual counter
- will appear to lag behind any timekeeping that
- does not subtract VM stopped time. The guest
- may resynchronize its virtual counter with
- other time sources as needed.
+ When virtual time adjustment is enabled each time the VM transitions
+ back to running state the VCPU's virtual counter is updated to
+ ensure stopped time is not counted. This avoids time jumps
+ surprising guest OSes and applications, as long as they use the
+ virtual counter for timekeeping. However it has the side effect of
+ the virtual and physical counters diverging. All timekeeping based
+ on the virtual counter will appear to lag behind any timekeeping
+ that does not subtract VM stopped time. The guest may resynchronize
+ its virtual counter with other time sources as needed.
- Enable kvm-no-adjvtime to disable virtual time
- adjustment, also restoring the legacy (pre-5.0)
- behavior.
+ Enable kvm-no-adjvtime to disable virtual time adjustment, also
+ restoring the legacy (pre-5.0) behavior.
- kvm-steal-time Since v5.2, kvm-steal-time is enabled by
- default when KVM is enabled, the feature is
- supported, and the guest is 64-bit.
+``kvm-steal-time``
+ Since v5.2, kvm-steal-time is enabled by default when KVM is
+ enabled, the feature is supported, and the guest is 64-bit.
- When kvm-steal-time is enabled a 64-bit guest
- can account for time its CPUs were not running
- due to the host not scheduling the corresponding
- VCPU threads. The accounting statistics may
- influence the guest scheduler behavior and/or be
- exposed to the guest userspace.
+ When kvm-steal-time is enabled a 64-bit guest can account for time
+ its CPUs were not running due to the host not scheduling the
+ corresponding VCPU threads. The accounting statistics may influence
+ the guest scheduler behavior and/or be exposed to the guest
+ userspace.
TCG VCPU Features
=================
@@ -217,16 +210,15 @@
TCG VCPU features are CPU features that are specific to TCG.
Below is the list of TCG VCPU features and their descriptions.
- pauth-impdef When ``FEAT_Pauth`` is enabled, either the
- *impdef* (Implementation Defined) algorithm
- is enabled or the *architected* QARMA algorithm
- is enabled. By default the impdef algorithm
- is disabled, and QARMA is enabled.
+``pauth-impdef``
+ When ``FEAT_Pauth`` is enabled, either the *impdef* (Implementation
+ Defined) algorithm is enabled or the *architected* QARMA algorithm
+ is enabled. By default the impdef algorithm is disabled, and QARMA
+ is enabled.
- The architected QARMA algorithm has good
- cryptographic properties, but can be quite slow
- to emulate. The impdef algorithm used by QEMU
- is non-cryptographic but significantly faster.
+ The architected QARMA algorithm has good cryptographic properties,
+ but can be quite slow to emulate. The impdef algorithm used by QEMU
+ is non-cryptographic but significantly faster.
SVE CPU Properties
==================
diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst
index 0506006..c1b1934 100644
--- a/docs/system/device-emulation.rst
+++ b/docs/system/device-emulation.rst
@@ -93,3 +93,4 @@
devices/virtio-pmem.rst
devices/vhost-user-rng.rst
devices/canokey.rst
+ devices/igb.rst
diff --git a/docs/system/devices/igb.rst b/docs/system/devices/igb.rst
new file mode 100644
index 0000000..70edadd
--- /dev/null
+++ b/docs/system/devices/igb.rst
@@ -0,0 +1,71 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+.. _igb:
+
+igb
+---
+
+igb is a family of Intel's gigabit ethernet controllers. In QEMU, the 82576 in
+particular is emulated. Its datasheet is available at [1]_.
+
+This implementation is expected to be useful for testing SR-IOV networking
+without requiring physical hardware.
+
+Limitations
+===========
+
+This igb implementation was tested with Linux Test Project [2]_ and Windows HLK
+[3]_ during the initial development. The command used when testing with LTP is:
+
+.. code-block:: shell
+
+ network.sh -6mta
+
+Be aware that this implementation lacks many of the functionalities available
+on the actual hardware, and you may experience various failures if you use it
+with an operating system other than Linux or Windows, or if you exercise
+functionality not covered by the tests.
+
+Using igb
+=========
+
+Using igb should be no different from using any other network device. See
+:ref:`pcsys_005fnetwork` for the general networking documentation.
+
+However, you may also need to perform additional steps to activate the SR-IOV
+feature on your guest. For Linux, refer to [4]_.
+
+Developing igb
+==============
+
+igb is the successor of e1000e, and e1000e is the successor of e1000 in turn.
+As these devices are very similar, if you make a change for igb and the same
+change can be applied to e1000e and e1000, please do so.
+
+Please do not forget to run tests before submitting a change. As the tests
+included in QEMU are very minimal, also run some application that is likely to
+be affected by the change to confirm it works in an integrated system.
+
+Testing igb
+===========
+
+A qtest of the basic functionality is available. Run the following in the
+build directory:
+
+.. code-block:: shell
+
+ meson test qtest-x86_64/qos-test
+
+ethtool can test register accesses, interrupts, etc. It is automated as an
+Avocado test and can be run with the following command:
+
+.. code:: shell
+
+ make check-avocado AVOCADO_TESTS=tests/avocado/igb.py
+
+References
+==========
+
+.. [1] https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eb-gigabit-ethernet-controller-datasheet.pdf
+.. [2] https://github.com/linux-test-project/ltp
+.. [3] https://learn.microsoft.com/en-us/windows-hardware/test/hlk/
+.. [4] https://docs.kernel.org/PCI/pci-iov-howto.html
diff --git a/docs/system/i386/xen.rst b/docs/system/i386/xen.rst
index a00523b..f06765e 100644
--- a/docs/system/i386/xen.rst
+++ b/docs/system/i386/xen.rst
@@ -9,6 +9,8 @@
channel (Xen PV interrupt) delivery. This allows guests which expect to be
run under Xen to be hosted in QEMU under Linux/KVM instead.
+Using the split irqchip is mandatory for Xen support.
+
Setup
-----
@@ -17,14 +19,14 @@
.. parsed-literal::
- |qemu_system| --accel kvm,xen-version=0x4000a
+ |qemu_system| --accel kvm,xen-version=0x4000a,kernel-irqchip=split
Additionally, virtual APIC support can be advertised to the guest through the
``xen-vapic`` CPU flag:
.. parsed-literal::
- |qemu_system| --accel kvm,xen-version=0x4000a --cpu host,+xen_vapic
+ |qemu_system| --accel kvm,xen-version=0x4000a,kernel-irqchip=split --cpu host,+xen_vapic
When Xen support is enabled, QEMU changes hypervisor identification (CPUID
0x40000000..0x4000000A) to Xen. The KVM identification and features are not
@@ -33,11 +35,25 @@
The Xen platform device is enabled automatically for a Xen guest. This allows
a guest to unplug all emulated devices, in order to use Xen PV block and network
-drivers instead. Note that until the Xen PV device back ends are enabled to work
-with Xen mode in QEMU, that is unlikely to cause significant joy. Linux guests
-can be dissuaded from this by adding 'xen_emul_unplug=never' on their command
-line, and it can also be noted that AHCI disk controllers are exempt from being
-unplugged, as are passthrough VFIO PCI devices.
+drivers instead. Under Xen, the boot disk is typically available both via IDE
+emulation, and as a PV block device. Guest bootloaders typically use IDE to load
+the guest kernel, which then unplugs the IDE and continues with the Xen PV block
+device.
+
+This configuration can be achieved as follows:
+
+.. parsed-literal::
+
+ |qemu_system| -M pc --accel kvm,xen-version=0x4000a,kernel-irqchip=split \\
+ -drive file=${GUEST_IMAGE},if=none,id=disk,file.locking=off -device xen-disk,drive=disk,vdev=xvda \\
+ -drive file=${GUEST_IMAGE},index=2,media=disk,file.locking=off,if=ide
+
+It is necessary to use the pc machine type, as the q35 machine uses AHCI instead
+of legacy IDE, and AHCI disks are not unplugged through the Xen PV unplug
+mechanism.
+
+VirtIO devices can also be used; Linux guests may need to be dissuaded from
+unplugging them by adding 'xen_emul_unplug=never' on their command line.
Properties
----------
diff --git a/docs/system/target-mips.rst b/docs/system/target-mips.rst
index 138441b..83239fb 100644
--- a/docs/system/target-mips.rst
+++ b/docs/system/target-mips.rst
@@ -8,8 +8,6 @@
``qemu-system-mips64`` and ``qemu-system-mips64el``. Five different
machine types are emulated:
-- A generic ISA PC-like machine \"mips\"
-
- The MIPS Malta prototype board \"malta\"
- An ACER Pica \"pica61\". This machine needs the 64-bit emulator.
@@ -19,18 +17,6 @@
- A MIPS Magnum R4000 machine \"magnum\". This machine needs the
64-bit emulator.
-The generic emulation is supported by Debian 'Etch' and is able to
-install Debian into a virtual disk image. The following devices are
-emulated:
-
-- A range of MIPS CPUs, default is the 24Kf
-
-- PC style serial port
-
-- PC style IDE disk
-
-- NE2000 network card
-
The Malta emulation supports the following devices:
- Core board with MIPS 24Kf CPU and Galileo system controller
diff --git a/dump/dump.c b/dump/dump.c
index 544d5bc..1f1a6ed 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -24,6 +24,7 @@
#include "qapi/qapi-commands-dump.h"
#include "qapi/qapi-events-dump.h"
#include "qapi/qmp/qerror.h"
+#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/misc/vmcoreinfo.h"
#include "migration/blocker.h"
diff --git a/dump/win_dump.c b/dump/win_dump.c
index 0152f73..b7bfaff 100644
--- a/dump/win_dump.c
+++ b/dump/win_dump.c
@@ -11,6 +11,7 @@
#include "qemu/osdep.h"
#include "sysemu/dump.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qapi/qmp/qerror.h"
#include "exec/cpu-defs.h"
#include "hw/core/cpu.h"
diff --git a/ebpf/rss.bpf.skeleton.h b/ebpf/rss.bpf.skeleton.h
index 126683e..18eb2ad 100644
--- a/ebpf/rss.bpf.skeleton.h
+++ b/ebpf/rss.bpf.skeleton.h
@@ -1,9 +1,10 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-/* THIS FILE IS AUTOGENERATED! */
+/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */
#ifndef __RSS_BPF_SKEL_H__
#define __RSS_BPF_SKEL_H__
+#include <errno.h>
#include <stdlib.h>
#include <bpf/libbpf.h>
@@ -12,8 +13,8 @@
struct bpf_object *obj;
struct {
struct bpf_map *tap_rss_map_configurations;
- struct bpf_map *tap_rss_map_indirection_table;
struct bpf_map *tap_rss_map_toeplitz_key;
+ struct bpf_map *tap_rss_map_indirection_table;
} maps;
struct {
struct bpf_program *tun_rss_steering_prog;
@@ -21,6 +22,16 @@
struct {
struct bpf_link *tun_rss_steering_prog;
} links;
+
+#ifdef __cplusplus
+ static inline struct rss_bpf *open(const struct bpf_object_open_opts *opts = nullptr);
+ static inline struct rss_bpf *open_and_load();
+ static inline int load(struct rss_bpf *skel);
+ static inline int attach(struct rss_bpf *skel);
+ static inline void detach(struct rss_bpf *skel);
+ static inline void destroy(struct rss_bpf *skel);
+ static inline const void *elf_bytes(size_t *sz);
+#endif /* __cplusplus */
};
static void
@@ -40,18 +51,26 @@
rss_bpf__open_opts(const struct bpf_object_open_opts *opts)
{
struct rss_bpf *obj;
+ int err;
obj = (struct rss_bpf *)calloc(1, sizeof(*obj));
- if (!obj)
+ if (!obj) {
+ errno = ENOMEM;
return NULL;
- if (rss_bpf__create_skeleton(obj))
- goto err;
- if (bpf_object__open_skeleton(obj->skeleton, opts))
- goto err;
+ }
+
+ err = rss_bpf__create_skeleton(obj);
+ if (err)
+ goto err_out;
+
+ err = bpf_object__open_skeleton(obj->skeleton, opts);
+ if (err)
+ goto err_out;
return obj;
-err:
+err_out:
rss_bpf__destroy(obj);
+ errno = -err;
return NULL;
}
@@ -71,12 +90,15 @@
rss_bpf__open_and_load(void)
{
struct rss_bpf *obj;
+ int err;
obj = rss_bpf__open();
if (!obj)
return NULL;
- if (rss_bpf__load(obj)) {
+ err = rss_bpf__load(obj);
+ if (err) {
rss_bpf__destroy(obj);
+ errno = -err;
return NULL;
}
return obj;
@@ -91,18 +113,22 @@
static inline void
rss_bpf__detach(struct rss_bpf *obj)
{
- return bpf_object__detach_skeleton(obj->skeleton);
+ bpf_object__detach_skeleton(obj->skeleton);
}
+static inline const void *rss_bpf__elf_bytes(size_t *sz);
+
static inline int
rss_bpf__create_skeleton(struct rss_bpf *obj)
{
struct bpf_object_skeleton *s;
+ int err;
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));
- if (!s)
- return -1;
- obj->skeleton = s;
+ if (!s) {
+ err = -ENOMEM;
+ goto err;
+ }
s->sz = sizeof(*s);
s->name = "rss_bpf";
@@ -112,320 +138,855 @@
s->map_cnt = 3;
s->map_skel_sz = sizeof(*s->maps);
s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);
- if (!s->maps)
+ if (!s->maps) {
+ err = -ENOMEM;
goto err;
+ }
s->maps[0].name = "tap_rss_map_configurations";
s->maps[0].map = &obj->maps.tap_rss_map_configurations;
- s->maps[1].name = "tap_rss_map_indirection_table";
- s->maps[1].map = &obj->maps.tap_rss_map_indirection_table;
+ s->maps[1].name = "tap_rss_map_toeplitz_key";
+ s->maps[1].map = &obj->maps.tap_rss_map_toeplitz_key;
- s->maps[2].name = "tap_rss_map_toeplitz_key";
- s->maps[2].map = &obj->maps.tap_rss_map_toeplitz_key;
+ s->maps[2].name = "tap_rss_map_indirection_table";
+ s->maps[2].map = &obj->maps.tap_rss_map_indirection_table;
/* programs */
s->prog_cnt = 1;
s->prog_skel_sz = sizeof(*s->progs);
s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);
- if (!s->progs)
+ if (!s->progs) {
+ err = -ENOMEM;
goto err;
+ }
s->progs[0].name = "tun_rss_steering_prog";
s->progs[0].prog = &obj->progs.tun_rss_steering_prog;
s->progs[0].link = &obj->links.tun_rss_steering_prog;
- s->data_sz = 8088;
- s->data = (void *)"\
-\x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\x18\x1d\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0a\0\
-\x01\0\xbf\x18\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\x4c\xff\0\0\0\0\xbf\xa7\
-\0\0\0\0\0\0\x07\x07\0\0\x4c\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x06\0\0\0\0\0\0\x18\x01\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x07\0\0\0\0\0\0\
-\x18\0\0\0\xff\xff\xff\xff\0\0\0\0\0\0\0\0\x15\x06\x66\x02\0\0\0\0\xbf\x79\0\0\
-\0\0\0\0\x15\x09\x64\x02\0\0\0\0\x71\x61\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\
-\0\x5d\x02\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xc0\xff\0\0\0\0\x7b\x1a\xb8\xff\
-\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x7b\x1a\xa8\xff\0\0\0\0\x7b\x1a\xa0\xff\0\0\0\
-\0\x63\x1a\x98\xff\0\0\0\0\x7b\x1a\x90\xff\0\0\0\0\x7b\x1a\x88\xff\0\0\0\0\x7b\
-\x1a\x80\xff\0\0\0\0\x7b\x1a\x78\xff\0\0\0\0\x7b\x1a\x70\xff\0\0\0\0\x7b\x1a\
-\x68\xff\0\0\0\0\x7b\x1a\x60\xff\0\0\0\0\x7b\x1a\x58\xff\0\0\0\0\x7b\x1a\x50\
-\xff\0\0\0\0\x15\x08\x4c\x02\0\0\0\0\x6b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\
-\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\
-\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\
-\x77\0\0\0\x20\0\0\0\x55\0\x11\0\0\0\0\0\xb7\x02\0\0\x10\0\0\0\x69\xa1\xd0\xff\
-\0\0\0\0\xbf\x13\0\0\0\0\0\0\xdc\x03\0\0\x10\0\0\0\x15\x03\x02\0\0\x81\0\0\x55\
-\x03\x0c\0\xa8\x88\0\0\xb7\x02\0\0\x14\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\
-\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\0\
-\x85\0\0\0\x44\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\
-\0\0\0\x15\0\x01\0\0\0\0\0\x05\0\x2f\x02\0\0\0\0\x15\x01\x2e\x02\0\0\0\0\x7b\
-\x9a\x30\xff\0\0\0\0\x15\x01\x57\0\x86\xdd\0\0\x55\x01\x3b\0\x08\0\0\0\x7b\x7a\
-\x20\xff\0\0\0\0\xb7\x07\0\0\x01\0\0\0\x73\x7a\x50\xff\0\0\0\0\xb7\x01\0\0\0\0\
-\0\0\x63\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\
-\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\xb7\x02\0\
-\0\0\0\0\0\xb7\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\
-\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x1a\x02\0\0\0\0\x69\xa1\xd6\xff\0\0\
-\0\0\x55\x01\x01\0\0\0\0\0\xb7\x07\0\0\0\0\0\0\x61\xa1\xdc\xff\0\0\0\0\x63\x1a\
-\x5c\xff\0\0\0\0\x61\xa1\xe0\xff\0\0\0\0\x63\x1a\x60\xff\0\0\0\0\x73\x7a\x56\
-\xff\0\0\0\0\x71\xa9\xd9\xff\0\0\0\0\x71\xa1\xd0\xff\0\0\0\0\x67\x01\0\0\x02\0\
-\0\0\x57\x01\0\0\x3c\0\0\0\x7b\x1a\x40\xff\0\0\0\0\x79\xa7\x20\xff\0\0\0\0\xbf\
-\x91\0\0\0\0\0\0\x57\x01\0\0\xff\0\0\0\x15\x01\x19\0\0\0\0\0\x71\xa1\x56\xff\0\
-\0\0\0\x55\x01\x17\0\0\0\0\0\x57\x09\0\0\xff\0\0\0\x15\x09\x7a\x01\x11\0\0\0\
-\x55\x09\x14\0\x06\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x53\xff\0\0\0\0\xb7\x01\
-\0\0\0\0\0\0\x63\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\
-\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\
-\xa2\x40\xff\0\0\0\0\xb7\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\
-\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\xf4\x01\0\0\0\0\x69\xa1\
-\xd0\xff\0\0\0\0\x6b\x1a\x58\xff\0\0\0\0\x69\xa1\xd2\xff\0\0\0\0\x6b\x1a\x5a\
-\xff\0\0\0\0\x71\xa1\x50\xff\0\0\0\0\x15\x01\xd4\0\0\0\0\0\x71\x62\x03\0\0\0\0\
-\0\x67\x02\0\0\x08\0\0\0\x71\x61\x02\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\
-\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\x67\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\
-\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\0\0\0\x71\xa2\x53\xff\0\0\0\0\x79\xa0\x30\xff\
-\0\0\0\0\x15\x02\x06\x01\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x02\0\0\0\x15\
-\x02\x03\x01\0\0\0\0\x61\xa1\x5c\xff\0\0\0\0\x63\x1a\xa0\xff\0\0\0\0\x61\xa1\
-\x60\xff\0\0\0\0\x63\x1a\xa4\xff\0\0\0\0\x69\xa1\x58\xff\0\0\0\0\x6b\x1a\xa8\
-\xff\0\0\0\0\x69\xa1\x5a\xff\0\0\0\0\x6b\x1a\xaa\xff\0\0\0\0\x05\0\x65\x01\0\0\
-\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x51\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\
-\xf0\xff\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\0\x7b\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\
-\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\
-\xff\xff\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x40\xff\0\0\0\0\xbf\x81\0\0\0\0\0\0\xb7\
-\x02\0\0\0\0\0\0\xb7\x04\0\0\x28\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\
-\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x10\x01\0\0\0\0\x79\xa1\xe0\
-\xff\0\0\0\0\x63\x1a\x64\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x68\xff\0\0\
-\0\0\x79\xa1\xd8\xff\0\0\0\0\x63\x1a\x5c\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\
-\x1a\x60\xff\0\0\0\0\x79\xa1\xe8\xff\0\0\0\0\x63\x1a\x6c\xff\0\0\0\0\x77\x01\0\
-\0\x20\0\0\0\x63\x1a\x70\xff\0\0\0\0\x79\xa1\xf0\xff\0\0\0\0\x63\x1a\x74\xff\0\
-\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x78\xff\0\0\0\0\x71\xa9\xd6\xff\0\0\0\0\
-\x25\x09\xff\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\0\0\x18\x02\0\0\
-\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\
-\xf8\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x6b\x1a\xfe\xff\0\0\0\0\xb7\x01\0\0\x28\0\0\
-\0\x7b\x1a\x40\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\x8c\xff\xff\xff\x7b\
-\x1a\x18\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\x7c\xff\xff\xff\x7b\x1a\
-\x10\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\x28\xff\0\0\0\0\x7b\x7a\x20\xff\0\
-\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xfe\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\
-\xa2\x40\xff\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\
-\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\0\x01\0\0\0\0\0\x05\0\x90\
-\x01\0\0\0\0\xbf\x91\0\0\0\0\0\0\x15\x01\x23\0\x3c\0\0\0\x15\x01\x59\0\x2c\0\0\
-\0\x55\x01\x5a\0\x2b\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xf8\xff\0\0\0\0\xbf\xa3\
-\0\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\xa2\x40\xff\0\
-\0\0\0\xb7\x04\0\0\x04\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\
-\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x03\x01\0\0\0\
-\0\x71\xa1\xfa\xff\0\0\0\0\x55\x01\x4b\0\x02\0\0\0\x71\xa1\xf9\xff\0\0\0\0\x55\
-\x01\x49\0\x02\0\0\0\x71\xa1\xfb\xff\0\0\0\0\x55\x01\x47\0\x01\0\0\0\x79\xa2\
-\x40\xff\0\0\0\0\x07\x02\0\0\x08\0\0\0\xbf\x81\0\0\0\0\0\0\x79\xa3\x18\xff\0\0\
-\0\0\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\
-\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\xf2\0\0\0\0\0\
-\xb7\x01\0\0\x01\0\0\0\x73\x1a\x55\xff\0\0\0\0\x05\0\x39\0\0\0\0\0\xb7\x01\0\0\
-\0\0\0\0\x6b\x1a\xf8\xff\0\0\0\0\xb7\x09\0\0\x02\0\0\0\xb7\x07\0\0\x1e\0\0\0\
-\x05\0\x0e\0\0\0\0\0\x79\xa2\x38\xff\0\0\0\0\x0f\x29\0\0\0\0\0\0\xbf\x92\0\0\0\
-\0\0\0\x07\x02\0\0\x01\0\0\0\x71\xa3\xff\xff\0\0\0\0\x67\x03\0\0\x03\0\0\0\x2d\
-\x23\x02\0\0\0\0\0\x79\xa7\x20\xff\0\0\0\0\x05\0\x2b\0\0\0\0\0\x07\x07\0\0\xff\
-\xff\xff\xff\xbf\x72\0\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x77\x02\0\0\x20\0\0\0\
-\x15\x02\xf9\xff\0\0\0\0\x7b\x9a\x38\xff\0\0\0\0\x79\xa1\x40\xff\0\0\0\0\x0f\
-\x19\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\xbf\x81\0\0\0\
-\0\0\0\xbf\x92\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\
-\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\
-\x55\x01\x94\0\0\0\0\0\x71\xa2\xf8\xff\0\0\0\0\x55\x02\x0f\0\xc9\0\0\0\x07\x09\
-\0\0\x02\0\0\0\xbf\x81\0\0\0\0\0\0\xbf\x92\0\0\0\0\0\0\x79\xa3\x10\xff\0\0\0\0\
-\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\
-\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x87\0\0\0\0\0\xb7\
-\x01\0\0\x01\0\0\0\x73\x1a\x54\xff\0\0\0\0\x79\xa7\x20\xff\0\0\0\0\x05\0\x07\0\
-\0\0\0\0\xb7\x09\0\0\x01\0\0\0\x15\x02\xd1\xff\0\0\0\0\x71\xa9\xf9\xff\0\0\0\0\
-\x07\x09\0\0\x02\0\0\0\x05\0\xce\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x56\
-\xff\0\0\0\0\x71\xa1\xff\xff\0\0\0\0\x67\x01\0\0\x03\0\0\0\x79\xa2\x40\xff\0\0\
-\0\0\x0f\x12\0\0\0\0\0\0\x07\x02\0\0\x08\0\0\0\x7b\x2a\x40\xff\0\0\0\0\x71\xa9\
-\xfe\xff\0\0\0\0\x25\x09\x0e\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\
-\0\0\x18\x02\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\0\0\0\x55\x01\x01\
-\0\0\0\0\0\x05\0\x07\0\0\0\0\0\x79\xa1\x28\xff\0\0\0\0\x07\x01\0\0\x01\0\0\0\
-\x7b\x1a\x28\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\
-\x82\xff\x0b\0\0\0\x05\0\x10\xff\0\0\0\0\x15\x09\xf8\xff\x87\0\0\0\x05\0\xfd\
-\xff\0\0\0\0\x71\xa1\x51\xff\0\0\0\0\x79\xa0\x30\xff\0\0\0\0\x15\x01\x17\x01\0\
-\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\x02\0\0\0\0\0\x4f\
-\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\x67\x01\0\0\x08\0\
-\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\0\0\0\x71\xa2\x53\
-\xff\0\0\0\0\x15\x02\x3d\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x10\0\0\0\
-\x15\x02\x3a\0\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x5c\xff\xff\xff\x71\xa4\
-\x54\xff\0\0\0\0\xbf\x23\0\0\0\0\0\0\x15\x04\x02\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\
-\x07\x03\0\0\x7c\xff\xff\xff\x67\x01\0\0\x38\0\0\0\xc7\x01\0\0\x38\0\0\0\x65\
-\x01\x01\0\xff\xff\xff\xff\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\
-\x6c\xff\xff\xff\x71\xa5\x55\xff\0\0\0\0\xbf\x34\0\0\0\0\0\0\x15\x05\x02\0\0\0\
-\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x8c\xff\xff\xff\x65\x01\x01\0\xff\xff\xff\
-\xff\xbf\x43\0\0\0\0\0\0\x61\x21\x04\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\x24\0\
-\0\0\0\0\0\x4f\x41\0\0\0\0\0\0\x7b\x1a\xa0\xff\0\0\0\0\x61\x21\x08\0\0\0\0\0\
-\x61\x22\x0c\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\xa8\
-\xff\0\0\0\0\x61\x31\0\0\0\0\0\0\x61\x32\x04\0\0\0\0\0\x61\x34\x08\0\0\0\0\0\
-\x61\x33\x0c\0\0\0\0\0\x69\xa5\x5a\xff\0\0\0\0\x6b\x5a\xc2\xff\0\0\0\0\x69\xa5\
-\x58\xff\0\0\0\0\x6b\x5a\xc0\xff\0\0\0\0\x67\x03\0\0\x20\0\0\0\x4f\x43\0\0\0\0\
-\0\0\x7b\x3a\xb8\xff\0\0\0\0\x67\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\
-\xb0\xff\0\0\0\0\x05\0\x6b\0\0\0\0\0\x71\xa2\x52\xff\0\0\0\0\x15\x02\x04\0\0\0\
-\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x04\0\0\0\x15\x02\x01\0\0\0\0\0\x05\0\xf7\
-\xfe\0\0\0\0\x57\x01\0\0\x01\0\0\0\x15\x01\xd3\0\0\0\0\0\x61\xa1\x5c\xff\0\0\0\
-\0\x63\x1a\xa0\xff\0\0\0\0\x61\xa1\x60\xff\0\0\0\0\x63\x1a\xa4\xff\0\0\0\0\x05\
-\0\x5e\0\0\0\0\0\x71\xa2\x52\xff\0\0\0\0\x15\x02\x1e\0\0\0\0\0\xbf\x12\0\0\0\0\
-\0\0\x57\x02\0\0\x20\0\0\0\x15\x02\x1b\0\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\
-\0\x5c\xff\xff\xff\x71\xa4\x54\xff\0\0\0\0\xbf\x23\0\0\0\0\0\0\x15\x04\x02\0\0\
-\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x7c\xff\xff\xff\x57\x01\0\0\0\x01\0\0\
-\x15\x01\x01\0\0\0\0\0\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x6c\
-\xff\xff\xff\x71\xa5\x55\xff\0\0\0\0\xbf\x34\0\0\0\0\0\0\x15\x05\x02\0\0\0\0\0\
-\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x8c\xff\xff\xff\x15\x01\xc3\xff\0\0\0\0\x05\0\
-\xc1\xff\0\0\0\0\xb7\x09\0\0\x3c\0\0\0\x79\xa7\x20\xff\0\0\0\0\x67\0\0\0\x20\0\
-\0\0\x77\0\0\0\x20\0\0\0\x15\0\xa5\xfe\0\0\0\0\x05\0\xb0\0\0\0\0\0\x15\x09\x07\
-\xff\x87\0\0\0\x05\0\xa2\xfe\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x08\0\0\0\
-\x15\x02\xab\0\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x5c\xff\xff\xff\x71\xa4\
-\x54\xff\0\0\0\0\xbf\x23\0\0\0\0\0\0\x15\x04\x02\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\
-\x07\x03\0\0\x7c\xff\xff\xff\x57\x01\0\0\x40\0\0\0\x15\x01\x01\0\0\0\0\0\xbf\
-\x32\0\0\0\0\0\0\x61\x23\x04\0\0\0\0\0\x67\x03\0\0\x20\0\0\0\x61\x24\0\0\0\0\0\
-\0\x4f\x43\0\0\0\0\0\0\x7b\x3a\xa0\xff\0\0\0\0\x61\x23\x08\0\0\0\0\0\x61\x22\
-\x0c\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x4f\x32\0\0\0\0\0\0\x7b\x2a\xa8\xff\0\0\0\
-\0\x15\x01\x1c\0\0\0\0\0\x71\xa1\x55\xff\0\0\0\0\x15\x01\x1a\0\0\0\0\0\x61\xa1\
-\x98\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x94\xff\0\0\0\0\x4f\x21\0\0\0\0\
-\0\0\x7b\x1a\xb8\xff\0\0\0\0\x61\xa1\x90\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\
-\xa2\x8c\xff\0\0\0\0\x05\0\x19\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x52\xff\
-\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\
-\x03\0\0\xd0\xff\xff\xff\xbf\x81\0\0\0\0\0\0\x79\xa2\x40\xff\0\0\0\0\xb7\x04\0\
-\0\x08\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\
-\0\0\0\x20\0\0\0\x55\0\x7d\0\0\0\0\0\x05\0\x88\xfe\0\0\0\0\xb7\x09\0\0\x2b\0\0\
-\0\x05\0\xc6\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\
-\x74\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\x61\xa1\x70\xff\0\
-\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x6c\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\
-\x1a\xb0\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x07\x07\0\0\x04\0\0\0\x61\x03\0\0\0\0\
-\0\0\xb7\x05\0\0\0\0\0\0\x05\0\x4e\0\0\0\0\0\xaf\x52\0\0\0\0\0\0\xbf\x75\0\0\0\
-\0\0\0\x0f\x15\0\0\0\0\0\0\x71\x55\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\
-\0\0\0\0\0\x77\0\0\0\x07\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\
-\0\x39\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\
-\x50\0\0\0\0\0\0\x77\0\0\0\x06\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\
-\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3a\0\0\0\xc7\0\0\0\x3f\0\0\
-\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\
-\0\0\0\x77\0\0\0\x05\0\0\0\x57\0\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\
-\0\0\0\0\x67\0\0\0\x3b\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\
-\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x04\0\0\0\x57\0\
-\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3c\0\0\0\xc7\
-\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\
-\x77\0\0\0\x03\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\
-\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3d\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\
-\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x02\0\0\0\x57\0\0\0\
-\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\
-\0\0\x3e\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x02\0\0\0\0\0\0\xbf\
-\x50\0\0\0\0\0\0\x77\0\0\0\x01\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\
-\x4f\x03\0\0\0\0\0\0\x57\x04\0\0\x01\0\0\0\x87\x04\0\0\0\0\0\0\x5f\x34\0\0\0\0\
-\0\0\xaf\x42\0\0\0\0\0\0\x57\x05\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x53\0\
-\0\0\0\0\0\x07\x01\0\0\x01\0\0\0\xbf\x25\0\0\0\0\0\0\x15\x01\x0b\0\x24\0\0\0\
-\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\xa0\xff\xff\xff\x0f\x12\0\0\0\0\0\0\x71\x24\0\
-\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x38\0\0\0\xc7\0\0\0\x38\0\0\0\xb7\x02\
-\0\0\0\0\0\0\x65\0\xa9\xff\xff\xff\xff\xff\xbf\x32\0\0\0\0\0\0\x05\0\xa7\xff\0\
-\0\0\0\xbf\x21\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x15\x01\
-\x0e\0\0\0\0\0\x71\x63\x06\0\0\0\0\0\x71\x64\x07\0\0\0\0\0\x67\x04\0\0\x08\0\0\
-\0\x4f\x34\0\0\0\0\0\0\x3f\x41\0\0\0\0\0\0\x2f\x41\0\0\0\0\0\0\x1f\x12\0\0\0\0\
-\0\0\x63\x2a\x50\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x50\xff\xff\xff\
-\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\x55\0\x05\0\0\0\0\0\
-\x71\x61\x08\0\0\0\0\0\x71\x60\x09\0\0\0\0\0\x67\0\0\0\x08\0\0\0\x4f\x10\0\0\0\
-\0\0\0\x95\0\0\0\0\0\0\0\x69\0\0\0\0\0\0\0\x05\0\xfd\xff\0\0\0\0\x02\0\0\0\x04\
-\0\0\0\x0a\0\0\0\x01\0\0\0\0\0\0\0\x02\0\0\0\x04\0\0\0\x28\0\0\0\x01\0\0\0\0\0\
-\0\0\x02\0\0\0\x04\0\0\0\x02\0\0\0\x80\0\0\0\0\0\0\0\x47\x50\x4c\x20\x76\x32\0\
-\0\0\0\0\0\x10\0\0\0\0\0\0\0\x01\x7a\x52\0\x08\x7c\x0b\x01\x0c\0\0\0\x18\0\0\0\
-\x18\0\0\0\0\0\0\0\0\0\0\0\xd8\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\xa0\0\0\0\x04\0\xf1\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\x60\x02\0\0\0\0\x03\0\x20\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x3f\x02\0\0\0\0\
-\x03\0\xd0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xed\x01\0\0\0\0\x03\0\x10\x10\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\xd4\x01\0\0\0\0\x03\0\x20\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\xa3\x01\0\0\0\0\x03\0\xb8\x12\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x63\x01\0\0\0\0\
-\x03\0\x48\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x2a\x01\0\0\0\0\x03\0\x10\x13\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\xe1\0\0\0\0\0\x03\0\xa0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\x2e\x02\0\0\0\0\x03\0\x28\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x02\0\0\0\0\x03\
-\0\xc0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x36\x02\0\0\0\0\x03\0\xc8\x13\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\x22\x01\0\0\0\0\x03\0\xe8\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\x02\x01\0\0\0\0\x03\0\x40\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd9\0\0\0\0\0\x03\0\
-\xf8\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x26\x02\0\0\0\0\x03\0\x20\x0e\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\xcc\x01\0\0\0\0\x03\0\x60\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x9b\
-\x01\0\0\0\0\x03\0\xc8\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5b\x01\0\0\0\0\x03\0\
-\x20\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x7c\x01\0\0\0\0\x03\0\x48\x08\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\x53\x01\0\0\0\0\x03\0\xb8\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\
-\x01\0\0\0\0\x03\0\xe0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x84\x01\0\0\0\0\x03\0\
-\xb8\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1e\x02\0\0\0\0\x03\0\xd8\x09\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\xc4\x01\0\0\0\0\x03\0\x70\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x93\
-\x01\0\0\0\0\x03\0\xa8\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x74\x01\0\0\0\0\x03\0\
-\xf0\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4b\x01\0\0\0\0\x03\0\0\x0a\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\x12\x01\0\0\0\0\x03\0\x10\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfa\0\
-\0\0\0\0\x03\0\xc0\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x58\x02\0\0\0\0\x03\0\x88\
-\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x16\x02\0\0\0\0\x03\0\xb8\x0a\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\xe5\x01\0\0\0\0\x03\0\xc0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xbc\x01\
-\0\0\0\0\x03\0\0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x8b\x01\0\0\0\0\x03\0\x18\x0e\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd1\0\0\0\0\0\x03\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\x50\x02\0\0\0\0\x03\0\x20\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0e\x02\0\0\0\0\
-\x03\0\x48\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x6c\x01\0\0\0\0\x03\0\xb0\x04\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\x43\x01\0\0\0\0\x03\0\xc8\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\xc9\0\0\0\0\0\x03\0\xf8\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x06\x02\0\0\0\0\x03\
-\0\xd0\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x3b\x01\0\0\0\0\x03\0\x98\x0b\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\xf2\0\0\0\0\0\x03\0\xb8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x48\
-\x02\0\0\0\0\x03\0\xf0\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfe\x01\0\0\0\0\x03\0\
-\xf8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xdd\x01\0\0\0\0\x03\0\0\x0c\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\xb4\x01\0\0\0\0\x03\0\x30\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0a\
-\x01\0\0\0\0\x03\0\x90\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc1\0\0\0\0\0\x03\0\xa8\
-\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xba\0\0\0\0\0\x03\0\xd0\x01\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\xf6\x01\0\0\0\0\x03\0\xe0\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xac\x01\0\
-\0\0\0\x03\0\x30\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x33\x01\0\0\0\0\x03\0\x80\x0e\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xea\0\0\0\0\0\x03\0\x98\x0e\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\x03\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x6b\0\0\0\x11\0\x06\
-\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x25\0\0\0\x11\0\x05\0\0\0\0\0\0\0\0\0\x14\
-\0\0\0\0\0\0\0\x82\0\0\0\x11\0\x05\0\x28\0\0\0\0\0\0\0\x14\0\0\0\0\0\0\0\x01\0\
-\0\0\x11\0\x05\0\x14\0\0\0\0\0\0\0\x14\0\0\0\0\0\0\0\x40\0\0\0\x12\0\x03\0\0\0\
-\0\0\0\0\0\0\xd8\x13\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\x01\0\0\0\x3a\0\0\0\x50\0\0\
-\0\0\0\0\0\x01\0\0\0\x3c\0\0\0\x80\x13\0\0\0\0\0\0\x01\0\0\0\x3b\0\0\0\x1c\0\0\
-\0\0\0\0\0\x01\0\0\0\x38\0\0\0\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\
-\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\0\x2e\x74\x65\x78\x74\0\
-\x6d\x61\x70\x73\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\
-\x66\x69\x67\x75\x72\x61\x74\x69\x6f\x6e\x73\0\x74\x75\x6e\x5f\x72\x73\x73\x5f\
-\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x2e\x72\x65\x6c\x74\x75\
-\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x5f\x6c\x69\x63\x65\
-\x6e\x73\x65\0\x2e\x72\x65\x6c\x2e\x65\x68\x5f\x66\x72\x61\x6d\x65\0\x74\x61\
-\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\
-\x6f\x6e\x5f\x74\x61\x62\x6c\x65\0\x72\x73\x73\x2e\x62\x70\x66\x2e\x63\0\x2e\
-\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\x6d\x74\x61\x62\0\x4c\x42\x42\x30\x5f\
-\x39\0\x4c\x42\x42\x30\x5f\x38\x39\0\x4c\x42\x42\x30\x5f\x36\x39\0\x4c\x42\x42\
-\x30\x5f\x35\x39\0\x4c\x42\x42\x30\x5f\x31\x39\0\x4c\x42\x42\x30\x5f\x31\x30\
-\x39\0\x4c\x42\x42\x30\x5f\x39\x38\0\x4c\x42\x42\x30\x5f\x37\x38\0\x4c\x42\x42\
-\x30\x5f\x34\x38\0\x4c\x42\x42\x30\x5f\x31\x38\0\x4c\x42\x42\x30\x5f\x38\x37\0\
-\x4c\x42\x42\x30\x5f\x34\x37\0\x4c\x42\x42\x30\x5f\x33\x37\0\x4c\x42\x42\x30\
-\x5f\x31\x37\0\x4c\x42\x42\x30\x5f\x31\x30\x37\0\x4c\x42\x42\x30\x5f\x39\x36\0\
-\x4c\x42\x42\x30\x5f\x37\x36\0\x4c\x42\x42\x30\x5f\x36\x36\0\x4c\x42\x42\x30\
-\x5f\x34\x36\0\x4c\x42\x42\x30\x5f\x33\x36\0\x4c\x42\x42\x30\x5f\x32\x36\0\x4c\
-\x42\x42\x30\x5f\x31\x30\x36\0\x4c\x42\x42\x30\x5f\x36\x35\0\x4c\x42\x42\x30\
-\x5f\x34\x35\0\x4c\x42\x42\x30\x5f\x33\x35\0\x4c\x42\x42\x30\x5f\x34\0\x4c\x42\
-\x42\x30\x5f\x35\x34\0\x4c\x42\x42\x30\x5f\x34\x34\0\x4c\x42\x42\x30\x5f\x32\
-\x34\0\x4c\x42\x42\x30\x5f\x31\x30\x34\0\x4c\x42\x42\x30\x5f\x39\x33\0\x4c\x42\
-\x42\x30\x5f\x38\x33\0\x4c\x42\x42\x30\x5f\x35\x33\0\x4c\x42\x42\x30\x5f\x34\
-\x33\0\x4c\x42\x42\x30\x5f\x32\x33\0\x4c\x42\x42\x30\x5f\x31\x30\x33\0\x4c\x42\
-\x42\x30\x5f\x38\x32\0\x4c\x42\x42\x30\x5f\x35\x32\0\x4c\x42\x42\x30\x5f\x31\
-\x30\x32\0\x4c\x42\x42\x30\x5f\x39\x31\0\x4c\x42\x42\x30\x5f\x38\x31\0\x4c\x42\
-\x42\x30\x5f\x37\x31\0\x4c\x42\x42\x30\x5f\x36\x31\0\x4c\x42\x42\x30\x5f\x35\
-\x31\0\x4c\x42\x42\x30\x5f\x34\x31\0\x4c\x42\x42\x30\x5f\x32\x31\0\x4c\x42\x42\
-\x30\x5f\x31\x31\0\x4c\x42\x42\x30\x5f\x31\x31\x31\0\x4c\x42\x42\x30\x5f\x31\
-\x30\x31\0\x4c\x42\x42\x30\x5f\x38\x30\0\x4c\x42\x42\x30\x5f\x36\x30\0\x4c\x42\
-\x42\x30\x5f\x35\x30\0\x4c\x42\x42\x30\x5f\x31\x30\0\x4c\x42\x42\x30\x5f\x31\
-\x31\x30\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xaa\
-\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa0\x1a\0\0\0\0\0\0\x71\x02\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\0\0\0\x01\0\0\
-\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5a\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\xd8\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x56\0\0\0\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\x60\x1a\0\0\0\0\0\0\x30\0\0\0\0\0\0\0\x09\0\0\0\x03\0\0\0\x08\0\0\0\0\0\0\0\
-\x10\0\0\0\0\0\0\0\x20\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x18\
-\x14\0\0\0\0\0\0\x3c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\
-\0\0\0\x6c\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x54\x14\0\0\0\0\0\
-\0\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x78\0\0\
-\0\x01\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x60\x14\0\0\0\0\0\0\x30\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x74\0\0\0\x09\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x90\x1a\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x09\0\0\0\
-\x07\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\xb2\0\0\0\x02\0\0\0\0\0\0\0\0\0\
-\0\0\0\0\0\0\0\0\0\0\x90\x14\0\0\0\0\0\0\xd0\x05\0\0\0\0\0\0\x01\0\0\0\x39\0\0\
-\0\x08\0\0\0\0\0\0\0\x18\0\0\0\0\0\0\0";
+ s->data = (void *)rss_bpf__elf_bytes(&s->data_sz);
+ obj->skeleton = s;
return 0;
err:
bpf_object__destroy_skeleton(s);
- return -1;
+ return err;
+}
+
+static inline const void *rss_bpf__elf_bytes(size_t *sz)
+{
+ *sz = 20440;
+ return (const void *)"\
+\x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\x98\x4c\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0d\0\
+\x01\0\xbf\x19\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\x54\xff\0\0\0\0\xbf\xa7\
+\0\0\0\0\0\0\x07\x07\0\0\x54\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x06\0\0\0\0\0\0\x18\x01\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\xbf\x08\0\0\0\0\0\0\
+\x18\0\0\0\xff\xff\xff\xff\0\0\0\0\0\0\0\0\x15\x06\x67\x02\0\0\0\0\xbf\x87\0\0\
+\0\0\0\0\x15\x07\x65\x02\0\0\0\0\x71\x61\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\
+\0\x5e\x02\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xc8\xff\0\0\0\0\x7b\x1a\xc0\xff\
+\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x7b\x1a\xa8\xff\0\0\0\
+\0\x63\x1a\xa0\xff\0\0\0\0\x7b\x1a\x98\xff\0\0\0\0\x7b\x1a\x90\xff\0\0\0\0\x7b\
+\x1a\x88\xff\0\0\0\0\x7b\x1a\x80\xff\0\0\0\0\x7b\x1a\x78\xff\0\0\0\0\x7b\x1a\
+\x70\xff\0\0\0\0\x7b\x1a\x68\xff\0\0\0\0\x7b\x1a\x60\xff\0\0\0\0\x7b\x1a\x58\
+\xff\0\0\0\0\x15\x09\x4d\x02\0\0\0\0\x6b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\
+\0\x07\x03\0\0\xd0\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\
+\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\
+\x77\0\0\0\x20\0\0\0\x55\0\x42\x02\0\0\0\0\xb7\x02\0\0\x10\0\0\0\x69\xa1\xd0\
+\xff\0\0\0\0\xbf\x13\0\0\0\0\0\0\xdc\x03\0\0\x10\0\0\0\x15\x03\x02\0\0\x81\0\0\
+\x55\x03\x0b\0\xa8\x88\0\0\xb7\x02\0\0\x14\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\
+\0\xd0\xff\xff\xff\xbf\x91\0\0\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\0\0\0\
+\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\x32\x02\0\
+\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x15\x01\x30\x02\0\0\0\0\x7b\x7a\x38\xff\0\0\0\0\
+\x7b\x9a\x40\xff\0\0\0\0\x15\x01\x55\0\x86\xdd\0\0\x55\x01\x39\0\x08\0\0\0\xb7\
+\x07\0\0\x01\0\0\0\x73\x7a\x58\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xe0\xff\
+\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\
+\x07\x03\0\0\xd0\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\xb7\x02\0\0\0\0\0\0\xb7\
+\x04\0\0\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\
+\0\x77\0\0\0\x20\0\0\0\x55\0\x1c\x02\0\0\0\0\x69\xa1\xd6\xff\0\0\0\0\x55\x01\
+\x01\0\0\0\0\0\xb7\x07\0\0\0\0\0\0\x61\xa1\xdc\xff\0\0\0\0\x63\x1a\x64\xff\0\0\
+\0\0\x61\xa1\xe0\xff\0\0\0\0\x63\x1a\x68\xff\0\0\0\0\x71\xa9\xd9\xff\0\0\0\0\
+\x73\x7a\x5e\xff\0\0\0\0\x71\xa1\xd0\xff\0\0\0\0\x67\x01\0\0\x02\0\0\0\x57\x01\
+\0\0\x3c\0\0\0\x7b\x1a\x48\xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\x57\x01\0\0\xff\0\0\
+\0\x15\x01\x19\0\0\0\0\0\x57\x07\0\0\xff\0\0\0\x55\x07\x17\0\0\0\0\0\x57\x09\0\
+\0\xff\0\0\0\x15\x09\x5a\x01\x11\0\0\0\x55\x09\x14\0\x06\0\0\0\xb7\x01\0\0\x01\
+\0\0\0\x73\x1a\x5b\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xe0\xff\0\0\0\0\x7b\
+\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\
+\xd0\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\
+\x14\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\
+\0\0\x20\0\0\0\x55\0\xf7\x01\0\0\0\0\x69\xa1\xd0\xff\0\0\0\0\x6b\x1a\x60\xff\0\
+\0\0\0\x69\xa1\xd2\xff\0\0\0\0\x6b\x1a\x62\xff\0\0\0\0\x71\xa1\x58\xff\0\0\0\0\
+\x15\x01\xdb\0\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\x02\
+\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\x67\
+\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\0\0\
+\0\x71\xa2\x5b\xff\0\0\0\0\x79\xa0\x38\xff\0\0\0\0\x15\x02\x0c\x01\0\0\0\0\xbf\
+\x12\0\0\0\0\0\0\x57\x02\0\0\x02\0\0\0\x15\x02\x09\x01\0\0\0\0\x61\xa1\x64\xff\
+\0\0\0\0\x63\x1a\xa8\xff\0\0\0\0\x61\xa1\x68\xff\0\0\0\0\x63\x1a\xac\xff\0\0\0\
+\0\x69\xa1\x60\xff\0\0\0\0\x6b\x1a\xb0\xff\0\0\0\0\x69\xa1\x62\xff\0\0\0\0\x6b\
+\x1a\xb2\xff\0\0\0\0\x05\0\x6b\x01\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x59\
+\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\x1a\xf0\xff\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\
+\0\x7b\x1a\xe0\xff\0\0\0\0\x7b\x1a\xd8\xff\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xbf\
+\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x48\
+\xff\0\0\0\0\xbf\x91\0\0\0\0\0\0\xb7\x02\0\0\0\0\0\0\xb7\x04\0\0\x28\0\0\0\xb7\
+\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\
+\x55\0\xfe\0\0\0\0\0\x79\xa1\xe0\xff\0\0\0\0\x63\x1a\x6c\xff\0\0\0\0\x77\x01\0\
+\0\x20\0\0\0\x63\x1a\x70\xff\0\0\0\0\x79\xa1\xd8\xff\0\0\0\0\x63\x1a\x64\xff\0\
+\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x68\xff\0\0\0\0\x79\xa1\xe8\xff\0\0\0\0\
+\x63\x1a\x74\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x78\xff\0\0\0\0\x79\xa1\
+\xf0\xff\0\0\0\0\x63\x1a\x7c\xff\0\0\0\0\x77\x01\0\0\x20\0\0\0\x63\x1a\x80\xff\
+\0\0\0\0\x71\xa9\xd6\xff\0\0\0\0\x25\x09\x13\x01\x3c\0\0\0\xb7\x01\0\0\x01\0\0\
+\0\x6f\x91\0\0\0\0\0\0\x18\x02\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x5f\x21\0\0\0\
+\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x0c\x01\0\0\0\0\xb7\x01\0\0\0\0\0\0\x6b\x1a\
+\xfe\xff\0\0\0\0\xb7\x01\0\0\x28\0\0\0\x7b\x1a\x48\xff\0\0\0\0\xbf\xa1\0\0\0\0\
+\0\0\x07\x01\0\0\x94\xff\xff\xff\x7b\x1a\x20\xff\0\0\0\0\xbf\xa1\0\0\0\0\0\0\
+\x07\x01\0\0\x84\xff\xff\xff\x7b\x1a\x18\xff\0\0\0\0\x18\x07\0\0\x01\0\0\0\0\0\
+\0\0\0\x18\0\x1c\xb7\x02\0\0\0\0\0\0\x7b\x8a\x28\xff\0\0\0\0\x7b\x2a\x30\xff\0\
+\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xfe\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\
+\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\
+\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\0\x01\0\0\0\0\0\x05\0\
+\x91\x01\0\0\0\0\xbf\x91\0\0\0\0\0\0\x15\x01\x26\0\x3c\0\0\0\x15\x01\x5f\0\x2c\
+\0\0\0\x55\x01\x60\0\x2b\0\0\0\xb7\x01\0\0\0\0\0\0\x63\x1a\xf8\xff\0\0\0\0\xbf\
+\xa3\0\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\x79\xa7\x40\xff\0\0\0\0\xbf\x71\0\
+\0\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x04\0\0\0\xb7\x05\0\0\x01\0\0\0\
+\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\
+\0\0\0\x55\x01\x06\x01\0\0\0\0\x71\xa1\xfa\xff\0\0\0\0\x55\x01\x11\0\x02\0\0\0\
+\x71\xa1\xf9\xff\0\0\0\0\x55\x01\x0f\0\x02\0\0\0\x71\xa1\xfb\xff\0\0\0\0\x55\
+\x01\x0d\0\x01\0\0\0\x79\xa2\x48\xff\0\0\0\0\x07\x02\0\0\x08\0\0\0\xbf\x71\0\0\
+\0\0\0\0\x79\xa3\x20\xff\0\0\0\0\xb7\x04\0\0\x10\0\0\0\xb7\x05\0\0\x01\0\0\0\
+\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\
+\0\0\0\x55\x01\xf5\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5d\xff\0\0\0\0\x18\
+\x07\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x05\0\x3c\0\0\0\0\0\xb7\x08\0\0\x02\0\0\
+\0\xb7\x07\0\0\0\0\0\0\x6b\x7a\xf8\xff\0\0\0\0\x05\0\x13\0\0\0\0\0\x0f\x81\0\0\
+\0\0\0\0\xbf\x12\0\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\x71\xa3\xff\xff\0\0\0\0\x67\
+\x03\0\0\x03\0\0\0\x3d\x32\x09\0\0\0\0\0\xbf\x72\0\0\0\0\0\0\x07\x02\0\0\x01\0\
+\0\0\x67\x07\0\0\x20\0\0\0\xbf\x73\0\0\0\0\0\0\x77\x03\0\0\x20\0\0\0\xbf\x27\0\
+\0\0\0\0\0\xbf\x18\0\0\0\0\0\0\xb7\x01\0\0\x1d\0\0\0\x2d\x31\x04\0\0\0\0\0\x79\
+\xa8\x28\xff\0\0\0\0\x18\x07\0\0\x01\0\0\0\0\0\0\0\0\x18\0\x1c\x05\0\x25\0\0\0\
+\0\0\xbf\x89\0\0\0\0\0\0\x79\xa1\x48\xff\0\0\0\0\x0f\x19\0\0\0\0\0\0\xbf\xa3\0\
+\0\0\0\0\0\x07\x03\0\0\xf8\xff\xff\xff\x79\xa1\x40\xff\0\0\0\0\xbf\x92\0\0\0\0\
+\0\0\xb7\x04\0\0\x02\0\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\
+\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x79\0\0\0\0\0\
+\x71\xa2\xf8\xff\0\0\0\0\x55\x02\x0e\0\xc9\0\0\0\x07\x09\0\0\x02\0\0\0\x79\xa1\
+\x40\xff\0\0\0\0\xbf\x92\0\0\0\0\0\0\x79\xa3\x18\xff\0\0\0\0\xb7\x04\0\0\x10\0\
+\0\0\xb7\x05\0\0\x01\0\0\0\x85\0\0\0\x44\0\0\0\xbf\x01\0\0\0\0\0\0\x67\x01\0\0\
+\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x6c\0\0\0\0\0\xb7\x01\0\0\x01\0\0\0\
+\x73\x1a\x5c\xff\0\0\0\0\x05\0\xde\xff\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x15\x02\
+\xcd\xff\0\0\0\0\x71\xa1\xf9\xff\0\0\0\0\x07\x01\0\0\x02\0\0\0\x05\0\xca\xff\0\
+\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5e\xff\0\0\0\0\x71\xa1\xff\xff\0\0\0\0\
+\x67\x01\0\0\x03\0\0\0\x79\xa2\x48\xff\0\0\0\0\x0f\x12\0\0\0\0\0\0\x07\x02\0\0\
+\x08\0\0\0\x7b\x2a\x48\xff\0\0\0\0\x71\xa9\xfe\xff\0\0\0\0\x79\xa2\x30\xff\0\0\
+\0\0\x25\x09\x0c\0\x3c\0\0\0\xb7\x01\0\0\x01\0\0\0\x6f\x91\0\0\0\0\0\0\x5f\x71\
+\0\0\0\0\0\0\x55\x01\x01\0\0\0\0\0\x05\0\x07\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\
+\xbf\x21\0\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x77\x01\0\0\x20\0\0\0\x55\x01\x7d\
+\xff\x0b\0\0\0\x71\xa7\x5e\xff\0\0\0\0\x05\0\x09\xff\0\0\0\0\x15\x09\xf8\xff\
+\x87\0\0\0\x05\0\xfc\xff\0\0\0\0\x71\xa1\x59\xff\0\0\0\0\x79\xa0\x38\xff\0\0\0\
+\0\x15\x01\x13\x01\0\0\0\0\x71\x62\x03\0\0\0\0\0\x67\x02\0\0\x08\0\0\0\x71\x61\
+\x02\0\0\0\0\0\x4f\x12\0\0\0\0\0\0\x71\x63\x04\0\0\0\0\0\x71\x61\x05\0\0\0\0\0\
+\x67\x01\0\0\x08\0\0\0\x4f\x31\0\0\0\0\0\0\x67\x01\0\0\x10\0\0\0\x4f\x21\0\0\0\
+\0\0\0\x71\xa2\x5b\xff\0\0\0\0\x15\x02\x42\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\
+\x02\0\0\x10\0\0\0\x15\x02\x3f\0\0\0\0\0\x57\x01\0\0\x80\0\0\0\xb7\x02\0\0\x10\
+\0\0\0\xb7\x03\0\0\x10\0\0\0\x15\x01\x01\0\0\0\0\0\xb7\x03\0\0\x30\0\0\0\x71\
+\xa4\x5d\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\0\0\0\0\0\xbf\xa3\0\0\0\0\
+\0\0\x07\x03\0\0\x64\xff\xff\xff\xbf\x34\0\0\0\0\0\0\x15\x01\x02\0\0\0\0\0\xbf\
+\xa4\0\0\0\0\0\0\x07\x04\0\0\x84\xff\xff\xff\x71\xa5\x5c\xff\0\0\0\0\xbf\x31\0\
+\0\0\0\0\0\x15\x05\x01\0\0\0\0\0\xbf\x41\0\0\0\0\0\0\x61\x14\x04\0\0\0\0\0\x67\
+\x04\0\0\x20\0\0\0\x61\x15\0\0\0\0\0\0\x4f\x54\0\0\0\0\0\0\x7b\x4a\xa8\xff\0\0\
+\0\0\x61\x14\x08\0\0\0\0\0\x61\x11\x0c\0\0\0\0\0\x67\x01\0\0\x20\0\0\0\x4f\x41\
+\0\0\0\0\0\0\x7b\x1a\xb0\xff\0\0\0\0\x0f\x23\0\0\0\0\0\0\x61\x31\0\0\0\0\0\0\
+\x61\x32\x04\0\0\0\0\0\x61\x34\x08\0\0\0\0\0\x61\x33\x0c\0\0\0\0\0\x69\xa5\x62\
+\xff\0\0\0\0\x6b\x5a\xca\xff\0\0\0\0\x69\xa5\x60\xff\0\0\0\0\x6b\x5a\xc8\xff\0\
+\0\0\0\x67\x03\0\0\x20\0\0\0\x4f\x43\0\0\0\0\0\0\x7b\x3a\xc0\xff\0\0\0\0\x67\
+\x02\0\0\x20\0\0\0\x4f\x12\0\0\0\0\0\0\x7b\x2a\xb8\xff\0\0\0\0\x05\0\x6b\0\0\0\
+\0\0\x71\xa2\x5a\xff\0\0\0\0\x15\x02\x04\0\0\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\
+\0\0\x04\0\0\0\x15\x02\x01\0\0\0\0\0\x05\0\xf1\xfe\0\0\0\0\x57\x01\0\0\x01\0\0\
+\0\x15\x01\xd0\0\0\0\0\0\x61\xa1\x64\xff\0\0\0\0\x63\x1a\xa8\xff\0\0\0\0\x61\
+\xa1\x68\xff\0\0\0\0\x63\x1a\xac\xff\0\0\0\0\x05\0\x5e\0\0\0\0\0\xb7\x09\0\0\
+\x3c\0\0\0\x79\xa8\x28\xff\0\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x15\
+\0\xac\xff\0\0\0\0\x05\0\xc5\0\0\0\0\0\x71\xa2\x5a\xff\0\0\0\0\x15\x02\x26\0\0\
+\0\0\0\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x20\0\0\0\x15\x02\x23\0\0\0\0\0\x57\x01\
+\0\0\0\x01\0\0\xb7\x02\0\0\x10\0\0\0\xb7\x03\0\0\x10\0\0\0\x15\x01\x01\0\0\0\0\
+\0\xb7\x03\0\0\x30\0\0\0\x71\xa4\x5d\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\
+\0\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x64\xff\xff\xff\xbf\x34\0\0\0\0\0\
+\0\x15\x01\x02\0\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\x84\xff\xff\xff\x71\
+\xa5\x5c\xff\0\0\0\0\xbf\x31\0\0\0\0\0\0\x15\x05\xbd\xff\0\0\0\0\x05\0\xbb\xff\
+\0\0\0\0\xb7\x01\0\0\x01\0\0\0\x73\x1a\x5a\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x7b\
+\x1a\xd0\xff\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\xd0\xff\xff\xff\x79\xa1\
+\x40\xff\0\0\0\0\x79\xa2\x48\xff\0\0\0\0\xb7\x04\0\0\x08\0\0\0\xb7\x05\0\0\x01\
+\0\0\0\x85\0\0\0\x44\0\0\0\x67\0\0\0\x20\0\0\0\x77\0\0\0\x20\0\0\0\x55\0\xa0\0\
+\0\0\0\0\x05\0\xa8\xfe\0\0\0\0\x15\x09\xf3\xfe\x87\0\0\0\x05\0\x83\xff\0\0\0\0\
+\xbf\x12\0\0\0\0\0\0\x57\x02\0\0\x08\0\0\0\x15\x02\x9a\0\0\0\0\0\x57\x01\0\0\
+\x40\0\0\0\xb7\x02\0\0\x0c\0\0\0\xb7\x03\0\0\x0c\0\0\0\x15\x01\x01\0\0\0\0\0\
+\xb7\x03\0\0\x2c\0\0\0\x71\xa4\x5c\xff\0\0\0\0\x15\x04\x01\0\0\0\0\0\xbf\x32\0\
+\0\0\0\0\0\xbf\xa3\0\0\0\0\0\0\x07\x03\0\0\x58\xff\xff\xff\x0f\x23\0\0\0\0\0\0\
+\x61\x32\x04\0\0\0\0\0\x67\x02\0\0\x20\0\0\0\x61\x34\0\0\0\0\0\0\x4f\x42\0\0\0\
+\0\0\0\x7b\x2a\xa8\xff\0\0\0\0\x61\x32\x08\0\0\0\0\0\x61\x33\x0c\0\0\0\0\0\x67\
+\x03\0\0\x20\0\0\0\x4f\x23\0\0\0\0\0\0\x7b\x3a\xb0\xff\0\0\0\0\x71\xa2\x5d\xff\
+\0\0\0\0\x15\x02\x0c\0\0\0\0\0\x15\x01\x0b\0\0\0\0\0\x61\xa1\xa0\xff\0\0\0\0\
+\x67\x01\0\0\x20\0\0\0\x61\xa2\x9c\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xc0\
+\xff\0\0\0\0\x61\xa1\x98\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x94\xff\0\0\
+\0\0\x05\0\x0a\0\0\0\0\0\xb7\x09\0\0\x2b\0\0\0\x05\0\xae\xff\0\0\0\0\x61\xa1\
+\x80\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\xa2\x7c\xff\0\0\0\0\x4f\x21\0\0\0\0\
+\0\0\x7b\x1a\xc0\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\x67\x01\0\0\x20\0\0\0\x61\
+\xa2\x74\xff\0\0\0\0\x4f\x21\0\0\0\0\0\0\x7b\x1a\xb8\xff\0\0\0\0\xb7\x02\0\0\0\
+\0\0\0\x07\x08\0\0\x04\0\0\0\x61\x03\0\0\0\0\0\0\xb7\x05\0\0\0\0\0\0\xbf\xa1\0\
+\0\0\0\0\0\x07\x01\0\0\xa8\xff\xff\xff\x0f\x21\0\0\0\0\0\0\x71\x14\0\0\0\0\0\0\
+\xbf\x41\0\0\0\0\0\0\x67\x01\0\0\x38\0\0\0\xc7\x01\0\0\x3f\0\0\0\x5f\x31\0\0\0\
+\0\0\0\xaf\x51\0\0\0\0\0\0\xbf\x85\0\0\0\0\0\0\x0f\x25\0\0\0\0\0\0\x71\x55\0\0\
+\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x07\0\0\0\x4f\x03\
+\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x39\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\
+\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x06\0\0\0\
+\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\
+\0\0\x67\0\0\0\x3a\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\
+\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x05\0\0\0\x57\0\0\0\
+\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3b\0\0\0\xc7\0\0\
+\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\x67\x03\0\0\x01\0\0\0\xbf\
+\x50\0\0\0\0\0\0\x77\0\0\0\x04\0\0\0\x57\0\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\
+\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3c\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\
+\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x03\0\0\0\x57\0\0\0\x01\0\
+\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\
+\x3d\0\0\0\xc7\0\0\0\x3f\0\0\0\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\
+\0\0\0\0\0\0\x77\0\0\0\x02\0\0\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\
+\x03\0\0\0\0\0\0\xbf\x40\0\0\0\0\0\0\x67\0\0\0\x3e\0\0\0\xc7\0\0\0\x3f\0\0\0\
+\x5f\x30\0\0\0\0\0\0\xaf\x01\0\0\0\0\0\0\xbf\x50\0\0\0\0\0\0\x77\0\0\0\x01\0\0\
+\0\x57\0\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x03\0\0\0\0\0\0\x57\x04\0\0\
+\x01\0\0\0\x87\x04\0\0\0\0\0\0\x5f\x34\0\0\0\0\0\0\xaf\x41\0\0\0\0\0\0\x57\x05\
+\0\0\x01\0\0\0\x67\x03\0\0\x01\0\0\0\x4f\x53\0\0\0\0\0\0\x07\x02\0\0\x01\0\0\0\
+\xbf\x15\0\0\0\0\0\0\x15\x02\x01\0\x24\0\0\0\x05\0\xa9\xff\0\0\0\0\xbf\x12\0\0\
+\0\0\0\0\x67\x02\0\0\x20\0\0\0\x77\x02\0\0\x20\0\0\0\x15\x02\x0e\0\0\0\0\0\x71\
+\x63\x06\0\0\0\0\0\x71\x64\x07\0\0\0\0\0\x67\x04\0\0\x08\0\0\0\x4f\x34\0\0\0\0\
+\0\0\x3f\x42\0\0\0\0\0\0\x2f\x42\0\0\0\0\0\0\x1f\x21\0\0\0\0\0\0\x63\x1a\x58\
+\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\x58\xff\xff\xff\x18\x01\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\x85\0\0\0\x01\0\0\0\x55\0\x05\0\0\0\0\0\x71\x61\x08\0\0\0\0\
+\0\x71\x60\x09\0\0\0\0\0\x67\0\0\0\x08\0\0\0\x4f\x10\0\0\0\0\0\0\x95\0\0\0\0\0\
+\0\0\x69\0\0\0\0\0\0\0\x05\0\xfd\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\x47\x50\x4c\x20\x76\x32\0\0\x9f\xeb\x01\0\x18\0\0\0\0\0\0\0\x10\x05\0\0\x10\
+\x05\0\0\x65\x11\0\0\0\0\0\0\0\0\0\x02\x03\0\0\0\x01\0\0\0\0\0\0\x01\x04\0\0\0\
+\x20\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x02\0\0\0\x05\0\0\0\
+\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x02\x06\0\0\0\0\0\0\0\0\0\0\x03\0\
+\0\0\0\x02\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\x02\x08\0\0\0\0\0\0\0\0\0\0\
+\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x0a\0\0\0\0\0\0\0\0\0\0\x02\x0a\0\0\0\0\0\0\0\
+\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x01\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\
+\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\0\0\x07\0\0\0\
+\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\0\x3e\0\0\0\0\0\0\x0e\x0b\0\0\0\x01\0\0\
+\0\0\0\0\0\0\0\0\x02\x0e\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\
+\x28\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\
+\x05\0\0\0\x40\0\0\0\x27\0\0\0\x0d\0\0\0\x80\0\0\0\x32\0\0\0\x09\0\0\0\xc0\0\0\
+\0\x59\0\0\0\0\0\0\x0e\x0f\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x12\0\0\0\0\0\0\0\
+\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\0\
+\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\x40\0\0\0\x27\0\0\0\x01\0\0\0\
+\x80\0\0\0\x32\0\0\0\x11\0\0\0\xc0\0\0\0\x72\0\0\0\0\0\0\x0e\x13\0\0\0\x01\0\0\
+\0\0\0\0\0\0\0\0\x02\x16\0\0\0\x90\0\0\0\x22\0\0\x04\xc0\0\0\0\x9a\0\0\0\x17\0\
+\0\0\0\0\0\0\x9e\0\0\0\x17\0\0\0\x20\0\0\0\xa7\0\0\0\x17\0\0\0\x40\0\0\0\xac\0\
+\0\0\x17\0\0\0\x60\0\0\0\xba\0\0\0\x17\0\0\0\x80\0\0\0\xc3\0\0\0\x17\0\0\0\xa0\
+\0\0\0\xd0\0\0\0\x17\0\0\0\xc0\0\0\0\xd9\0\0\0\x17\0\0\0\xe0\0\0\0\xe4\0\0\0\
+\x17\0\0\0\0\x01\0\0\xed\0\0\0\x17\0\0\0\x20\x01\0\0\xfd\0\0\0\x17\0\0\0\x40\
+\x01\0\0\x05\x01\0\0\x17\0\0\0\x60\x01\0\0\x0e\x01\0\0\x19\0\0\0\x80\x01\0\0\
+\x11\x01\0\0\x17\0\0\0\x20\x02\0\0\x16\x01\0\0\x17\0\0\0\x40\x02\0\0\x21\x01\0\
+\0\x17\0\0\0\x60\x02\0\0\x26\x01\0\0\x17\0\0\0\x80\x02\0\0\x2f\x01\0\0\x17\0\0\
+\0\xa0\x02\0\0\x37\x01\0\0\x17\0\0\0\xc0\x02\0\0\x3e\x01\0\0\x17\0\0\0\xe0\x02\
+\0\0\x49\x01\0\0\x17\0\0\0\0\x03\0\0\x53\x01\0\0\x1a\0\0\0\x20\x03\0\0\x5e\x01\
+\0\0\x1a\0\0\0\xa0\x03\0\0\x68\x01\0\0\x17\0\0\0\x20\x04\0\0\x74\x01\0\0\x17\0\
+\0\0\x40\x04\0\0\x7f\x01\0\0\x17\0\0\0\x60\x04\0\0\0\0\0\0\x1b\0\0\0\x80\x04\0\
+\0\x89\x01\0\0\x1d\0\0\0\xc0\x04\0\0\x90\x01\0\0\x17\0\0\0\0\x05\0\0\x99\x01\0\
+\0\x17\0\0\0\x20\x05\0\0\0\0\0\0\x1f\0\0\0\x40\x05\0\0\xa2\x01\0\0\x17\0\0\0\
+\x80\x05\0\0\xab\x01\0\0\x21\0\0\0\xa0\x05\0\0\xb7\x01\0\0\x1d\0\0\0\xc0\x05\0\
+\0\xc0\x01\0\0\0\0\0\x08\x18\0\0\0\xc6\x01\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\
+\0\0\0\0\0\0\x03\0\0\0\0\x17\0\0\0\x04\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\
+\0\x17\0\0\0\x04\0\0\0\x04\0\0\0\0\0\0\0\x01\0\0\x05\x08\0\0\0\xd3\x01\0\0\x1c\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\x2a\0\0\0\xdd\x01\0\0\0\0\0\x08\x1e\0\0\0\xe3\
+\x01\0\0\0\0\0\x01\x08\0\0\0\x40\0\0\0\0\0\0\0\x01\0\0\x05\x08\0\0\0\xf6\x01\0\
+\0\x20\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\x2b\0\0\0\xf9\x01\0\0\0\0\0\x08\x22\0\0\
+\0\xfe\x01\0\0\0\0\0\x01\x01\0\0\0\x08\0\0\0\0\0\0\0\x01\0\0\x0d\x02\0\0\0\x0c\
+\x02\0\0\x15\0\0\0\x10\x02\0\0\x01\0\0\x0c\x23\0\0\0\x32\x11\0\0\0\0\0\x01\x01\
+\0\0\0\x08\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x25\0\0\0\x04\0\0\0\x07\0\0\0\x37\
+\x11\0\0\0\0\0\x0e\x26\0\0\0\x01\0\0\0\x40\x11\0\0\x03\0\0\x0f\0\0\0\0\x0c\0\0\
+\0\0\0\0\0\x20\0\0\0\x10\0\0\0\0\0\0\0\x20\0\0\0\x14\0\0\0\0\0\0\0\x20\0\0\0\
+\x46\x11\0\0\x01\0\0\x0f\0\0\0\0\x27\0\0\0\0\0\0\0\x07\0\0\0\x4e\x11\0\0\0\0\0\
+\x07\0\0\0\0\x5c\x11\0\0\0\0\0\x07\0\0\0\0\0\x69\x6e\x74\0\x5f\x5f\x41\x52\x52\
+\x41\x59\x5f\x53\x49\x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x74\x79\x70\x65\0\
+\x6b\x65\x79\x5f\x73\x69\x7a\x65\0\x76\x61\x6c\x75\x65\x5f\x73\x69\x7a\x65\0\
+\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\0\x74\x61\x70\x5f\x72\x73\x73\x5f\
+\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\x72\x61\x74\x69\x6f\x6e\x73\0\x74\
+\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\
+\x5f\x6b\x65\x79\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x69\x6e\x64\
+\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\x65\0\x5f\x5f\x73\x6b\x5f\
+\x62\x75\x66\x66\0\x6c\x65\x6e\0\x70\x6b\x74\x5f\x74\x79\x70\x65\0\x6d\x61\x72\
+\x6b\0\x71\x75\x65\x75\x65\x5f\x6d\x61\x70\x70\x69\x6e\x67\0\x70\x72\x6f\x74\
+\x6f\x63\x6f\x6c\0\x76\x6c\x61\x6e\x5f\x70\x72\x65\x73\x65\x6e\x74\0\x76\x6c\
+\x61\x6e\x5f\x74\x63\x69\0\x76\x6c\x61\x6e\x5f\x70\x72\x6f\x74\x6f\0\x70\x72\
+\x69\x6f\x72\x69\x74\x79\0\x69\x6e\x67\x72\x65\x73\x73\x5f\x69\x66\x69\x6e\x64\
+\x65\x78\0\x69\x66\x69\x6e\x64\x65\x78\0\x74\x63\x5f\x69\x6e\x64\x65\x78\0\x63\
+\x62\0\x68\x61\x73\x68\0\x74\x63\x5f\x63\x6c\x61\x73\x73\x69\x64\0\x64\x61\x74\
+\x61\0\x64\x61\x74\x61\x5f\x65\x6e\x64\0\x6e\x61\x70\x69\x5f\x69\x64\0\x66\x61\
+\x6d\x69\x6c\x79\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x34\0\x6c\x6f\x63\x61\
+\x6c\x5f\x69\x70\x34\0\x72\x65\x6d\x6f\x74\x65\x5f\x69\x70\x36\0\x6c\x6f\x63\
+\x61\x6c\x5f\x69\x70\x36\0\x72\x65\x6d\x6f\x74\x65\x5f\x70\x6f\x72\x74\0\x6c\
+\x6f\x63\x61\x6c\x5f\x70\x6f\x72\x74\0\x64\x61\x74\x61\x5f\x6d\x65\x74\x61\0\
+\x74\x73\x74\x61\x6d\x70\0\x77\x69\x72\x65\x5f\x6c\x65\x6e\0\x67\x73\x6f\x5f\
+\x73\x65\x67\x73\0\x67\x73\x6f\x5f\x73\x69\x7a\x65\0\x74\x73\x74\x61\x6d\x70\
+\x5f\x74\x79\x70\x65\0\x68\x77\x74\x73\x74\x61\x6d\x70\0\x5f\x5f\x75\x33\x32\0\
+\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x66\x6c\x6f\x77\x5f\x6b\x65\
+\x79\x73\0\x5f\x5f\x75\x36\x34\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\
+\x6e\x67\x20\x6c\x6f\x6e\x67\0\x73\x6b\0\x5f\x5f\x75\x38\0\x75\x6e\x73\x69\x67\
+\x6e\x65\x64\x20\x63\x68\x61\x72\0\x73\x6b\x62\0\x74\x75\x6e\x5f\x72\x73\x73\
+\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x74\x75\x6e\x5f\x72\
+\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x2f\x68\x6f\x6d\x65\x2f\x73\x68\
+\x72\x65\x65\x73\x68\x2f\x63\x2f\x71\x65\x6d\x75\x2f\x74\x6f\x6f\x6c\x73\x2f\
+\x65\x62\x70\x66\x2f\x72\x73\x73\x2e\x62\x70\x66\x2e\x63\0\x69\x6e\x74\x20\x74\
+\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\x5f\x70\x72\x6f\
+\x67\x28\x73\x74\x72\x75\x63\x74\x20\x5f\x5f\x73\x6b\x5f\x62\x75\x66\x66\x20\
+\x2a\x73\x6b\x62\x29\0\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\x20\x6b\x65\x79\x20\
+\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x63\x6f\x6e\x66\x69\x67\x20\x3d\x20\x62\x70\
+\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\x70\x5f\x65\x6c\x65\x6d\x28\x26\
+\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\
+\x72\x61\x74\x69\x6f\x6e\x73\x2c\x20\x26\x6b\x65\x79\x29\x3b\0\x20\x20\x20\x20\
+\x74\x6f\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\
+\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\
+\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\x2c\x20\x26\x6b\x65\x79\
+\x29\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\x20\x26\x26\
+\x20\x74\x6f\x65\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\
+\x21\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x72\x65\x64\x69\x72\x65\x63\x74\x29\x20\
+\x7b\0\x20\x20\x20\x20\x5f\x5f\x75\x38\x20\x72\x73\x73\x5f\x69\x6e\x70\x75\x74\
+\x5b\x48\x41\x53\x48\x5f\x43\x41\x4c\x43\x55\x4c\x41\x54\x49\x4f\x4e\x5f\x42\
+\x55\x46\x46\x45\x52\x5f\x53\x49\x5a\x45\x5d\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\
+\x20\x20\x73\x74\x72\x75\x63\x74\x20\x70\x61\x63\x6b\x65\x74\x5f\x68\x61\x73\
+\x68\x5f\x69\x6e\x66\x6f\x5f\x74\x20\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\
+\x6f\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x21\x69\x6e\x66\
+\x6f\x20\x7c\x7c\x20\x21\x73\x6b\x62\x29\x20\x7b\0\x20\x20\x20\x20\x5f\x5f\x62\
+\x65\x31\x36\x20\x72\x65\x74\x20\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x65\x72\x72\
+\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\
+\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x6f\x66\
+\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\
+\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\
+\0\x20\x20\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\x62\x70\x66\x5f\x6e\x74\x6f\
+\x68\x73\x28\x72\x65\x74\x29\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\
+\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\
+\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\
+\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x72\x65\x74\x2c\x20\x73\x69\x7a\x65\x6f\
+\x66\x28\x72\x65\x74\x29\x2c\0\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\
+\x65\x74\x3b\0\x20\x20\x20\x20\x69\x66\x20\x28\x6c\x33\x5f\x70\x72\x6f\x74\x6f\
+\x63\x6f\x6c\x20\x3d\x3d\x20\x30\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\
+\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x34\x20\x3d\x20\x31\x3b\0\x20\
+\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x69\x70\x68\x64\x72\
+\x20\x69\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\
+\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\
+\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x30\
+\x2c\x20\x26\x69\x70\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x69\x70\x29\x2c\0\x20\
+\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\
+\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x66\x72\x61\x67\
+\x6d\x65\x6e\x74\x65\x64\x20\x3d\x20\x21\x21\x69\x70\x2e\x66\x72\x61\x67\x5f\
+\x6f\x66\x66\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\
+\x6e\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\x2e\x73\x61\x64\x64\x72\x3b\0\x20\x20\
+\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x6e\x5f\x64\x73\x74\x20\
+\x3d\x20\x69\x70\x2e\x64\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\
+\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\x20\x69\x70\x2e\x70\x72\
+\x6f\x74\x6f\x63\x6f\x6c\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\x5f\x6f\
+\x66\x66\x73\x65\x74\x20\x3d\x20\x69\x70\x2e\x69\x68\x6c\x20\x2a\x20\x34\x3b\0\
+\x20\x20\x20\x20\x69\x66\x20\x28\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\
+\x20\x21\x3d\x20\x30\x20\x26\x26\x20\x21\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\
+\x66\x72\x61\x67\x6d\x65\x6e\x74\x65\x64\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\
+\x20\x20\x69\x66\x20\x28\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\
+\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x54\x43\x50\x29\x20\x7b\0\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x74\
+\x63\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x73\x74\x72\x75\x63\x74\x20\x74\x63\x70\x68\x64\x72\x20\x74\x63\x70\x20\x3d\
+\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\
+\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\
+\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x6c\x34\
+\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x74\x63\x70\x2c\x20\x73\x69\x7a\x65\
+\x6f\x66\x28\x74\x63\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x69\x66\x20\x28\
+\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\x34\
+\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\x63\x6b\
+\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x74\x63\x70\x20\x26\x26\0\x20\x20\
+\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\
+\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\
+\x20\x69\x70\x76\x36\x68\x64\x72\x20\x69\x70\x36\x20\x3d\x20\x7b\x7d\x3b\0\x20\
+\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\
+\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\
+\x76\x65\x28\x73\x6b\x62\x2c\x20\x30\x2c\x20\x26\x69\x70\x36\x2c\x20\x73\x69\
+\x7a\x65\x6f\x66\x28\x69\x70\x36\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\
+\x6e\x66\x6f\x2d\x3e\x69\x6e\x36\x5f\x73\x72\x63\x20\x3d\x20\x69\x70\x36\x2e\
+\x73\x61\x64\x64\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\
+\x3e\x69\x6e\x36\x5f\x64\x73\x74\x20\x3d\x20\x69\x70\x36\x2e\x64\x61\x64\x64\
+\x72\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\
+\x6f\x6c\x20\x3d\x20\x69\x70\x36\x2e\x6e\x65\x78\x74\x68\x64\x72\x3b\0\x20\x20\
+\x20\x20\x73\x77\x69\x74\x63\x68\x20\x28\x68\x64\x72\x5f\x74\x79\x70\x65\x29\
+\x20\x7b\0\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x69\x70\x76\x36\x5f\x6f\
+\x70\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\x68\x64\x72\x20\x3d\x20\x7b\x7d\
+\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\
+\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\
+\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\
+\x74\x2c\x20\x26\x65\x78\x74\x5f\x68\x64\x72\x2c\0\x20\x20\x20\x20\x20\x20\x20\
+\x20\x69\x66\x20\x28\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\x6c\x20\x3d\
+\x3d\x20\x49\x50\x50\x52\x4f\x54\x4f\x5f\x52\x4f\x55\x54\x49\x4e\x47\x29\x20\
+\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\
+\x20\x69\x70\x76\x36\x5f\x72\x74\x5f\x68\x64\x72\x20\x65\x78\x74\x5f\x72\x74\
+\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\
+\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\
+\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\x20\
+\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x65\x78\x74\x5f\x72\x74\
+\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x28\x65\
+\x78\x74\x5f\x72\x74\x2e\x74\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\
+\x53\x52\x43\x52\x54\x5f\x54\x59\x50\x45\x5f\x32\x29\x20\x26\x26\0\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x2a\x6c\
+\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x66\x66\x73\x65\x74\x6f\x66\
+\x28\x73\x74\x72\x75\x63\x74\x20\x72\x74\x32\x5f\x68\x64\x72\x2c\x20\x61\x64\
+\x64\x72\x29\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\x64\
+\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\x62\
+\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\
+\x20\x28\x65\x72\x72\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\x5f\
+\x65\x78\x74\x5f\x64\x73\x74\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x7d\x20\x5f\x5f\x61\x74\x74\x72\x69\x62\x75\x74\x65\x5f\
+\x5f\x28\x28\x70\x61\x63\x6b\x65\x64\x29\x29\x20\x6f\x70\x74\x20\x3d\x20\x7b\
+\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6f\
+\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x6f\x70\x74\x2e\x74\
+\x79\x70\x65\x20\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x50\x41\x44\
+\x31\x29\x20\x3f\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x69\x66\x20\x28\x6f\x70\x74\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x31\
+\x20\x3e\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x68\x64\x72\x6c\x65\x6e\x20\
+\x2a\x20\x38\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\
+\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\
+\x6b\x62\x2c\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x70\
+\x74\x5f\x6f\x66\x66\x73\x65\x74\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x6f\x70\x74\x2e\x74\x79\x70\x65\x20\
+\x3d\x3d\x20\x49\x50\x56\x36\x5f\x54\x4c\x56\x5f\x48\x41\x4f\x29\x20\x7b\0\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x2a\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x20\x6f\x70\
+\x74\x5f\x6f\x66\x66\x73\x65\x74\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\
+\x73\x6b\x62\x5f\x6c\x6f\x61\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\
+\x74\x69\x76\x65\x28\x73\x6b\x62\x2c\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x65\x72\x72\x29\x20\
+\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\x78\x74\
+\x5f\x73\x72\x63\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x73\x5f\x66\x72\x61\x67\x6d\x65\x6e\x74\
+\x65\x64\x20\x3d\x20\x74\x72\x75\x65\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x2a\
+\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x20\x2b\x3d\x20\x28\x65\x78\x74\x5f\x68\
+\x64\x72\x2e\x68\x64\x72\x6c\x65\x6e\x20\x2b\x20\x31\x29\x20\x2a\x20\x38\x3b\0\
+\x20\x20\x20\x20\x20\x20\x20\x20\x2a\x6c\x34\x5f\x70\x72\x6f\x74\x6f\x63\x6f\
+\x6c\x20\x3d\x20\x65\x78\x74\x5f\x68\x64\x72\x2e\x6e\x65\x78\x74\x68\x64\x72\
+\x3b\0\x20\x20\x20\x20\x66\x6f\x72\x20\x28\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\
+\x69\x6e\x74\x20\x69\x20\x3d\x20\x30\x3b\x20\x69\x20\x3c\x20\x49\x50\x36\x5f\
+\x45\x58\x54\x45\x4e\x53\x49\x4f\x4e\x53\x5f\x43\x4f\x55\x4e\x54\x3b\x20\x2b\
+\x2b\x69\x29\x20\x7b\0\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\
+\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\
+\x36\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\
+\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x69\x70\x76\
+\x36\x5f\x65\x78\x74\x5f\x64\x73\x74\x20\x26\x26\0\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x69\x66\x20\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\
+\x6f\x2e\x69\x73\x5f\x69\x70\x76\x36\x5f\x65\x78\x74\x5f\x73\x72\x63\x20\x26\
+\x26\0\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\
+\x28\x70\x61\x63\x6b\x65\x74\x5f\x69\x6e\x66\x6f\x2e\x69\x73\x5f\x75\x64\x70\
+\x20\x26\x26\0\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\
+\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\x2d\x3e\x68\x61\x73\x68\x5f\x74\x79\x70\
+\x65\x73\x20\x26\x20\x56\x49\x52\x54\x49\x4f\x5f\x4e\x45\x54\x5f\x52\x53\x53\
+\x5f\x48\x41\x53\x48\x5f\x54\x59\x50\x45\x5f\x49\x50\x76\x34\x29\x20\x7b\0\x20\
+\x20\x20\x20\x5f\x5f\x62\x75\x69\x6c\x74\x69\x6e\x5f\x6d\x65\x6d\x63\x70\x79\
+\x28\x26\x72\x73\x73\x5f\x69\x6e\x70\x75\x74\x5b\x2a\x62\x79\x74\x65\x73\x5f\
+\x77\x72\x69\x74\x74\x65\x6e\x5d\x2c\x20\x70\x74\x72\x2c\x20\x73\x69\x7a\x65\
+\x29\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x66\x6f\x2d\
+\x3e\x69\x73\x5f\x75\x64\x70\x20\x3d\x20\x31\x3b\0\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x73\x74\x72\x75\x63\x74\x20\x75\x64\x70\x68\x64\x72\x20\
+\x75\x64\x70\x20\x3d\x20\x7b\x7d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x65\x72\x72\x20\x3d\x20\x62\x70\x66\x5f\x73\x6b\x62\x5f\x6c\x6f\x61\
+\x64\x5f\x62\x79\x74\x65\x73\x5f\x72\x65\x6c\x61\x74\x69\x76\x65\x28\x73\x6b\
+\x62\x2c\x20\x6c\x34\x5f\x6f\x66\x66\x73\x65\x74\x2c\x20\x26\x75\x64\x70\x2c\
+\x20\x73\x69\x7a\x65\x6f\x66\x28\x75\x64\x70\x29\x2c\0\x20\x20\x20\x20\x20\x20\
+\x20\x20\x7d\x20\x65\x6c\x73\x65\x20\x69\x66\x20\x28\x63\x6f\x6e\x66\x69\x67\
+\x2d\x3e\x68\x61\x73\x68\x5f\x74\x79\x70\x65\x73\x20\x26\x20\x56\x49\x52\x54\
+\x49\x4f\x5f\x4e\x45\x54\x5f\x52\x53\x53\x5f\x48\x41\x53\x48\x5f\x54\x59\x50\
+\x45\x5f\x49\x50\x76\x36\x29\x20\x7b\0\x20\x20\x20\x20\x66\x6f\x72\x20\x28\x62\
+\x79\x74\x65\x20\x3d\x20\x30\x3b\x20\x62\x79\x74\x65\x20\x3c\x20\x48\x41\x53\
+\x48\x5f\x43\x41\x4c\x43\x55\x4c\x41\x54\x49\x4f\x4e\x5f\x42\x55\x46\x46\x45\
+\x52\x5f\x53\x49\x5a\x45\x3b\x20\x62\x79\x74\x65\x2b\x2b\x29\x20\x7b\0\x20\x20\
+\x20\x20\x5f\x5f\x75\x33\x32\x20\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\x33\x32\
+\x5f\x62\x69\x74\x73\x20\x3d\x20\x6b\x65\x79\x2d\x3e\x6c\x65\x66\x74\x6d\x6f\
+\x73\x74\x5f\x33\x32\x5f\x62\x69\x74\x73\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\
+\x5f\x5f\x75\x38\x20\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\x65\x20\x3d\x20\x69\
+\x6e\x70\x75\x74\x5b\x62\x79\x74\x65\x5d\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x69\x66\x20\x28\x69\x6e\x70\x75\x74\x5f\x62\x79\x74\x65\x20\
+\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x7b\0\x20\x20\x20\x20\x20\x20\
+\x20\x20\x5f\x5f\x75\x38\x20\x6b\x65\x79\x5f\x62\x79\x74\x65\x20\x3d\x20\x6b\
+\x65\x79\x2d\x3e\x6e\x65\x78\x74\x5f\x62\x79\x74\x65\x5b\x62\x79\x74\x65\x5d\
+\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x28\x6c\x65\x66\x74\x6d\x6f\x73\x74\x5f\x33\x32\x5f\x62\x69\x74\x73\
+\x20\x3c\x3c\x20\x31\x29\x20\x7c\x20\x28\x28\x6b\x65\x79\x5f\x62\x79\x74\x65\
+\x20\x26\x20\x28\x31\x20\x3c\x3c\x20\x37\x29\x29\x20\x3e\x3e\x20\x37\x29\x3b\0\
+\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x68\x61\x73\x68\x29\x20\x7b\0\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x5f\x5f\x75\x33\x32\x20\x74\
+\x61\x62\x6c\x65\x5f\x69\x64\x78\x20\x3d\x20\x68\x61\x73\x68\x20\x25\x20\x63\
+\x6f\x6e\x66\x69\x67\x2d\x3e\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x73\
+\x5f\x6c\x65\x6e\x3b\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x71\x75\
+\x65\x75\x65\x20\x3d\x20\x62\x70\x66\x5f\x6d\x61\x70\x5f\x6c\x6f\x6f\x6b\x75\
+\x70\x5f\x65\x6c\x65\x6d\x28\x26\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\
+\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\x62\x6c\x65\x2c\0\
+\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x28\x71\x75\x65\
+\x75\x65\x29\x20\x7b\0\x7d\0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x2a\x71\x75\x65\x75\x65\x3b\0\x63\
+\x68\x61\x72\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x2e\x6d\x61\x70\x73\0\x6c\x69\
+\x63\x65\x6e\x73\x65\0\x62\x70\x66\x5f\x66\x6c\x6f\x77\x5f\x6b\x65\x79\x73\0\
+\x62\x70\x66\x5f\x73\x6f\x63\x6b\0\0\0\0\x9f\xeb\x01\0\x20\0\0\0\0\0\0\0\x14\0\
+\0\0\x14\0\0\0\x6c\x0c\0\0\x80\x0c\0\0\0\0\0\0\x08\0\0\0\x26\x02\0\0\x01\0\0\0\
+\0\0\0\0\x24\0\0\0\x10\0\0\0\x26\x02\0\0\xc6\0\0\0\0\0\0\0\x37\x02\0\0\x61\x02\
+\0\0\0\x50\x08\0\x10\0\0\0\x37\x02\0\0\x92\x02\0\0\x0b\x68\x08\0\x20\0\0\0\x37\
+\x02\0\0\0\0\0\0\0\0\0\0\x28\0\0\0\x37\x02\0\0\xa5\x02\0\0\x0e\x74\x08\0\x50\0\
+\0\0\x37\x02\0\0\xea\x02\0\0\x0b\x78\x08\0\x88\0\0\0\x37\x02\0\0\x2a\x03\0\0\
+\x10\x80\x08\0\x90\0\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x98\0\0\0\x37\x02\0\0\x2a\
+\x03\0\0\x10\x80\x08\0\xa0\0\0\0\x37\x02\0\0\x43\x03\0\0\x16\x84\x08\0\xa8\0\0\
+\0\x37\x02\0\0\x43\x03\0\0\x0d\x84\x08\0\xc0\0\0\0\x37\x02\0\0\x64\x03\0\0\x0a\
+\xfc\x05\0\xe8\0\0\0\x37\x02\0\0\x9b\x03\0\0\x1f\x0c\x06\0\x38\x01\0\0\x37\x02\
+\0\0\xcb\x03\0\0\x0f\xa0\x04\0\x40\x01\0\0\x37\x02\0\0\xe4\x03\0\0\x0c\x20\x04\
+\0\x50\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x58\x01\0\0\x37\x02\0\0\xf8\x03\0\0\
+\x0b\x2c\x04\0\x90\x01\0\0\x37\x02\0\0\x3e\x04\0\0\x09\x34\x04\0\xa0\x01\0\0\
+\x37\x02\0\0\x4d\x04\0\0\x0d\x44\x04\0\xb8\x01\0\0\x37\x02\0\0\x4d\x04\0\0\x05\
+\x44\x04\0\xd8\x01\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xe0\x01\0\0\x37\x02\0\0\x6b\
+\x04\0\0\x0f\x58\x04\0\x10\x02\0\0\x37\x02\0\0\x3e\x04\0\0\x09\x70\x04\0\x18\
+\x02\0\0\x37\x02\0\0\xb5\x04\0\0\x0c\x80\x04\0\x20\x02\0\0\x37\x02\0\0\xc5\x04\
+\0\0\x09\xbc\x04\0\x50\x02\0\0\x37\x02\0\0\xe1\x04\0\0\x17\xd4\x04\0\x60\x02\0\
+\0\x37\x02\0\0\xfc\x04\0\0\x16\xdc\x04\0\x80\x02\0\0\x37\x02\0\0\xe1\x04\0\0\
+\x17\xd4\x04\0\x88\x02\0\0\x37\x02\0\0\x1a\x05\0\0\x0f\xe0\x04\0\xc0\x02\0\0\
+\x37\x02\0\0\x5d\x05\0\0\x0d\xe8\x04\0\xc8\x02\0\0\x37\x02\0\0\x70\x05\0\0\x24\
+\0\x05\0\xd0\x02\0\0\x37\x02\0\0\x70\x05\0\0\x20\0\x05\0\xe0\x02\0\0\x37\x02\0\
+\0\x9d\x05\0\0\x1b\xf8\x04\0\xe8\x02\0\0\x37\x02\0\0\x9d\x05\0\0\x16\xf8\x04\0\
+\xf0\x02\0\0\x37\x02\0\0\xbe\x05\0\0\x1b\xfc\x04\0\xf8\x02\0\0\x37\x02\0\0\xbe\
+\x05\0\0\x16\xfc\x04\0\0\x03\0\0\x37\x02\0\0\xdf\x05\0\0\x1a\x08\x05\0\x08\x03\
+\0\0\x37\x02\0\0\x70\x05\0\0\x1d\0\x05\0\x10\x03\0\0\x37\x02\0\0\x02\x06\0\0\
+\x18\x0c\x05\0\x18\x03\0\0\x37\x02\0\0\x02\x06\0\0\x1c\x0c\x05\0\x30\x03\0\0\
+\x37\x02\0\0\x22\x06\0\0\x15\x68\x05\0\x40\x03\0\0\x37\x02\0\0\x22\x06\0\0\x1a\
+\x68\x05\0\x58\x03\0\0\x37\x02\0\0\x56\x06\0\0\x0d\x6c\x05\0\x78\x03\0\0\x37\
+\x02\0\0\x80\x06\0\0\x1a\x70\x05\0\x88\x03\0\0\x37\x02\0\0\x9e\x06\0\0\x1b\x78\
+\x05\0\xa8\x03\0\0\x37\x02\0\0\x80\x06\0\0\x1a\x70\x05\0\xb0\x03\0\0\x37\x02\0\
+\0\xc2\x06\0\0\x13\x7c\x05\0\xe8\x03\0\0\x37\x02\0\0\x13\x07\0\0\x11\x84\x05\0\
+\xf0\x03\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x10\x04\0\0\x37\x02\0\0\x2a\x07\0\0\
+\x15\x28\x06\0\x18\x04\0\0\x37\x02\0\0\x2a\x07\0\0\x09\x28\x06\0\x20\x04\0\0\
+\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x04\0\0\x37\x02\0\0\x49\x07\0\0\x19\x2c\x06\0\
+\x80\x04\0\0\x37\x02\0\0\x49\x07\0\0\x20\x2c\x06\0\xa0\x04\0\0\x37\x02\0\0\0\0\
+\0\0\0\0\0\0\xf0\x04\0\0\x37\x02\0\0\x6b\x07\0\0\x17\x14\x05\0\0\x05\0\0\x37\
+\x02\0\0\x86\x07\0\0\x18\x1c\x05\0\x30\x05\0\0\x37\x02\0\0\x6b\x07\0\0\x17\x14\
+\x05\0\x48\x05\0\0\x37\x02\0\0\xa7\x07\0\0\x0f\x20\x05\0\x80\x05\0\0\x37\x02\0\
+\0\x5d\x05\0\0\x0d\x28\x05\0\x88\x05\0\0\x37\x02\0\0\xec\x07\0\0\x1d\x38\x05\0\
+\xc8\x05\0\0\x37\x02\0\0\x0f\x08\0\0\x1d\x3c\x05\0\x08\x06\0\0\x37\x02\0\0\x32\
+\x08\0\0\x1b\x44\x05\0\x10\x06\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\x58\
+\x06\0\0\x37\x02\0\0\x6d\x08\0\0\x19\xb8\x02\0\xd0\x06\0\0\x37\x02\0\0\0\0\0\0\
+\0\0\0\0\xd8\x06\0\0\x37\x02\0\0\x93\x08\0\0\x0f\xc8\x02\0\x10\x07\0\0\x37\x02\
+\0\0\x5d\x05\0\0\x0d\xd0\x02\0\x20\x07\0\0\x37\x02\0\0\xd8\x08\0\0\x0d\xe0\x02\
+\0\x40\x07\0\0\x37\x02\0\0\x07\x09\0\0\x20\xe4\x02\0\x68\x07\0\0\x37\x02\0\0\
+\x33\x09\0\0\x13\xec\x02\0\xa8\x07\0\0\x37\x02\0\0\x13\x07\0\0\x11\xf4\x02\0\
+\xb0\x07\0\0\x37\x02\0\0\x7b\x09\0\0\x19\x04\x03\0\xb8\x07\0\0\x37\x02\0\0\x7b\
+\x09\0\0\x34\x04\x03\0\xe0\x07\0\0\x37\x02\0\0\xb1\x09\0\0\x15\x18\x03\0\xf0\
+\x07\0\0\x37\x02\0\0\xf2\x09\0\0\x17\x14\x03\0\x30\x08\0\0\x37\x02\0\0\x29\x0a\
+\0\0\x15\x24\x03\0\x38\x08\0\0\x37\x02\0\0\x44\x0a\0\0\x27\x34\x03\0\x70\x08\0\
+\0\x37\x02\0\0\x6f\x0a\0\0\x27\x50\x03\0\x80\x08\0\0\x37\x02\0\0\x9f\x0a\0\0\
+\x1c\xb4\x03\0\x88\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x20\xc0\x03\0\x98\x08\0\0\
+\x37\x02\0\0\xdb\x0a\0\0\x2f\xc0\x03\0\xa0\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x36\
+\xc0\x03\0\xa8\x08\0\0\x37\x02\0\0\xdb\x0a\0\0\x15\xc0\x03\0\x18\x09\0\0\x37\
+\x02\0\0\x17\x0b\0\0\x43\x64\x03\0\x38\x09\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x40\
+\x09\0\0\x37\x02\0\0\x17\x0b\0\0\x17\x64\x03\0\x80\x09\0\0\x37\x02\0\0\x29\x0a\
+\0\0\x15\x6c\x03\0\x88\x09\0\0\x37\x02\0\0\x67\x0b\0\0\x19\x7c\x03\0\x90\x09\0\
+\0\x37\x02\0\0\x67\x0b\0\0\x15\x7c\x03\0\x98\x09\0\0\x37\x02\0\0\x97\x0b\0\0\
+\x19\x84\x03\0\xa0\x09\0\0\x37\x02\0\0\xc7\x0b\0\0\x1b\x80\x03\0\xe8\x09\0\0\
+\x37\x02\0\0\x02\x0c\0\0\x19\x94\x03\0\xf0\x09\0\0\x37\x02\0\0\x21\x0c\0\0\x2b\
+\xa4\x03\0\x10\x0a\0\0\x37\x02\0\0\x9f\x0a\0\0\x1f\xb4\x03\0\x30\x0a\0\0\x37\
+\x02\0\0\x50\x0c\0\0\x21\xd4\x03\0\x40\x0a\0\0\x37\x02\0\0\x78\x0c\0\0\x20\xe4\
+\x03\0\x48\x0a\0\0\x37\x02\0\0\x78\x0c\0\0\x2c\xe4\x03\0\x60\x0a\0\0\x37\x02\0\
+\0\x78\x0c\0\0\x14\xe4\x03\0\x70\x0a\0\0\x37\x02\0\0\xa8\x0c\0\0\x20\xe0\x03\0\
+\x80\x0a\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xb0\x0a\0\0\x37\x02\0\0\xd0\
+\x0c\0\0\x38\xc0\x02\0\xd0\x0a\0\0\x37\x02\0\0\xd0\x0c\0\0\x05\xc0\x02\0\xe8\
+\x0a\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xf8\x0a\0\0\x37\x02\0\0\x0e\x0d\
+\0\0\x1c\xc4\x06\0\x08\x0b\0\0\x37\x02\0\0\x0e\x0d\0\0\x10\xc4\x06\0\x10\x0b\0\
+\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x60\x0b\0\0\x37\x02\0\0\x49\x07\0\0\x19\xc8\x06\
+\0\x68\x0b\0\0\x37\x02\0\0\x49\x07\0\0\x20\xc8\x06\0\xa0\x0b\0\0\x37\x02\0\0\
+\x34\x0d\0\0\x2d\0\x07\0\xb0\x0b\0\0\x37\x02\0\0\x34\x0d\0\0\x1d\0\x07\0\xb8\
+\x0b\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\0\x07\0\xc8\x0b\0\0\x37\x02\0\0\x63\x0d\0\
+\0\x2d\xd4\x06\0\xf8\x0b\0\0\x37\x02\0\0\x63\x0d\0\0\x1d\xd4\x06\0\x08\x0c\0\0\
+\x37\x02\0\0\x63\x0d\0\0\x2d\xd4\x06\0\x18\x0c\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\
+\xe8\x0c\0\0\x37\x02\0\0\x92\x0d\0\0\x20\x68\x06\0\xf0\x0c\0\0\x37\x02\0\0\x92\
+\x0d\0\0\x27\x68\x06\0\x18\x0d\0\0\x37\x02\0\0\xbb\x0d\0\0\x27\xa4\x06\0\x20\
+\x0d\0\0\x37\x02\0\0\xbb\x0d\0\0\x14\xa4\x06\0\x28\x0d\0\0\x37\x02\0\0\x04\x0e\
+\0\0\x05\x98\x01\0\x38\x0d\0\0\x37\x02\0\0\x04\x0e\0\0\x05\x98\x01\0\x60\x0d\0\
+\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x0d\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x80\x0d\
+\0\0\x37\x02\0\0\x92\x0d\0\0\x20\x44\x07\0\x88\x0d\0\0\x37\x02\0\0\x92\x0d\0\0\
+\x27\x44\x07\0\xc0\x0d\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\x7c\x07\0\xd0\x0d\0\0\
+\x37\x02\0\0\x34\x0d\0\0\x1d\x7c\x07\0\xd8\x0d\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\
+\x7c\x07\0\xe8\x0d\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\x50\x07\0\x18\x0e\0\0\x37\
+\x02\0\0\x63\x0d\0\0\x1d\x50\x07\0\x28\x0e\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\x50\
+\x07\0\x40\x0e\0\0\x37\x02\0\0\x41\x0e\0\0\x1a\xa0\x05\0\x50\x0e\0\0\x37\x02\0\
+\0\x5f\x0e\0\0\x1b\xa8\x05\0\x60\x0e\0\0\x37\x02\0\0\x41\x0e\0\0\x1a\xa0\x05\0\
+\x68\x0e\0\0\x37\x02\0\0\x83\x0e\0\0\x13\xac\x05\0\xa0\x0e\0\0\x37\x02\0\0\x13\
+\x07\0\0\x11\xb4\x05\0\xb0\x0e\0\0\x37\x02\0\0\x55\x08\0\0\x05\x30\x02\0\xc0\
+\x0e\0\0\x37\x02\0\0\xd4\x0e\0\0\x27\xc8\x07\0\xd0\x0e\0\0\x37\x02\0\0\xd4\x0e\
+\0\0\x14\xc8\x07\0\xf0\x0e\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\xcc\x07\0\0\x0f\0\0\
+\x37\x02\0\0\x63\x0d\0\0\x1d\xcc\x07\0\x08\x0f\0\0\x37\x02\0\0\x63\x0d\0\0\x2d\
+\xcc\x07\0\x30\x0f\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x80\x0f\0\0\x37\x02\0\0\x34\
+\x0d\0\0\x1d\xf8\x07\0\x88\x0f\0\0\x37\x02\0\0\x34\x0d\0\0\x2d\xf8\x07\0\x98\
+\x0f\0\0\x37\x02\0\0\x04\x0e\0\0\x05\x98\x01\0\xf0\x0f\0\0\x37\x02\0\0\x04\x0e\
+\0\0\x05\x98\x01\0\x30\x10\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x48\x10\0\0\x37\x02\
+\0\0\x1d\x0f\0\0\x05\xd0\x01\0\x50\x10\0\0\x37\x02\0\0\x5f\x0f\0\0\x23\xc4\x01\
+\0\x68\x10\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\x70\x10\0\0\x37\x02\0\0\x93\x0f\0\0\
+\x1b\xd4\x01\0\x90\x10\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xa8\x10\0\0\
+\x37\x02\0\0\xe3\x0f\0\0\x19\xd8\x01\0\xc0\x10\0\0\x37\x02\0\0\x11\x10\0\0\x27\
+\xfc\x01\0\xc8\x10\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xd8\x10\0\0\x37\
+\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xe0\x10\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\
+\x01\0\x08\x11\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x20\x11\0\0\x37\x02\0\
+\0\x11\x10\0\0\x27\xfc\x01\0\x28\x11\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\
+\x30\x11\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\x58\x11\0\0\x37\x02\0\0\x11\
+\x10\0\0\x27\xfc\x01\0\x60\x11\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x78\
+\x11\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\x80\x11\0\0\x37\x02\0\0\xba\x0f\
+\0\0\x11\xe8\x01\0\xa8\x11\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\xb0\x11\0\
+\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xc8\x11\0\0\x37\x02\0\0\x11\x10\0\0\
+\x2d\xfc\x01\0\xd0\x11\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xf8\x11\0\0\
+\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\x10\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\
+\xfc\x01\0\x18\x12\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\x20\x12\0\0\x37\
+\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\x48\x12\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\
+\x01\0\x60\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\x68\x12\0\0\x37\x02\0\
+\0\x11\x10\0\0\x2d\xfc\x01\0\x70\x12\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\
+\x98\x12\0\0\x37\x02\0\0\x11\x10\0\0\x46\xfc\x01\0\xb0\x12\0\0\x37\x02\0\0\x11\
+\x10\0\0\x27\xfc\x01\0\xb8\x12\0\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xc0\
+\x12\0\0\x37\x02\0\0\xba\x0f\0\0\x11\xe8\x01\0\xe0\x12\0\0\x37\x02\0\0\x11\x10\
+\0\0\x46\xfc\x01\0\xe8\x12\0\0\x37\x02\0\0\x11\x10\0\0\x27\xfc\x01\0\xf0\x12\0\
+\0\x37\x02\0\0\x11\x10\0\0\x2d\xfc\x01\0\xf8\x12\0\0\x37\x02\0\0\x1d\x0f\0\0\
+\x3d\xd0\x01\0\x08\x13\0\0\x37\x02\0\0\x1d\x0f\0\0\x05\xd0\x01\0\x18\x13\0\0\
+\x37\x02\0\0\x5d\x10\0\0\x0d\x98\x08\0\x30\x13\0\0\x37\x02\0\0\x5d\x10\0\0\x0d\
+\x98\x08\0\x38\x13\0\0\x37\x02\0\0\x71\x10\0\0\x2e\x9c\x08\0\x58\x13\0\0\x37\
+\x02\0\0\x71\x10\0\0\x24\x9c\x08\0\x70\x13\0\0\x37\x02\0\0\x71\x10\0\0\x13\x9c\
+\x08\0\x80\x13\0\0\x37\x02\0\0\x71\x10\0\0\x2e\x9c\x08\0\x88\x13\0\0\x37\x02\0\
+\0\xb0\x10\0\0\x15\xa8\x08\0\xa0\x13\0\0\x37\x02\0\0\xf8\x10\0\0\x11\xb4\x08\0\
+\xa8\x13\0\0\x37\x02\0\0\0\0\0\0\0\0\0\0\xc8\x13\0\0\x37\x02\0\0\x11\x11\0\0\
+\x01\xd8\x08\0\xd0\x13\0\0\x37\x02\0\0\x13\x11\0\0\x18\xb8\x08\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\xde\0\0\0\0\0\x03\0\xc8\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x7a\x01\0\0\0\
+\0\x03\0\xb8\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xff\0\0\0\0\0\x03\0\xa8\x13\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\xc7\0\0\0\0\0\x03\0\xd0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\x2c\x02\0\0\0\0\x03\0\x20\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf7\0\0\0\0\0\x03\0\
+\xe8\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1c\x02\0\0\0\0\x03\0\x10\x04\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x28\x01\0\0\0\0\x03\0\xe0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf3\
+\x01\0\0\0\0\x03\0\x30\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xeb\x01\0\0\0\0\x03\0\
+\x38\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x44\x02\0\0\0\0\x03\0\xf0\x03\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\xe3\x01\0\0\0\0\x03\0\xf8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x18\
+\x01\0\0\0\0\x03\0\xe8\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x30\x01\0\0\0\0\x03\0\
+\xa0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa9\x01\0\0\0\0\x03\0\x40\x10\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x51\x01\0\0\0\0\x03\0\x78\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x5c\
+\x02\0\0\0\0\x03\0\xb0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\x02\0\0\0\0\x03\0\
+\x50\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc2\x01\0\0\0\0\x03\0\xc0\x06\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x69\x01\0\0\0\0\x03\0\x20\x07\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\
+\x01\0\0\0\0\x03\0\x60\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x38\x01\0\0\0\0\x03\0\
+\x30\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\x01\0\0\0\0\x03\0\x40\x0a\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\xba\x01\0\0\0\0\x03\0\xe0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa1\
+\x01\0\0\0\0\x03\0\x48\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x01\0\0\0\0\x03\0\
+\x18\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xfb\x01\0\0\0\0\x03\0\x80\x08\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x99\x01\0\0\0\0\x03\0\xf8\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x59\
+\x01\0\0\0\0\x03\0\x50\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x54\x02\0\0\0\0\x03\0\
+\x08\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xef\0\0\0\0\0\x03\0\xe8\x0a\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x4c\x02\0\0\0\0\x03\0\xb0\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x24\
+\x02\0\0\0\0\x03\0\xd8\x0a\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x89\x01\0\0\0\0\x03\0\
+\x80\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\x01\0\0\0\0\x03\0\xb0\x0b\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\xd6\0\0\0\0\0\x03\0\xc8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x14\
+\x02\0\0\0\0\x03\0\xf8\x0b\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb2\x01\0\0\0\0\x03\0\
+\x18\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xdb\x01\0\0\0\0\x03\0\x10\x0c\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x3c\x02\0\0\0\0\x03\0\x18\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x91\
+\x01\0\0\0\0\x03\0\x60\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x81\x01\0\0\0\0\x03\0\
+\xc0\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xe7\0\0\0\0\0\x03\0\xd0\x0d\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x34\x02\0\0\0\0\x03\0\xe8\x0d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd3\
+\x01\0\0\0\0\x03\0\x18\x0e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\x01\0\0\0\0\x03\0\0\
+\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xce\0\0\0\0\0\x03\0\x18\x0f\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\x0b\x02\0\0\0\0\x03\0\xf0\x0f\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xca\x01\0\
+\0\0\0\x03\0\x30\x10\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x71\x01\0\0\0\0\x03\0\x60\x10\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x48\x01\0\0\0\0\x03\0\x18\x13\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\x64\x02\0\0\0\0\x03\0\xd0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4e\0\0\0\
+\x12\0\x03\0\0\0\0\0\0\0\0\0\xe0\x13\0\0\0\0\0\0\x33\0\0\0\x11\0\x05\0\0\0\0\0\
+\0\0\0\0\x20\0\0\0\0\0\0\0\x01\0\0\0\x11\0\x05\0\x20\0\0\0\0\0\0\0\x20\0\0\0\0\
+\0\0\0\x90\0\0\0\x11\0\x05\0\x40\0\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\x87\0\0\0\x11\
+\0\x06\0\0\0\0\0\0\0\0\0\x07\0\0\0\0\0\0\0\x28\0\0\0\0\0\0\0\x01\0\0\0\x37\0\0\
+\0\x50\0\0\0\0\0\0\0\x01\0\0\0\x38\0\0\0\x88\x13\0\0\0\0\0\0\x01\0\0\0\x39\0\0\
+\0\xd8\x04\0\0\0\0\0\0\x04\0\0\0\x37\0\0\0\xe4\x04\0\0\0\0\0\0\x04\0\0\0\x38\0\
+\0\0\xf0\x04\0\0\0\0\0\0\x04\0\0\0\x39\0\0\0\x08\x05\0\0\0\0\0\0\x04\0\0\0\x3a\
+\0\0\0\x2c\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\0\0\0\0\0\0\0\x04\0\0\0\x01\0\
+\0\0\x50\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\x70\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\x90\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\xb0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\xd0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\xf0\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\x10\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\x30\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\
+\0\0\x50\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x01\0\0\0\0\0\0\x04\0\0\0\x01\
+\0\0\0\x70\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x01\0\0\0\0\0\0\x04\0\0\0\
+\x01\0\0\0\x90\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x01\0\0\0\0\0\0\x04\0\0\
+\0\x01\0\0\0\xb0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x01\0\0\0\0\0\0\x04\0\
+\0\0\x01\0\0\0\xd0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x01\0\0\0\0\0\0\x04\
+\0\0\0\x01\0\0\0\xf0\x01\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x02\0\0\0\0\0\0\x04\
+\0\0\0\x01\0\0\0\x10\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x02\0\0\0\0\0\0\
+\x04\0\0\0\x01\0\0\0\x30\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x02\0\0\0\0\0\
+\0\x04\0\0\0\x01\0\0\0\x50\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x02\0\0\0\0\
+\0\0\x04\0\0\0\x01\0\0\0\x70\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x02\0\0\0\
+\0\0\0\x04\0\0\0\x01\0\0\0\x90\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x02\0\0\
+\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x02\0\
+\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x02\
+\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x02\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x03\
+\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\
+\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\x40\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\x60\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\
+\0\0\x80\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x03\0\0\0\0\0\0\x04\0\0\0\x01\
+\0\0\0\xa0\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x03\0\0\0\0\0\0\x04\0\0\0\
+\x01\0\0\0\xc0\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x03\0\0\0\0\0\0\x04\0\0\
+\0\x01\0\0\0\xe0\x03\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x03\0\0\0\0\0\0\x04\0\
+\0\0\x01\0\0\0\0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x04\0\0\0\0\0\0\x04\0\
+\0\0\x01\0\0\0\x20\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x04\0\0\0\0\0\0\x04\
+\0\0\0\x01\0\0\0\x40\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x04\0\0\0\0\0\0\
+\x04\0\0\0\x01\0\0\0\x60\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x04\0\0\0\0\0\
+\0\x04\0\0\0\x01\0\0\0\x80\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x04\0\0\0\0\
+\0\0\x04\0\0\0\x01\0\0\0\xa0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x04\0\0\0\
+\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x04\0\0\
+\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x04\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x04\0\
+\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x05\0\
+\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x05\
+\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\
+\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\x70\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\x90\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\
+\0\0\xb0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x05\0\0\0\0\0\0\x04\0\0\0\x01\
+\0\0\0\xd0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x05\0\0\0\0\0\0\x04\0\0\0\
+\x01\0\0\0\xf0\x05\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x06\0\0\0\0\0\0\x04\0\0\0\
+\x01\0\0\0\x10\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x06\0\0\0\0\0\0\x04\0\0\
+\0\x01\0\0\0\x30\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x06\0\0\0\0\0\0\x04\0\
+\0\0\x01\0\0\0\x50\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x06\0\0\0\0\0\0\x04\
+\0\0\0\x01\0\0\0\x70\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x06\0\0\0\0\0\0\
+\x04\0\0\0\x01\0\0\0\x90\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x06\0\0\0\0\0\
+\0\x04\0\0\0\x01\0\0\0\xb0\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x06\0\0\0\0\
+\0\0\x04\0\0\0\x01\0\0\0\xd0\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x06\0\0\0\
+\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x06\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x07\0\0\0\
+\0\0\0\x04\0\0\0\x01\0\0\0\x10\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x07\0\0\
+\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x07\0\
+\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x07\
+\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\
+\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\xa0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\xc0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\
+\0\0\xe0\x07\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x07\0\0\0\0\0\0\x04\0\0\0\x01\
+\0\0\0\0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x08\0\0\0\0\0\0\x04\0\0\0\x01\
+\0\0\0\x20\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x08\0\0\0\0\0\0\x04\0\0\0\
+\x01\0\0\0\x40\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x08\0\0\0\0\0\0\x04\0\0\
+\0\x01\0\0\0\x60\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x08\0\0\0\0\0\0\x04\0\
+\0\0\x01\0\0\0\x80\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x08\0\0\0\0\0\0\x04\
+\0\0\0\x01\0\0\0\xa0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x08\0\0\0\0\0\0\
+\x04\0\0\0\x01\0\0\0\xc0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x08\0\0\0\0\0\
+\0\x04\0\0\0\x01\0\0\0\xe0\x08\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x08\0\0\0\0\
+\0\0\x04\0\0\0\x01\0\0\0\0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x09\0\0\0\0\
+\0\0\x04\0\0\0\x01\0\0\0\x20\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x09\0\0\0\
+\0\0\0\x04\0\0\0\x01\0\0\0\x40\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x09\0\0\
+\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x09\0\
+\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x09\
+\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\
+\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\xd0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\xf0\x09\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\x10\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\
+\0\0\x30\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x0a\0\0\0\0\0\0\x04\0\0\0\x01\
+\0\0\0\x50\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x0a\0\0\0\0\0\0\x04\0\0\0\
+\x01\0\0\0\x70\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x0a\0\0\0\0\0\0\x04\0\0\
+\0\x01\0\0\0\x90\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x0a\0\0\0\0\0\0\x04\0\
+\0\0\x01\0\0\0\xb0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x0a\0\0\0\0\0\0\x04\
+\0\0\0\x01\0\0\0\xd0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\x0a\0\0\0\0\0\0\
+\x04\0\0\0\x01\0\0\0\xf0\x0a\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\x0b\0\0\0\0\0\0\
+\x04\0\0\0\x01\0\0\0\x10\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x20\x0b\0\0\0\0\0\
+\0\x04\0\0\0\x01\0\0\0\x30\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\x0b\0\0\0\0\
+\0\0\x04\0\0\0\x01\0\0\0\x50\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\x0b\0\0\0\
+\0\0\0\x04\0\0\0\x01\0\0\0\x70\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\x0b\0\0\
+\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xa0\x0b\0\
+\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xb0\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xc0\x0b\
+\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xd0\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xe0\
+\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\xf0\x0b\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\0\
+\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x10\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\
+\x20\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x30\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\
+\0\x40\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x50\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\
+\0\0\x60\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x70\x0c\0\0\0\0\0\0\x04\0\0\0\x01\
+\0\0\0\x80\x0c\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x90\x0c\0\0\0\0\0\0\x04\0\0\0\
+\x01\0\0\0\x40\x41\x42\x43\x44\0\x74\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\
+\x5f\x74\x6f\x65\x70\x6c\x69\x74\x7a\x5f\x6b\x65\x79\0\x2e\x74\x65\x78\x74\0\
+\x2e\x72\x65\x6c\x2e\x42\x54\x46\x2e\x65\x78\x74\0\x2e\x6d\x61\x70\x73\0\x74\
+\x61\x70\x5f\x72\x73\x73\x5f\x6d\x61\x70\x5f\x63\x6f\x6e\x66\x69\x67\x75\x72\
+\x61\x74\x69\x6f\x6e\x73\0\x74\x75\x6e\x5f\x72\x73\x73\x5f\x73\x74\x65\x65\x72\
+\x69\x6e\x67\x5f\x70\x72\x6f\x67\0\x2e\x72\x65\x6c\x74\x75\x6e\x5f\x72\x73\x73\
+\x5f\x73\x74\x65\x65\x72\x69\x6e\x67\0\x2e\x6c\x6c\x76\x6d\x5f\x61\x64\x64\x72\
+\x73\x69\x67\0\x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x74\x61\x70\x5f\x72\x73\x73\
+\x5f\x6d\x61\x70\x5f\x69\x6e\x64\x69\x72\x65\x63\x74\x69\x6f\x6e\x5f\x74\x61\
+\x62\x6c\x65\0\x2e\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\x6d\x74\x61\x62\0\x2e\
+\x72\x65\x6c\x2e\x42\x54\x46\0\x4c\x42\x42\x30\x5f\x39\0\x4c\x42\x42\x30\x5f\
+\x39\x39\0\x4c\x42\x42\x30\x5f\x37\x39\0\x4c\x42\x42\x30\x5f\x31\x30\x39\0\x4c\
+\x42\x42\x30\x5f\x38\x38\0\x4c\x42\x42\x30\x5f\x34\x38\0\x4c\x42\x42\x30\x5f\
+\x31\x38\0\x4c\x42\x42\x30\x5f\x31\x30\x38\0\x4c\x42\x42\x30\x5f\x39\x37\0\x4c\
+\x42\x42\x30\x5f\x37\x37\0\x4c\x42\x42\x30\x5f\x36\x37\0\x4c\x42\x42\x30\x5f\
+\x34\x37\0\x4c\x42\x42\x30\x5f\x31\x37\0\x4c\x42\x42\x30\x5f\x36\x36\0\x4c\x42\
+\x42\x30\x5f\x34\x36\0\x4c\x42\x42\x30\x5f\x33\x36\0\x4c\x42\x42\x30\x5f\x31\
+\x30\x36\0\x4c\x42\x42\x30\x5f\x35\x35\0\x4c\x42\x42\x30\x5f\x34\x35\0\x4c\x42\
+\x42\x30\x5f\x33\x35\0\x4c\x42\x42\x30\x5f\x32\x35\0\x4c\x42\x42\x30\x5f\x31\
+\x30\x35\0\x4c\x42\x42\x30\x5f\x34\0\x4c\x42\x42\x30\x5f\x39\x34\0\x4c\x42\x42\
+\x30\x5f\x38\x34\0\x4c\x42\x42\x30\x5f\x35\x34\0\x4c\x42\x42\x30\x5f\x34\x34\0\
+\x4c\x42\x42\x30\x5f\x33\x34\0\x4c\x42\x42\x30\x5f\x31\x30\x34\0\x4c\x42\x42\
+\x30\x5f\x38\x33\0\x4c\x42\x42\x30\x5f\x35\x33\0\x4c\x42\x42\x30\x5f\x32\x33\0\
+\x4c\x42\x42\x30\x5f\x31\x30\x33\0\x4c\x42\x42\x30\x5f\x39\x32\0\x4c\x42\x42\
+\x30\x5f\x38\x32\0\x4c\x42\x42\x30\x5f\x37\x32\0\x4c\x42\x42\x30\x5f\x36\x32\0\
+\x4c\x42\x42\x30\x5f\x35\x32\0\x4c\x42\x42\x30\x5f\x34\x32\0\x4c\x42\x42\x30\
+\x5f\x32\x32\0\x4c\x42\x42\x30\x5f\x31\x30\x32\0\x4c\x42\x42\x30\x5f\x38\x31\0\
+\x4c\x42\x42\x30\x5f\x36\x31\0\x4c\x42\x42\x30\x5f\x35\x31\0\x4c\x42\x42\x30\
+\x5f\x31\x31\0\x4c\x42\x42\x30\x5f\x39\x30\0\x4c\x42\x42\x30\x5f\x37\x30\0\x4c\
+\x42\x42\x30\x5f\x36\x30\0\x4c\x42\x42\x30\x5f\x35\x30\0\x4c\x42\x42\x30\x5f\
+\x34\x30\0\x4c\x42\x42\x30\x5f\x32\x30\0\x4c\x42\x42\x30\x5f\x31\x31\x30\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xae\0\0\0\x03\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x25\x4a\0\0\0\0\0\0\x6d\x02\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\0\0\0\x01\0\0\0\x06\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\x40\0\0\0\0\0\0\0\xe0\x13\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\x64\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x3d\0\0\
+\0\0\0\0\x30\0\0\0\0\0\0\0\x0c\0\0\0\x03\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\
+\0\0\x2d\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x20\x14\0\0\0\0\0\0\
+\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x88\0\0\0\
+\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x14\0\0\0\0\0\0\x07\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc2\0\0\0\x01\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x88\x14\0\0\0\0\0\0\x8d\x16\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xbe\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x70\x3d\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\x0c\0\0\0\x07\0\0\0\x08\
+\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x24\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\x18\x2b\0\0\0\0\0\0\xa0\x0c\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\x20\0\0\0\x09\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb0\
+\x3d\0\0\0\0\0\0\x70\x0c\0\0\0\0\0\0\x0c\0\0\0\x09\0\0\0\x08\0\0\0\0\0\0\0\x10\
+\0\0\0\0\0\0\0\x79\0\0\0\x03\x4c\xff\x6f\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\x20\
+\x4a\0\0\0\0\0\0\x05\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\xb6\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb8\x37\0\0\0\0\0\0\
+\x88\x05\0\0\0\0\0\0\x01\0\0\0\x36\0\0\0\x08\0\0\0\0\0\0\0\x18\0\0\0\0\0\0\0";
+}
+
+#ifdef __cplusplus
+struct rss_bpf *rss_bpf::open(const struct bpf_object_open_opts *opts) { return rss_bpf__open_opts(opts); }
+struct rss_bpf *rss_bpf::open_and_load() { return rss_bpf__open_and_load(); }
+int rss_bpf::load(struct rss_bpf *skel) { return rss_bpf__load(skel); }
+int rss_bpf::attach(struct rss_bpf *skel) { return rss_bpf__attach(skel); }
+void rss_bpf::detach(struct rss_bpf *skel) { rss_bpf__detach(skel); }
+void rss_bpf::destroy(struct rss_bpf *skel) { rss_bpf__destroy(skel); }
+const void *rss_bpf::elf_bytes(size_t *sz) { return rss_bpf__elf_bytes(sz); }
+#endif /* __cplusplus */
+
+__attribute__((unused)) static void
+rss_bpf__assert(struct rss_bpf *s __attribute__((unused)))
+{
+#ifdef __cplusplus
+#define _Static_assert static_assert
+#endif
+#ifdef __cplusplus
+#undef _Static_assert
+#endif
}
#endif /* __RSS_BPF_SKEL_H__ */
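Note on the generated skeleton that ends here: it exposes the standard libbpf skeleton entry points (rss_bpf__open_and_load(), rss_bpf__attach(), rss_bpf__destroy(), rss_bpf__elf_bytes()). A minimal consumption sketch, assuming the usual skeleton layout — the progs field name is inferred from the tun_rss_steering_prog symbol visible in the embedded ELF, and the header name is illustrative only:

    #include <bpf/libbpf.h>
    #include "rss.bpf.skeleton.h"   /* illustrative header name */

    static int rss_bpf_demo(void)
    {
        struct rss_bpf *skel;
        int prog_fd;

        skel = rss_bpf__open_and_load();   /* open + load the embedded ELF */
        if (!skel) {
            return -1;
        }
        /* Field name inferred from the ELF symbol table above; the fd would
         * typically be handed to the TUN device for RSS steering. */
        prog_fd = bpf_program__fd(skel->progs.tun_rss_steering_prog);
        (void)prog_fd;
        rss_bpf__destroy(skel);
        return 0;
    }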
diff --git a/gdbstub/gdbstub.c b/gdbstub/gdbstub.c
index fb9c49e..2a66371 100644
--- a/gdbstub/gdbstub.c
+++ b/gdbstub/gdbstub.c
@@ -24,298 +24,27 @@
*/
#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu/error-report.h"
#include "qemu/ctype.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
+#include "qemu/error-report.h"
#include "trace.h"
#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
#ifdef CONFIG_USER_ONLY
-#include "qemu.h"
+#include "gdbstub/user.h"
#else
-#include "monitor/monitor.h"
-#include "chardev/char.h"
-#include "chardev/char-fe.h"
#include "hw/cpu/cluster.h"
#include "hw/boards.h"
#endif
-#define MAX_PACKET_LENGTH 4096
-
-#include "qemu/sockets.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
-#include "semihosting/semihost.h"
-#include "exec/exec-all.h"
#include "exec/replay-core.h"
+#include "exec/hwaddr.h"
#include "internals.h"
-#ifdef CONFIG_USER_ONLY
-#define GDB_ATTACHED "0"
-#else
-#define GDB_ATTACHED "1"
-#endif
-
-#ifndef CONFIG_USER_ONLY
-static int phy_memory_mode;
-#endif
-
-static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
- uint8_t *buf, int len, bool is_write)
-{
- CPUClass *cc;
-
-#ifndef CONFIG_USER_ONLY
- if (phy_memory_mode) {
- if (is_write) {
- cpu_physical_memory_write(addr, buf, len);
- } else {
- cpu_physical_memory_read(addr, buf, len);
- }
- return 0;
- }
-#endif
-
- cc = CPU_GET_CLASS(cpu);
- if (cc->memory_rw_debug) {
- return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
- }
- return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
-}
-
-/* Return the GDB index for a given vCPU state.
- *
- * For user mode this is simply the thread id. In system mode GDB
- * numbers CPUs from 1 as 0 is reserved as an "any cpu" index.
- */
-static inline int cpu_gdb_index(CPUState *cpu)
-{
-#if defined(CONFIG_USER_ONLY)
- TaskState *ts = (TaskState *) cpu->opaque;
- return ts ? ts->ts_tid : -1;
-#else
- return cpu->cpu_index + 1;
-#endif
-}
-
-enum {
- GDB_SIGNAL_0 = 0,
- GDB_SIGNAL_INT = 2,
- GDB_SIGNAL_QUIT = 3,
- GDB_SIGNAL_TRAP = 5,
- GDB_SIGNAL_ABRT = 6,
- GDB_SIGNAL_ALRM = 14,
- GDB_SIGNAL_IO = 23,
- GDB_SIGNAL_XCPU = 24,
- GDB_SIGNAL_UNKNOWN = 143
-};
-
-#ifdef CONFIG_USER_ONLY
-
-/* Map target signal numbers to GDB protocol signal numbers and vice
- * versa. For user emulation's currently supported systems, we can
- * assume most signals are defined.
- */
-
-static int gdb_signal_table[] = {
- 0,
- TARGET_SIGHUP,
- TARGET_SIGINT,
- TARGET_SIGQUIT,
- TARGET_SIGILL,
- TARGET_SIGTRAP,
- TARGET_SIGABRT,
- -1, /* SIGEMT */
- TARGET_SIGFPE,
- TARGET_SIGKILL,
- TARGET_SIGBUS,
- TARGET_SIGSEGV,
- TARGET_SIGSYS,
- TARGET_SIGPIPE,
- TARGET_SIGALRM,
- TARGET_SIGTERM,
- TARGET_SIGURG,
- TARGET_SIGSTOP,
- TARGET_SIGTSTP,
- TARGET_SIGCONT,
- TARGET_SIGCHLD,
- TARGET_SIGTTIN,
- TARGET_SIGTTOU,
- TARGET_SIGIO,
- TARGET_SIGXCPU,
- TARGET_SIGXFSZ,
- TARGET_SIGVTALRM,
- TARGET_SIGPROF,
- TARGET_SIGWINCH,
- -1, /* SIGLOST */
- TARGET_SIGUSR1,
- TARGET_SIGUSR2,
-#ifdef TARGET_SIGPWR
- TARGET_SIGPWR,
-#else
- -1,
-#endif
- -1, /* SIGPOLL */
- -1,
- -1,
- -1,
- -1,
- -1,
- -1,
- -1,
- -1,
- -1,
- -1,
- -1,
-#ifdef __SIGRTMIN
- __SIGRTMIN + 1,
- __SIGRTMIN + 2,
- __SIGRTMIN + 3,
- __SIGRTMIN + 4,
- __SIGRTMIN + 5,
- __SIGRTMIN + 6,
- __SIGRTMIN + 7,
- __SIGRTMIN + 8,
- __SIGRTMIN + 9,
- __SIGRTMIN + 10,
- __SIGRTMIN + 11,
- __SIGRTMIN + 12,
- __SIGRTMIN + 13,
- __SIGRTMIN + 14,
- __SIGRTMIN + 15,
- __SIGRTMIN + 16,
- __SIGRTMIN + 17,
- __SIGRTMIN + 18,
- __SIGRTMIN + 19,
- __SIGRTMIN + 20,
- __SIGRTMIN + 21,
- __SIGRTMIN + 22,
- __SIGRTMIN + 23,
- __SIGRTMIN + 24,
- __SIGRTMIN + 25,
- __SIGRTMIN + 26,
- __SIGRTMIN + 27,
- __SIGRTMIN + 28,
- __SIGRTMIN + 29,
- __SIGRTMIN + 30,
- __SIGRTMIN + 31,
- -1, /* SIGCANCEL */
- __SIGRTMIN,
- __SIGRTMIN + 32,
- __SIGRTMIN + 33,
- __SIGRTMIN + 34,
- __SIGRTMIN + 35,
- __SIGRTMIN + 36,
- __SIGRTMIN + 37,
- __SIGRTMIN + 38,
- __SIGRTMIN + 39,
- __SIGRTMIN + 40,
- __SIGRTMIN + 41,
- __SIGRTMIN + 42,
- __SIGRTMIN + 43,
- __SIGRTMIN + 44,
- __SIGRTMIN + 45,
- __SIGRTMIN + 46,
- __SIGRTMIN + 47,
- __SIGRTMIN + 48,
- __SIGRTMIN + 49,
- __SIGRTMIN + 50,
- __SIGRTMIN + 51,
- __SIGRTMIN + 52,
- __SIGRTMIN + 53,
- __SIGRTMIN + 54,
- __SIGRTMIN + 55,
- __SIGRTMIN + 56,
- __SIGRTMIN + 57,
- __SIGRTMIN + 58,
- __SIGRTMIN + 59,
- __SIGRTMIN + 60,
- __SIGRTMIN + 61,
- __SIGRTMIN + 62,
- __SIGRTMIN + 63,
- __SIGRTMIN + 64,
- __SIGRTMIN + 65,
- __SIGRTMIN + 66,
- __SIGRTMIN + 67,
- __SIGRTMIN + 68,
- __SIGRTMIN + 69,
- __SIGRTMIN + 70,
- __SIGRTMIN + 71,
- __SIGRTMIN + 72,
- __SIGRTMIN + 73,
- __SIGRTMIN + 74,
- __SIGRTMIN + 75,
- __SIGRTMIN + 76,
- __SIGRTMIN + 77,
- __SIGRTMIN + 78,
- __SIGRTMIN + 79,
- __SIGRTMIN + 80,
- __SIGRTMIN + 81,
- __SIGRTMIN + 82,
- __SIGRTMIN + 83,
- __SIGRTMIN + 84,
- __SIGRTMIN + 85,
- __SIGRTMIN + 86,
- __SIGRTMIN + 87,
- __SIGRTMIN + 88,
- __SIGRTMIN + 89,
- __SIGRTMIN + 90,
- __SIGRTMIN + 91,
- __SIGRTMIN + 92,
- __SIGRTMIN + 93,
- __SIGRTMIN + 94,
- __SIGRTMIN + 95,
- -1, /* SIGINFO */
- -1, /* UNKNOWN */
- -1, /* DEFAULT */
- -1,
- -1,
- -1,
- -1,
- -1,
- -1
-#endif
-};
-#else
-/* In system mode we only need SIGINT and SIGTRAP; other signals
- are not yet supported. */
-
-enum {
- TARGET_SIGINT = 2,
- TARGET_SIGTRAP = 5
-};
-
-static int gdb_signal_table[] = {
- -1,
- -1,
- TARGET_SIGINT,
- -1,
- -1,
- TARGET_SIGTRAP
-};
-#endif
-
-#ifdef CONFIG_USER_ONLY
-static int target_signal_to_gdb (int sig)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
- if (gdb_signal_table[i] == sig)
- return i;
- return GDB_SIGNAL_UNKNOWN;
-}
-#endif
-
-static int gdb_signal_to_target (int sig)
-{
- if (sig < ARRAY_SIZE (gdb_signal_table))
- return gdb_signal_table[sig];
- else
- return -1;
-}
-
typedef struct GDBRegisterState {
int base_reg;
int num_regs;
@@ -325,56 +54,9 @@
struct GDBRegisterState *next;
} GDBRegisterState;
-typedef struct GDBProcess {
- uint32_t pid;
- bool attached;
+GDBState gdbserver_state;
- char target_xml[1024];
-} GDBProcess;
-
-enum RSState {
- RS_INACTIVE,
- RS_IDLE,
- RS_GETLINE,
- RS_GETLINE_ESC,
- RS_GETLINE_RLE,
- RS_CHKSUM1,
- RS_CHKSUM2,
-};
-typedef struct GDBState {
- bool init; /* have we been initialised? */
- CPUState *c_cpu; /* current CPU for step/continue ops */
- CPUState *g_cpu; /* current CPU for other ops */
- CPUState *query_cpu; /* for q{f|s}ThreadInfo */
- enum RSState state; /* parsing state */
- char line_buf[MAX_PACKET_LENGTH];
- int line_buf_index;
- int line_sum; /* running checksum */
- int line_csum; /* checksum at the end of the packet */
- GByteArray *last_packet;
- int signal;
-#ifdef CONFIG_USER_ONLY
- int fd;
- char *socket_path;
- int running_state;
-#else
- CharBackend chr;
- Chardev *mon_chr;
-#endif
- bool multiprocess;
- GDBProcess *processes;
- int process_num;
- char syscall_buf[256];
- gdb_syscall_complete_cb current_syscall_cb;
- GString *str_buf;
- GByteArray *mem_buf;
- int sstep_flags;
- int supported_sstep_flags;
-} GDBState;
-
-static GDBState gdbserver_state;
-
-static void init_gdbserver_state(void)
+void gdb_init_gdbserver_state(void)
{
g_assert(!gdbserver_state.init);
memset(&gdbserver_state, 0, sizeof(GDBState));
@@ -393,211 +75,10 @@
gdbserver_state.sstep_flags &= gdbserver_state.supported_sstep_flags;
}
-#ifndef CONFIG_USER_ONLY
-static void reset_gdbserver_state(void)
-{
- g_free(gdbserver_state.processes);
- gdbserver_state.processes = NULL;
- gdbserver_state.process_num = 0;
-}
-#endif
-
bool gdb_has_xml;
-#ifdef CONFIG_USER_ONLY
-
-static int get_char(void)
-{
- uint8_t ch;
- int ret;
-
- for(;;) {
- ret = recv(gdbserver_state.fd, &ch, 1, 0);
- if (ret < 0) {
- if (errno == ECONNRESET)
- gdbserver_state.fd = -1;
- if (errno != EINTR)
- return -1;
- } else if (ret == 0) {
- close(gdbserver_state.fd);
- gdbserver_state.fd = -1;
- return -1;
- } else {
- break;
- }
- }
- return ch;
-}
-#endif
-
-/*
- * Return true if there is a GDB currently connected to the stub
- * and attached to a CPU
- */
-static bool gdb_attached(void)
-{
- return gdbserver_state.init && gdbserver_state.c_cpu;
-}
-
-static enum {
- GDB_SYS_UNKNOWN,
- GDB_SYS_ENABLED,
- GDB_SYS_DISABLED,
-} gdb_syscall_mode;
-
-/* Decide if either remote gdb syscalls or native file IO should be used. */
-int use_gdb_syscalls(void)
-{
- SemihostingTarget target = semihosting_get_target();
- if (target == SEMIHOSTING_TARGET_NATIVE) {
- /* -semihosting-config target=native */
- return false;
- } else if (target == SEMIHOSTING_TARGET_GDB) {
- /* -semihosting-config target=gdb */
- return true;
- }
-
- /* -semihosting-config target=auto */
- /* On the first call check if gdb is connected and remember. */
- if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
- gdb_syscall_mode = gdb_attached() ? GDB_SYS_ENABLED : GDB_SYS_DISABLED;
- }
- return gdb_syscall_mode == GDB_SYS_ENABLED;
-}
-
-static bool stub_can_reverse(void)
-{
-#ifdef CONFIG_USER_ONLY
- return false;
-#else
- return replay_mode == REPLAY_MODE_PLAY;
-#endif
-}
-
-/* Resume execution. */
-static inline void gdb_continue(void)
-{
-
-#ifdef CONFIG_USER_ONLY
- gdbserver_state.running_state = 1;
- trace_gdbstub_op_continue();
-#else
- if (!runstate_needs_reset()) {
- trace_gdbstub_op_continue();
- vm_start();
- }
-#endif
-}
-
-/*
- * Resume execution, per CPU actions. For user-mode emulation it's
- * equivalent to gdb_continue.
- */
-static int gdb_continue_partial(char *newstates)
-{
- CPUState *cpu;
- int res = 0;
-#ifdef CONFIG_USER_ONLY
- /*
- * This is not exactly accurate, but it's an improvement compared to the
- * previous situation, where only one CPU would be single-stepped.
- */
- CPU_FOREACH(cpu) {
- if (newstates[cpu->cpu_index] == 's') {
- trace_gdbstub_op_stepping(cpu->cpu_index);
- cpu_single_step(cpu, gdbserver_state.sstep_flags);
- }
- }
- gdbserver_state.running_state = 1;
-#else
- int flag = 0;
-
- if (!runstate_needs_reset()) {
- bool step_requested = false;
- CPU_FOREACH(cpu) {
- if (newstates[cpu->cpu_index] == 's') {
- step_requested = true;
- break;
- }
- }
-
- if (vm_prepare_start(step_requested)) {
- return 0;
- }
-
- CPU_FOREACH(cpu) {
- switch (newstates[cpu->cpu_index]) {
- case 0:
- case 1:
- break; /* nothing to do here */
- case 's':
- trace_gdbstub_op_stepping(cpu->cpu_index);
- cpu_single_step(cpu, gdbserver_state.sstep_flags);
- cpu_resume(cpu);
- flag = 1;
- break;
- case 'c':
- trace_gdbstub_op_continue_cpu(cpu->cpu_index);
- cpu_resume(cpu);
- flag = 1;
- break;
- default:
- res = -1;
- break;
- }
- }
- }
- if (flag) {
- qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
- }
-#endif
- return res;
-}
-
-static void put_buffer(const uint8_t *buf, int len)
-{
-#ifdef CONFIG_USER_ONLY
- int ret;
-
- while (len > 0) {
- ret = send(gdbserver_state.fd, buf, len, 0);
- if (ret < 0) {
- if (errno != EINTR)
- return;
- } else {
- buf += ret;
- len -= ret;
- }
- }
-#else
- /* XXX this blocks entire thread. Rewrite to use
- * qemu_chr_fe_write and background I/O callbacks */
- qemu_chr_fe_write_all(&gdbserver_state.chr, buf, len);
-#endif
-}
-
-static inline int fromhex(int v)
-{
- if (v >= '0' && v <= '9')
- return v - '0';
- else if (v >= 'A' && v <= 'F')
- return v - 'A' + 10;
- else if (v >= 'a' && v <= 'f')
- return v - 'a' + 10;
- else
- return 0;
-}
-
-static inline int tohex(int v)
-{
- if (v < 10)
- return v + '0';
- else
- return v - 10 + 'a';
-}
-
/* writes 2*len+1 bytes in buf */
-static void memtohex(GString *buf, const uint8_t *mem, int len)
+void gdb_memtohex(GString *buf, const uint8_t *mem, int len)
{
int i, c;
for(i = 0; i < len; i++) {
@@ -608,7 +89,7 @@
g_string_append_c(buf, '\0');
}
-static void hextomem(GByteArray *mem, const char *buf, int len)
+void gdb_hextomem(GByteArray *mem, const char *buf, int len)
{
int i;
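The rename in this hunk does not change behaviour: gdb_memtohex() appends 2*len hex characters plus a trailing NUL to a GString, and gdb_hextomem() decodes hex text back into a GByteArray. A minimal round-trip sketch using the signatures shown above:

    GString *hex = g_string_new(NULL);
    GByteArray *raw = g_byte_array_new();
    const uint8_t data[2] = { 0xde, 0xad };

    gdb_memtohex(hex, data, sizeof(data));      /* hex->str is now "dead" */
    gdb_hextomem(raw, hex->str, sizeof(data));  /* raw holds 0xde 0xad again */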
@@ -653,7 +134,7 @@
}
/* return -1 if error, 0 if OK */
-static int put_packet_binary(const char *buf, int len, bool dump)
+int gdb_put_packet_binary(const char *buf, int len, bool dump)
{
int csum, i;
uint8_t footer[3];
@@ -677,37 +158,31 @@
footer[2] = tohex((csum) & 0xf);
g_byte_array_append(gdbserver_state.last_packet, footer, 3);
- put_buffer(gdbserver_state.last_packet->data,
+ gdb_put_buffer(gdbserver_state.last_packet->data,
gdbserver_state.last_packet->len);
-#ifdef CONFIG_USER_ONLY
- i = get_char();
- if (i < 0)
- return -1;
- if (i == '+')
+ if (gdb_got_immediate_ack()) {
break;
-#else
- break;
-#endif
+ }
}
return 0;
}
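The footer built above is the standard remote-serial-protocol trailer: each packet travels as '$' <payload> '#' followed by two hex digits of the payload bytes summed modulo 256. For example, the reply "OK" (0x4f + 0x4b = 0x9a) goes out on the wire as "$OK#9a".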
/* return -1 if error, 0 if OK */
-static int put_packet(const char *buf)
+int gdb_put_packet(const char *buf)
{
trace_gdbstub_io_reply(buf);
- return put_packet_binary(buf, strlen(buf), false);
+ return gdb_put_packet_binary(buf, strlen(buf), false);
}
-static void put_strbuf(void)
+void gdb_put_strbuf(void)
{
- put_packet(gdbserver_state.str_buf->str);
+ gdb_put_packet(gdbserver_state.str_buf->str);
}
/* Encode data using the encoding for 'x' packets. */
-static void memtox(GString *buf, const char *mem, int len)
+void gdb_memtox(GString *buf, const char *mem, int len)
{
char c;
@@ -764,7 +239,7 @@
CPUState *cpu;
CPU_FOREACH(cpu) {
- if (cpu_gdb_index(cpu) == thread_id) {
+ if (gdb_get_cpu_index(cpu) == thread_id) {
return cpu;
}
}
@@ -818,7 +293,7 @@
}
/* Return the first attached cpu */
-static CPUState *gdb_first_attached_cpu(void)
+CPUState *gdb_first_attached_cpu(void)
{
CPUState *cpu = first_cpu;
GDBProcess *process = gdb_get_cpu_process(cpu);
@@ -1024,7 +499,7 @@
}
-static void gdb_set_cpu_pc(target_ulong pc)
+static void gdb_set_cpu_pc(vaddr pc)
{
CPUState *cpu = gdbserver_state.c_cpu;
@@ -1032,23 +507,16 @@
cpu_set_pc(cpu, pc);
}
-static void gdb_append_thread_id(CPUState *cpu, GString *buf)
+void gdb_append_thread_id(CPUState *cpu, GString *buf)
{
if (gdbserver_state.multiprocess) {
g_string_append_printf(buf, "p%02x.%02x",
- gdb_get_cpu_pid(cpu), cpu_gdb_index(cpu));
+ gdb_get_cpu_pid(cpu), gdb_get_cpu_index(cpu));
} else {
- g_string_append_printf(buf, "%02x", cpu_gdb_index(cpu));
+ g_string_append_printf(buf, "%02x", gdb_get_cpu_index(cpu));
}
}
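For reference, the two format strings above produce the thread-id syntax GDB expects: with multiprocess negotiated, process 1 / thread 2 is reported as "p01.02", while the single-process form is just the bare hex index "02".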
-typedef enum GDBThreadIdKind {
- GDB_ONE_THREAD = 0,
- GDB_ALL_THREADS, /* One process, all threads */
- GDB_ALL_PROCESSES,
- GDB_READ_THREAD_ERR
-} GDBThreadIdKind;
-
static GDBThreadIdKind read_thread_id(const char *buf, const char **end_buf,
uint32_t *pid, uint32_t *tid)
{
@@ -1111,16 +579,7 @@
GDBProcess *process;
CPUState *cpu;
GDBThreadIdKind kind;
-#ifdef CONFIG_USER_ONLY
- int max_cpus = 1; /* global variable max_cpus exists only in system mode */
-
- CPU_FOREACH(cpu) {
- max_cpus = max_cpus <= cpu->cpu_index ? cpu->cpu_index + 1 : max_cpus;
- }
-#else
- MachineState *ms = MACHINE(qdev_get_machine());
- unsigned int max_cpus = ms->smp.max_cpus;
-#endif
+ unsigned int max_cpus = gdb_get_max_cpus();
/* uninitialised CPUs stay 0 */
newstates = g_new0(char, max_cpus);
@@ -1229,20 +688,6 @@
return res;
}
-typedef union GdbCmdVariant {
- const char *data;
- uint8_t opcode;
- unsigned long val_ul;
- unsigned long long val_ull;
- struct {
- GDBThreadIdKind kind;
- uint32_t pid;
- uint32_t tid;
- } thread_id;
-} GdbCmdVariant;
-
-#define get_param(p, i) (&g_array_index(p, GdbCmdVariant, i))
-
static const char *cmd_next_param(const char *param, const char delimiter)
{
static const char all_delimiters[] = ",;:=";
@@ -1409,7 +854,7 @@
/* In case there was an error during the command parsing we must
* send a NULL packet to indicate the command is not supported */
if (process_string_cmd(NULL, data, cmd, 1)) {
- put_packet("");
+ gdb_put_packet("");
}
}
@@ -1420,7 +865,7 @@
if (gdbserver_state.multiprocess) {
if (!params->len) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
@@ -1441,10 +886,10 @@
if (!gdbserver_state.c_cpu) {
/* No more process attached */
- gdb_syscall_mode = GDB_SYS_DISABLED;
+ gdb_disable_syscalls();
gdb_continue();
}
- put_packet("OK");
+ gdb_put_packet("OK");
}
static void handle_thread_alive(GArray *params, void *user_ctx)
@@ -1452,23 +897,23 @@
CPUState *cpu;
if (!params->len) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
if (get_param(params, 0)->thread_id.kind == GDB_READ_THREAD_ERR) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
cpu = gdb_get_cpu(get_param(params, 0)->thread_id.pid,
get_param(params, 0)->thread_id.tid);
if (!cpu) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
- put_packet("OK");
+ gdb_put_packet("OK");
}
static void handle_continue(GArray *params, void *user_ctx)
@@ -1505,24 +950,24 @@
CPUState *cpu;
if (params->len != 2) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
if (get_param(params, 1)->thread_id.kind == GDB_READ_THREAD_ERR) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
if (get_param(params, 1)->thread_id.kind != GDB_ONE_THREAD) {
- put_packet("OK");
+ gdb_put_packet("OK");
return;
}
cpu = gdb_get_cpu(get_param(params, 1)->thread_id.pid,
get_param(params, 1)->thread_id.tid);
if (!cpu) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
@@ -1533,14 +978,14 @@
switch (get_param(params, 0)->opcode) {
case 'c':
gdbserver_state.c_cpu = cpu;
- put_packet("OK");
+ gdb_put_packet("OK");
break;
case 'g':
gdbserver_state.g_cpu = cpu;
- put_packet("OK");
+ gdb_put_packet("OK");
break;
default:
- put_packet("E22");
+ gdb_put_packet("E22");
break;
}
}
@@ -1550,7 +995,7 @@
int res;
if (params->len != 3) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
@@ -1559,14 +1004,14 @@
get_param(params, 1)->val_ull,
get_param(params, 2)->val_ull);
if (res >= 0) {
- put_packet("OK");
+ gdb_put_packet("OK");
return;
} else if (res == -ENOSYS) {
- put_packet("");
+ gdb_put_packet("");
return;
}
- put_packet("E22");
+ gdb_put_packet("E22");
}
static void handle_remove_bp(GArray *params, void *user_ctx)
@@ -1574,7 +1019,7 @@
int res;
if (params->len != 3) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
@@ -1583,14 +1028,14 @@
get_param(params, 1)->val_ull,
get_param(params, 2)->val_ull);
if (res >= 0) {
- put_packet("OK");
+ gdb_put_packet("OK");
return;
} else if (res == -ENOSYS) {
- put_packet("");
+ gdb_put_packet("");
return;
}
- put_packet("E22");
+ gdb_put_packet("E22");
}
/*
@@ -1609,20 +1054,20 @@
int reg_size;
if (!gdb_has_xml) {
- put_packet("");
+ gdb_put_packet("");
return;
}
if (params->len != 2) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
reg_size = strlen(get_param(params, 1)->data) / 2;
- hextomem(gdbserver_state.mem_buf, get_param(params, 1)->data, reg_size);
+ gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 1)->data, reg_size);
gdb_write_register(gdbserver_state.g_cpu, gdbserver_state.mem_buf->data,
get_param(params, 0)->val_ull);
- put_packet("OK");
+ gdb_put_packet("OK");
}
static void handle_get_reg(GArray *params, void *user_ctx)
@@ -1630,12 +1075,12 @@
int reg_size;
if (!gdb_has_xml) {
- put_packet("");
+ gdb_put_packet("");
return;
}
if (!params->len) {
- put_packet("E14");
+ gdb_put_packet("E14");
return;
}
@@ -1643,75 +1088,77 @@
gdbserver_state.mem_buf,
get_param(params, 0)->val_ull);
if (!reg_size) {
- put_packet("E14");
+ gdb_put_packet("E14");
return;
} else {
g_byte_array_set_size(gdbserver_state.mem_buf, reg_size);
}
- memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, reg_size);
- put_strbuf();
+ gdb_memtohex(gdbserver_state.str_buf,
+ gdbserver_state.mem_buf->data, reg_size);
+ gdb_put_strbuf();
}
static void handle_write_mem(GArray *params, void *user_ctx)
{
if (params->len != 3) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
- /* hextomem() reads 2*len bytes */
+ /* gdb_hextomem() reads 2*len bytes */
if (get_param(params, 1)->val_ull >
strlen(get_param(params, 2)->data) / 2) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
- hextomem(gdbserver_state.mem_buf, get_param(params, 2)->data,
- get_param(params, 1)->val_ull);
- if (target_memory_rw_debug(gdbserver_state.g_cpu,
- get_param(params, 0)->val_ull,
- gdbserver_state.mem_buf->data,
- gdbserver_state.mem_buf->len, true)) {
- put_packet("E14");
+ gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 2)->data,
+ get_param(params, 1)->val_ull);
+ if (gdb_target_memory_rw_debug(gdbserver_state.g_cpu,
+ get_param(params, 0)->val_ull,
+ gdbserver_state.mem_buf->data,
+ gdbserver_state.mem_buf->len, true)) {
+ gdb_put_packet("E14");
return;
}
- put_packet("OK");
+ gdb_put_packet("OK");
}
static void handle_read_mem(GArray *params, void *user_ctx)
{
if (params->len != 2) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
- /* memtohex() doubles the required space */
+ /* gdb_memtohex() doubles the required space */
if (get_param(params, 1)->val_ull > MAX_PACKET_LENGTH / 2) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
g_byte_array_set_size(gdbserver_state.mem_buf,
get_param(params, 1)->val_ull);
- if (target_memory_rw_debug(gdbserver_state.g_cpu,
- get_param(params, 0)->val_ull,
- gdbserver_state.mem_buf->data,
- gdbserver_state.mem_buf->len, false)) {
- put_packet("E14");
+ if (gdb_target_memory_rw_debug(gdbserver_state.g_cpu,
+ get_param(params, 0)->val_ull,
+ gdbserver_state.mem_buf->data,
+ gdbserver_state.mem_buf->len, false)) {
+ gdb_put_packet("E14");
return;
}
- memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data,
+ gdb_memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data,
gdbserver_state.mem_buf->len);
- put_strbuf();
+ gdb_put_strbuf();
}
static void handle_write_all_regs(GArray *params, void *user_ctx)
{
- target_ulong addr, len;
+ int reg_id;
+ size_t len;
uint8_t *registers;
int reg_size;
@@ -1721,94 +1168,42 @@
cpu_synchronize_state(gdbserver_state.g_cpu);
len = strlen(get_param(params, 0)->data) / 2;
- hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len);
+ gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len);
registers = gdbserver_state.mem_buf->data;
- for (addr = 0; addr < gdbserver_state.g_cpu->gdb_num_g_regs && len > 0;
- addr++) {
- reg_size = gdb_write_register(gdbserver_state.g_cpu, registers, addr);
+ for (reg_id = 0;
+ reg_id < gdbserver_state.g_cpu->gdb_num_g_regs && len > 0;
+ reg_id++) {
+ reg_size = gdb_write_register(gdbserver_state.g_cpu, registers, reg_id);
len -= reg_size;
registers += reg_size;
}
- put_packet("OK");
+ gdb_put_packet("OK");
}
static void handle_read_all_regs(GArray *params, void *user_ctx)
{
- target_ulong addr, len;
+ int reg_id;
+ size_t len;
cpu_synchronize_state(gdbserver_state.g_cpu);
g_byte_array_set_size(gdbserver_state.mem_buf, 0);
len = 0;
- for (addr = 0; addr < gdbserver_state.g_cpu->gdb_num_g_regs; addr++) {
+ for (reg_id = 0; reg_id < gdbserver_state.g_cpu->gdb_num_g_regs; reg_id++) {
len += gdb_read_register(gdbserver_state.g_cpu,
gdbserver_state.mem_buf,
- addr);
+ reg_id);
}
g_assert(len == gdbserver_state.mem_buf->len);
- memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, len);
- put_strbuf();
+ gdb_memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, len);
+ gdb_put_strbuf();
}
-static void handle_file_io(GArray *params, void *user_ctx)
-{
- if (params->len >= 1 && gdbserver_state.current_syscall_cb) {
- uint64_t ret;
- int err;
-
- ret = get_param(params, 0)->val_ull;
- if (params->len >= 2) {
- err = get_param(params, 1)->val_ull;
- } else {
- err = 0;
- }
-
- /* Convert GDB error numbers back to host error numbers. */
-#define E(X) case GDB_E##X: err = E##X; break
- switch (err) {
- case 0:
- break;
- E(PERM);
- E(NOENT);
- E(INTR);
- E(BADF);
- E(ACCES);
- E(FAULT);
- E(BUSY);
- E(EXIST);
- E(NODEV);
- E(NOTDIR);
- E(ISDIR);
- E(INVAL);
- E(NFILE);
- E(MFILE);
- E(FBIG);
- E(NOSPC);
- E(SPIPE);
- E(ROFS);
- E(NAMETOOLONG);
- default:
- err = EINVAL;
- break;
- }
-#undef E
-
- gdbserver_state.current_syscall_cb(gdbserver_state.c_cpu, ret, err);
- gdbserver_state.current_syscall_cb = NULL;
- }
-
- if (params->len >= 3 && get_param(params, 2)->opcode == (uint8_t)'C') {
- put_packet("T02");
- return;
- }
-
- gdb_continue();
-}
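The 'F' reply handled by the function removed above (now gdb_handle_file_io(), referenced further down) is gdb's answer to a file-I/O request: parameter 0 carries the return value, parameter 1 an optional protocol errno that the E(...) table translates back to a host errno, and a trailing 'C' means the user interrupted with Ctrl-C, which the stub reports as a "T02" stop instead of resuming.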
static void handle_step(GArray *params, void *user_ctx)
{
if (params->len) {
- gdb_set_cpu_pc((target_ulong)get_param(params, 0)->val_ull);
+ gdb_set_cpu_pc(get_param(params, 0)->val_ull);
}
cpu_single_step(gdbserver_state.c_cpu, gdbserver_state.sstep_flags);
@@ -1817,8 +1212,8 @@
static void handle_backward(GArray *params, void *user_ctx)
{
- if (!stub_can_reverse()) {
- put_packet("E22");
+ if (!gdb_can_reverse()) {
+ gdb_put_packet("E22");
}
if (params->len == 1) {
switch (get_param(params, 0)->opcode) {
@@ -1826,26 +1221,26 @@
if (replay_reverse_step()) {
gdb_continue();
} else {
- put_packet("E14");
+ gdb_put_packet("E14");
}
return;
case 'c':
if (replay_reverse_continue()) {
gdb_continue();
} else {
- put_packet("E14");
+ gdb_put_packet("E14");
}
return;
}
}
/* Default invalid command */
- put_packet("");
+ gdb_put_packet("");
}
static void handle_v_cont_query(GArray *params, void *user_ctx)
{
- put_packet("vCont;c;C;s;S");
+ gdb_put_packet("vCont;c;C;s;S");
}
static void handle_v_cont(GArray *params, void *user_ctx)
@@ -1858,9 +1253,9 @@
res = gdb_handle_vcont(get_param(params, 0)->data);
if ((res == -EINVAL) || (res == -ERANGE)) {
- put_packet("E22");
+ gdb_put_packet("E22");
} else if (res) {
- put_packet("");
+ gdb_put_packet("");
}
}
@@ -1892,13 +1287,13 @@
gdb_append_thread_id(cpu, gdbserver_state.str_buf);
g_string_append_c(gdbserver_state.str_buf, ';');
cleanup:
- put_strbuf();
+ gdb_put_strbuf();
}
static void handle_v_kill(GArray *params, void *user_ctx)
{
/* Kill the target */
- put_packet("OK");
+ gdb_put_packet("OK");
error_report("QEMU: Terminated via GDBstub");
gdb_exit(0);
exit(0);
@@ -1939,7 +1334,7 @@
if (process_string_cmd(NULL, get_param(params, 0)->data,
gdb_v_commands_table,
ARRAY_SIZE(gdb_v_commands_table))) {
- put_packet("");
+ gdb_put_packet("");
}
}
@@ -1957,7 +1352,7 @@
SSTEP_NOTIMER);
}
- put_strbuf();
+ gdb_put_strbuf();
}
static void handle_set_qemu_sstep(GArray *params, void *user_ctx)
@@ -1971,19 +1366,19 @@
new_sstep_flags = get_param(params, 0)->val_ul;
if (new_sstep_flags & ~gdbserver_state.supported_sstep_flags) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
gdbserver_state.sstep_flags = new_sstep_flags;
- put_packet("OK");
+ gdb_put_packet("OK");
}
static void handle_query_qemu_sstep(GArray *params, void *user_ctx)
{
g_string_printf(gdbserver_state.str_buf, "0x%x",
gdbserver_state.sstep_flags);
- put_strbuf();
+ gdb_put_strbuf();
}
static void handle_query_curr_tid(GArray *params, void *user_ctx)
@@ -2000,19 +1395,19 @@
cpu = get_first_cpu_in_process(process);
g_string_assign(gdbserver_state.str_buf, "QC");
gdb_append_thread_id(cpu, gdbserver_state.str_buf);
- put_strbuf();
+ gdb_put_strbuf();
}
static void handle_query_threads(GArray *params, void *user_ctx)
{
if (!gdbserver_state.query_cpu) {
- put_packet("l");
+ gdb_put_packet("l");
return;
}
g_string_assign(gdbserver_state.str_buf, "m");
gdb_append_thread_id(gdbserver_state.query_cpu, gdbserver_state.str_buf);
- put_strbuf();
+ gdb_put_strbuf();
gdbserver_state.query_cpu = gdb_next_attached_cpu(gdbserver_state.query_cpu);
}
@@ -2029,7 +1424,7 @@
if (!params->len ||
get_param(params, 0)->thread_id.kind == GDB_READ_THREAD_ERR) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
@@ -2054,52 +1449,10 @@
cpu->halted ? "halted " : "running");
}
trace_gdbstub_op_extra_info(rs->str);
- memtohex(gdbserver_state.str_buf, (uint8_t *)rs->str, rs->len);
- put_strbuf();
+ gdb_memtohex(gdbserver_state.str_buf, (uint8_t *)rs->str, rs->len);
+ gdb_put_strbuf();
}
-#ifdef CONFIG_USER_ONLY
-static void handle_query_offsets(GArray *params, void *user_ctx)
-{
- TaskState *ts;
-
- ts = gdbserver_state.c_cpu->opaque;
- g_string_printf(gdbserver_state.str_buf,
- "Text=" TARGET_ABI_FMT_lx
- ";Data=" TARGET_ABI_FMT_lx
- ";Bss=" TARGET_ABI_FMT_lx,
- ts->info->code_offset,
- ts->info->data_offset,
- ts->info->data_offset);
- put_strbuf();
-}
-#else
-static void handle_query_rcmd(GArray *params, void *user_ctx)
-{
- const guint8 zero = 0;
- int len;
-
- if (!params->len) {
- put_packet("E22");
- return;
- }
-
- len = strlen(get_param(params, 0)->data);
- if (len % 2) {
- put_packet("E01");
- return;
- }
-
- g_assert(gdbserver_state.mem_buf->len == 0);
- len = len / 2;
- hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len);
- g_byte_array_append(gdbserver_state.mem_buf, &zero, 1);
- qemu_chr_be_write(gdbserver_state.mon_chr, gdbserver_state.mem_buf->data,
- gdbserver_state.mem_buf->len);
- put_packet("OK");
-}
-#endif
-
static void handle_query_supported(GArray *params, void *user_ctx)
{
CPUClass *cc;
@@ -2110,7 +1463,7 @@
g_string_append(gdbserver_state.str_buf, ";qXfer:features:read+");
}
- if (stub_can_reverse()) {
+ if (gdb_can_reverse()) {
g_string_append(gdbserver_state.str_buf,
";ReverseStep+;ReverseContinue+");
}
@@ -2127,7 +1480,7 @@
}
g_string_append(gdbserver_state.str_buf, ";vContSupported+;multiprocess+");
- put_strbuf();
+ gdb_put_strbuf();
}
static void handle_query_xfer_features(GArray *params, void *user_ctx)
@@ -2139,14 +1492,14 @@
const char *p;
if (params->len < 3) {
- put_packet("E22");
+ gdb_put_packet("E22");
return;
}
process = gdb_get_cpu_process(gdbserver_state.g_cpu);
cc = CPU_GET_CLASS(gdbserver_state.g_cpu);
if (!cc->gdb_core_xml_file) {
- put_packet("");
+ gdb_put_packet("");
return;
}
@@ -2154,7 +1507,7 @@
p = get_param(params, 0)->data;
xml = get_feature_xml(p, &p, process);
if (!xml) {
- put_packet("E00");
+ gdb_put_packet("E00");
return;
}
@@ -2162,7 +1515,7 @@
len = get_param(params, 2)->val_ul;
total_len = strlen(xml);
if (addr > total_len) {
- put_packet("E00");
+ gdb_put_packet("E00");
return;
}
@@ -2172,101 +1525,25 @@
if (len < total_len - addr) {
g_string_assign(gdbserver_state.str_buf, "m");
- memtox(gdbserver_state.str_buf, xml + addr, len);
+ gdb_memtox(gdbserver_state.str_buf, xml + addr, len);
} else {
g_string_assign(gdbserver_state.str_buf, "l");
- memtox(gdbserver_state.str_buf, xml + addr, total_len - addr);
+ gdb_memtox(gdbserver_state.str_buf, xml + addr, total_len - addr);
}
- put_packet_binary(gdbserver_state.str_buf->str,
+ gdb_put_packet_binary(gdbserver_state.str_buf->str,
gdbserver_state.str_buf->len, true);
}
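The 'm'/'l' prefixes above follow the qXfer chunking convention: 'm' marks a partial chunk with more data to come, 'l' marks the last one. For a 1000-byte target description and a read of offset 0, length 512, the stub answers "m" plus the first 512 bytes; the follow-up read at offset 512 gets "l" plus the remaining 488 bytes.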
-#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX_USER)
-static void handle_query_xfer_auxv(GArray *params, void *user_ctx)
-{
- TaskState *ts;
- unsigned long offset, len, saved_auxv, auxv_len;
-
- if (params->len < 2) {
- put_packet("E22");
- return;
- }
-
- offset = get_param(params, 0)->val_ul;
- len = get_param(params, 1)->val_ul;
- ts = gdbserver_state.c_cpu->opaque;
- saved_auxv = ts->info->saved_auxv;
- auxv_len = ts->info->auxv_len;
-
- if (offset >= auxv_len) {
- put_packet("E00");
- return;
- }
-
- if (len > (MAX_PACKET_LENGTH - 5) / 2) {
- len = (MAX_PACKET_LENGTH - 5) / 2;
- }
-
- if (len < auxv_len - offset) {
- g_string_assign(gdbserver_state.str_buf, "m");
- } else {
- g_string_assign(gdbserver_state.str_buf, "l");
- len = auxv_len - offset;
- }
-
- g_byte_array_set_size(gdbserver_state.mem_buf, len);
- if (target_memory_rw_debug(gdbserver_state.g_cpu, saved_auxv + offset,
- gdbserver_state.mem_buf->data, len, false)) {
- put_packet("E14");
- return;
- }
-
- memtox(gdbserver_state.str_buf,
- (const char *)gdbserver_state.mem_buf->data, len);
- put_packet_binary(gdbserver_state.str_buf->str,
- gdbserver_state.str_buf->len, true);
-}
-#endif
-
-static void handle_query_attached(GArray *params, void *user_ctx)
-{
- put_packet(GDB_ATTACHED);
-}
-
static void handle_query_qemu_supported(GArray *params, void *user_ctx)
{
g_string_printf(gdbserver_state.str_buf, "sstepbits;sstep");
#ifndef CONFIG_USER_ONLY
g_string_append(gdbserver_state.str_buf, ";PhyMemMode");
#endif
- put_strbuf();
+ gdb_put_strbuf();
}
-#ifndef CONFIG_USER_ONLY
-static void handle_query_qemu_phy_mem_mode(GArray *params,
- void *user_ctx)
-{
- g_string_printf(gdbserver_state.str_buf, "%d", phy_memory_mode);
- put_strbuf();
-}
-
-static void handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx)
-{
- if (!params->len) {
- put_packet("E22");
- return;
- }
-
- if (!get_param(params, 0)->val_ul) {
- phy_memory_mode = 0;
- } else {
- phy_memory_mode = 1;
- }
- put_packet("OK");
-}
-#endif
-
static const GdbCmdParseEntry gdb_gen_query_set_common_table[] = {
/* Order is important if has same prefix */
{
@@ -2306,12 +1583,12 @@
},
#ifdef CONFIG_USER_ONLY
{
- .handler = handle_query_offsets,
+ .handler = gdb_handle_query_offsets,
.cmd = "Offsets",
},
#else
{
- .handler = handle_query_rcmd,
+ .handler = gdb_handle_query_rcmd,
.cmd = "Rcmd,",
.cmd_startswith = 1,
.schema = "s0"
@@ -2334,21 +1611,21 @@
.cmd_startswith = 1,
.schema = "s:l,l0"
},
-#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX_USER)
+#if defined(CONFIG_USER_ONLY) && defined(CONFIG_LINUX)
{
- .handler = handle_query_xfer_auxv,
+ .handler = gdb_handle_query_xfer_auxv,
.cmd = "Xfer:auxv:read::",
.cmd_startswith = 1,
.schema = "l,l0"
},
#endif
{
- .handler = handle_query_attached,
+ .handler = gdb_handle_query_attached,
.cmd = "Attached:",
.cmd_startswith = 1
},
{
- .handler = handle_query_attached,
+ .handler = gdb_handle_query_attached,
.cmd = "Attached",
},
{
@@ -2357,7 +1634,7 @@
},
#ifndef CONFIG_USER_ONLY
{
- .handler = handle_query_qemu_phy_mem_mode,
+ .handler = gdb_handle_query_qemu_phy_mem_mode,
.cmd = "qemu.PhyMemMode",
},
#endif
@@ -2373,7 +1650,7 @@
},
#ifndef CONFIG_USER_ONLY
{
- .handler = handle_set_qemu_phy_mem_mode,
+ .handler = gdb_handle_set_qemu_phy_mem_mode,
.cmd = "qemu.PhyMemMode:",
.cmd_startswith = 1,
.schema = "l0"
@@ -2396,7 +1673,7 @@
if (process_string_cmd(NULL, get_param(params, 0)->data,
gdb_gen_query_table,
ARRAY_SIZE(gdb_gen_query_table))) {
- put_packet("");
+ gdb_put_packet("");
}
}
@@ -2415,7 +1692,7 @@
if (process_string_cmd(NULL, get_param(params, 0)->data,
gdb_gen_set_table,
ARRAY_SIZE(gdb_gen_set_table))) {
- put_packet("");
+ gdb_put_packet("");
}
}
@@ -2424,7 +1701,7 @@
g_string_printf(gdbserver_state.str_buf, "T%02xthread:", GDB_SIGNAL_TRAP);
gdb_append_thread_id(gdbserver_state.c_cpu, gdbserver_state.str_buf);
g_string_append_c(gdbserver_state.str_buf, ';');
- put_strbuf();
+ gdb_put_strbuf();
/*
* Remove all the breakpoints when this query is issued,
* because gdb is doing an initial connect and the state
@@ -2441,7 +1718,7 @@
switch (line_buf[0]) {
case '!':
- put_packet("OK");
+ gdb_put_packet("OK");
break;
case '?':
{
@@ -2527,7 +1804,7 @@
case 'F':
{
static const GdbCmdParseEntry file_io_cmd_desc = {
- .handler = handle_file_io,
+ .handler = gdb_handle_file_io,
.cmd = "F",
.cmd_startswith = 1,
.schema = "L,L,o0"
@@ -2668,7 +1945,7 @@
break;
default:
/* put empty packet */
- put_packet("");
+ gdb_put_packet("");
break;
}
@@ -2695,183 +1972,7 @@
gdbserver_state.g_cpu = cpu;
}
-#ifndef CONFIG_USER_ONLY
-static void gdb_vm_state_change(void *opaque, bool running, RunState state)
-{
- CPUState *cpu = gdbserver_state.c_cpu;
- g_autoptr(GString) buf = g_string_new(NULL);
- g_autoptr(GString) tid = g_string_new(NULL);
- const char *type;
- int ret;
-
- if (running || gdbserver_state.state == RS_INACTIVE) {
- return;
- }
- /* Is there a GDB syscall waiting to be sent? */
- if (gdbserver_state.current_syscall_cb) {
- put_packet(gdbserver_state.syscall_buf);
- return;
- }
-
- if (cpu == NULL) {
- /* No process attached */
- return;
- }
-
- gdb_append_thread_id(cpu, tid);
-
- switch (state) {
- case RUN_STATE_DEBUG:
- if (cpu->watchpoint_hit) {
- switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) {
- case BP_MEM_READ:
- type = "r";
- break;
- case BP_MEM_ACCESS:
- type = "a";
- break;
- default:
- type = "";
- break;
- }
- trace_gdbstub_hit_watchpoint(type, cpu_gdb_index(cpu),
- (target_ulong)cpu->watchpoint_hit->vaddr);
- g_string_printf(buf, "T%02xthread:%s;%swatch:" TARGET_FMT_lx ";",
- GDB_SIGNAL_TRAP, tid->str, type,
- (target_ulong)cpu->watchpoint_hit->vaddr);
- cpu->watchpoint_hit = NULL;
- goto send_packet;
- } else {
- trace_gdbstub_hit_break();
- }
- tb_flush(cpu);
- ret = GDB_SIGNAL_TRAP;
- break;
- case RUN_STATE_PAUSED:
- trace_gdbstub_hit_paused();
- ret = GDB_SIGNAL_INT;
- break;
- case RUN_STATE_SHUTDOWN:
- trace_gdbstub_hit_shutdown();
- ret = GDB_SIGNAL_QUIT;
- break;
- case RUN_STATE_IO_ERROR:
- trace_gdbstub_hit_io_error();
- ret = GDB_SIGNAL_IO;
- break;
- case RUN_STATE_WATCHDOG:
- trace_gdbstub_hit_watchdog();
- ret = GDB_SIGNAL_ALRM;
- break;
- case RUN_STATE_INTERNAL_ERROR:
- trace_gdbstub_hit_internal_error();
- ret = GDB_SIGNAL_ABRT;
- break;
- case RUN_STATE_SAVE_VM:
- case RUN_STATE_RESTORE_VM:
- return;
- case RUN_STATE_FINISH_MIGRATE:
- ret = GDB_SIGNAL_XCPU;
- break;
- default:
- trace_gdbstub_hit_unknown(state);
- ret = GDB_SIGNAL_UNKNOWN;
- break;
- }
- gdb_set_stop_cpu(cpu);
- g_string_printf(buf, "T%02xthread:%s;", ret, tid->str);
-
-send_packet:
- put_packet(buf->str);
-
- /* disable single step if it was enabled */
- cpu_single_step(cpu, 0);
-}
-#endif
-
-/* Send a gdb syscall request.
- This accepts limited printf-style format specifiers, specifically:
- %x - target_ulong argument printed in hex.
- %lx - 64-bit argument printed in hex.
- %s - string pointer (target_ulong) and length (int) pair. */
-void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va)
-{
- char *p;
- char *p_end;
- target_ulong addr;
- uint64_t i64;
-
- if (!gdb_attached()) {
- return;
- }
-
- gdbserver_state.current_syscall_cb = cb;
-#ifndef CONFIG_USER_ONLY
- vm_stop(RUN_STATE_DEBUG);
-#endif
- p = &gdbserver_state.syscall_buf[0];
- p_end = &gdbserver_state.syscall_buf[sizeof(gdbserver_state.syscall_buf)];
- *(p++) = 'F';
- while (*fmt) {
- if (*fmt == '%') {
- fmt++;
- switch (*fmt++) {
- case 'x':
- addr = va_arg(va, target_ulong);
- p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
- break;
- case 'l':
- if (*(fmt++) != 'x')
- goto bad_format;
- i64 = va_arg(va, uint64_t);
- p += snprintf(p, p_end - p, "%" PRIx64, i64);
- break;
- case 's':
- addr = va_arg(va, target_ulong);
- p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
- addr, va_arg(va, int));
- break;
- default:
- bad_format:
- error_report("gdbstub: Bad syscall format string '%s'",
- fmt - 1);
- break;
- }
- } else {
- *(p++) = *(fmt++);
- }
- }
- *p = 0;
-#ifdef CONFIG_USER_ONLY
- put_packet(gdbserver_state.syscall_buf);
- /* Return control to gdb for it to process the syscall request.
- * Since the protocol requires that gdb hands control back to us
- * using a "here are the results" F packet, we don't need to check
- * gdb_handlesig's return value (which is the signal to deliver if
- * execution was resumed via a continue packet).
- */
- gdb_handlesig(gdbserver_state.c_cpu, 0);
-#else
- /* In this case wait to send the syscall packet until notification that
- the CPU has stopped. This must be done because if the packet is sent
- now the reply from the syscall request could be received while the CPU
- is still in the running state, which can cause packets to be dropped
- and state transition 'T' packets to be sent while the syscall is still
- being processed. */
- qemu_cpu_kick(gdbserver_state.c_cpu);
-#endif
-}
-
-void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
-{
- va_list va;
-
- va_start(va, fmt);
- gdb_do_syscallv(cb, fmt, va);
- va_end(va);
-}
-
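The gdb_do_syscall()/gdb_do_syscallv() pair removed here keeps the small format language documented in the comment above. A hedged sketch of the call shape — the callback and argument names are placeholders; only the %x/%lx/%s letters come from that comment:

    /* hypothetical semihosted write: fd, guest buffer address, length */
    gdb_do_syscall(my_complete_cb, "write,%x,%x,%x", fd, buf_addr, len);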
-static void gdb_read_byte(uint8_t ch)
+void gdb_read_byte(uint8_t ch)
{
uint8_t reply;
@@ -2881,7 +1982,7 @@
of a new command then abandon the previous response. */
if (ch == '-') {
trace_gdbstub_err_got_nack();
- put_buffer(gdbserver_state.last_packet->data,
+ gdb_put_buffer(gdbserver_state.last_packet->data,
gdbserver_state.last_packet->len);
} else if (ch == '+') {
trace_gdbstub_io_got_ack();
@@ -3003,12 +2104,12 @@
trace_gdbstub_err_checksum_incorrect(gdbserver_state.line_sum, gdbserver_state.line_csum);
/* send NAK reply */
reply = '-';
- put_buffer(&reply, 1);
+ gdb_put_buffer(&reply, 1);
gdbserver_state.state = RS_IDLE;
} else {
/* send ACK reply */
reply = '+';
- put_buffer(&reply, 1);
+ gdb_put_buffer(&reply, 1);
gdbserver_state.state = gdb_handle_packet(gdbserver_state.line_buf);
}
break;
@@ -3018,39 +2119,12 @@
}
}
-/* Tell the remote gdb that the process has exited. */
-void gdb_exit(int code)
-{
- char buf[4];
-
- if (!gdbserver_state.init) {
- return;
- }
-#ifdef CONFIG_USER_ONLY
- if (gdbserver_state.socket_path) {
- unlink(gdbserver_state.socket_path);
- }
- if (gdbserver_state.fd < 0) {
- return;
- }
-#endif
-
- trace_gdbstub_op_exiting((uint8_t)code);
-
- snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
- put_packet(buf);
-
-#ifndef CONFIG_USER_ONLY
- qemu_chr_fe_deinit(&gdbserver_state.chr, true);
-#endif
-}
-
/*
* Create the process that will contain all the "orphan" CPUs (that are not
* part of a CPU cluster). Note that if this process contains no CPUs, it won't
* be attachable and thus will be invisible to the user.
*/
-static void create_default_process(GDBState *s)
+void gdb_create_default_process(GDBState *s)
{
GDBProcess *process;
int max_pid = 0;
@@ -3070,447 +2144,3 @@
process->target_xml[0] = '\0';
}
-#ifdef CONFIG_USER_ONLY
-int
-gdb_handlesig(CPUState *cpu, int sig)
-{
- char buf[256];
- int n;
-
- if (!gdbserver_state.init || gdbserver_state.fd < 0) {
- return sig;
- }
-
- /* disable single step if it was enabled */
- cpu_single_step(cpu, 0);
- tb_flush(cpu);
-
- if (sig != 0) {
- gdb_set_stop_cpu(cpu);
- g_string_printf(gdbserver_state.str_buf,
- "T%02xthread:", target_signal_to_gdb(sig));
- gdb_append_thread_id(cpu, gdbserver_state.str_buf);
- g_string_append_c(gdbserver_state.str_buf, ';');
- put_strbuf();
- }
- /* put_packet() might have detected that the peer terminated the
- connection. */
- if (gdbserver_state.fd < 0) {
- return sig;
- }
-
- sig = 0;
- gdbserver_state.state = RS_IDLE;
- gdbserver_state.running_state = 0;
- while (gdbserver_state.running_state == 0) {
- n = read(gdbserver_state.fd, buf, 256);
- if (n > 0) {
- int i;
-
- for (i = 0; i < n; i++) {
- gdb_read_byte(buf[i]);
- }
- } else {
- /* XXX: Connection closed. Should probably wait for another
- connection before continuing. */
- if (n == 0) {
- close(gdbserver_state.fd);
- }
- gdbserver_state.fd = -1;
- return sig;
- }
- }
- sig = gdbserver_state.signal;
- gdbserver_state.signal = 0;
- return sig;
-}
-
-/* Tell the remote gdb that the process has exited due to SIG. */
-void gdb_signalled(CPUArchState *env, int sig)
-{
- char buf[4];
-
- if (!gdbserver_state.init || gdbserver_state.fd < 0) {
- return;
- }
-
- snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig));
- put_packet(buf);
-}
-
-static void gdb_accept_init(int fd)
-{
- init_gdbserver_state();
- create_default_process(&gdbserver_state);
- gdbserver_state.processes[0].attached = true;
- gdbserver_state.c_cpu = gdb_first_attached_cpu();
- gdbserver_state.g_cpu = gdbserver_state.c_cpu;
- gdbserver_state.fd = fd;
- gdb_has_xml = false;
-}
-
-static bool gdb_accept_socket(int gdb_fd)
-{
- int fd;
-
- for(;;) {
- fd = accept(gdb_fd, NULL, NULL);
- if (fd < 0 && errno != EINTR) {
- perror("accept socket");
- return false;
- } else if (fd >= 0) {
- qemu_set_cloexec(fd);
- break;
- }
- }
-
- gdb_accept_init(fd);
- return true;
-}
-
-static int gdbserver_open_socket(const char *path)
-{
- struct sockaddr_un sockaddr = {};
- int fd, ret;
-
- fd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (fd < 0) {
- perror("create socket");
- return -1;
- }
-
- sockaddr.sun_family = AF_UNIX;
- pstrcpy(sockaddr.sun_path, sizeof(sockaddr.sun_path) - 1, path);
- ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
- if (ret < 0) {
- perror("bind socket");
- close(fd);
- return -1;
- }
- ret = listen(fd, 1);
- if (ret < 0) {
- perror("listen socket");
- close(fd);
- return -1;
- }
-
- return fd;
-}
-
-static bool gdb_accept_tcp(int gdb_fd)
-{
- struct sockaddr_in sockaddr = {};
- socklen_t len;
- int fd;
-
- for(;;) {
- len = sizeof(sockaddr);
- fd = accept(gdb_fd, (struct sockaddr *)&sockaddr, &len);
- if (fd < 0 && errno != EINTR) {
- perror("accept");
- return false;
- } else if (fd >= 0) {
- qemu_set_cloexec(fd);
- break;
- }
- }
-
- /* set short latency */
- if (socket_set_nodelay(fd)) {
- perror("setsockopt");
- close(fd);
- return false;
- }
-
- gdb_accept_init(fd);
- return true;
-}
-
-static int gdbserver_open_port(int port)
-{
- struct sockaddr_in sockaddr;
- int fd, ret;
-
- fd = socket(PF_INET, SOCK_STREAM, 0);
- if (fd < 0) {
- perror("socket");
- return -1;
- }
- qemu_set_cloexec(fd);
-
- socket_set_fast_reuse(fd);
-
- sockaddr.sin_family = AF_INET;
- sockaddr.sin_port = htons(port);
- sockaddr.sin_addr.s_addr = 0;
- ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
- if (ret < 0) {
- perror("bind");
- close(fd);
- return -1;
- }
- ret = listen(fd, 1);
- if (ret < 0) {
- perror("listen");
- close(fd);
- return -1;
- }
-
- return fd;
-}
-
-int gdbserver_start(const char *port_or_path)
-{
- int port = g_ascii_strtoull(port_or_path, NULL, 10);
- int gdb_fd;
-
- if (port > 0) {
- gdb_fd = gdbserver_open_port(port);
- } else {
- gdb_fd = gdbserver_open_socket(port_or_path);
- }
-
- if (gdb_fd < 0) {
- return -1;
- }
-
- if (port > 0 && gdb_accept_tcp(gdb_fd)) {
- return 0;
- } else if (gdb_accept_socket(gdb_fd)) {
- gdbserver_state.socket_path = g_strdup(port_or_path);
- return 0;
- }
-
- /* gone wrong */
- close(gdb_fd);
- return -1;
-}
-
-/* Disable gdb stub for child processes. */
-void gdbserver_fork(CPUState *cpu)
-{
- if (!gdbserver_state.init || gdbserver_state.fd < 0) {
- return;
- }
- close(gdbserver_state.fd);
- gdbserver_state.fd = -1;
- cpu_breakpoint_remove_all(cpu, BP_GDB);
- cpu_watchpoint_remove_all(cpu, BP_GDB);
-}
-#else
-static int gdb_chr_can_receive(void *opaque)
-{
- /* We can handle an arbitrarily large amount of data.
- Pick the maximum packet size, which is as good as anything. */
- return MAX_PACKET_LENGTH;
-}
-
-static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
-{
- int i;
-
- for (i = 0; i < size; i++) {
- gdb_read_byte(buf[i]);
- }
-}
-
-static void gdb_chr_event(void *opaque, QEMUChrEvent event)
-{
- int i;
- GDBState *s = (GDBState *) opaque;
-
- switch (event) {
- case CHR_EVENT_OPENED:
- /* Start with first process attached, others detached */
- for (i = 0; i < s->process_num; i++) {
- s->processes[i].attached = !i;
- }
-
- s->c_cpu = gdb_first_attached_cpu();
- s->g_cpu = s->c_cpu;
-
- vm_stop(RUN_STATE_PAUSED);
- replay_gdb_attached();
- gdb_has_xml = false;
- break;
- default:
- break;
- }
-}
-
-static int gdb_monitor_write(Chardev *chr, const uint8_t *buf, int len)
-{
- g_autoptr(GString) hex_buf = g_string_new("O");
- memtohex(hex_buf, buf, len);
- put_packet(hex_buf->str);
- return len;
-}
-
-#ifndef _WIN32
-static void gdb_sigterm_handler(int signal)
-{
- if (runstate_is_running()) {
- vm_stop(RUN_STATE_PAUSED);
- }
-}
-#endif
-
-static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend,
- bool *be_opened, Error **errp)
-{
- *be_opened = false;
-}
-
-static void char_gdb_class_init(ObjectClass *oc, void *data)
-{
- ChardevClass *cc = CHARDEV_CLASS(oc);
-
- cc->internal = true;
- cc->open = gdb_monitor_open;
- cc->chr_write = gdb_monitor_write;
-}
-
-#define TYPE_CHARDEV_GDB "chardev-gdb"
-
-static const TypeInfo char_gdb_type_info = {
- .name = TYPE_CHARDEV_GDB,
- .parent = TYPE_CHARDEV,
- .class_init = char_gdb_class_init,
-};
-
-static int find_cpu_clusters(Object *child, void *opaque)
-{
- if (object_dynamic_cast(child, TYPE_CPU_CLUSTER)) {
- GDBState *s = (GDBState *) opaque;
- CPUClusterState *cluster = CPU_CLUSTER(child);
- GDBProcess *process;
-
- s->processes = g_renew(GDBProcess, s->processes, ++s->process_num);
-
- process = &s->processes[s->process_num - 1];
-
- /*
- * GDB process IDs -1 and 0 are reserved. To avoid subtle errors at
- * runtime, we enforce here that the machine does not use a cluster ID
- * that would lead to PID 0.
- */
- assert(cluster->cluster_id != UINT32_MAX);
- process->pid = cluster->cluster_id + 1;
- process->attached = false;
- process->target_xml[0] = '\0';
-
- return 0;
- }
-
- return object_child_foreach(child, find_cpu_clusters, opaque);
-}
-
-static int pid_order(const void *a, const void *b)
-{
- GDBProcess *pa = (GDBProcess *) a;
- GDBProcess *pb = (GDBProcess *) b;
-
- if (pa->pid < pb->pid) {
- return -1;
- } else if (pa->pid > pb->pid) {
- return 1;
- } else {
- return 0;
- }
-}
-
-static void create_processes(GDBState *s)
-{
- object_child_foreach(object_get_root(), find_cpu_clusters, s);
-
- if (gdbserver_state.processes) {
- /* Sort by PID */
- qsort(gdbserver_state.processes, gdbserver_state.process_num, sizeof(gdbserver_state.processes[0]), pid_order);
- }
-
- create_default_process(s);
-}
-
-int gdbserver_start(const char *device)
-{
- trace_gdbstub_op_start(device);
-
- char gdbstub_device_name[128];
- Chardev *chr = NULL;
- Chardev *mon_chr;
-
- if (!first_cpu) {
- error_report("gdbstub: meaningless to attach gdb to a "
- "machine without any CPU.");
- return -1;
- }
-
- if (!gdb_supports_guest_debug()) {
- error_report("gdbstub: current accelerator doesn't support guest debugging");
- return -1;
- }
-
- if (!device)
- return -1;
- if (strcmp(device, "none") != 0) {
- if (strstart(device, "tcp:", NULL)) {
- /* enforce required TCP attributes */
- snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
- "%s,wait=off,nodelay=on,server=on", device);
- device = gdbstub_device_name;
- }
-#ifndef _WIN32
- else if (strcmp(device, "stdio") == 0) {
- struct sigaction act;
-
- memset(&act, 0, sizeof(act));
- act.sa_handler = gdb_sigterm_handler;
- sigaction(SIGINT, &act, NULL);
- }
-#endif
- /*
- * FIXME: it's a bit weird to allow using a mux chardev here
- * and implicitly setup a monitor. We may want to break this.
- */
- chr = qemu_chr_new_noreplay("gdb", device, true, NULL);
- if (!chr)
- return -1;
- }
-
- if (!gdbserver_state.init) {
- init_gdbserver_state();
-
- qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
-
- /* Initialize a monitor terminal for gdb */
- mon_chr = qemu_chardev_new(NULL, TYPE_CHARDEV_GDB,
- NULL, NULL, &error_abort);
- monitor_init_hmp(mon_chr, false, &error_abort);
- } else {
- qemu_chr_fe_deinit(&gdbserver_state.chr, true);
- mon_chr = gdbserver_state.mon_chr;
- reset_gdbserver_state();
- }
-
- create_processes(&gdbserver_state);
-
- if (chr) {
- qemu_chr_fe_init(&gdbserver_state.chr, chr, &error_abort);
- qemu_chr_fe_set_handlers(&gdbserver_state.chr, gdb_chr_can_receive,
- gdb_chr_receive, gdb_chr_event,
- NULL, &gdbserver_state, NULL, true);
- }
- gdbserver_state.state = chr ? RS_IDLE : RS_INACTIVE;
- gdbserver_state.mon_chr = mon_chr;
- gdbserver_state.current_syscall_cb = NULL;
-
- return 0;
-}
-
-static void register_types(void)
-{
- type_register_static(&char_gdb_type_info);
-}
-
-type_init(register_types);
-#endif
diff --git a/gdbstub/internals.h b/gdbstub/internals.h
index b23999f..94ddff4 100644
--- a/gdbstub/internals.h
+++ b/gdbstub/internals.h
@@ -6,14 +6,220 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
-#ifndef _INTERNALS_H_
-#define _INTERNALS_H_
+#ifndef GDBSTUB_INTERNALS_H
+#define GDBSTUB_INTERNALS_H
#include "exec/cpu-common.h"
+#define MAX_PACKET_LENGTH 4096
+
+/*
+ * Shared structures and definitions
+ */
+
+enum {
+ GDB_SIGNAL_0 = 0,
+ GDB_SIGNAL_INT = 2,
+ GDB_SIGNAL_QUIT = 3,
+ GDB_SIGNAL_TRAP = 5,
+ GDB_SIGNAL_ABRT = 6,
+ GDB_SIGNAL_ALRM = 14,
+ GDB_SIGNAL_IO = 23,
+ GDB_SIGNAL_XCPU = 24,
+ GDB_SIGNAL_UNKNOWN = 143
+};
+
+typedef struct GDBProcess {
+ uint32_t pid;
+ bool attached;
+
+ char target_xml[1024];
+} GDBProcess;
+
+enum RSState {
+ RS_INACTIVE,
+ RS_IDLE,
+ RS_GETLINE,
+ RS_GETLINE_ESC,
+ RS_GETLINE_RLE,
+ RS_CHKSUM1,
+ RS_CHKSUM2,
+};
+
+typedef struct GDBState {
+ bool init; /* have we been initialised? */
+ CPUState *c_cpu; /* current CPU for step/continue ops */
+ CPUState *g_cpu; /* current CPU for other ops */
+ CPUState *query_cpu; /* for q{f|s}ThreadInfo */
+ enum RSState state; /* parsing state */
+ char line_buf[MAX_PACKET_LENGTH];
+ int line_buf_index;
+ int line_sum; /* running checksum */
+ int line_csum; /* checksum at the end of the packet */
+ GByteArray *last_packet;
+ int signal;
+ bool multiprocess;
+ GDBProcess *processes;
+ int process_num;
+ GString *str_buf;
+ GByteArray *mem_buf;
+ int sstep_flags;
+ int supported_sstep_flags;
+} GDBState;
+
+/* lives in main gdbstub.c */
+extern GDBState gdbserver_state;
+
+/*
+ * Inline utility function, convert from int to hex and back
+ */
+
+static inline int fromhex(int v)
+{
+ if (v >= '0' && v <= '9') {
+ return v - '0';
+ } else if (v >= 'A' && v <= 'F') {
+ return v - 'A' + 10;
+ } else if (v >= 'a' && v <= 'f') {
+ return v - 'a' + 10;
+ } else {
+ return 0;
+ }
+}
+
+static inline int tohex(int v)
+{
+ if (v < 10) {
+ return v + '0';
+ } else {
+ return v - 10 + 'a';
+ }
+}
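Note: as a quick illustration of how these two inline helpers pair up (a sketch only, not the gdb_memtohex()/gdb_hextomem() implementations declared below), each byte encodes to two hex characters and decodes back:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch only: round-trip one byte through tohex()/fromhex(). */
    static void example_hex_roundtrip(void)
    {
        uint8_t byte = 0x7f;
        char hi = tohex(byte >> 4);   /* '7' */
        char lo = tohex(byte & 0xf);  /* 'f' */
        assert(((fromhex(hi) << 4) | fromhex(lo)) == byte);
    }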
+
+/*
+ * Connection helpers for both softmmu and user backends
+ */
+
+void gdb_put_strbuf(void);
+int gdb_put_packet(const char *buf);
+int gdb_put_packet_binary(const char *buf, int len, bool dump);
+void gdb_hextomem(GByteArray *mem, const char *buf, int len);
+void gdb_memtohex(GString *buf, const uint8_t *mem, int len);
+void gdb_memtox(GString *buf, const char *mem, int len);
+void gdb_read_byte(uint8_t ch);
+
+/*
+ * Packet acknowledgement - we handle this slightly differently
+ * between user and softmmu mode, mainly to deal with the differences
+ * between the flexible chardev and the direct fd approaches.
+ *
+ * We currently don't support a negotiated QStartNoAckMode
+ */
+
+/**
+ * gdb_got_immediate_ack() - check ok to continue
+ *
+ * Returns true to continue, false to request a re-transmit. Only the
+ * user-mode stub ever returns false; the softmmu stub always returns true.
+ */
+bool gdb_got_immediate_ack(void);
+/* utility helpers */
+CPUState *gdb_first_attached_cpu(void);
+void gdb_append_thread_id(CPUState *cpu, GString *buf);
+int gdb_get_cpu_index(CPUState *cpu);
+unsigned int gdb_get_max_cpus(void); /* both */
+bool gdb_can_reverse(void); /* softmmu, stub for user */
+
+void gdb_create_default_process(GDBState *s);
+
+/* signal mapping, common for softmmu, specialised for user-mode */
+int gdb_signal_to_target(int sig);
+int gdb_target_signal_to_gdb(int sig);
+
+int gdb_get_char(void); /* user only */
+
+/**
+ * gdb_continue() - handle continue in mode specific way.
+ */
+void gdb_continue(void);
+
+/**
+ * gdb_continue_partial() - handle partial continue in mode specific way.
+ */
+int gdb_continue_partial(char *newstates);
+
+/*
+ * Helpers with separate softmmu and user implementations
+ */
+void gdb_put_buffer(const uint8_t *buf, int len);
+
+/*
+ * Command handlers - either specialised or softmmu or user only
+ */
+void gdb_init_gdbserver_state(void);
+
+typedef enum GDBThreadIdKind {
+ GDB_ONE_THREAD = 0,
+ GDB_ALL_THREADS, /* One process, all threads */
+ GDB_ALL_PROCESSES,
+ GDB_READ_THREAD_ERR
+} GDBThreadIdKind;
+
+typedef union GdbCmdVariant {
+ const char *data;
+ uint8_t opcode;
+ unsigned long val_ul;
+ unsigned long long val_ull;
+ struct {
+ GDBThreadIdKind kind;
+ uint32_t pid;
+ uint32_t tid;
+ } thread_id;
+} GdbCmdVariant;
+
+#define get_param(p, i) (&g_array_index(p, GdbCmdVariant, i))
+
+void gdb_handle_query_rcmd(GArray *params, void *user_ctx); /* softmmu */
+void gdb_handle_query_offsets(GArray *params, void *user_ctx); /* user */
+void gdb_handle_query_xfer_auxv(GArray *params, void *user_ctx); /* user */
+
+void gdb_handle_query_attached(GArray *params, void *user_ctx); /* both */
+
+/* softmmu only */
+void gdb_handle_query_qemu_phy_mem_mode(GArray *params, void *user_ctx);
+void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx);
+
+/* syscall handling */
+void gdb_handle_file_io(GArray *params, void *user_ctx);
+bool gdb_handled_syscall(void);
+void gdb_disable_syscalls(void);
+void gdb_syscall_reset(void);
+
+/* user/softmmu specific syscall handling */
+void gdb_syscall_handling(const char *syscall_packet);
+
+/*
+ * Break/Watch point support - there is an implementation for softmmu
+ * and user mode.
+ */
bool gdb_supports_guest_debug(void);
int gdb_breakpoint_insert(CPUState *cs, int type, vaddr addr, vaddr len);
int gdb_breakpoint_remove(CPUState *cs, int type, vaddr addr, vaddr len);
void gdb_breakpoint_remove_all(CPUState *cs);
-#endif /* _INTERNALS_H_ */
+/**
+ * gdb_target_memory_rw_debug() - handle debug access to memory
+ * @cs: CPUState
+ * @addr: nominal address, could be an entire physical address
+ * @buf: data
+ * @len: length of access
+ * @is_write: is it a write operation
+ *
+ * This function is specialised depending on the mode we are running
+ * in. For softmmu guests we can switch the interpretation of the
+ * address to a physical address.
+ */
+int gdb_target_memory_rw_debug(CPUState *cs, hwaddr addr,
+ uint8_t *buf, int len, bool is_write);
+
+#endif /* GDBSTUB_INTERNALS_H */
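Note: a hypothetical command handler (not one added by this patch) would fetch its parsed arguments through the get_param() macro above, picking whichever GdbCmdVariant member matches the command schema:

    /* Hypothetical handler: param 0 is an unsigned long, param 1 a string. */
    static void example_handle_cmd(GArray *params, void *user_ctx)
    {
        if (params->len < 2) {
            gdb_put_packet("E22");
            return;
        }
        g_string_printf(gdbserver_state.str_buf, "%s@%lx",
                        get_param(params, 1)->data,
                        get_param(params, 0)->val_ul);
        gdb_put_strbuf();
    }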
diff --git a/gdbstub/meson.build b/gdbstub/meson.build
index fc895a2..bd5c5cd 100644
--- a/gdbstub/meson.build
+++ b/gdbstub/meson.build
@@ -4,6 +4,34 @@
# types such as hwaddr.
#
-specific_ss.add(files('gdbstub.c'))
-softmmu_ss.add(files('softmmu.c'))
-user_ss.add(files('user.c'))
+# We need to build the core gdb code via a library to be able to tweak
+# cflags so:
+
+gdb_user_ss = ss.source_set()
+gdb_softmmu_ss = ss.source_set()
+
+# We build two versions of gdbstub, one for each mode
+gdb_user_ss.add(files('gdbstub.c', 'user.c'))
+gdb_softmmu_ss.add(files('gdbstub.c', 'softmmu.c'))
+
+gdb_user_ss = gdb_user_ss.apply(config_host, strict: false)
+gdb_softmmu_ss = gdb_softmmu_ss.apply(config_host, strict: false)
+
+libgdb_user = static_library('gdb_user',
+ gdb_user_ss.sources() + genh,
+ name_suffix: 'fa',
+ c_args: '-DCONFIG_USER_ONLY')
+
+libgdb_softmmu = static_library('gdb_softmmu',
+ gdb_softmmu_ss.sources() + genh,
+ name_suffix: 'fa')
+
+gdb_user = declare_dependency(link_whole: libgdb_user)
+user_ss.add(gdb_user)
+gdb_softmmu = declare_dependency(link_whole: libgdb_softmmu)
+softmmu_ss.add(gdb_softmmu)
+
+common_ss.add(files('syscalls.c'))
+
+# The user-target is specialised by the guest
+specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-target.c'))
diff --git a/gdbstub/softmmu.c b/gdbstub/softmmu.c
index 129575e..22ecd09 100644
--- a/gdbstub/softmmu.c
+++ b/gdbstub/softmmu.c
@@ -4,16 +4,617 @@
* Debug integration depends on support from the individual
* accelerators so most of this involves calling the ops helpers.
*
+ * Copyright (c) 2003-2005 Fabrice Bellard
* Copyright (c) 2022 Linaro Ltd
*
- * SPDX-License-Identifier: GPL-2.0-or-later
+ * SPDX-License-Identifier: LGPL-2.0+
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/cutils.h"
#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
+#include "exec/hwaddr.h"
+#include "exec/tb-flush.h"
#include "sysemu/cpus.h"
+#include "sysemu/runstate.h"
+#include "sysemu/replay.h"
+#include "hw/core/cpu.h"
+#include "hw/cpu/cluster.h"
+#include "hw/boards.h"
+#include "chardev/char.h"
+#include "chardev/char-fe.h"
+#include "monitor/monitor.h"
+#include "trace.h"
#include "internals.h"
+/* System emulation specific state */
+typedef struct {
+ CharBackend chr;
+ Chardev *mon_chr;
+} GDBSystemState;
+
+GDBSystemState gdbserver_system_state;
+
+static void reset_gdbserver_state(void)
+{
+ g_free(gdbserver_state.processes);
+ gdbserver_state.processes = NULL;
+ gdbserver_state.process_num = 0;
+}
+
+/*
+ * Return the GDB index for a given vCPU state.
+ *
+ * In system mode GDB numbers CPUs from 1 as 0 is reserved as an "any
+ * cpu" index.
+ */
+int gdb_get_cpu_index(CPUState *cpu)
+{
+ return cpu->cpu_index + 1;
+}
+
+/*
+ * We check the status of the last message in the chardev receive code
+ */
+bool gdb_got_immediate_ack(void)
+{
+ return true;
+}
+
+/*
+ * GDB Connection management. For system emulation we do all of this
+ * via our existing Chardev infrastructure which allows us to support
+ * network and unix sockets.
+ */
+
+void gdb_put_buffer(const uint8_t *buf, int len)
+{
+ /*
+ * XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks
+ */
+ qemu_chr_fe_write_all(&gdbserver_system_state.chr, buf, len);
+}
+
+static void gdb_chr_event(void *opaque, QEMUChrEvent event)
+{
+ int i;
+ GDBState *s = (GDBState *) opaque;
+
+ switch (event) {
+ case CHR_EVENT_OPENED:
+ /* Start with first process attached, others detached */
+ for (i = 0; i < s->process_num; i++) {
+ s->processes[i].attached = !i;
+ }
+
+ s->c_cpu = gdb_first_attached_cpu();
+ s->g_cpu = s->c_cpu;
+
+ vm_stop(RUN_STATE_PAUSED);
+ replay_gdb_attached();
+ gdb_has_xml = false;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * In softmmu mode we stop the VM and wait to send the syscall packet
+ * until notification that the CPU has stopped. This must be done
+ * because if the packet is sent now the reply from the syscall
+ * request could be received while the CPU is still in the running
+ * state, which can cause packets to be dropped and state transition
+ * 'T' packets to be sent while the syscall is still being processed.
+ */
+void gdb_syscall_handling(const char *syscall_packet)
+{
+ vm_stop(RUN_STATE_DEBUG);
+ qemu_cpu_kick(gdbserver_state.c_cpu);
+}
+
+static void gdb_vm_state_change(void *opaque, bool running, RunState state)
+{
+ CPUState *cpu = gdbserver_state.c_cpu;
+ g_autoptr(GString) buf = g_string_new(NULL);
+ g_autoptr(GString) tid = g_string_new(NULL);
+ const char *type;
+ int ret;
+
+ if (running || gdbserver_state.state == RS_INACTIVE) {
+ return;
+ }
+
+ /* Is there a GDB syscall waiting to be sent? */
+ if (gdb_handled_syscall()) {
+ return;
+ }
+
+ if (cpu == NULL) {
+ /* No process attached */
+ return;
+ }
+
+ gdb_append_thread_id(cpu, tid);
+
+ switch (state) {
+ case RUN_STATE_DEBUG:
+ if (cpu->watchpoint_hit) {
+ switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) {
+ case BP_MEM_READ:
+ type = "r";
+ break;
+ case BP_MEM_ACCESS:
+ type = "a";
+ break;
+ default:
+ type = "";
+ break;
+ }
+ trace_gdbstub_hit_watchpoint(type,
+ gdb_get_cpu_index(cpu),
+ cpu->watchpoint_hit->vaddr);
+ g_string_printf(buf, "T%02xthread:%s;%swatch:%" VADDR_PRIx ";",
+ GDB_SIGNAL_TRAP, tid->str, type,
+ cpu->watchpoint_hit->vaddr);
+ cpu->watchpoint_hit = NULL;
+ goto send_packet;
+ } else {
+ trace_gdbstub_hit_break();
+ }
+ tb_flush(cpu);
+ ret = GDB_SIGNAL_TRAP;
+ break;
+ case RUN_STATE_PAUSED:
+ trace_gdbstub_hit_paused();
+ ret = GDB_SIGNAL_INT;
+ break;
+ case RUN_STATE_SHUTDOWN:
+ trace_gdbstub_hit_shutdown();
+ ret = GDB_SIGNAL_QUIT;
+ break;
+ case RUN_STATE_IO_ERROR:
+ trace_gdbstub_hit_io_error();
+ ret = GDB_SIGNAL_IO;
+ break;
+ case RUN_STATE_WATCHDOG:
+ trace_gdbstub_hit_watchdog();
+ ret = GDB_SIGNAL_ALRM;
+ break;
+ case RUN_STATE_INTERNAL_ERROR:
+ trace_gdbstub_hit_internal_error();
+ ret = GDB_SIGNAL_ABRT;
+ break;
+ case RUN_STATE_SAVE_VM:
+ case RUN_STATE_RESTORE_VM:
+ return;
+ case RUN_STATE_FINISH_MIGRATE:
+ ret = GDB_SIGNAL_XCPU;
+ break;
+ default:
+ trace_gdbstub_hit_unknown(state);
+ ret = GDB_SIGNAL_UNKNOWN;
+ break;
+ }
+ gdb_set_stop_cpu(cpu);
+ g_string_printf(buf, "T%02xthread:%s;", ret, tid->str);
+
+send_packet:
+ gdb_put_packet(buf->str);
+
+ /* disable single step if it was enabled */
+ cpu_single_step(cpu, 0);
+}
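Note: the reply built here is a standard GDB stop-reply packet; for a read watchpoint at 0x1000 hit by thread 1 it would look roughly like the string below (illustrative values only, the thread-id format also depends on multiprocess support):

    /* Illustrative only: shape of the stop-reply sent above. */
    const char *example_stop_reply = "T05thread:01;rwatch:1000;";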
+
+#ifndef _WIN32
+static void gdb_sigterm_handler(int signal)
+{
+ if (runstate_is_running()) {
+ vm_stop(RUN_STATE_PAUSED);
+ }
+}
+#endif
+
+static int gdb_monitor_write(Chardev *chr, const uint8_t *buf, int len)
+{
+ g_autoptr(GString) hex_buf = g_string_new("O");
+ gdb_memtohex(hex_buf, buf, len);
+ gdb_put_packet(hex_buf->str);
+ return len;
+}
+
+static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend,
+ bool *be_opened, Error **errp)
+{
+ *be_opened = false;
+}
+
+static void char_gdb_class_init(ObjectClass *oc, void *data)
+{
+ ChardevClass *cc = CHARDEV_CLASS(oc);
+
+ cc->internal = true;
+ cc->open = gdb_monitor_open;
+ cc->chr_write = gdb_monitor_write;
+}
+
+#define TYPE_CHARDEV_GDB "chardev-gdb"
+
+static const TypeInfo char_gdb_type_info = {
+ .name = TYPE_CHARDEV_GDB,
+ .parent = TYPE_CHARDEV,
+ .class_init = char_gdb_class_init,
+};
+
+static int gdb_chr_can_receive(void *opaque)
+{
+ /*
+ * We can handle an arbitrarily large amount of data.
+ * Pick the maximum packet size, which is as good as anything.
+ */
+ return MAX_PACKET_LENGTH;
+}
+
+static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ gdb_read_byte(buf[i]);
+ }
+}
+
+static int find_cpu_clusters(Object *child, void *opaque)
+{
+ if (object_dynamic_cast(child, TYPE_CPU_CLUSTER)) {
+ GDBState *s = (GDBState *) opaque;
+ CPUClusterState *cluster = CPU_CLUSTER(child);
+ GDBProcess *process;
+
+ s->processes = g_renew(GDBProcess, s->processes, ++s->process_num);
+
+ process = &s->processes[s->process_num - 1];
+
+ /*
+ * GDB process IDs -1 and 0 are reserved. To avoid subtle errors at
+ * runtime, we enforce here that the machine does not use a cluster ID
+ * that would lead to PID 0.
+ */
+ assert(cluster->cluster_id != UINT32_MAX);
+ process->pid = cluster->cluster_id + 1;
+ process->attached = false;
+ process->target_xml[0] = '\0';
+
+ return 0;
+ }
+
+ return object_child_foreach(child, find_cpu_clusters, opaque);
+}
+
+static int pid_order(const void *a, const void *b)
+{
+ GDBProcess *pa = (GDBProcess *) a;
+ GDBProcess *pb = (GDBProcess *) b;
+
+ if (pa->pid < pb->pid) {
+ return -1;
+ } else if (pa->pid > pb->pid) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void create_processes(GDBState *s)
+{
+ object_child_foreach(object_get_root(), find_cpu_clusters, s);
+
+ if (gdbserver_state.processes) {
+ /* Sort by PID */
+ qsort(gdbserver_state.processes,
+ gdbserver_state.process_num,
+ sizeof(gdbserver_state.processes[0]),
+ pid_order);
+ }
+
+ gdb_create_default_process(s);
+}
+
+int gdbserver_start(const char *device)
+{
+ trace_gdbstub_op_start(device);
+
+ char gdbstub_device_name[128];
+ Chardev *chr = NULL;
+ Chardev *mon_chr;
+
+ if (!first_cpu) {
+ error_report("gdbstub: meaningless to attach gdb to a "
+ "machine without any CPU.");
+ return -1;
+ }
+
+ if (!gdb_supports_guest_debug()) {
+ error_report("gdbstub: current accelerator doesn't "
+ "support guest debugging");
+ return -1;
+ }
+
+ if (!device) {
+ return -1;
+ }
+ if (strcmp(device, "none") != 0) {
+ if (strstart(device, "tcp:", NULL)) {
+ /* enforce required TCP attributes */
+ snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
+ "%s,wait=off,nodelay=on,server=on", device);
+ device = gdbstub_device_name;
+ }
+#ifndef _WIN32
+ else if (strcmp(device, "stdio") == 0) {
+ struct sigaction act;
+
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = gdb_sigterm_handler;
+ sigaction(SIGINT, &act, NULL);
+ }
+#endif
+ /*
+ * FIXME: it's a bit weird to allow using a mux chardev here
+ * and implicitly setup a monitor. We may want to break this.
+ */
+ chr = qemu_chr_new_noreplay("gdb", device, true, NULL);
+ if (!chr) {
+ return -1;
+ }
+ }
+
+ if (!gdbserver_state.init) {
+ gdb_init_gdbserver_state();
+
+ qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
+
+ /* Initialize a monitor terminal for gdb */
+ mon_chr = qemu_chardev_new(NULL, TYPE_CHARDEV_GDB,
+ NULL, NULL, &error_abort);
+ monitor_init_hmp(mon_chr, false, &error_abort);
+ } else {
+ qemu_chr_fe_deinit(&gdbserver_system_state.chr, true);
+ mon_chr = gdbserver_system_state.mon_chr;
+ reset_gdbserver_state();
+ }
+
+ create_processes(&gdbserver_state);
+
+ if (chr) {
+ qemu_chr_fe_init(&gdbserver_system_state.chr, chr, &error_abort);
+ qemu_chr_fe_set_handlers(&gdbserver_system_state.chr,
+ gdb_chr_can_receive,
+ gdb_chr_receive, gdb_chr_event,
+ NULL, &gdbserver_state, NULL, true);
+ }
+ gdbserver_state.state = chr ? RS_IDLE : RS_INACTIVE;
+ gdbserver_system_state.mon_chr = mon_chr;
+ gdb_syscall_reset();
+
+ return 0;
+}
+
+static void register_types(void)
+{
+ type_register_static(&char_gdb_type_info);
+}
+
+type_init(register_types);
+
+/* Tell the remote gdb that the process has exited. */
+void gdb_exit(int code)
+{
+ char buf[4];
+
+ if (!gdbserver_state.init) {
+ return;
+ }
+
+ trace_gdbstub_op_exiting((uint8_t)code);
+
+ snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
+ gdb_put_packet(buf);
+
+ qemu_chr_fe_deinit(&gdbserver_system_state.chr, true);
+}
+
+/*
+ * Memory access
+ */
+static int phy_memory_mode;
+
+int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
+ uint8_t *buf, int len, bool is_write)
+{
+ CPUClass *cc;
+
+ if (phy_memory_mode) {
+ if (is_write) {
+ cpu_physical_memory_write(addr, buf, len);
+ } else {
+ cpu_physical_memory_read(addr, buf, len);
+ }
+ return 0;
+ }
+
+ cc = CPU_GET_CLASS(cpu);
+ if (cc->memory_rw_debug) {
+ return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
+ }
+
+ return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
+}
+
+/*
+ * cpu helpers
+ */
+
+unsigned int gdb_get_max_cpus(void)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ return ms->smp.max_cpus;
+}
+
+bool gdb_can_reverse(void)
+{
+ return replay_mode == REPLAY_MODE_PLAY;
+}
+
+/*
+ * Softmmu specific command helpers
+ */
+
+void gdb_handle_query_qemu_phy_mem_mode(GArray *params,
+ void *user_ctx)
+{
+ g_string_printf(gdbserver_state.str_buf, "%d", phy_memory_mode);
+ gdb_put_strbuf();
+}
+
+void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx)
+{
+ if (!params->len) {
+ gdb_put_packet("E22");
+ return;
+ }
+
+ if (!get_param(params, 0)->val_ul) {
+ phy_memory_mode = 0;
+ } else {
+ phy_memory_mode = 1;
+ }
+ gdb_put_packet("OK");
+}
+
+void gdb_handle_query_rcmd(GArray *params, void *user_ctx)
+{
+ const guint8 zero = 0;
+ int len;
+
+ if (!params->len) {
+ gdb_put_packet("E22");
+ return;
+ }
+
+ len = strlen(get_param(params, 0)->data);
+ if (len % 2) {
+ gdb_put_packet("E01");
+ return;
+ }
+
+ g_assert(gdbserver_state.mem_buf->len == 0);
+ len = len / 2;
+ gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len);
+ g_byte_array_append(gdbserver_state.mem_buf, &zero, 1);
+ qemu_chr_be_write(gdbserver_system_state.mon_chr,
+ gdbserver_state.mem_buf->data,
+ gdbserver_state.mem_buf->len);
+ gdb_put_packet("OK");
+}
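Note: the qRcmd payload is simply the HMP command hex-encoded, so a "monitor info status" issued from gdb reaches this handler as shown below (illustrative):

    /* Illustrative only: "info status" hex-encoded as carried by qRcmd. */
    const char *example_rcmd_packet = "qRcmd,696e666f20737461747573";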
+
+/*
+ * Execution state helpers
+ */
+
+void gdb_handle_query_attached(GArray *params, void *user_ctx)
+{
+ gdb_put_packet("1");
+}
+
+void gdb_continue(void)
+{
+ if (!runstate_needs_reset()) {
+ trace_gdbstub_op_continue();
+ vm_start();
+ }
+}
+
+/*
+ * Resume execution, per CPU actions.
+ */
+int gdb_continue_partial(char *newstates)
+{
+ CPUState *cpu;
+ int res = 0;
+ int flag = 0;
+
+ if (!runstate_needs_reset()) {
+ bool step_requested = false;
+ CPU_FOREACH(cpu) {
+ if (newstates[cpu->cpu_index] == 's') {
+ step_requested = true;
+ break;
+ }
+ }
+
+ if (vm_prepare_start(step_requested)) {
+ return 0;
+ }
+
+ CPU_FOREACH(cpu) {
+ switch (newstates[cpu->cpu_index]) {
+ case 0:
+ case 1:
+ break; /* nothing to do here */
+ case 's':
+ trace_gdbstub_op_stepping(cpu->cpu_index);
+ cpu_single_step(cpu, gdbserver_state.sstep_flags);
+ cpu_resume(cpu);
+ flag = 1;
+ break;
+ case 'c':
+ trace_gdbstub_op_continue_cpu(cpu->cpu_index);
+ cpu_resume(cpu);
+ flag = 1;
+ break;
+ default:
+ res = -1;
+ break;
+ }
+ }
+ }
+ if (flag) {
+ qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
+ }
+ return res;
+}
+
+/*
+ * Signal Handling - in system mode we only need SIGINT and SIGTRAP; other
+ * signals are not yet supported.
+ */
+
+enum {
+ TARGET_SIGINT = 2,
+ TARGET_SIGTRAP = 5
+};
+
+int gdb_signal_to_target(int sig)
+{
+ switch (sig) {
+ case 2:
+ return TARGET_SIGINT;
+ case 5:
+ return TARGET_SIGTRAP;
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Break/Watch point helpers
+ */
+
bool gdb_supports_guest_debug(void)
{
const AccelOpsClass *ops = cpus_get_accel();
diff --git a/gdbstub/syscalls.c b/gdbstub/syscalls.c
new file mode 100644
index 0000000..02e3a8f
--- /dev/null
+++ b/gdbstub/syscalls.c
@@ -0,0 +1,205 @@
+/*
+ * GDB Syscall Handling
+ *
+ * GDB can execute syscalls on the guest's behalf, currently used by
+ * the various semihosting extensions.
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "semihosting/semihost.h"
+#include "sysemu/runstate.h"
+#include "gdbstub/user.h"
+#include "gdbstub/syscalls.h"
+#include "trace.h"
+#include "internals.h"
+
+/* Syscall specific state */
+typedef struct {
+ char syscall_buf[256];
+ gdb_syscall_complete_cb current_syscall_cb;
+} GDBSyscallState;
+
+static GDBSyscallState gdbserver_syscall_state;
+
+/*
+ * Return true if there is a GDB currently connected to the stub
+ * and attached to a CPU
+ */
+static bool gdb_attached(void)
+{
+ return gdbserver_state.init && gdbserver_state.c_cpu;
+}
+
+static enum {
+ GDB_SYS_UNKNOWN,
+ GDB_SYS_ENABLED,
+ GDB_SYS_DISABLED,
+} gdb_syscall_mode;
+
+/* Decide whether remote gdb syscalls or native file I/O should be used. */
+int use_gdb_syscalls(void)
+{
+ SemihostingTarget target = semihosting_get_target();
+ if (target == SEMIHOSTING_TARGET_NATIVE) {
+ /* -semihosting-config target=native */
+ return false;
+ } else if (target == SEMIHOSTING_TARGET_GDB) {
+ /* -semihosting-config target=gdb */
+ return true;
+ }
+
+ /* -semihosting-config target=auto */
+ /* On the first call check if gdb is connected and remember. */
+ if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
+ gdb_syscall_mode = gdb_attached() ? GDB_SYS_ENABLED : GDB_SYS_DISABLED;
+ }
+ return gdb_syscall_mode == GDB_SYS_ENABLED;
+}
+
+/* called when the stub detaches */
+void gdb_disable_syscalls(void)
+{
+ gdb_syscall_mode = GDB_SYS_DISABLED;
+}
+
+void gdb_syscall_reset(void)
+{
+ gdbserver_syscall_state.current_syscall_cb = NULL;
+}
+
+bool gdb_handled_syscall(void)
+{
+ if (gdbserver_syscall_state.current_syscall_cb) {
+ gdb_put_packet(gdbserver_syscall_state.syscall_buf);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Send a gdb syscall request.
+ * This accepts limited printf-style format specifiers, specifically:
+ * %x - target_ulong argument printed in hex.
+ * %lx - 64-bit argument printed in hex.
+ * %s - string pointer (target_ulong) and length (int) pair.
+ */
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
+{
+ char *p, *p_end;
+ va_list va;
+
+ if (!gdb_attached()) {
+ return;
+ }
+
+ gdbserver_syscall_state.current_syscall_cb = cb;
+ va_start(va, fmt);
+
+ p = gdbserver_syscall_state.syscall_buf;
+ p_end = p + sizeof(gdbserver_syscall_state.syscall_buf);
+ *(p++) = 'F';
+ while (*fmt) {
+ if (*fmt == '%') {
+ uint64_t i64;
+ uint32_t i32;
+
+ fmt++;
+ switch (*fmt++) {
+ case 'x':
+ i32 = va_arg(va, uint32_t);
+ p += snprintf(p, p_end - p, "%" PRIx32, i32);
+ break;
+ case 'l':
+ if (*(fmt++) != 'x') {
+ goto bad_format;
+ }
+ i64 = va_arg(va, uint64_t);
+ p += snprintf(p, p_end - p, "%" PRIx64, i64);
+ break;
+ case 's':
+ i64 = va_arg(va, uint64_t);
+ i32 = va_arg(va, uint32_t);
+ p += snprintf(p, p_end - p, "%" PRIx64 "/%x" PRIx32, i64, i32);
+ break;
+ default:
+ bad_format:
+ error_report("gdbstub: Bad syscall format string '%s'",
+ fmt - 1);
+ break;
+ }
+ } else {
+ *(p++) = *(fmt++);
+ }
+ }
+ *p = 0;
+
+ va_end(va);
+ gdb_syscall_handling(gdbserver_syscall_state.syscall_buf);
+}
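Note: a hypothetical call site (names are made up, not part of this patch) asking gdb to open a file on its side would look like the sketch below; "%s" consumes a guest address plus a length, "%x" a 32-bit value, and the reply later arrives via gdb_handle_file_io() which invokes the callback:

    /* Sketch only: request a remote open(); callback gets result and errno. */
    static void example_open_done(CPUState *cs, uint64_t ret, int err)
    {
        /* ret: descriptor returned by gdb, err: translated host errno */
    }

    static void example_request_open(uint64_t fname_addr, uint32_t fname_len,
                                     uint32_t gdb_flags)
    {
        gdb_do_syscall(example_open_done, "open,%s,%x,1a4",
                       fname_addr, fname_len, gdb_flags);
    }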
+
+/*
+ * GDB Command Handlers
+ */
+
+void gdb_handle_file_io(GArray *params, void *user_ctx)
+{
+ if (params->len >= 1 && gdbserver_syscall_state.current_syscall_cb) {
+ uint64_t ret;
+ int err;
+
+ ret = get_param(params, 0)->val_ull;
+ if (params->len >= 2) {
+ err = get_param(params, 1)->val_ull;
+ } else {
+ err = 0;
+ }
+
+ /* Convert GDB error numbers back to host error numbers. */
+#define E(X) case GDB_E##X: err = E##X; break
+ switch (err) {
+ case 0:
+ break;
+ E(PERM);
+ E(NOENT);
+ E(INTR);
+ E(BADF);
+ E(ACCES);
+ E(FAULT);
+ E(BUSY);
+ E(EXIST);
+ E(NODEV);
+ E(NOTDIR);
+ E(ISDIR);
+ E(INVAL);
+ E(NFILE);
+ E(MFILE);
+ E(FBIG);
+ E(NOSPC);
+ E(SPIPE);
+ E(ROFS);
+ E(NAMETOOLONG);
+ default:
+ err = EINVAL;
+ break;
+ }
+#undef E
+
+ gdbserver_syscall_state.current_syscall_cb(gdbserver_state.c_cpu,
+ ret, err);
+ gdbserver_syscall_state.current_syscall_cb = NULL;
+ }
+
+ if (params->len >= 3 && get_param(params, 2)->opcode == (uint8_t)'C') {
+ gdb_put_packet("T02");
+ return;
+ }
+
+ gdb_continue();
+}
diff --git a/gdbstub/trace-events b/gdbstub/trace-events
index 03f0c30..0c18a4d 100644
--- a/gdbstub/trace-events
+++ b/gdbstub/trace-events
@@ -7,7 +7,6 @@
gdbstub_op_continue_cpu(int cpu_index) "Continuing CPU %d"
gdbstub_op_stepping(int cpu_index) "Stepping CPU %d"
gdbstub_op_extra_info(const char *info) "Thread extra info: %s"
-gdbstub_hit_watchpoint(const char *type, int cpu_gdb_index, uint64_t vaddr) "Watchpoint hit, type=\"%s\" cpu=%d, vaddr=0x%" PRIx64 ""
gdbstub_hit_internal_error(void) "RUN_STATE_INTERNAL_ERROR"
gdbstub_hit_break(void) "RUN_STATE_DEBUG"
gdbstub_hit_paused(void) "RUN_STATE_PAUSED"
@@ -27,3 +26,6 @@
gdbstub_err_invalid_rle(void) "got invalid RLE sequence"
gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x"
gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x"
+
+# softmmu.c
+gdbstub_hit_watchpoint(const char *type, int cpu_gdb_index, uint64_t vaddr) "Watchpoint hit, type=\"%s\" cpu=%d, vaddr=0x%" PRIx64 ""
diff --git a/gdbstub/user-target.c b/gdbstub/user-target.c
new file mode 100644
index 0000000..fa0e59e
--- /dev/null
+++ b/gdbstub/user-target.c
@@ -0,0 +1,283 @@
+/*
+ * Target specific user-mode handling
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#include "qemu/osdep.h"
+#include "exec/gdbstub.h"
+#include "qemu.h"
+#include "internals.h"
+
+/*
+ * Map target signal numbers to GDB protocol signal numbers and vice
+ * versa. For user emulation's currently supported systems, we can
+ * assume most signals are defined.
+ */
+
+static int gdb_signal_table[] = {
+ 0,
+ TARGET_SIGHUP,
+ TARGET_SIGINT,
+ TARGET_SIGQUIT,
+ TARGET_SIGILL,
+ TARGET_SIGTRAP,
+ TARGET_SIGABRT,
+ -1, /* SIGEMT */
+ TARGET_SIGFPE,
+ TARGET_SIGKILL,
+ TARGET_SIGBUS,
+ TARGET_SIGSEGV,
+ TARGET_SIGSYS,
+ TARGET_SIGPIPE,
+ TARGET_SIGALRM,
+ TARGET_SIGTERM,
+ TARGET_SIGURG,
+ TARGET_SIGSTOP,
+ TARGET_SIGTSTP,
+ TARGET_SIGCONT,
+ TARGET_SIGCHLD,
+ TARGET_SIGTTIN,
+ TARGET_SIGTTOU,
+ TARGET_SIGIO,
+ TARGET_SIGXCPU,
+ TARGET_SIGXFSZ,
+ TARGET_SIGVTALRM,
+ TARGET_SIGPROF,
+ TARGET_SIGWINCH,
+ -1, /* SIGLOST */
+ TARGET_SIGUSR1,
+ TARGET_SIGUSR2,
+#ifdef TARGET_SIGPWR
+ TARGET_SIGPWR,
+#else
+ -1,
+#endif
+ -1, /* SIGPOLL */
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+#ifdef __SIGRTMIN
+ __SIGRTMIN + 1,
+ __SIGRTMIN + 2,
+ __SIGRTMIN + 3,
+ __SIGRTMIN + 4,
+ __SIGRTMIN + 5,
+ __SIGRTMIN + 6,
+ __SIGRTMIN + 7,
+ __SIGRTMIN + 8,
+ __SIGRTMIN + 9,
+ __SIGRTMIN + 10,
+ __SIGRTMIN + 11,
+ __SIGRTMIN + 12,
+ __SIGRTMIN + 13,
+ __SIGRTMIN + 14,
+ __SIGRTMIN + 15,
+ __SIGRTMIN + 16,
+ __SIGRTMIN + 17,
+ __SIGRTMIN + 18,
+ __SIGRTMIN + 19,
+ __SIGRTMIN + 20,
+ __SIGRTMIN + 21,
+ __SIGRTMIN + 22,
+ __SIGRTMIN + 23,
+ __SIGRTMIN + 24,
+ __SIGRTMIN + 25,
+ __SIGRTMIN + 26,
+ __SIGRTMIN + 27,
+ __SIGRTMIN + 28,
+ __SIGRTMIN + 29,
+ __SIGRTMIN + 30,
+ __SIGRTMIN + 31,
+ -1, /* SIGCANCEL */
+ __SIGRTMIN,
+ __SIGRTMIN + 32,
+ __SIGRTMIN + 33,
+ __SIGRTMIN + 34,
+ __SIGRTMIN + 35,
+ __SIGRTMIN + 36,
+ __SIGRTMIN + 37,
+ __SIGRTMIN + 38,
+ __SIGRTMIN + 39,
+ __SIGRTMIN + 40,
+ __SIGRTMIN + 41,
+ __SIGRTMIN + 42,
+ __SIGRTMIN + 43,
+ __SIGRTMIN + 44,
+ __SIGRTMIN + 45,
+ __SIGRTMIN + 46,
+ __SIGRTMIN + 47,
+ __SIGRTMIN + 48,
+ __SIGRTMIN + 49,
+ __SIGRTMIN + 50,
+ __SIGRTMIN + 51,
+ __SIGRTMIN + 52,
+ __SIGRTMIN + 53,
+ __SIGRTMIN + 54,
+ __SIGRTMIN + 55,
+ __SIGRTMIN + 56,
+ __SIGRTMIN + 57,
+ __SIGRTMIN + 58,
+ __SIGRTMIN + 59,
+ __SIGRTMIN + 60,
+ __SIGRTMIN + 61,
+ __SIGRTMIN + 62,
+ __SIGRTMIN + 63,
+ __SIGRTMIN + 64,
+ __SIGRTMIN + 65,
+ __SIGRTMIN + 66,
+ __SIGRTMIN + 67,
+ __SIGRTMIN + 68,
+ __SIGRTMIN + 69,
+ __SIGRTMIN + 70,
+ __SIGRTMIN + 71,
+ __SIGRTMIN + 72,
+ __SIGRTMIN + 73,
+ __SIGRTMIN + 74,
+ __SIGRTMIN + 75,
+ __SIGRTMIN + 76,
+ __SIGRTMIN + 77,
+ __SIGRTMIN + 78,
+ __SIGRTMIN + 79,
+ __SIGRTMIN + 80,
+ __SIGRTMIN + 81,
+ __SIGRTMIN + 82,
+ __SIGRTMIN + 83,
+ __SIGRTMIN + 84,
+ __SIGRTMIN + 85,
+ __SIGRTMIN + 86,
+ __SIGRTMIN + 87,
+ __SIGRTMIN + 88,
+ __SIGRTMIN + 89,
+ __SIGRTMIN + 90,
+ __SIGRTMIN + 91,
+ __SIGRTMIN + 92,
+ __SIGRTMIN + 93,
+ __SIGRTMIN + 94,
+ __SIGRTMIN + 95,
+ -1, /* SIGINFO */
+ -1, /* UNKNOWN */
+ -1, /* DEFAULT */
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1
+#endif
+};
+
+int gdb_signal_to_target(int sig)
+{
+ if (sig < ARRAY_SIZE(gdb_signal_table)) {
+ return gdb_signal_table[sig];
+ } else {
+ return -1;
+ }
+}
+
+int gdb_target_signal_to_gdb(int sig)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gdb_signal_table); i++) {
+ if (gdb_signal_table[i] == sig) {
+ return i;
+ }
+ }
+ return GDB_SIGNAL_UNKNOWN;
+}
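Note: the table above is indexed by GDB protocol signal number, so the two mappings are inverses where an entry exists; a minimal sanity sketch using GDB_SIGNAL_TRAP from internals.h:

    #include <assert.h>

    /* Sketch only: protocol signal 5 (TRAP) maps to TARGET_SIGTRAP and back. */
    static void example_signal_roundtrip(void)
    {
        assert(gdb_signal_to_target(GDB_SIGNAL_TRAP) == TARGET_SIGTRAP);
        assert(gdb_target_signal_to_gdb(TARGET_SIGTRAP) == GDB_SIGNAL_TRAP);
    }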
+
+int gdb_get_cpu_index(CPUState *cpu)
+{
+ TaskState *ts = (TaskState *) cpu->opaque;
+ return ts ? ts->ts_tid : -1;
+}
+
+/*
+ * User-mode specific command helpers
+ */
+
+void gdb_handle_query_offsets(GArray *params, void *user_ctx)
+{
+ TaskState *ts;
+
+ ts = gdbserver_state.c_cpu->opaque;
+ g_string_printf(gdbserver_state.str_buf,
+ "Text=" TARGET_ABI_FMT_lx
+ ";Data=" TARGET_ABI_FMT_lx
+ ";Bss=" TARGET_ABI_FMT_lx,
+ ts->info->code_offset,
+ ts->info->data_offset,
+ ts->info->data_offset);
+ gdb_put_strbuf();
+}
+
+#if defined(CONFIG_LINUX)
+/* Partial user only duplicate of helper in gdbstub.c */
+static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
+ uint8_t *buf, int len, bool is_write)
+{
+ CPUClass *cc;
+ cc = CPU_GET_CLASS(cpu);
+ if (cc->memory_rw_debug) {
+ return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
+ }
+ return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
+}
+
+void gdb_handle_query_xfer_auxv(GArray *params, void *user_ctx)
+{
+ TaskState *ts;
+ unsigned long offset, len, saved_auxv, auxv_len;
+
+ if (params->len < 2) {
+ gdb_put_packet("E22");
+ return;
+ }
+
+ offset = get_param(params, 0)->val_ul;
+ len = get_param(params, 1)->val_ul;
+ ts = gdbserver_state.c_cpu->opaque;
+ saved_auxv = ts->info->saved_auxv;
+ auxv_len = ts->info->auxv_len;
+
+ if (offset >= auxv_len) {
+ gdb_put_packet("E00");
+ return;
+ }
+
+ if (len > (MAX_PACKET_LENGTH - 5) / 2) {
+ len = (MAX_PACKET_LENGTH - 5) / 2;
+ }
+
+ if (len < auxv_len - offset) {
+ g_string_assign(gdbserver_state.str_buf, "m");
+ } else {
+ g_string_assign(gdbserver_state.str_buf, "l");
+ len = auxv_len - offset;
+ }
+
+ g_byte_array_set_size(gdbserver_state.mem_buf, len);
+ if (target_memory_rw_debug(gdbserver_state.g_cpu, saved_auxv + offset,
+ gdbserver_state.mem_buf->data, len, false)) {
+ gdb_put_packet("E14");
+ return;
+ }
+
+ gdb_memtox(gdbserver_state.str_buf,
+ (const char *)gdbserver_state.mem_buf->data, len);
+ gdb_put_packet_binary(gdbserver_state.str_buf->str,
+ gdbserver_state.str_buf->len, true);
+}
+#endif
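Note: qXfer replies are chunked; as an illustration (values made up), a request of "qXfer:auxv:read::0,1000" arrives in this handler with offset 0 and length 0x1000 in params, and the reply payload starts with 'm' while more auxv data remains or 'l' for the final chunk:

    /* Illustrative only: a final-chunk reply starts with 'l'. */
    const char *example_auxv_reply = "l<binary-escaped auxv bytes>";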
diff --git a/gdbstub/user.c b/gdbstub/user.c
index 484bd8f..80488b6 100644
--- a/gdbstub/user.c
+++ b/gdbstub/user.c
@@ -3,16 +3,423 @@
*
* We know for user-mode we are using TCG so we can call stuff directly.
*
+ * Copyright (c) 2003-2005 Fabrice Bellard
* Copyright (c) 2022 Linaro Ltd
*
- * SPDX-License-Identifier: GPL-2.0-or-later
+ * SPDX-License-Identifier: LGPL-2.0+
*/
#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qemu/sockets.h"
+#include "exec/hwaddr.h"
+#include "exec/tb-flush.h"
#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
+#include "gdbstub/user.h"
#include "hw/core/cpu.h"
+#include "trace.h"
#include "internals.h"
+/* User-mode specific state */
+typedef struct {
+ int fd;
+ char *socket_path;
+ int running_state;
+} GDBUserState;
+
+static GDBUserState gdbserver_user_state;
+
+int gdb_get_char(void)
+{
+ uint8_t ch;
+ int ret;
+
+ for (;;) {
+ ret = recv(gdbserver_user_state.fd, &ch, 1, 0);
+ if (ret < 0) {
+ if (errno == ECONNRESET) {
+ gdbserver_user_state.fd = -1;
+ }
+ if (errno != EINTR) {
+ return -1;
+ }
+ } else if (ret == 0) {
+ close(gdbserver_user_state.fd);
+ gdbserver_user_state.fd = -1;
+ return -1;
+ } else {
+ break;
+ }
+ }
+ return ch;
+}
+
+bool gdb_got_immediate_ack(void)
+{
+ int i;
+
+ i = gdb_get_char();
+ if (i < 0) {
+ /* no response, continue anyway */
+ return true;
+ }
+
+ if (i == '+') {
+ /* received correctly, continue */
+ return true;
+ }
+
+ /* anything else, including '-' then try again */
+ return false;
+}
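Note: the '+'/'-' bytes handled above are the remote-protocol acknowledgements for framed packets of the form "$<payload>#<checksum>", where the checksum is the payload bytes summed modulo 256. A minimal framing sketch (not the stub's own put_packet implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch only: frame a payload; "OK" becomes "$OK#9a". */
    static void example_frame_packet(const char *payload, char *out, size_t outlen)
    {
        uint8_t csum = 0;
        const char *p;

        for (p = payload; *p; p++) {
            csum += *p;
        }
        snprintf(out, outlen, "$%s#%02x", payload, csum);
    }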
+
+void gdb_put_buffer(const uint8_t *buf, int len)
+{
+ int ret;
+
+ while (len > 0) {
+ ret = send(gdbserver_user_state.fd, buf, len, 0);
+ if (ret < 0) {
+ if (errno != EINTR) {
+ return;
+ }
+ } else {
+ buf += ret;
+ len -= ret;
+ }
+ }
+}
+
+/* Tell the remote gdb that the process has exited. */
+void gdb_exit(int code)
+{
+ char buf[4];
+
+ if (!gdbserver_state.init) {
+ return;
+ }
+ if (gdbserver_user_state.socket_path) {
+ unlink(gdbserver_user_state.socket_path);
+ }
+ if (gdbserver_user_state.fd < 0) {
+ return;
+ }
+
+ trace_gdbstub_op_exiting((uint8_t)code);
+
+ snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
+ gdb_put_packet(buf);
+}
+
+int gdb_handlesig(CPUState *cpu, int sig)
+{
+ char buf[256];
+ int n;
+
+ if (!gdbserver_state.init || gdbserver_user_state.fd < 0) {
+ return sig;
+ }
+
+ /* disable single step if it was enabled */
+ cpu_single_step(cpu, 0);
+ tb_flush(cpu);
+
+ if (sig != 0) {
+ gdb_set_stop_cpu(cpu);
+ g_string_printf(gdbserver_state.str_buf,
+ "T%02xthread:", gdb_target_signal_to_gdb(sig));
+ gdb_append_thread_id(cpu, gdbserver_state.str_buf);
+ g_string_append_c(gdbserver_state.str_buf, ';');
+ gdb_put_strbuf();
+ }
+ /*
+ * gdb_put_packet() might have detected that the peer terminated the
+ * connection.
+ */
+ if (gdbserver_user_state.fd < 0) {
+ return sig;
+ }
+
+ sig = 0;
+ gdbserver_state.state = RS_IDLE;
+ gdbserver_user_state.running_state = 0;
+ while (gdbserver_user_state.running_state == 0) {
+ n = read(gdbserver_user_state.fd, buf, 256);
+ if (n > 0) {
+ int i;
+
+ for (i = 0; i < n; i++) {
+ gdb_read_byte(buf[i]);
+ }
+ } else {
+ /*
+ * XXX: Connection closed. Should probably wait for another
+ * connection before continuing.
+ */
+ if (n == 0) {
+ close(gdbserver_user_state.fd);
+ }
+ gdbserver_user_state.fd = -1;
+ return sig;
+ }
+ }
+ sig = gdbserver_state.signal;
+ gdbserver_state.signal = 0;
+ return sig;
+}
+
+/* Tell the remote gdb that the process has exited due to SIG. */
+void gdb_signalled(CPUArchState *env, int sig)
+{
+ char buf[4];
+
+ if (!gdbserver_state.init || gdbserver_user_state.fd < 0) {
+ return;
+ }
+
+ snprintf(buf, sizeof(buf), "X%02x", gdb_target_signal_to_gdb(sig));
+ gdb_put_packet(buf);
+}
+
+static void gdb_accept_init(int fd)
+{
+ gdb_init_gdbserver_state();
+ gdb_create_default_process(&gdbserver_state);
+ gdbserver_state.processes[0].attached = true;
+ gdbserver_state.c_cpu = gdb_first_attached_cpu();
+ gdbserver_state.g_cpu = gdbserver_state.c_cpu;
+ gdbserver_user_state.fd = fd;
+ gdb_has_xml = false;
+}
+
+static bool gdb_accept_socket(int gdb_fd)
+{
+ int fd;
+
+ for (;;) {
+ fd = accept(gdb_fd, NULL, NULL);
+ if (fd < 0 && errno != EINTR) {
+ perror("accept socket");
+ return false;
+ } else if (fd >= 0) {
+ qemu_set_cloexec(fd);
+ break;
+ }
+ }
+
+ gdb_accept_init(fd);
+ return true;
+}
+
+static int gdbserver_open_socket(const char *path)
+{
+ struct sockaddr_un sockaddr = {};
+ int fd, ret;
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("create socket");
+ return -1;
+ }
+
+ sockaddr.sun_family = AF_UNIX;
+ pstrcpy(sockaddr.sun_path, sizeof(sockaddr.sun_path) - 1, path);
+ ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
+ if (ret < 0) {
+ perror("bind socket");
+ close(fd);
+ return -1;
+ }
+ ret = listen(fd, 1);
+ if (ret < 0) {
+ perror("listen socket");
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+static bool gdb_accept_tcp(int gdb_fd)
+{
+ struct sockaddr_in sockaddr = {};
+ socklen_t len;
+ int fd;
+
+ for (;;) {
+ len = sizeof(sockaddr);
+ fd = accept(gdb_fd, (struct sockaddr *)&sockaddr, &len);
+ if (fd < 0 && errno != EINTR) {
+ perror("accept");
+ return false;
+ } else if (fd >= 0) {
+ qemu_set_cloexec(fd);
+ break;
+ }
+ }
+
+ /* set short latency */
+ if (socket_set_nodelay(fd)) {
+ perror("setsockopt");
+ close(fd);
+ return false;
+ }
+
+ gdb_accept_init(fd);
+ return true;
+}
+
+static int gdbserver_open_port(int port)
+{
+ struct sockaddr_in sockaddr;
+ int fd, ret;
+
+ fd = socket(PF_INET, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("socket");
+ return -1;
+ }
+ qemu_set_cloexec(fd);
+
+ socket_set_fast_reuse(fd);
+
+ sockaddr.sin_family = AF_INET;
+ sockaddr.sin_port = htons(port);
+ sockaddr.sin_addr.s_addr = 0;
+ ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
+ if (ret < 0) {
+ perror("bind");
+ close(fd);
+ return -1;
+ }
+ ret = listen(fd, 1);
+ if (ret < 0) {
+ perror("listen");
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+int gdbserver_start(const char *port_or_path)
+{
+ int port = g_ascii_strtoull(port_or_path, NULL, 10);
+ int gdb_fd;
+
+ if (port > 0) {
+ gdb_fd = gdbserver_open_port(port);
+ } else {
+ gdb_fd = gdbserver_open_socket(port_or_path);
+ }
+
+ if (gdb_fd < 0) {
+ return -1;
+ }
+
+ if (port > 0 && gdb_accept_tcp(gdb_fd)) {
+ return 0;
+ } else if (gdb_accept_socket(gdb_fd)) {
+ gdbserver_user_state.socket_path = g_strdup(port_or_path);
+ return 0;
+ }
+
+ /* gone wrong */
+ close(gdb_fd);
+ return -1;
+}
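Note: in user mode the single argument selects the transport; a usage sketch with illustrative values (in practice only one of the two calls would be made, since the function waits for a connection):

    /* Sketch only: a numeric string listens on TCP, anything else on a socket. */
    static void example_start(void)
    {
        gdbserver_start("1234");          /* TCP port 1234 */
        gdbserver_start("/tmp/qemu.gdb"); /* unix-domain socket path */
    }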
+
+/* Disable gdb stub for child processes. */
+void gdbserver_fork(CPUState *cpu)
+{
+ if (!gdbserver_state.init || gdbserver_user_state.fd < 0) {
+ return;
+ }
+ close(gdbserver_user_state.fd);
+ gdbserver_user_state.fd = -1;
+ cpu_breakpoint_remove_all(cpu, BP_GDB);
+ /* no cpu_watchpoint_remove_all for user-mode */
+}
+
+/*
+ * Execution state helpers
+ */
+
+void gdb_handle_query_attached(GArray *params, void *user_ctx)
+{
+ gdb_put_packet("0");
+}
+
+void gdb_continue(void)
+{
+ gdbserver_user_state.running_state = 1;
+ trace_gdbstub_op_continue();
+}
+
+/*
+ * Resume execution, for user-mode emulation it's equivalent to
+ * gdb_continue.
+ */
+int gdb_continue_partial(char *newstates)
+{
+ CPUState *cpu;
+ int res = 0;
+ /*
+ * This is not exactly accurate, but it's an improvement compared to the
+ * previous situation, where only one CPU would be single-stepped.
+ */
+ CPU_FOREACH(cpu) {
+ if (newstates[cpu->cpu_index] == 's') {
+ trace_gdbstub_op_stepping(cpu->cpu_index);
+ cpu_single_step(cpu, gdbserver_state.sstep_flags);
+ }
+ }
+ gdbserver_user_state.running_state = 1;
+ return res;
+}
+
+/*
+ * Memory access helpers
+ */
+int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
+ uint8_t *buf, int len, bool is_write)
+{
+ CPUClass *cc;
+
+ cc = CPU_GET_CLASS(cpu);
+ if (cc->memory_rw_debug) {
+ return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
+ }
+ return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
+}
+
+/*
+ * cpu helpers
+ */
+
+unsigned int gdb_get_max_cpus(void)
+{
+ CPUState *cpu;
+ unsigned int max_cpus = 1;
+
+ CPU_FOREACH(cpu) {
+ max_cpus = max_cpus <= cpu->cpu_index ? cpu->cpu_index + 1 : max_cpus;
+ }
+
+ return max_cpus;
+}
+
+/* replay not supported for user-mode */
+bool gdb_can_reverse(void)
+{
+ return false;
+}
+
+/*
+ * Break/Watch point helpers
+ */
+
bool gdb_supports_guest_debug(void)
{
/* user-mode == TCG == supported */
@@ -65,3 +472,17 @@
{
cpu_breakpoint_remove_all(cs, BP_GDB);
}
+
+/*
+ * For user-mode syscall support we send the system call immediately
+ * and then return control to gdb for it to process the syscall request.
+ * Since the protocol requires that gdb hands control back to us
+ * using a "here are the results" F packet, we don't need to check
+ * gdb_handlesig's return value (which is the signal to deliver if
+ * execution was resumed via a continue packet).
+ */
+void gdb_syscall_handling(const char *syscall_packet)
+{
+ gdb_put_packet(syscall_packet);
+ gdb_handlesig(gdbserver_state.c_cpu, 0);
+}
diff --git a/gitdm.config b/gitdm.config
index 288b100..9db43ca 100644
--- a/gitdm.config
+++ b/gitdm.config
@@ -31,8 +31,11 @@
# identifiable corporate emails. Please keep this list sorted.
#
+GroupMap contrib/gitdm/group-map-alibaba Alibaba
+GroupMap contrib/gitdm/group-map-amd AMD
GroupMap contrib/gitdm/group-map-cadence Cadence Design Systems
GroupMap contrib/gitdm/group-map-codeweavers CodeWeavers
+GroupMap contrib/gitdm/group-map-facebook Facebook
GroupMap contrib/gitdm/group-map-ibm IBM
GroupMap contrib/gitdm/group-map-janustech Janus Technologies
GroupMap contrib/gitdm/group-map-netflix Netflix
diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx
index 754b1e8..47d63d2 100644
--- a/hmp-commands-info.hx
+++ b/hmp-commands-info.hx
@@ -993,3 +993,17 @@
``info virtio-queue-element`` *path* *queue* [*index*]
Display element of a given virtio queue
ERST
+
+ {
+ .name = "cryptodev",
+ .args_type = "",
+ .params = "",
+ .help = "show the crypto devices",
+ .cmd = hmp_info_cryptodev,
+ .flags = "p",
+ },
+
+SRST
+ ``info cryptodev``
+ Show the crypto devices.
+ERST
diff --git a/hmp-commands.hx b/hmp-commands.hx
index b87c250..bb85ee1 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1486,6 +1486,7 @@
Inject an MCE on the given CPU (x86 only).
ERST
+#ifdef CONFIG_POSIX
{
.name = "getfd",
.args_type = "fdname:s",
@@ -1501,6 +1502,7 @@
mechanism on unix sockets, it is stored using the name *fdname* for
later use by other monitor commands.
ERST
+#endif
{
.name = "closefd",
diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build
index 12443b6..fd37b7a 100644
--- a/hw/9pfs/meson.build
+++ b/hw/9pfs/meson.build
@@ -15,7 +15,7 @@
))
fs_ss.add(when: 'CONFIG_LINUX', if_true: files('9p-util-linux.c'))
fs_ss.add(when: 'CONFIG_DARWIN', if_true: files('9p-util-darwin.c'))
-fs_ss.add(when: 'CONFIG_XEN', if_true: files('xen-9p-backend.c'))
+fs_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-9p-backend.c'))
softmmu_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss)
specific_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-9p-device.c'))
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 65c4979..74f3a05 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -22,6 +22,7 @@
#include "qemu/config-file.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
+#include "qemu/iov.h"
#include "fsdev/qemu-fsdev.h"
#define VERSIONS "1"
@@ -241,7 +242,7 @@
xen_wmb();
ring->inprogress = false;
- xenevtchn_notify(ring->evtchndev, ring->local_port);
+ qemu_xen_evtchn_notify(ring->evtchndev, ring->local_port);
qemu_bh_schedule(ring->bh);
}
@@ -324,8 +325,8 @@
Xen9pfsRing *ring = opaque;
evtchn_port_t port;
- port = xenevtchn_pending(ring->evtchndev);
- xenevtchn_unmask(ring->evtchndev, port);
+ port = qemu_xen_evtchn_pending(ring->evtchndev);
+ qemu_xen_evtchn_unmask(ring->evtchndev, port);
qemu_bh_schedule(ring->bh);
}
@@ -337,10 +338,10 @@
for (i = 0; i < xen_9pdev->num_rings; i++) {
if (xen_9pdev->rings[i].evtchndev != NULL) {
- qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
- NULL, NULL, NULL);
- xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
- xen_9pdev->rings[i].local_port);
+ qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
+ NULL, NULL, NULL);
+ qemu_xen_evtchn_unbind(xen_9pdev->rings[i].evtchndev,
+ xen_9pdev->rings[i].local_port);
xen_9pdev->rings[i].evtchndev = NULL;
}
}
@@ -359,12 +360,13 @@
if (xen_9pdev->rings[i].data != NULL) {
xen_be_unmap_grant_refs(&xen_9pdev->xendev,
xen_9pdev->rings[i].data,
+ xen_9pdev->rings[i].intf->ref,
(1 << xen_9pdev->rings[i].ring_order));
}
if (xen_9pdev->rings[i].intf != NULL) {
- xen_be_unmap_grant_refs(&xen_9pdev->xendev,
- xen_9pdev->rings[i].intf,
- 1);
+ xen_be_unmap_grant_ref(&xen_9pdev->xendev,
+ xen_9pdev->rings[i].intf,
+ xen_9pdev->rings[i].ref);
}
if (xen_9pdev->rings[i].bh != NULL) {
qemu_bh_delete(xen_9pdev->rings[i].bh);
@@ -447,12 +449,12 @@
xen_9pdev->rings[i].inprogress = false;
- xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
+ xen_9pdev->rings[i].evtchndev = qemu_xen_evtchn_open();
if (xen_9pdev->rings[i].evtchndev == NULL) {
goto out;
}
- qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
- xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
+ qemu_set_cloexec(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev));
+ xen_9pdev->rings[i].local_port = qemu_xen_evtchn_bind_interdomain
(xen_9pdev->rings[i].evtchndev,
xendev->dom,
xen_9pdev->rings[i].evtchn);
@@ -463,8 +465,8 @@
goto out;
}
xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
- qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
- xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
+ qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
+ xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
}
xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
diff --git a/hw/acpi/acpi-pci-hotplug-stub.c b/hw/acpi/acpi-pci-hotplug-stub.c
index a43f6da..dcee3ad 100644
--- a/hw/acpi/acpi-pci-hotplug-stub.c
+++ b/hw/acpi/acpi-pci-hotplug-stub.c
@@ -5,8 +5,7 @@
const VMStateDescription vmstate_acpi_pcihp_pci_status;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
- MemoryRegion *address_space_io, bool bridges_enabled,
- uint16_t io_base)
+ MemoryRegion *address_space_io, uint16_t io_base)
{
return;
}
@@ -36,8 +35,12 @@
return;
}
-void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off)
+void acpi_pcihp_reset(AcpiPciHpState *s)
{
return;
}
+bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus)
+{
+ return true;
+}
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index d23bfca..25e2c72 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -218,7 +218,7 @@
{
ICH9LPCPMRegs *s = opaque;
- return s->use_acpi_hotplug_bridge;
+ return s->acpi_pci_hotplug.use_acpi_hotplug_bridge;
}
static const VMStateDescription vmstate_pcihp_state = {
@@ -277,8 +277,8 @@
}
pm->smi_en_wmask = ~0;
- if (pm->use_acpi_hotplug_bridge) {
- acpi_pcihp_reset(&pm->acpi_pci_hotplug, true);
+ if (pm->acpi_pci_hotplug.use_acpi_hotplug_bridge) {
+ acpi_pcihp_reset(&pm->acpi_pci_hotplug);
}
acpi_update_sci(&pm->acpi_regs, pm->irq);
@@ -316,12 +316,11 @@
acpi_pm_tco_init(&pm->tco_regs, &pm->io);
}
- if (pm->use_acpi_hotplug_bridge) {
+ if (pm->acpi_pci_hotplug.use_acpi_hotplug_bridge) {
acpi_pcihp_init(OBJECT(lpc_pci),
&pm->acpi_pci_hotplug,
pci_get_bus(lpc_pci),
pci_address_space_io(lpc_pci),
- true,
ACPI_PCIHP_ADDR_ICH9);
qbus_set_hotplug_handler(BUS(pci_get_bus(lpc_pci)),
@@ -403,14 +402,14 @@
{
ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
- return s->pm.use_acpi_hotplug_bridge;
+ return s->pm.acpi_pci_hotplug.use_acpi_hotplug_bridge;
}
static void ich9_pm_set_acpi_pci_hotplug(Object *obj, bool value, Error **errp)
{
ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
- s->pm.use_acpi_hotplug_bridge = value;
+ s->pm.acpi_pci_hotplug.use_acpi_hotplug_bridge = value;
}
static bool ich9_pm_get_keep_pci_slot_hpc(Object *obj, Error **errp)
@@ -435,7 +434,7 @@
pm->disable_s3 = 0;
pm->disable_s4 = 0;
pm->s4_val = 2;
- pm->use_acpi_hotplug_bridge = true;
+ pm->acpi_pci_hotplug.use_acpi_hotplug_bridge = true;
pm->keep_pci_slot_hpc = true;
pm->enable_tco = true;
@@ -579,6 +578,12 @@
}
}
+bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus)
+{
+ ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev);
+ return acpi_pcihp_is_hotpluggbale_bus(&lpc->pm.acpi_pci_hotplug, bus);
+}
+
void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
{
ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
diff --git a/hw/acpi/pci-bridge.c b/hw/acpi/pci-bridge.c
index 5f3ee51..7baa703 100644
--- a/hw/acpi/pci-bridge.c
+++ b/hw/acpi/pci-bridge.c
@@ -21,7 +21,17 @@
{
PCIBridge *br = PCI_BRIDGE(adev);
- if (object_property_find(OBJECT(&br->sec_bus), ACPI_PCIHP_PROP_BSEL)) {
- build_append_pci_bus_devices(scope, pci_bridge_get_sec_bus(br));
+ if (!DEVICE(br)->hotplugged) {
+ PCIBus *sec_bus = pci_bridge_get_sec_bus(br);
+
+ build_append_pci_bus_devices(scope, sec_bus);
+
+ /*
+ * generate hotplug slots descriptors if
+ * bridge has ACPI PCI hotplug attached,
+ */
+ if (object_property_find(OBJECT(sec_bus), ACPI_PCIHP_PROP_BSEL)) {
+ build_append_pcihp_slots(scope, sec_bus);
+ }
}
}
diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c
index 5dc7377..dcfb779 100644
--- a/hw/acpi/pcihp.c
+++ b/hw/acpi/pcihp.c
@@ -54,21 +54,6 @@
PCIBus *bus;
} AcpiPciHpFind;
-static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
-{
- return a - b;
-}
-
-static GSequence *pci_acpi_index_list(void)
-{
- static GSequence *used_acpi_index_list;
-
- if (!used_acpi_index_list) {
- used_acpi_index_list = g_sequence_new(NULL);
- }
- return used_acpi_index_list;
-}
-
static int acpi_pcihp_get_bsel(PCIBus *bus)
{
Error *local_err = NULL;
@@ -136,20 +121,6 @@
}
}
-static void acpi_pcihp_disable_root_bus(void)
-{
- Object *host = acpi_get_i386_pci_host();
- PCIBus *bus;
-
- bus = PCI_HOST_BRIDGE(host)->bus;
- if (bus && qbus_is_hotpluggable(BUS(bus))) {
- /* setting the hotplug handler to NULL makes the bus non-hotpluggable */
- qbus_set_hotplug_handler(BUS(bus), NULL);
- }
-
- return;
-}
-
static void acpi_pcihp_test_hotplug_bus(PCIBus *bus, void *opaque)
{
AcpiPciHpFind *find = opaque;
@@ -291,17 +262,12 @@
}
}
-void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off)
+void acpi_pcihp_reset(AcpiPciHpState *s)
{
- if (acpihp_root_off) {
- acpi_pcihp_disable_root_bus();
- }
- acpi_set_pci_info(!s->legacy_piix);
+ acpi_set_pci_info(s->use_acpi_hotplug_bridge);
acpi_pcihp_update(s);
}
-#define ONBOARD_INDEX_MAX (16 * 1024 - 1)
-
void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -314,34 +280,6 @@
ACPI_PCIHP_PROP_BSEL "' set");
return;
}
-
- /*
- * capped by systemd (see: udev-builtin-net_id.c)
- * as it's the only known user honor it to avoid users
- * misconfigure QEMU and then wonder why acpi-index doesn't work
- */
- if (pdev->acpi_index > ONBOARD_INDEX_MAX) {
- error_setg(errp, "acpi-index should be less or equal to %u",
- ONBOARD_INDEX_MAX);
- return;
- }
-
- /*
- * make sure that acpi-index is unique across all present PCI devices
- */
- if (pdev->acpi_index) {
- GSequence *used_indexes = pci_acpi_index_list();
-
- if (g_sequence_lookup(used_indexes, GINT_TO_POINTER(pdev->acpi_index),
- g_cmp_uint32, NULL)) {
- error_setg(errp, "a PCI device with acpi-index = %" PRIu32
- " already exist", pdev->acpi_index);
- return;
- }
- g_sequence_insert_sorted(used_indexes,
- GINT_TO_POINTER(pdev->acpi_index),
- g_cmp_uint32, NULL);
- }
}
void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
@@ -361,17 +299,10 @@
* Overwrite the default hotplug handler with the ACPI PCI one
* for cold plugged bridges only.
*/
- if (!s->legacy_piix &&
+ if (s->use_acpi_hotplug_bridge &&
object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
PCIBus *sec = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
- /* Remove all hot-plug handlers if hot-plug is disabled on slot */
- if (object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT) &&
- !PCIE_SLOT(pdev)->hotplug) {
- qbus_set_hotplug_handler(BUS(sec), NULL);
- return;
- }
-
qbus_set_hotplug_handler(BUS(sec), OBJECT(hotplug_dev));
/* We don't have to overwrite any other hotplug handler yet */
assert(QLIST_EMPTY(&sec->child));
@@ -401,17 +332,6 @@
trace_acpi_pci_unplug(PCI_SLOT(pdev->devfn),
acpi_pcihp_get_bsel(pci_get_bus(pdev)));
- /*
- * clean up acpi-index so it could reused by another device
- */
- if (pdev->acpi_index) {
- GSequence *used_indexes = pci_acpi_index_list();
-
- g_sequence_remove(g_sequence_lookup(used_indexes,
- GINT_TO_POINTER(pdev->acpi_index),
- g_cmp_uint32, NULL));
- }
-
qdev_unrealize(dev);
}
@@ -441,6 +361,24 @@
acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS);
}
+bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus)
+{
+ Object *o = OBJECT(bus->parent);
+
+ if (s->use_acpi_hotplug_bridge &&
+ object_dynamic_cast(o, TYPE_PCI_BRIDGE)) {
+ if (object_dynamic_cast(o, TYPE_PCIE_SLOT) && !PCIE_SLOT(o)->hotplug) {
+ return false;
+ }
+ return true;
+ }
+
+ if (s->use_acpi_root_pci_hotplug) {
+ return true;
+ }
+ return false;
+}
+
static uint64_t pci_read(void *opaque, hwaddr addr, unsigned int size)
{
AcpiPciHpState *s = opaque;
@@ -454,7 +392,7 @@
switch (addr) {
case PCI_UP_BASE:
val = s->acpi_pcihp_pci_status[bsel].up;
- if (!s->legacy_piix) {
+ if (s->use_acpi_hotplug_bridge) {
s->acpi_pcihp_pci_status[bsel].up = 0;
}
trace_acpi_pci_up_read(val);
@@ -529,7 +467,8 @@
trace_acpi_pci_ej_write(addr, data);
break;
case PCI_SEL_BASE:
- s->hotplug_select = s->legacy_piix ? ACPI_PCIHP_BSEL_DEFAULT : data;
+ s->hotplug_select = s->use_acpi_hotplug_bridge ? data :
+ ACPI_PCIHP_BSEL_DEFAULT;
trace_acpi_pci_sel_write(addr, data);
default:
break;
@@ -547,14 +486,13 @@
};
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
- MemoryRegion *address_space_io, bool bridges_enabled,
+ MemoryRegion *address_space_io,
uint16_t io_base)
{
s->io_len = ACPI_PCIHP_SIZE;
s->io_base = io_base;
s->root = root_bus;
- s->legacy_piix = !bridges_enabled;
memory_region_init_io(&s->io, owner, &acpi_pcihp_io_ops, s,
"acpi-pci-hotplug", s->io_len);
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index eac2125..63d2113 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -170,14 +170,14 @@
static bool vmstate_test_use_acpi_hotplug_bridge(void *opaque, int version_id)
{
PIIX4PMState *s = opaque;
- return s->use_acpi_hotplug_bridge;
+ return s->acpi_pci_hotplug.use_acpi_hotplug_bridge;
}
static bool vmstate_test_no_use_acpi_hotplug_bridge(void *opaque,
int version_id)
{
PIIX4PMState *s = opaque;
- return !s->use_acpi_hotplug_bridge;
+ return !s->acpi_pci_hotplug.use_acpi_hotplug_bridge;
}
static bool vmstate_test_use_memhp(void *opaque)
@@ -234,7 +234,8 @@
static bool vmstate_test_migrate_acpi_index(void *opaque, int version_id)
{
PIIX4PMState *s = PIIX4_PM(opaque);
- return s->use_acpi_hotplug_bridge && !s->not_migrate_acpi_index;
+ return s->acpi_pci_hotplug.use_acpi_hotplug_bridge &&
+ !s->not_migrate_acpi_index;
}
/* qemu-kvm 1.2 uses version 3 but advertised as 2
@@ -303,8 +304,9 @@
acpi_update_sci(&s->ar, s->irq);
pm_io_space_update(s);
- if (s->use_acpi_hotplug_bridge || s->use_acpi_root_pci_hotplug) {
- acpi_pcihp_reset(&s->acpi_pci_hotplug, !s->use_acpi_root_pci_hotplug);
+ if (s->acpi_pci_hotplug.use_acpi_hotplug_bridge ||
+ s->acpi_pci_hotplug.use_acpi_root_pci_hotplug) {
+ acpi_pcihp_reset(&s->acpi_pci_hotplug);
}
}
@@ -402,6 +404,13 @@
}
}
+static bool piix4_is_hotpluggable_bus(HotplugHandler *hotplug_dev,
+ BusState *bus)
+{
+ PIIX4PMState *s = PIIX4_PM(hotplug_dev);
+ return acpi_pcihp_is_hotpluggbale_bus(&s->acpi_pci_hotplug, bus);
+}
+
static void piix4_pm_machine_ready(Notifier *n, void *opaque)
{
PIIX4PMState *s = container_of(n, PIIX4PMState, machine_ready);
@@ -487,12 +496,11 @@
qemu_add_machine_init_done_notifier(&s->machine_ready);
if (xen_enabled()) {
- s->use_acpi_hotplug_bridge = false;
+ s->acpi_pci_hotplug.use_acpi_hotplug_bridge = false;
}
piix4_acpi_system_hot_add_init(pci_address_space_io(dev),
pci_get_bus(dev), s);
- qbus_set_hotplug_handler(BUS(pci_get_bus(dev)), OBJECT(s));
piix4_pm_add_properties(s);
}
@@ -561,9 +569,11 @@
"acpi-gpe0", GPE_LEN);
memory_region_add_subregion(parent, GPE_BASE, &s->io_gpe);
- if (s->use_acpi_hotplug_bridge || s->use_acpi_root_pci_hotplug) {
+ if (s->acpi_pci_hotplug.use_acpi_hotplug_bridge ||
+ s->acpi_pci_hotplug.use_acpi_root_pci_hotplug) {
acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent,
- s->use_acpi_hotplug_bridge, ACPI_PCIHP_ADDR_PIIX4);
+ ACPI_PCIHP_ADDR_PIIX4);
+ qbus_set_hotplug_handler(BUS(pci_get_bus(PCI_DEVICE(s))), OBJECT(s));
}
s->cpu_hotplug_legacy = true;
@@ -602,9 +612,9 @@
DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_DISABLED, PIIX4PMState, disable_s4, 0),
DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_VAL, PIIX4PMState, s4_val, 2),
DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, PIIX4PMState,
- use_acpi_hotplug_bridge, true),
+ acpi_pci_hotplug.use_acpi_hotplug_bridge, true),
DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCI_ROOTHP, PIIX4PMState,
- use_acpi_root_pci_hotplug, true),
+ acpi_pci_hotplug.use_acpi_root_pci_hotplug, true),
DEFINE_PROP_BOOL("memory-hotplug-support", PIIX4PMState,
acpi_memory_hotplug.is_enabled, true),
DEFINE_PROP_BOOL("smm-compat", PIIX4PMState, smm_compat, false),
@@ -641,6 +651,7 @@
hc->plug = piix4_device_plug_cb;
hc->unplug_request = piix4_device_unplug_request_cb;
hc->unplug = piix4_device_unplug_cb;
+ hc->is_hotpluggable_bus = piix4_is_hotpluggable_bus;
adevc->ospm_status = piix4_ospm_status;
adevc->send_event = piix4_send_gpe;
adevc->madt_cpu = pc_madt_cpu_entry;
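One detail worth noting in the piix4.c hunk: moving use_acpi_hotplug_bridge and use_acpi_root_pci_hotplug into the embedded AcpiPciHpState does not change the externally visible property names, because DEFINE_PROP_BOOL() only needs an lvalue path into the state struct (it resolves to an offsetof()), so a nested field behaves the same as a top-level one. A minimal illustration with hypothetical DemoState/DemoSub types (not from the patch):

    typedef struct DemoSub { bool flag; } DemoSub;
    typedef struct DemoState { DeviceState parent_obj; DemoSub sub; } DemoState;

    static Property demo_properties[] = {
        /* the property is still named "demo-flag" even though its storage
         * moved into an embedded sub-struct */
        DEFINE_PROP_BOOL("demo-flag", DemoState, sub.flag, true),
        DEFINE_PROP_END_OF_LIST(),
    };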
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index 86601cb..c1f2b9c 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -524,6 +524,11 @@
at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x51, 128 * KiB);
at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 128 * KiB,
yosemitev2_bmc_fruid, yosemitev2_bmc_fruid_len);
+ /* TMP421 */
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "tmp421", 0x1f);
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp421", 0x4e);
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp421", 0x4f);
+
}
static void romulus_bmc_i2c_init(AspeedMachineState *bmc)
@@ -542,6 +547,10 @@
at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x54, 128 * KiB);
at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 6), 0x54, 128 * KiB,
tiogapass_bmc_fruid, tiogapass_bmc_fruid_len);
+ /* TMP421 */
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "tmp421", 0x1f);
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), "tmp421", 0x4f);
+ i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), "tmp421", 0x4e);
}
static void create_pca9552(AspeedSoCState *soc, int bus_id, int addr)
diff --git a/hw/arm/aspeed_eeprom.c b/hw/arm/aspeed_eeprom.c
index 2fb2d5d..dc33a88 100644
--- a/hw/arm/aspeed_eeprom.c
+++ b/hw/arm/aspeed_eeprom.c
@@ -101,17 +101,17 @@
/* Yosemite V2 BMC FRU */
const uint8_t yosemitev2_bmc_fruid[] = {
0x01, 0x00, 0x00, 0x01, 0x0d, 0x00, 0x00, 0xf1, 0x01, 0x0c, 0x00, 0x36,
- 0xe6, 0xd0, 0xc6, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x42, 0x4d,
- 0x43, 0x20, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x4d, 0x6f,
- 0x64, 0x75, 0x6c, 0x65, 0xcd, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
+ 0xe6, 0xd0, 0xc6, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x42, 0x61,
+ 0x73, 0x65, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x20, 0x4d, 0x50, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xcd, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58,
0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e,
0x30, 0xc9, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2,
0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc1, 0x39, 0x01, 0x0c, 0x00, 0xc6,
0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x59, 0x6f, 0x73, 0x65, 0x6d,
- 0x69, 0x74, 0x65, 0x20, 0x56, 0x32, 0x2e, 0x30, 0x20, 0x45, 0x56, 0x54,
- 0x32, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
+ 0x69, 0x74, 0x65, 0x20, 0x56, 0x32, 0x20, 0x4d, 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58,
0x58, 0x58, 0x58, 0x58, 0xc4, 0x45, 0x56, 0x54, 0x32, 0xcd, 0x58, 0x58,
0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc7,
0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, 0x30, 0xc9,
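The changed bytes above are IPMI FRU type/length-encoded ASCII fields: a leading 0xd2 means type 0b11 (8-bit ASCII) with length 0x12 = 18, so the first string changes from "BMC Storage Module" to "Baseboard MP" (zero-padded to 18 bytes) and the second from "Yosemite V2.0 EVT2" to "Yosemite V2 MP". A small decoding helper, as a sketch only (not part of the patch):

    /* Split an IPMI FRU type/length byte: bits 7:6 are the type code
     * (0b11 == 8-bit ASCII/Latin-1), bits 5:0 the field length in bytes. */
    static void fru_field_info(uint8_t tl, unsigned *type, unsigned *len)
    {
        *type = tl >> 6;     /* 0xd2 -> 0b11: ASCII */
        *len  = tl & 0x3f;   /* 0xd2 -> 0x12 == 18 bytes follow */
    }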
diff --git a/hw/arm/collie.c b/hw/arm/collie.c
index 9edff59..a0ad1b8 100644
--- a/hw/arm/collie.c
+++ b/hw/arm/collie.c
@@ -19,6 +19,8 @@
#include "exec/address-spaces.h"
#include "cpu.h"
#include "qom/object.h"
+#include "qemu/error-report.h"
+
#define RAM_SIZE (512 * MiB)
#define FLASH_SIZE (32 * MiB)
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
index 71a7df1..8c7fa91 100644
--- a/hw/arm/cubieboard.c
+++ b/hw/arm/cubieboard.c
@@ -17,6 +17,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/arm/allwinner-a10.h"
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
index 06d9add..c9010b2 100644
--- a/hw/arm/musicpal.c
+++ b/hw/arm/musicpal.c
@@ -37,6 +37,8 @@
#include "qemu/cutils.h"
#include "qom/object.h"
#include "hw/net/mv88w8618_eth.h"
+#include "qemu/error-report.h"
+
#define MP_MISC_BASE 0x80002000
#define MP_MISC_SIZE 0x00001000
diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c
index 9b31207..2aef579 100644
--- a/hw/arm/npcm7xx_boards.c
+++ b/hw/arm/npcm7xx_boards.c
@@ -30,6 +30,8 @@
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
+#include "qemu/error-report.h"
+
#define NPCM7XX_POWER_ON_STRAPS_DEFAULT ( \
NPCM7XX_PWRON_STRAP_SPI0F18 | \
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index c9df063..9e49e9e 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -45,6 +45,8 @@
#include "hw/loader.h"
#include "hw/sysbus.h"
#include "qemu/log.h"
+#include "qemu/error-report.h"
+
/* Nokia N8x0 support */
struct n800_s {
diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c
index e721292..4bf1579 100644
--- a/hw/arm/omap_sx1.c
+++ b/hw/arm/omap_sx1.c
@@ -37,6 +37,8 @@
#include "exec/address-spaces.h"
#include "cpu.h"
#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+
/*****************************************************************************/
/* Siemens SX1 Cellphone V1 */
diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c
index 3ace474..1065336 100644
--- a/hw/arm/orangepi.c
+++ b/hw/arm/orangepi.c
@@ -21,6 +21,7 @@
#include "qemu/units.h"
#include "exec/address-spaces.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/arm/allwinner-h3.h"
diff --git a/hw/arm/palm.c b/hw/arm/palm.c
index 1457f10..17c11ac 100644
--- a/hw/arm/palm.c
+++ b/hw/arm/palm.c
@@ -32,6 +32,8 @@
#include "cpu.h"
#include "qemu/cutils.h"
#include "qom/object.h"
+#include "qemu/error-report.h"
+
static uint64_t static_read(void *opaque, hwaddr offset, unsigned size)
{
diff --git a/hw/audio/trace-events b/hw/audio/trace-events
index e0e71cd..4dec48a 100644
--- a/hw/audio/trace-events
+++ b/hw/audio/trace-events
@@ -11,3 +11,9 @@
hda_audio_format(const char *stream, int chan, const char *fmt, int freq) "st %s, %d x %s @ %d Hz"
hda_audio_adjust(const char *stream, int pos) "st %s, pos %d"
hda_audio_overrun(const char *stream) "st %s"
+
+#via-ac97.c
+via_ac97_codec_write(uint8_t addr, uint16_t val) "0x%x <- 0x%x"
+via_ac97_sgd_fetch(uint32_t curr, uint32_t addr, char stop, char eol, char flag, uint32_t len) "curr=0x%x addr=0x%x %c%c%c len=%d"
+via_ac97_sgd_read(uint64_t addr, unsigned size, uint64_t val) "0x%"PRIx64" %d -> 0x%"PRIx64
+via_ac97_sgd_write(uint64_t addr, unsigned size, uint64_t val) "0x%"PRIx64" %d <- 0x%"PRIx64
diff --git a/hw/audio/via-ac97.c b/hw/audio/via-ac97.c
index d1a856f..676254b 100644
--- a/hw/audio/via-ac97.c
+++ b/hw/audio/via-ac97.c
@@ -1,39 +1,482 @@
/*
* VIA south bridges sound support
*
+ * Copyright (c) 2022-2023 BALATON Zoltan
+ *
* This work is licensed under the GNU GPL license version 2 or later.
*/
/*
- * TODO: This is entirely boiler plate just registering empty PCI devices
- * with the right ID guests expect, functionality should be added here.
+ * TODO: This is only a basic implementation of one audio playback channel
+ * more functionality should be added here.
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "hw/isa/vt82c686.h"
-#include "hw/pci/pci_device.h"
+#include "ac97.h"
+#include "trace.h"
+
+#define CLEN_IS_EOL(x) ((x)->clen & BIT(31))
+#define CLEN_IS_FLAG(x) ((x)->clen & BIT(30))
+#define CLEN_IS_STOP(x) ((x)->clen & BIT(29))
+#define CLEN_LEN(x) ((x)->clen & 0xffffff)
+
+#define STAT_ACTIVE BIT(7)
+#define STAT_PAUSED BIT(6)
+#define STAT_TRIG BIT(3)
+#define STAT_STOP BIT(2)
+#define STAT_EOL BIT(1)
+#define STAT_FLAG BIT(0)
+
+#define CNTL_START BIT(7)
+#define CNTL_TERM BIT(6)
+#define CNTL_PAUSE BIT(3)
+
+static void open_voice_out(ViaAC97State *s);
+
+static uint16_t codec_rates[] = { 8000, 11025, 16000, 22050, 32000, 44100,
+ 48000 };
+
+#define CODEC_REG(s, o) ((s)->codec_regs[(o) / 2])
+#define CODEC_VOL(vol, mask) ((255 * ((vol) & mask)) / mask)
+
+static void codec_volume_set_out(ViaAC97State *s)
+{
+ int lvol, rvol, mute;
+
+ lvol = 255 - CODEC_VOL(CODEC_REG(s, AC97_Master_Volume_Mute) >> 8, 0x1f);
+ lvol *= 255 - CODEC_VOL(CODEC_REG(s, AC97_PCM_Out_Volume_Mute) >> 8, 0x1f);
+ lvol /= 255;
+ rvol = 255 - CODEC_VOL(CODEC_REG(s, AC97_Master_Volume_Mute), 0x1f);
+ rvol *= 255 - CODEC_VOL(CODEC_REG(s, AC97_PCM_Out_Volume_Mute), 0x1f);
+ rvol /= 255;
+ mute = CODEC_REG(s, AC97_Master_Volume_Mute) >> MUTE_SHIFT;
+ mute |= CODEC_REG(s, AC97_PCM_Out_Volume_Mute) >> MUTE_SHIFT;
+ AUD_set_volume_out(s->vo, mute, lvol, rvol);
+}
+
+static void codec_reset(ViaAC97State *s)
+{
+ memset(s->codec_regs, 0, sizeof(s->codec_regs));
+ CODEC_REG(s, AC97_Reset) = 0x6a90;
+ CODEC_REG(s, AC97_Master_Volume_Mute) = 0x8000;
+ CODEC_REG(s, AC97_Headphone_Volume_Mute) = 0x8000;
+ CODEC_REG(s, AC97_Master_Volume_Mono_Mute) = 0x8000;
+ CODEC_REG(s, AC97_Phone_Volume_Mute) = 0x8008;
+ CODEC_REG(s, AC97_Mic_Volume_Mute) = 0x8008;
+ CODEC_REG(s, AC97_Line_In_Volume_Mute) = 0x8808;
+ CODEC_REG(s, AC97_CD_Volume_Mute) = 0x8808;
+ CODEC_REG(s, AC97_Video_Volume_Mute) = 0x8808;
+ CODEC_REG(s, AC97_Aux_Volume_Mute) = 0x8808;
+ CODEC_REG(s, AC97_PCM_Out_Volume_Mute) = 0x8808;
+ CODEC_REG(s, AC97_Record_Gain_Mute) = 0x8000;
+ CODEC_REG(s, AC97_Powerdown_Ctrl_Stat) = 0x000f;
+ CODEC_REG(s, AC97_Extended_Audio_ID) = 0x0a05;
+ CODEC_REG(s, AC97_Extended_Audio_Ctrl_Stat) = 0x0400;
+ CODEC_REG(s, AC97_PCM_Front_DAC_Rate) = 48000;
+ CODEC_REG(s, AC97_PCM_LR_ADC_Rate) = 48000;
+ /* Sigmatel 9766 (STAC9766) */
+ CODEC_REG(s, AC97_Vendor_ID1) = 0x8384;
+ CODEC_REG(s, AC97_Vendor_ID2) = 0x7666;
+}
+
+static uint16_t codec_read(ViaAC97State *s, uint8_t addr)
+{
+ return CODEC_REG(s, addr);
+}
+
+static void codec_write(ViaAC97State *s, uint8_t addr, uint16_t val)
+{
+ trace_via_ac97_codec_write(addr, val);
+ switch (addr) {
+ case AC97_Reset:
+ codec_reset(s);
+ return;
+ case AC97_Master_Volume_Mute:
+ case AC97_PCM_Out_Volume_Mute:
+ if (addr == AC97_Master_Volume_Mute) {
+ if (val & BIT(13)) {
+ val |= 0x1f00;
+ }
+ if (val & BIT(5)) {
+ val |= 0x1f;
+ }
+ }
+ CODEC_REG(s, addr) = val & 0x9f1f;
+ codec_volume_set_out(s);
+ return;
+ case AC97_Extended_Audio_Ctrl_Stat:
+ CODEC_REG(s, addr) &= ~EACS_VRA;
+ CODEC_REG(s, addr) |= val & EACS_VRA;
+ if (!(val & EACS_VRA)) {
+ CODEC_REG(s, AC97_PCM_Front_DAC_Rate) = 48000;
+ CODEC_REG(s, AC97_PCM_LR_ADC_Rate) = 48000;
+ open_voice_out(s);
+ }
+ return;
+ case AC97_PCM_Front_DAC_Rate:
+ case AC97_PCM_LR_ADC_Rate:
+ if (CODEC_REG(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) {
+ int i;
+ uint16_t rate = val;
+
+ for (i = 0; i < ARRAY_SIZE(codec_rates) - 1; i++) {
+ if (rate < codec_rates[i] +
+ (codec_rates[i + 1] - codec_rates[i]) / 2) {
+ rate = codec_rates[i];
+ break;
+ }
+ }
+ if (rate > 48000) {
+ rate = 48000;
+ }
+ CODEC_REG(s, addr) = rate;
+ open_voice_out(s);
+ }
+ return;
+ case AC97_Powerdown_Ctrl_Stat:
+ CODEC_REG(s, addr) = (val & 0xff00) | (CODEC_REG(s, addr) & 0xff);
+ return;
+ case AC97_Extended_Audio_ID:
+ case AC97_Vendor_ID1:
+ case AC97_Vendor_ID2:
+ /* Read only registers */
+ return;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "via-ac97: Unimplemented codec register 0x%x\n", addr);
+ CODEC_REG(s, addr) = val;
+ }
+}
+
+static void fetch_sgd(ViaAC97SGDChannel *c, PCIDevice *d)
+{
+ uint32_t b[2];
+
+ if (c->curr < c->base) {
+ c->curr = c->base;
+ }
+ if (unlikely(pci_dma_read(d, c->curr, b, sizeof(b)) != MEMTX_OK)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "via-ac97: DMA error reading SGD table\n");
+ return;
+ }
+ c->addr = le32_to_cpu(b[0]);
+ c->clen = le32_to_cpu(b[1]);
+ trace_via_ac97_sgd_fetch(c->curr, c->addr, CLEN_IS_STOP(c) ? 'S' : '-',
+ CLEN_IS_EOL(c) ? 'E' : '-',
+ CLEN_IS_FLAG(c) ? 'F' : '-', CLEN_LEN(c));
+}
+
+static void out_cb(void *opaque, int avail)
+{
+ ViaAC97State *s = opaque;
+ ViaAC97SGDChannel *c = &s->aur;
+ int temp, to_copy, copied;
+ bool stop = false;
+ uint8_t tmpbuf[4096];
+
+ if (c->stat & STAT_PAUSED) {
+ return;
+ }
+ c->stat |= STAT_ACTIVE;
+ while (avail && !stop) {
+ if (!c->clen) {
+ fetch_sgd(c, &s->dev);
+ }
+ temp = MIN(CLEN_LEN(c), avail);
+ while (temp) {
+ to_copy = MIN(temp, sizeof(tmpbuf));
+ pci_dma_read(&s->dev, c->addr, tmpbuf, to_copy);
+ copied = AUD_write(s->vo, tmpbuf, to_copy);
+ if (!copied) {
+ stop = true;
+ break;
+ }
+ temp -= copied;
+ avail -= copied;
+ c->addr += copied;
+ c->clen -= copied;
+ }
+ if (CLEN_LEN(c) == 0) {
+ c->curr += 8;
+ if (CLEN_IS_EOL(c)) {
+ c->stat |= STAT_EOL;
+ if (c->type & CNTL_START) {
+ c->curr = c->base;
+ c->stat |= STAT_PAUSED;
+ } else {
+ c->stat &= ~STAT_ACTIVE;
+ AUD_set_active_out(s->vo, 0);
+ }
+ if (c->type & STAT_EOL) {
+ pci_set_irq(&s->dev, 1);
+ }
+ }
+ if (CLEN_IS_FLAG(c)) {
+ c->stat |= STAT_FLAG;
+ c->stat |= STAT_PAUSED;
+ if (c->type & STAT_FLAG) {
+ pci_set_irq(&s->dev, 1);
+ }
+ }
+ if (CLEN_IS_STOP(c)) {
+ c->stat |= STAT_STOP;
+ c->stat |= STAT_PAUSED;
+ }
+ c->clen = 0;
+ stop = true;
+ }
+ }
+}
+
+static void open_voice_out(ViaAC97State *s)
+{
+ struct audsettings as = {
+ .freq = CODEC_REG(s, AC97_PCM_Front_DAC_Rate),
+ .nchannels = s->aur.type & BIT(4) ? 2 : 1,
+ .fmt = s->aur.type & BIT(5) ? AUDIO_FORMAT_S16 : AUDIO_FORMAT_S8,
+ .endianness = 0,
+ };
+ s->vo = AUD_open_out(&s->card, s->vo, "via-ac97.out", s, out_cb, &as);
+}
+
+static uint64_t sgd_read(void *opaque, hwaddr addr, unsigned size)
+{
+ ViaAC97State *s = opaque;
+ uint64_t val = 0;
+
+ switch (addr) {
+ case 0:
+ val = s->aur.stat;
+ if (s->aur.type & CNTL_START) {
+ val |= STAT_TRIG;
+ }
+ break;
+ case 1:
+ val = s->aur.stat & STAT_PAUSED ? BIT(3) : 0;
+ break;
+ case 2:
+ val = s->aur.type;
+ break;
+ case 4:
+ val = s->aur.curr;
+ break;
+ case 0xc:
+ val = CLEN_LEN(&s->aur);
+ break;
+ case 0x10:
+ /* silence unimplemented log message that happens at every IRQ */
+ break;
+ case 0x80:
+ val = s->ac97_cmd;
+ break;
+ case 0x84:
+ val = s->aur.stat & STAT_FLAG;
+ if (s->aur.stat & STAT_EOL) {
+ val |= BIT(4);
+ }
+ if (s->aur.stat & STAT_STOP) {
+ val |= BIT(8);
+ }
+ if (s->aur.stat & STAT_ACTIVE) {
+ val |= BIT(12);
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "via-ac97: Unimplemented register read 0x%"
+ HWADDR_PRIx"\n", addr);
+ }
+ trace_via_ac97_sgd_read(addr, size, val);
+ return val;
+}
+
+static void sgd_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ ViaAC97State *s = opaque;
+
+ trace_via_ac97_sgd_write(addr, size, val);
+ switch (addr) {
+ case 0:
+ if (val & STAT_STOP) {
+ s->aur.stat &= ~STAT_PAUSED;
+ }
+ if (val & STAT_EOL) {
+ s->aur.stat &= ~(STAT_EOL | STAT_PAUSED);
+ if (s->aur.type & STAT_EOL) {
+ pci_set_irq(&s->dev, 0);
+ }
+ }
+ if (val & STAT_FLAG) {
+ s->aur.stat &= ~(STAT_FLAG | STAT_PAUSED);
+ if (s->aur.type & STAT_FLAG) {
+ pci_set_irq(&s->dev, 0);
+ }
+ }
+ break;
+ case 1:
+ if (val & CNTL_START) {
+ AUD_set_active_out(s->vo, 1);
+ s->aur.stat = STAT_ACTIVE;
+ }
+ if (val & CNTL_TERM) {
+ AUD_set_active_out(s->vo, 0);
+ s->aur.stat &= ~(STAT_ACTIVE | STAT_PAUSED);
+ s->aur.clen = 0;
+ }
+ if (val & CNTL_PAUSE) {
+ AUD_set_active_out(s->vo, 0);
+ s->aur.stat &= ~STAT_ACTIVE;
+ s->aur.stat |= STAT_PAUSED;
+ } else if (!(val & CNTL_PAUSE) && (s->aur.stat & STAT_PAUSED)) {
+ AUD_set_active_out(s->vo, 1);
+ s->aur.stat |= STAT_ACTIVE;
+ s->aur.stat &= ~STAT_PAUSED;
+ }
+ break;
+ case 2:
+ {
+ uint32_t oldval = s->aur.type;
+ s->aur.type = val;
+ if ((oldval & 0x30) != (val & 0x30)) {
+ open_voice_out(s);
+ }
+ break;
+ }
+ case 4:
+ s->aur.base = val & ~1ULL;
+ s->aur.curr = s->aur.base;
+ break;
+ case 0x80:
+ if (val >> 30) {
+ /* we only have primary codec */
+ break;
+ }
+ if (val & BIT(23)) { /* read reg */
+ s->ac97_cmd = val & 0xc0ff0000ULL;
+ s->ac97_cmd |= codec_read(s, (val >> 16) & 0x7f);
+ s->ac97_cmd |= BIT(25); /* data valid */
+ } else {
+ s->ac97_cmd = val & 0xc0ffffffULL;
+ codec_write(s, (val >> 16) & 0x7f, val);
+ }
+ break;
+ case 0xc:
+ case 0x84:
+ /* Read only */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "via-ac97: Unimplemented register write 0x%"
+ HWADDR_PRIx"\n", addr);
+ }
+}
+
+static const MemoryRegionOps sgd_ops = {
+ .read = sgd_read,
+ .write = sgd_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t fm_read(void *opaque, hwaddr addr, unsigned size)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d\n", __func__, addr, size);
+ return 0;
+}
+
+static void fm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d <= 0x%"PRIX64"\n",
+ __func__, addr, size, val);
+}
+
+static const MemoryRegionOps fm_ops = {
+ .read = fm_read,
+ .write = fm_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t midi_read(void *opaque, hwaddr addr, unsigned size)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d\n", __func__, addr, size);
+ return 0;
+}
+
+static void midi_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: 0x%"HWADDR_PRIx" %d <= 0x%"PRIX64"\n",
+ __func__, addr, size, val);
+}
+
+static const MemoryRegionOps midi_ops = {
+ .read = midi_read,
+ .write = midi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void via_ac97_reset(DeviceState *dev)
+{
+ ViaAC97State *s = VIA_AC97(dev);
+
+ codec_reset(s);
+}
static void via_ac97_realize(PCIDevice *pci_dev, Error **errp)
{
- pci_set_word(pci_dev->config + PCI_COMMAND,
- PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY);
+ ViaAC97State *s = VIA_AC97(pci_dev);
+ Object *o = OBJECT(s);
+
+ /*
+ * Command register Bus Master bit is documented to be fixed at 0 but it's
+ * needed for PCI DMA to work in QEMU. The pegasos2 firmware writes 0 here
+ * and the AmigaOS driver writes 1 only enabling IO bit which works on
+ * real hardware. So set it here and fix it to 1 to allow DMA.
+ */
+ pci_set_word(pci_dev->config + PCI_COMMAND, PCI_COMMAND_MASTER);
+ pci_set_word(pci_dev->wmask + PCI_COMMAND, PCI_COMMAND_IO);
pci_set_word(pci_dev->config + PCI_STATUS,
PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_MEDIUM);
pci_set_long(pci_dev->config + PCI_INTERRUPT_PIN, 0x03);
+ pci_set_byte(pci_dev->config + 0x40, 1); /* codec ready */
+
+ memory_region_init_io(&s->sgd, o, &sgd_ops, s, "via-ac97.sgd", 256);
+ pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->sgd);
+ memory_region_init_io(&s->fm, o, &fm_ops, s, "via-ac97.fm", 4);
+ pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->fm);
+ memory_region_init_io(&s->midi, o, &midi_ops, s, "via-ac97.midi", 4);
+ pci_register_bar(pci_dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &s->midi);
+
+ AUD_register_card ("via-ac97", &s->card);
}
+static void via_ac97_exit(PCIDevice *dev)
+{
+ ViaAC97State *s = VIA_AC97(dev);
+
+ AUD_close_out(&s->card, s->vo);
+ AUD_remove_card(&s->card);
+}
+
+static Property via_ac97_properties[] = {
+ DEFINE_AUDIO_PROPERTIES(ViaAC97State, card),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void via_ac97_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = via_ac97_realize;
+ k->exit = via_ac97_exit;
k->vendor_id = PCI_VENDOR_ID_VIA;
k->device_id = PCI_DEVICE_ID_VIA_AC97;
k->revision = 0x50;
k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO;
+ device_class_set_props(dc, via_ac97_properties);
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
dc->desc = "VIA AC97";
+ dc->reset = via_ac97_reset;
/* Reason: Part of a south bridge chip */
dc->user_creatable = false;
}
@@ -41,7 +484,7 @@
static const TypeInfo via_ac97_info = {
.name = TYPE_VIA_AC97,
.parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PCIDevice),
+ .instance_size = sizeof(ViaAC97State),
.class_init = via_ac97_class_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
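For context on the playback path added above: fetch_sgd() walks a guest-built scatter-gather (SGD) table of 8-byte entries, each two little-endian 32-bit words, a buffer address followed by a "clen" word whose low 24 bits are the length and whose top bits carry the EOL (bit 31), FLAG (bit 30) and STOP (bit 29) markers that out_cb() acts on. A sketch of the in-memory layout the device expects (buffer addresses are made up, not from the patch):

    /* Two-entry SGD table as seen in guest memory: { address, flags|length }
     * pairs, little-endian, 8 bytes per entry. */
    uint32_t sgd_table[4] = {
        cpu_to_le32(0x10000),                  /* entry 0: buffer address   */
        cpu_to_le32(4096),                     /*          length, no flags */
        cpu_to_le32(0x20000),                  /* entry 1: buffer address   */
        cpu_to_le32(BIT(31) | BIT(30) | 2048), /*          EOL | FLAG, len  */
    };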
diff --git a/hw/block/block.c b/hw/block/block.c
index af0710e..9f52ee6 100644
--- a/hw/block/block.c
+++ b/hw/block/block.c
@@ -39,8 +39,7 @@
return ret;
}
if (!(ret & BDRV_BLOCK_ZERO)) {
- ret = bdrv_pread(bs->file, offset, bytes,
- (uint8_t *) buf + offset, 0);
+ ret = blk_pread(blk, offset, bytes, (uint8_t *) buf + offset, 0);
if (ret < 0) {
return ret;
}
diff --git a/hw/block/dataplane/meson.build b/hw/block/dataplane/meson.build
index 12c6a26..78d7ac1 100644
--- a/hw/block/dataplane/meson.build
+++ b/hw/block/dataplane/meson.build
@@ -1,2 +1,2 @@
specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c'))
-specific_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c'))
+specific_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c'))
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index 2785b9e..734da42 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -23,8 +23,9 @@
#include "qemu/main-loop.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen.h"
#include "hw/block/xen_blkif.h"
+#include "hw/xen/interface/io/ring.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"
@@ -101,9 +102,9 @@
* re-use requests, allocate the memory once here. It will be freed
* xen_block_dataplane_destroy() when the request list is freed.
*/
- request->buf = qemu_memalign(XC_PAGE_SIZE,
+ request->buf = qemu_memalign(XEN_PAGE_SIZE,
BLKIF_MAX_SEGMENTS_PER_REQUEST *
- XC_PAGE_SIZE);
+ XEN_PAGE_SIZE);
dataplane->requests_total++;
qemu_iovec_init(&request->v, 1);
} else {
@@ -185,7 +186,7 @@
goto err;
}
if (request->req.seg[i].last_sect * dataplane->sector_size >=
- XC_PAGE_SIZE) {
+ XEN_PAGE_SIZE) {
error_report("error: page crossing");
goto err;
}
@@ -705,6 +706,7 @@
Error *local_err = NULL;
xen_device_unmap_grant_refs(xendev, dataplane->sring,
+ dataplane->ring_ref,
dataplane->nr_ring_ref, &local_err);
dataplane->sring = NULL;
@@ -739,7 +741,7 @@
dataplane->protocol = protocol;
- ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
+ ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
switch (dataplane->protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 802d2eb..dc5ffbc 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -24,6 +24,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "sysemu/block-backend.h"
+#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/ssi/ssi.h"
@@ -1615,8 +1616,7 @@
trace_m25p80_binding(s);
s->storage = blk_blockalign(s->blk, s->size);
- if (blk_pread(s->blk, 0, s->size, s->storage, 0) < 0) {
- error_setg(errp, "failed to read the initial flash content");
+ if (!blk_check_size_and_read_all(s->blk, s->storage, s->size, errp)) {
return;
}
} else {
diff --git a/hw/block/meson.build b/hw/block/meson.build
index b434d56..cc2a75c 100644
--- a/hw/block/meson.build
+++ b/hw/block/meson.build
@@ -14,7 +14,7 @@
softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c'))
softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c'))
softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c'))
-softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c'))
+softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c'))
softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c'))
specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c'))
diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c
index 345b284..f5a7445 100644
--- a/hw/block/xen-block.c
+++ b/hw/block/xen-block.c
@@ -19,7 +19,6 @@
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qom/object_interfaces.h"
-#include "hw/xen/xen_common.h"
#include "hw/block/xen_blkif.h"
#include "hw/qdev-properties.h"
#include "hw/xen/xen-block.h"
@@ -84,7 +83,8 @@
g_free(ring_ref);
return;
}
- } else if (order <= blockdev->props.max_ring_page_order) {
+ } else if (qemu_xen_gnttab_can_map_multi() &&
+ order <= blockdev->props.max_ring_page_order) {
unsigned int i;
nr_ring_ref = 1 << order;
@@ -256,8 +256,12 @@
}
xen_device_backend_printf(xendev, "feature-flush-cache", "%u", 1);
- xen_device_backend_printf(xendev, "max-ring-page-order", "%u",
- blockdev->props.max_ring_page_order);
+
+ if (qemu_xen_gnttab_can_map_multi()) {
+ xen_device_backend_printf(xendev, "max-ring-page-order", "%u",
+ blockdev->props.max_ring_page_order);
+ }
+
xen_device_backend_printf(xendev, "info", "%u", blockdev->info);
xen_device_frontend_printf(xendev, "virtual-device", "%lu",
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index c069a30..807e398 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -450,13 +450,15 @@
}
break;
case R_BRGR: /* Baud rate generator */
+ value &= 0xffff;
if (value >= 0x01) {
- s->r[offset] = value & 0xFFFF;
+ s->r[offset] = value;
}
break;
case R_BDIV: /* Baud rate divider */
+ value &= 0xff;
if (value >= 0x04) {
- s->r[offset] = value & 0xFF;
+ s->r[offset] = value;
}
break;
default:
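The reordering in the cadence_uart.c hunk is behavioural, not cosmetic: masking before the range check changes which writes are accepted. Previously a write of, say, 0x10000 to R_BRGR passed the ">= 0x01" test and then stored (0x10000 & 0xFFFF) == 0, an out-of-range divisor; with the mask applied first the same write is simply ignored. Condensed illustration (the value is a made-up guest write):

    uint32_t value = 0x10000;               /* guest write to R_BRGR */

    /* old order: check, then mask - the register ends up holding 0 */
    if (value >= 0x01) {
        /* s->r[offset] = value & 0xFFFF;      stores 0 */
    }

    /* new order: mask, then check - the write is rejected */
    value &= 0xffff;                        /* 0 */
    if (value >= 0x01) {
        /* not reached; the register keeps its previous value */
    }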
diff --git a/hw/char/meson.build b/hw/char/meson.build
index 7b594f5..e02c60d 100644
--- a/hw/char/meson.build
+++ b/hw/char/meson.build
@@ -18,7 +18,7 @@
softmmu_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c'))
softmmu_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c'))
softmmu_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c'))
-softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen_console.c'))
+softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen_console.c'))
softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c'))
softmmu_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c'))
diff --git a/hw/char/parallel.c b/hw/char/parallel.c
index 1c9ca47..af551e7 100644
--- a/hw/char/parallel.c
+++ b/hw/char/parallel.c
@@ -57,22 +57,22 @@
/*
* These are the definitions for the Printer Status Register
*/
-#define PARA_STS_BUSY 0x80 /* Busy complement */
-#define PARA_STS_ACK 0x40 /* Acknowledge */
-#define PARA_STS_PAPER 0x20 /* Out of paper */
-#define PARA_STS_ONLINE 0x10 /* Online */
-#define PARA_STS_ERROR 0x08 /* Error complement */
-#define PARA_STS_TMOUT 0x01 /* EPP timeout */
+#define PARA_STS_BUSY 0x80 /* Busy complement */
+#define PARA_STS_ACK 0x40 /* Acknowledge */
+#define PARA_STS_PAPER 0x20 /* Out of paper */
+#define PARA_STS_ONLINE 0x10 /* Online */
+#define PARA_STS_ERROR 0x08 /* Error complement */
+#define PARA_STS_TMOUT 0x01 /* EPP timeout */
/*
* These are the definitions for the Printer Control Register
*/
-#define PARA_CTR_DIR 0x20 /* Direction (1=read, 0=write) */
-#define PARA_CTR_INTEN 0x10 /* IRQ Enable */
-#define PARA_CTR_SELECT 0x08 /* Select In complement */
-#define PARA_CTR_INIT 0x04 /* Initialize Printer complement */
-#define PARA_CTR_AUTOLF 0x02 /* Auto linefeed complement */
-#define PARA_CTR_STROBE 0x01 /* Strobe complement */
+#define PARA_CTR_DIR 0x20 /* Direction (1=read, 0=write) */
+#define PARA_CTR_INTEN 0x10 /* IRQ Enable */
+#define PARA_CTR_SELECT 0x08 /* Select In complement */
+#define PARA_CTR_INIT 0x04 /* Initialize Printer complement */
+#define PARA_CTR_AUTOLF 0x02 /* Auto linefeed complement */
+#define PARA_CTR_STROBE 0x01 /* Strobe complement */
#define PARA_CTR_SIGNAL (PARA_CTR_SELECT|PARA_CTR_INIT|PARA_CTR_AUTOLF|PARA_CTR_STROBE)
diff --git a/hw/char/serial.c b/hw/char/serial.c
index 41b5e61..270e1b1 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -38,20 +38,20 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
-#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
+#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
-#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */
-#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */
-#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */
-#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */
+#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */
+#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */
+#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */
+#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */
-#define UART_IIR_NO_INT 0x01 /* No interrupts pending */
-#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */
+#define UART_IIR_NO_INT 0x01 /* No interrupts pending */
+#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */
-#define UART_IIR_MSI 0x00 /* Modem status interrupt */
-#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */
-#define UART_IIR_RDI 0x04 /* Receiver data interrupt */
-#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */
+#define UART_IIR_MSI 0x00 /* Modem status interrupt */
+#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */
+#define UART_IIR_RDI 0x04 /* Receiver data interrupt */
+#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */
#define UART_IIR_CTI 0x0C /* Character Timeout Indication */
#define UART_IIR_FENF 0x80 /* Fifo enabled, but not functionning */
@@ -60,33 +60,33 @@
/*
* These are the definitions for the Modem Control Register
*/
-#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */
-#define UART_MCR_OUT2 0x08 /* Out2 complement */
-#define UART_MCR_OUT1 0x04 /* Out1 complement */
-#define UART_MCR_RTS 0x02 /* RTS complement */
-#define UART_MCR_DTR 0x01 /* DTR complement */
+#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */
+#define UART_MCR_OUT2 0x08 /* Out2 complement */
+#define UART_MCR_OUT1 0x04 /* Out1 complement */
+#define UART_MCR_RTS 0x02 /* RTS complement */
+#define UART_MCR_DTR 0x01 /* DTR complement */
/*
* These are the definitions for the Modem Status Register
*/
-#define UART_MSR_DCD 0x80 /* Data Carrier Detect */
-#define UART_MSR_RI 0x40 /* Ring Indicator */
-#define UART_MSR_DSR 0x20 /* Data Set Ready */
-#define UART_MSR_CTS 0x10 /* Clear to Send */
-#define UART_MSR_DDCD 0x08 /* Delta DCD */
-#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */
-#define UART_MSR_DDSR 0x02 /* Delta DSR */
-#define UART_MSR_DCTS 0x01 /* Delta CTS */
-#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */
+#define UART_MSR_DCD 0x80 /* Data Carrier Detect */
+#define UART_MSR_RI 0x40 /* Ring Indicator */
+#define UART_MSR_DSR 0x20 /* Data Set Ready */
+#define UART_MSR_CTS 0x10 /* Clear to Send */
+#define UART_MSR_DDCD 0x08 /* Delta DCD */
+#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */
+#define UART_MSR_DDSR 0x02 /* Delta DSR */
+#define UART_MSR_DCTS 0x01 /* Delta CTS */
+#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */
-#define UART_LSR_TEMT 0x40 /* Transmitter empty */
-#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
-#define UART_LSR_BI 0x10 /* Break interrupt indicator */
-#define UART_LSR_FE 0x08 /* Frame error indicator */
-#define UART_LSR_PE 0x04 /* Parity error indicator */
-#define UART_LSR_OE 0x02 /* Overrun error indicator */
-#define UART_LSR_DR 0x01 /* Receiver data ready */
-#define UART_LSR_INT_ANY 0x1E /* Any of the lsr-interrupt-triggering status bits */
+#define UART_LSR_TEMT 0x40 /* Transmitter empty */
+#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
+#define UART_LSR_BI 0x10 /* Break interrupt indicator */
+#define UART_LSR_FE 0x08 /* Frame error indicator */
+#define UART_LSR_PE 0x04 /* Parity error indicator */
+#define UART_LSR_OE 0x02 /* Overrun error indicator */
+#define UART_LSR_DR 0x01 /* Receiver data ready */
+#define UART_LSR_INT_ANY 0x1E /* Any of the lsr-interrupt-triggering status bits */
/* Interrupt trigger levels. The byte-counts are for 16550A - in newer UARTs the byte-count for each ITL is higher. */
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index 63153df..c7a19c0 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -173,6 +173,48 @@
/* -------------------------------------------------------------------- */
+static int store_con_info(struct XenConsole *con)
+{
+ Chardev *cs = qemu_chr_fe_get_driver(&con->chr);
+ char *pts = NULL;
+ char *dom_path;
+ GString *path;
+ int ret = -1;
+
+ /* Only continue if we're talking to a pty. */
+ if (!CHARDEV_IS_PTY(cs)) {
+ return 0;
+ }
+ pts = cs->filename + 4;
+
+ dom_path = qemu_xen_xs_get_domain_path(xenstore, xen_domid);
+ if (!dom_path) {
+ return 0;
+ }
+
+ path = g_string_new(dom_path);
+ free(dom_path);
+
+ if (con->xendev.dev) {
+ g_string_append_printf(path, "/device/console/%d", con->xendev.dev);
+ } else {
+ g_string_append(path, "/console");
+ }
+ g_string_append(path, "/tty");
+
+ if (xenstore_write_str(con->console, path->str, pts)) {
+ fprintf(stderr, "xenstore_write_str for '%s' fail", path->str);
+ goto out;
+ }
+ ret = 0;
+
+out:
+ g_string_free(path, true);
+ free(path);
+
+ return ret;
+}
+
static int con_init(struct XenLegacyDevice *xendev)
{
struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
@@ -181,7 +223,7 @@
const char *output;
/* setup */
- dom = xs_get_domain_path(xenstore, con->xendev.dom);
+ dom = qemu_xen_xs_get_domain_path(xenstore, con->xendev.dom);
if (!xendev->dev) {
snprintf(con->console, sizeof(con->console), "%s/console", dom);
} else {
@@ -215,8 +257,7 @@
&error_abort);
}
- xenstore_store_pv_console_info(con->xendev.dev,
- qemu_chr_fe_get_driver(&con->chr));
+ store_con_info(con);
out:
g_free(type);
@@ -237,9 +278,9 @@
if (!xendev->dev) {
xen_pfn_t mfn = con->ring_ref;
- con->sring = xenforeignmemory_map(xen_fmem, con->xendev.dom,
- PROT_READ | PROT_WRITE,
- 1, &mfn, NULL);
+ con->sring = qemu_xen_foreignmem_map(con->xendev.dom, NULL,
+ PROT_READ | PROT_WRITE,
+ 1, &mfn, NULL);
} else {
con->sring = xen_be_map_grant_ref(xendev, con->ring_ref,
PROT_READ | PROT_WRITE);
@@ -269,9 +310,9 @@
if (con->sring) {
if (!xendev->dev) {
- xenforeignmemory_unmap(xen_fmem, con->sring, 1);
+ qemu_xen_foreignmem_unmap(con->sring, 1);
} else {
- xen_be_unmap_grant_ref(xendev, con->sring);
+ xen_be_unmap_grant_ref(xendev, con->sring, con->ring_ref);
}
con->sring = NULL;
}
diff --git a/hw/core/loader.c b/hw/core/loader.c
index cd53235..8b7fd9e 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -35,7 +35,7 @@
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
@@ -44,6 +44,7 @@
#include "qemu/osdep.h"
#include "qemu/datadir.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
@@ -210,8 +211,8 @@
#define ZMAGIC 0413
#define QMAGIC 0314
#define _N_HDROFF(x) (1024 - sizeof (struct exec))
-#define N_TXTOFF(x) \
- (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : \
+#define N_TXTOFF(x) \
+ (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : \
(N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec)))
#define N_TXTADDR(x, target_page_size) (N_MAGIC(x) == QMAGIC ? target_page_size : 0)
#define _N_SEGMENT_ROUND(x, target_page_size) (((x) + target_page_size - 1) & ~(target_page_size - 1))
@@ -300,10 +301,10 @@
#define ELF_CLASS ELFCLASS32
#include "elf.h"
-#define SZ 32
+#define SZ 32
#define elf_word uint32_t
-#define elf_sword int32_t
-#define bswapSZs bswap32s
+#define elf_sword int32_t
+#define bswapSZs bswap32s
#include "hw/elf_ops.h"
#undef elfhdr
@@ -316,16 +317,16 @@
#undef elf_sword
#undef bswapSZs
#undef SZ
-#define elfhdr elf64_hdr
-#define elf_phdr elf64_phdr
-#define elf_note elf64_note
-#define elf_shdr elf64_shdr
-#define elf_sym elf64_sym
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_note elf64_note
+#define elf_shdr elf64_shdr
+#define elf_sym elf64_sym
#define elf_rela elf64_rela
#define elf_word uint64_t
-#define elf_sword int64_t
-#define bswapSZs bswap64s
-#define SZ 64
+#define elf_sword int64_t
+#define bswapSZs bswap64s
+#define SZ 64
#include "hw/elf_ops.h"
const char *load_elf_strerror(ssize_t error)
@@ -527,7 +528,7 @@
}
-#define ZALLOC_ALIGNMENT 16
+#define ZALLOC_ALIGNMENT 16
static void *zalloc(void *x, unsigned items, unsigned size)
{
@@ -547,13 +548,13 @@
}
-#define HEAD_CRC 2
-#define EXTRA_FIELD 4
-#define ORIG_NAME 8
-#define COMMENT 0x10
-#define RESERVED 0xe0
+#define HEAD_CRC 2
+#define EXTRA_FIELD 4
+#define ORIG_NAME 8
+#define COMMENT 0x10
+#define RESERVED 0xe0
-#define DEFLATED 8
+#define DEFLATED 8
ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen)
{
diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c
index c3dab00..89fe0cd 100644
--- a/hw/core/machine-smp.c
+++ b/hw/core/machine-smp.c
@@ -20,6 +20,8 @@
#include "qemu/osdep.h"
#include "hw/boards.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
+
/*
* Report information of a machine's supported CPU topology hierarchy.
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 1cf6822..45e3d24 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -40,6 +40,7 @@
#include "hw/virtio/virtio-pci.h"
GlobalProperty hw_compat_7_2[] = {
+ { "e1000e", "migrate-timadj", "off" },
{ "virtio-mem", "x-early-migration", "false" },
};
const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2);
diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c
index 3edd303..b665d4f 100644
--- a/hw/cxl/cxl-component-utils.c
+++ b/hw/cxl/cxl-component-utils.c
@@ -141,17 +141,19 @@
* Error status is RW1C but given bits are not yet set, it can
* be handled as RO.
*/
- reg_state[R_CXL_RAS_UNC_ERR_STATUS] = 0;
+ stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
+ stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
/* Bits 12-13 and 17-31 reserved in CXL 2.0 */
- reg_state[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff;
- write_msk[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff;
- reg_state[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff;
- write_msk[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff;
- reg_state[R_CXL_RAS_COR_ERR_STATUS] = 0;
- reg_state[R_CXL_RAS_COR_ERR_MASK] = 0x7f;
- write_msk[R_CXL_RAS_COR_ERR_MASK] = 0x7f;
+ stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
+ stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
+ stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
+ stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
+ stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
+ stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
+ stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
+ stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
/* CXL switches and devices must set */
- reg_state[R_CXL_RAS_ERR_CAP_CTRL] = 0x00;
+ stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}
static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
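Two things happen in the cxl-component-utils.c hunk: the RAS defaults change (the uncorrectable and correctable error status registers gain write masks, and ERR_CAP_CTRL now resets to 0x200), and the stores switch from direct array assignment to stl_le_p(), which writes the 32-bit value in little-endian byte order independent of host endianness, presumably to keep the guest-visible layout stable on big-endian hosts. The R_* register macros are 32-bit word indices, so plain pointer arithmetic on the uint32_t array addresses the right register, e.g. (sketch, hypothetical helper name):

    static void ras_init_one(uint32_t *reg_state)
    {
        /* R_CXL_RAS_UNC_ERR_SEVERITY is a word index into the register
         * array; stl_le_p() stores little-endian regardless of host order. */
        stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    }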
diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c
index 3c1ec87..6e923ce 100644
--- a/hw/cxl/cxl-host.c
+++ b/hw/cxl/cxl-host.c
@@ -146,21 +146,28 @@
return NULL;
}
- hb_cstate = cxl_get_hb_cstate(hb);
- if (!hb_cstate) {
- return NULL;
- }
+ if (cxl_get_hb_passthrough(hb)) {
+ rp = pcie_find_port_first(hb->bus);
+ if (!rp) {
+ return NULL;
+ }
+ } else {
+ hb_cstate = cxl_get_hb_cstate(hb);
+ if (!hb_cstate) {
+ return NULL;
+ }
- cache_mem = hb_cstate->crb.cache_mem_registers;
+ cache_mem = hb_cstate->crb.cache_mem_registers;
- target_found = cxl_hdm_find_target(cache_mem, addr, &target);
- if (!target_found) {
- return NULL;
- }
+ target_found = cxl_hdm_find_target(cache_mem, addr, &target);
+ if (!target_found) {
+ return NULL;
+ }
- rp = pcie_find_port_by_pn(hb->bus, target);
- if (!rp) {
- return NULL;
+ rp = pcie_find_port_by_pn(hb->bus, target);
+ if (!rp) {
+ return NULL;
+ }
}
d = pci_bridge_get_sec_bus(PCI_BRIDGE(rp))->devices[0];
diff --git a/hw/display/meson.build b/hw/display/meson.build
index f470179..4191694 100644
--- a/hw/display/meson.build
+++ b/hw/display/meson.build
@@ -14,7 +14,7 @@
softmmu_ss.add(when: 'CONFIG_SII9022', if_true: files('sii9022.c'))
softmmu_ss.add(when: 'CONFIG_SSD0303', if_true: files('ssd0303.c'))
softmmu_ss.add(when: 'CONFIG_SSD0323', if_true: files('ssd0323.c'))
-softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xenfb.c'))
+softmmu_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xenfb.c'))
softmmu_ss.add(when: 'CONFIG_VGA_PCI', if_true: files('vga-pci.c'))
softmmu_ss.add(when: 'CONFIG_VGA_ISA', if_true: files('vga-isa.c'))
diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
index fcfd40c..ec99ec8 100644
--- a/hw/display/qxl-render.c
+++ b/hw/display/qxl-render.c
@@ -290,7 +290,7 @@
return c;
fail:
- cursor_put(c);
+ cursor_unref(c);
return NULL;
}
@@ -336,7 +336,7 @@
}
qemu_mutex_lock(&qxl->ssd.lock);
if (qxl->ssd.cursor) {
- cursor_put(qxl->ssd.cursor);
+ cursor_unref(qxl->ssd.cursor);
}
qxl->ssd.cursor = c;
qxl->ssd.mouse_x = cmd->u.set.position.x;
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index ec712d3..80ce1e9 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -299,7 +299,7 @@
qxl->guest_cursor = 0;
qemu_mutex_unlock(&qxl->track_lock);
if (qxl->ssd.cursor) {
- cursor_put(qxl->ssd.cursor);
+ cursor_unref(qxl->ssd.cursor);
}
qxl->ssd.cursor = cursor_builtin_hidden();
}
diff --git a/hw/display/sm501.c b/hw/display/sm501.c
index 1783515..dbabbc4 100644
--- a/hw/display/sm501.c
+++ b/hw/display/sm501.c
@@ -465,6 +465,7 @@
uint32_t last_width;
uint32_t last_height;
bool do_full_update; /* perform a full update next time */
+ uint8_t use_pixman;
I2CBus *i2c_bus;
/* mmio registers */
@@ -827,7 +828,7 @@
de = db + (width + (height - 1) * dst_pitch) * bypp;
overlap = (db < se && sb < de);
}
- if (overlap) {
+ if (overlap && (s->use_pixman & BIT(2))) {
/* pixman can't do reverse blit: copy via temporary */
int tmp_stride = DIV_ROUND_UP(width * bypp, sizeof(uint32_t));
uint32_t *tmp = tmp_buf;
@@ -852,13 +853,15 @@
if (tmp != tmp_buf) {
g_free(tmp);
}
- } else {
+ } else if (!overlap && (s->use_pixman & BIT(1))) {
fallback = !pixman_blt((uint32_t *)&s->local_mem[src_base],
(uint32_t *)&s->local_mem[dst_base],
src_pitch * bypp / sizeof(uint32_t),
dst_pitch * bypp / sizeof(uint32_t),
8 * bypp, 8 * bypp, src_x, src_y,
dst_x, dst_y, width, height);
+ } else {
+ fallback = true;
}
if (fallback) {
uint8_t *sp = s->local_mem + src_base;
@@ -891,7 +894,7 @@
color = cpu_to_le16(color);
}
- if ((width == 1 && height == 1) ||
+ if (!(s->use_pixman & BIT(0)) || (width == 1 && height == 1) ||
!pixman_fill((uint32_t *)&s->local_mem[dst_base],
dst_pitch * bypp / sizeof(uint32_t), 8 * bypp,
dst_x, dst_y, width, height, color)) {
@@ -2035,6 +2038,7 @@
static Property sm501_sysbus_properties[] = {
DEFINE_PROP_UINT32("vram-size", SM501SysBusState, vram_size, 0),
+ DEFINE_PROP_UINT8("x-pixman", SM501SysBusState, state.use_pixman, 7),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2122,6 +2126,7 @@
static Property sm501_pci_properties[] = {
DEFINE_PROP_UINT32("vram-size", SM501PCIState, vram_size, 64 * MiB),
+ DEFINE_PROP_UINT8("x-pixman", SM501PCIState, state.use_pixman, 7),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2162,11 +2167,18 @@
dc->vmsd = &vmstate_sm501_pci;
}
+static void sm501_pci_init(Object *o)
+{
+ object_property_set_description(o, "x-pixman", "Use pixman for: "
+ "1: fill, 2: blit, 4: overlap blit");
+}
+
static const TypeInfo sm501_pci_info = {
.name = TYPE_PCI_SM501,
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(SM501PCIState),
.class_init = sm501_pci_class_init,
+ .instance_init = sm501_pci_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index 59ae7f7..09591fb 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -550,12 +550,12 @@
default:
fprintf(stderr, "%s: unhandled bpp %d, using fallback cursor\n",
__func__, c->bpp);
- cursor_put(qc);
+ cursor_unref(qc);
qc = cursor_builtin_left_ptr();
}
dpy_cursor_define(s->vga.con, qc);
- cursor_put(qc);
+ cursor_unref(qc);
}
#endif
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 260eb38..0074a9b 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -98,8 +98,9 @@
if (xenstore_read_fe_int(&c->xendev, "event-channel", &c->xendev.remote_port) == -1)
return -1;
- c->page = xenforeignmemory_map(xen_fmem, c->xendev.dom,
- PROT_READ | PROT_WRITE, 1, &mfn, NULL);
+ c->page = qemu_xen_foreignmem_map(c->xendev.dom, NULL,
+ PROT_READ | PROT_WRITE, 1, &mfn,
+ NULL);
if (c->page == NULL)
return -1;
@@ -115,7 +116,7 @@
{
xen_pv_unbind_evtchn(&c->xendev);
if (c->page) {
- xenforeignmemory_unmap(xen_fmem, c->page, 1);
+ qemu_xen_foreignmem_unmap(c->page, 1);
c->page = NULL;
}
}
@@ -488,27 +489,28 @@
}
if (xenfb->pixels) {
- munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE);
+ munmap(xenfb->pixels, xenfb->fbpages * XEN_PAGE_SIZE);
xenfb->pixels = NULL;
}
- xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE);
+ xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XEN_PAGE_SIZE);
n_fbdirs = xenfb->fbpages * mode / 8;
- n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE);
+ n_fbdirs = DIV_ROUND_UP(n_fbdirs, XEN_PAGE_SIZE);
pgmfns = g_new0(xen_pfn_t, n_fbdirs);
fbmfns = g_new0(xen_pfn_t, xenfb->fbpages);
xenfb_copy_mfns(mode, n_fbdirs, pgmfns, pd);
- map = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom,
- PROT_READ, n_fbdirs, pgmfns, NULL);
+ map = qemu_xen_foreignmem_map(xenfb->c.xendev.dom, NULL, PROT_READ,
+ n_fbdirs, pgmfns, NULL);
if (map == NULL)
goto out;
xenfb_copy_mfns(mode, xenfb->fbpages, fbmfns, map);
- xenforeignmemory_unmap(xen_fmem, map, n_fbdirs);
+ qemu_xen_foreignmem_unmap(map, n_fbdirs);
- xenfb->pixels = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom,
- PROT_READ, xenfb->fbpages, fbmfns, NULL);
+ xenfb->pixels = qemu_xen_foreignmem_map(xenfb->c.xendev.dom, NULL,
+ PROT_READ, xenfb->fbpages,
+ fbmfns, NULL);
if (xenfb->pixels == NULL)
goto out;
@@ -526,8 +528,8 @@
{
size_t mfn_sz = sizeof_field(struct xenfb_page, pd[0]);
size_t pd_len = sizeof_field(struct xenfb_page, pd) / mfn_sz;
- size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz;
- size_t fb_len_max = fb_pages * XC_PAGE_SIZE;
+ size_t fb_pages = pd_len * XEN_PAGE_SIZE / mfn_sz;
+ size_t fb_len_max = fb_pages * XEN_PAGE_SIZE;
int max_width, max_height;
if (fb_len_lim > fb_len_max) {
@@ -927,8 +929,8 @@
* Replacing the framebuffer with anonymous shared memory
* instead. This releases the guest pages and keeps qemu happy.
*/
- xenforeignmemory_unmap(xen_fmem, fb->pixels, fb->fbpages);
- fb->pixels = mmap(fb->pixels, fb->fbpages * XC_PAGE_SIZE,
+ qemu_xen_foreignmem_unmap(fb->pixels, fb->fbpages);
+ fb->pixels = mmap(fb->pixels, fb->fbpages * XEN_PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON,
-1, 0);
if (fb->pixels == MAP_FAILED) {
diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c
index 0fef00c..a1068b1 100644
--- a/hw/dma/etraxfs_dma.c
+++ b/hw/dma/etraxfs_dma.c
@@ -160,38 +160,38 @@
enum dma_ch_state
{
- RST = 1,
- STOPPED = 2,
- RUNNING = 4
+ RST = 1,
+ STOPPED = 2,
+ RUNNING = 4
};
struct fs_dma_channel
{
- qemu_irq irq;
- struct etraxfs_dma_client *client;
+ qemu_irq irq;
+ struct etraxfs_dma_client *client;
- /* Internal status. */
- int stream_cmd_src;
- enum dma_ch_state state;
+ /* Internal status. */
+ int stream_cmd_src;
+ enum dma_ch_state state;
- unsigned int input : 1;
- unsigned int eol : 1;
+ unsigned int input : 1;
+ unsigned int eol : 1;
- struct dma_descr_group current_g;
- struct dma_descr_context current_c;
- struct dma_descr_data current_d;
+ struct dma_descr_group current_g;
+ struct dma_descr_context current_c;
+ struct dma_descr_data current_d;
- /* Control registers. */
- uint32_t regs[DMA_REG_MAX];
+ /* Control registers. */
+ uint32_t regs[DMA_REG_MAX];
};
struct fs_dma_ctrl
{
- MemoryRegion mmio;
- int nr_channels;
- struct fs_dma_channel *channels;
+ MemoryRegion mmio;
+ int nr_channels;
+ struct fs_dma_channel *channels;
- QEMUBH *bh;
+ QEMUBH *bh;
};
static void DMA_run(void *opaque);
@@ -199,72 +199,72 @@
static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
{
- return ctrl->channels[c].regs[reg];
+ return ctrl->channels[c].regs[reg];
}
static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
{
- return channel_reg(ctrl, c, RW_CFG) & 2;
+ return channel_reg(ctrl, c, RW_CFG) & 2;
}
static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
{
- return (channel_reg(ctrl, c, RW_CFG) & 1)
- && ctrl->channels[c].client;
+ return (channel_reg(ctrl, c, RW_CFG) & 1)
+ && ctrl->channels[c].client;
}
static inline int fs_channel(hwaddr addr)
{
- /* Every channel has a 0x2000 ctrl register map. */
- return addr >> 13;
+ /* Every channel has a 0x2000 ctrl register map. */
+ return addr >> 13;
}
#ifdef USE_THIS_DEAD_CODE
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
- hwaddr addr = channel_reg(ctrl, c, RW_GROUP);
+ hwaddr addr = channel_reg(ctrl, c, RW_GROUP);
- /* Load and decode. FIXME: handle endianness. */
+ /* Load and decode. FIXME: handle endianness. */
cpu_physical_memory_read(addr, &ctrl->channels[c].current_g,
sizeof(ctrl->channels[c].current_g));
}
static void dump_c(int ch, struct dma_descr_context *c)
{
- printf("%s ch=%d\n", __func__, ch);
- printf("next=%x\n", c->next);
- printf("saved_data=%x\n", c->saved_data);
- printf("saved_data_buf=%x\n", c->saved_data_buf);
- printf("eol=%x\n", (uint32_t) c->eol);
+ printf("%s ch=%d\n", __func__, ch);
+ printf("next=%x\n", c->next);
+ printf("saved_data=%x\n", c->saved_data);
+ printf("saved_data_buf=%x\n", c->saved_data_buf);
+ printf("eol=%x\n", (uint32_t) c->eol);
}
static void dump_d(int ch, struct dma_descr_data *d)
{
- printf("%s ch=%d\n", __func__, ch);
- printf("next=%x\n", d->next);
- printf("buf=%x\n", d->buf);
- printf("after=%x\n", d->after);
- printf("intr=%x\n", (uint32_t) d->intr);
- printf("out_eop=%x\n", (uint32_t) d->out_eop);
- printf("in_eop=%x\n", (uint32_t) d->in_eop);
- printf("eol=%x\n", (uint32_t) d->eol);
+ printf("%s ch=%d\n", __func__, ch);
+ printf("next=%x\n", d->next);
+ printf("buf=%x\n", d->buf);
+ printf("after=%x\n", d->after);
+ printf("intr=%x\n", (uint32_t) d->intr);
+ printf("out_eop=%x\n", (uint32_t) d->out_eop);
+ printf("in_eop=%x\n", (uint32_t) d->in_eop);
+ printf("eol=%x\n", (uint32_t) d->eol);
}
#endif
static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
- hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+ hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
- /* Load and decode. FIXME: handle endianness. */
+ /* Load and decode. FIXME: handle endianness. */
cpu_physical_memory_read(addr, &ctrl->channels[c].current_c,
sizeof(ctrl->channels[c].current_c));
- D(dump_c(c, &ctrl->channels[c].current_c));
- /* I guess this should update the current pos. */
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
+ D(dump_c(c, &ctrl->channels[c].current_c));
+ /* I guess this should update the current pos. */
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
}
static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
@@ -303,273 +303,273 @@
static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
- /* FIXME: */
+ /* FIXME: */
}
static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
{
- if (ctrl->channels[c].client)
- {
- ctrl->channels[c].eol = 0;
- ctrl->channels[c].state = RUNNING;
- if (!ctrl->channels[c].input)
- channel_out_run(ctrl, c);
- } else
- printf("WARNING: starting DMA ch %d with no client\n", c);
+ if (ctrl->channels[c].client)
+ {
+ ctrl->channels[c].eol = 0;
+ ctrl->channels[c].state = RUNNING;
+ if (!ctrl->channels[c].input)
+ channel_out_run(ctrl, c);
+ } else
+ printf("WARNING: starting DMA ch %d with no client\n", c);
- qemu_bh_schedule_idle(ctrl->bh);
+ qemu_bh_schedule_idle(ctrl->bh);
}
static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
{
- if (!channel_en(ctrl, c)
- || channel_stopped(ctrl, c)
- || ctrl->channels[c].state != RUNNING
- /* Only reload the current data descriptor if it has eol set. */
- || !ctrl->channels[c].current_d.eol) {
- D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
- c, ctrl->channels[c].state,
- channel_stopped(ctrl, c),
- channel_en(ctrl,c),
- ctrl->channels[c].eol));
- D(dump_d(c, &ctrl->channels[c].current_d));
- return;
- }
+ if (!channel_en(ctrl, c)
+ || channel_stopped(ctrl, c)
+ || ctrl->channels[c].state != RUNNING
+ /* Only reload the current data descriptor if it has eol set. */
+ || !ctrl->channels[c].current_d.eol) {
+ D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
+ c, ctrl->channels[c].state,
+ channel_stopped(ctrl, c),
+ channel_en(ctrl,c),
+ ctrl->channels[c].eol));
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ return;
+ }
- /* Reload the current descriptor. */
- channel_load_d(ctrl, c);
+ /* Reload the current descriptor. */
+ channel_load_d(ctrl, c);
- /* If the current descriptor cleared the eol flag and we had already
- reached eol state, do the continue. */
- if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
- D(printf("continue %d ok %x\n", c,
- ctrl->channels[c].current_d.next));
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
- channel_load_d(ctrl, c);
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
+ /* If the current descriptor cleared the eol flag and we had already
+ reached eol state, do the continue. */
+ if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
+ D(printf("continue %d ok %x\n", c,
+ ctrl->channels[c].current_d.next));
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
+ channel_load_d(ctrl, c);
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
- channel_start(ctrl, c);
- }
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
+ channel_start(ctrl, c);
+ }
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
}
static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
- unsigned int cmd = v & ((1 << 10) - 1);
+ unsigned int cmd = v & ((1 << 10) - 1);
- D(printf("%s ch=%d cmd=%x\n",
- __func__, c, cmd));
- if (cmd & regk_dma_load_d) {
- channel_load_d(ctrl, c);
- if (cmd & regk_dma_burst)
- channel_start(ctrl, c);
- }
+ D(printf("%s ch=%d cmd=%x\n",
+ __func__, c, cmd));
+ if (cmd & regk_dma_load_d) {
+ channel_load_d(ctrl, c);
+ if (cmd & regk_dma_burst)
+ channel_start(ctrl, c);
+ }
- if (cmd & regk_dma_load_c) {
- channel_load_c(ctrl, c);
- }
+ if (cmd & regk_dma_load_c) {
+ channel_load_c(ctrl, c);
+ }
}
static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
{
- D(printf("%s %d\n", __func__, c));
- ctrl->channels[c].regs[R_INTR] &=
- ~(ctrl->channels[c].regs[RW_ACK_INTR]);
+ D(printf("%s %d\n", __func__, c));
+ ctrl->channels[c].regs[R_INTR] &=
+ ~(ctrl->channels[c].regs[RW_ACK_INTR]);
- ctrl->channels[c].regs[R_MASKED_INTR] =
- ctrl->channels[c].regs[R_INTR]
- & ctrl->channels[c].regs[RW_INTR_MASK];
+ ctrl->channels[c].regs[R_MASKED_INTR] =
+ ctrl->channels[c].regs[R_INTR]
+ & ctrl->channels[c].regs[RW_INTR_MASK];
- D(printf("%s: chan=%d masked_intr=%x\n", __func__,
- c,
- ctrl->channels[c].regs[R_MASKED_INTR]));
+ D(printf("%s: chan=%d masked_intr=%x\n", __func__,
+ c,
+ ctrl->channels[c].regs[R_MASKED_INTR]));
- qemu_set_irq(ctrl->channels[c].irq,
- !!ctrl->channels[c].regs[R_MASKED_INTR]);
+ qemu_set_irq(ctrl->channels[c].irq,
+ !!ctrl->channels[c].regs[R_MASKED_INTR]);
}
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
- uint32_t len;
- uint32_t saved_data_buf;
- unsigned char buf[2 * 1024];
+ uint32_t len;
+ uint32_t saved_data_buf;
+ unsigned char buf[2 * 1024];
- struct dma_context_metadata meta;
- bool send_context = true;
+ struct dma_context_metadata meta;
+ bool send_context = true;
- if (ctrl->channels[c].eol)
- return 0;
+ if (ctrl->channels[c].eol)
+ return 0;
- do {
- bool out_eop;
- D(printf("ch=%d buf=%x after=%x\n",
- c,
- (uint32_t)ctrl->channels[c].current_d.buf,
- (uint32_t)ctrl->channels[c].current_d.after));
+ do {
+ bool out_eop;
+ D(printf("ch=%d buf=%x after=%x\n",
+ c,
+ (uint32_t)ctrl->channels[c].current_d.buf,
+ (uint32_t)ctrl->channels[c].current_d.after));
- if (send_context) {
- if (ctrl->channels[c].client->client.metadata_push) {
- meta.metadata = ctrl->channels[c].current_d.md;
- ctrl->channels[c].client->client.metadata_push(
- ctrl->channels[c].client->client.opaque,
- &meta);
- }
- send_context = false;
- }
+ if (send_context) {
+ if (ctrl->channels[c].client->client.metadata_push) {
+ meta.metadata = ctrl->channels[c].current_d.md;
+ ctrl->channels[c].client->client.metadata_push(
+ ctrl->channels[c].client->client.opaque,
+ &meta);
+ }
+ send_context = false;
+ }
- channel_load_d(ctrl, c);
- saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
- len = (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.after;
- len -= saved_data_buf;
+ channel_load_d(ctrl, c);
+ saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+ len = (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.after;
+ len -= saved_data_buf;
- if (len > sizeof buf)
- len = sizeof buf;
- cpu_physical_memory_read (saved_data_buf, buf, len);
+ if (len > sizeof buf)
+ len = sizeof buf;
+ cpu_physical_memory_read (saved_data_buf, buf, len);
- out_eop = ((saved_data_buf + len) ==
- ctrl->channels[c].current_d.after) &&
- ctrl->channels[c].current_d.out_eop;
+ out_eop = ((saved_data_buf + len) ==
+ ctrl->channels[c].current_d.after) &&
+ ctrl->channels[c].current_d.out_eop;
- D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
- saved_data_buf, len, out_eop));
+ D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
+ saved_data_buf, len, out_eop));
- if (ctrl->channels[c].client->client.push) {
- if (len > 0) {
- ctrl->channels[c].client->client.push(
- ctrl->channels[c].client->client.opaque,
- buf, len, out_eop);
- }
- } else {
- printf("WARNING: DMA ch%d dataloss,"
- " no attached client.\n", c);
- }
+ if (ctrl->channels[c].client->client.push) {
+ if (len > 0) {
+ ctrl->channels[c].client->client.push(
+ ctrl->channels[c].client->client.opaque,
+ buf, len, out_eop);
+ }
+ } else {
+ printf("WARNING: DMA ch%d dataloss,"
+ " no attached client.\n", c);
+ }
- saved_data_buf += len;
+ saved_data_buf += len;
- if (saved_data_buf == (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.after) {
- /* Done. Step to next. */
- if (ctrl->channels[c].current_d.out_eop) {
- send_context = true;
- }
- if (ctrl->channels[c].current_d.intr) {
- /* data intr. */
- D(printf("signal intr %d eol=%d\n",
- len, ctrl->channels[c].current_d.eol));
- ctrl->channels[c].regs[R_INTR] |= (1 << 2);
- channel_update_irq(ctrl, c);
- }
- channel_store_d(ctrl, c);
- if (ctrl->channels[c].current_d.eol) {
- D(printf("channel %d EOL\n", c));
- ctrl->channels[c].eol = 1;
+ if (saved_data_buf == (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.after) {
+ /* Done. Step to next. */
+ if (ctrl->channels[c].current_d.out_eop) {
+ send_context = true;
+ }
+ if (ctrl->channels[c].current_d.intr) {
+ /* data intr. */
+ D(printf("signal intr %d eol=%d\n",
+ len, ctrl->channels[c].current_d.eol));
+ ctrl->channels[c].regs[R_INTR] |= (1 << 2);
+ channel_update_irq(ctrl, c);
+ }
+ channel_store_d(ctrl, c);
+ if (ctrl->channels[c].current_d.eol) {
+ D(printf("channel %d EOL\n", c));
+ ctrl->channels[c].eol = 1;
- /* Mark the context as disabled. */
- ctrl->channels[c].current_c.dis = 1;
- channel_store_c(ctrl, c);
+ /* Mark the context as disabled. */
+ ctrl->channels[c].current_c.dis = 1;
+ channel_store_c(ctrl, c);
- channel_stop(ctrl, c);
- } else {
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->
- channels[c].current_d.next;
- /* Load new descriptor. */
- channel_load_d(ctrl, c);
- saved_data_buf = (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.buf;
- }
+ channel_stop(ctrl, c);
+ } else {
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->
+ channels[c].current_d.next;
+ /* Load new descriptor. */
+ channel_load_d(ctrl, c);
+ saved_data_buf = (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.buf;
+ }
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- saved_data_buf;
- D(dump_d(c, &ctrl->channels[c].current_d));
- }
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
- } while (!ctrl->channels[c].eol);
- return 1;
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ saved_data_buf;
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ }
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
+ } while (!ctrl->channels[c].eol);
+ return 1;
}
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
- unsigned char *buf, int buflen, int eop)
+ unsigned char *buf, int buflen, int eop)
{
- uint32_t len;
- uint32_t saved_data_buf;
+ uint32_t len;
+ uint32_t saved_data_buf;
- if (ctrl->channels[c].eol == 1)
- return 0;
+ if (ctrl->channels[c].eol == 1)
+ return 0;
- channel_load_d(ctrl, c);
- saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
- len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
- len -= saved_data_buf;
-
- if (len > buflen)
- len = buflen;
+ channel_load_d(ctrl, c);
+ saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+ len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
+ len -= saved_data_buf;
- cpu_physical_memory_write (saved_data_buf, buf, len);
- saved_data_buf += len;
+ if (len > buflen)
+ len = buflen;
- if (saved_data_buf ==
- (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
- || eop) {
- uint32_t r_intr = ctrl->channels[c].regs[R_INTR];
+ cpu_physical_memory_write (saved_data_buf, buf, len);
+ saved_data_buf += len;
- D(printf("in dscr end len=%d\n",
- ctrl->channels[c].current_d.after
- - ctrl->channels[c].current_d.buf));
- ctrl->channels[c].current_d.after = saved_data_buf;
+ if (saved_data_buf ==
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
+ || eop) {
+ uint32_t r_intr = ctrl->channels[c].regs[R_INTR];
- /* Done. Step to next. */
- if (ctrl->channels[c].current_d.intr) {
- /* TODO: signal eop to the client. */
- /* data intr. */
- ctrl->channels[c].regs[R_INTR] |= 3;
- }
- if (eop) {
- ctrl->channels[c].current_d.in_eop = 1;
- ctrl->channels[c].regs[R_INTR] |= 8;
- }
- if (r_intr != ctrl->channels[c].regs[R_INTR])
- channel_update_irq(ctrl, c);
+ D(printf("in dscr end len=%d\n",
+ ctrl->channels[c].current_d.after
+ - ctrl->channels[c].current_d.buf));
+ ctrl->channels[c].current_d.after = saved_data_buf;
- channel_store_d(ctrl, c);
- D(dump_d(c, &ctrl->channels[c].current_d));
+ /* Done. Step to next. */
+ if (ctrl->channels[c].current_d.intr) {
+ /* TODO: signal eop to the client. */
+ /* data intr. */
+ ctrl->channels[c].regs[R_INTR] |= 3;
+ }
+ if (eop) {
+ ctrl->channels[c].current_d.in_eop = 1;
+ ctrl->channels[c].regs[R_INTR] |= 8;
+ }
+ if (r_intr != ctrl->channels[c].regs[R_INTR])
+ channel_update_irq(ctrl, c);
- if (ctrl->channels[c].current_d.eol) {
- D(printf("channel %d EOL\n", c));
- ctrl->channels[c].eol = 1;
+ channel_store_d(ctrl, c);
+ D(dump_d(c, &ctrl->channels[c].current_d));
- /* Mark the context as disabled. */
- ctrl->channels[c].current_c.dis = 1;
- channel_store_c(ctrl, c);
+ if (ctrl->channels[c].current_d.eol) {
+ D(printf("channel %d EOL\n", c));
+ ctrl->channels[c].eol = 1;
- channel_stop(ctrl, c);
- } else {
- ctrl->channels[c].regs[RW_SAVED_DATA] =
- (uint32_t)(unsigned long)ctrl->
- channels[c].current_d.next;
- /* Load new descriptor. */
- channel_load_d(ctrl, c);
- saved_data_buf = (uint32_t)(unsigned long)
- ctrl->channels[c].current_d.buf;
- }
- }
+ /* Mark the context as disabled. */
+ ctrl->channels[c].current_c.dis = 1;
+ channel_store_c(ctrl, c);
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
- return len;
+ channel_stop(ctrl, c);
+ } else {
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->
+ channels[c].current_d.next;
+ /* Load new descriptor. */
+ channel_load_d(ctrl, c);
+ saved_data_buf = (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.buf;
+ }
+ }
+
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
+ return len;
}
static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
{
- if (ctrl->channels[c].client->client.pull) {
- ctrl->channels[c].client->client.pull(
- ctrl->channels[c].client->client.opaque);
- return 1;
- } else
- return 0;
+ if (ctrl->channels[c].client->client.pull) {
+ ctrl->channels[c].client->client.pull(
+ ctrl->channels[c].client->client.opaque);
+ return 1;
+ } else
+ return 0;
}
static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
@@ -581,33 +581,33 @@
static uint64_t
dma_read(void *opaque, hwaddr addr, unsigned int size)
{
- struct fs_dma_ctrl *ctrl = opaque;
- int c;
- uint32_t r = 0;
+ struct fs_dma_ctrl *ctrl = opaque;
+ int c;
+ uint32_t r = 0;
- if (size != 4) {
- dma_rinvalid(opaque, addr);
- }
+ if (size != 4) {
+ dma_rinvalid(opaque, addr);
+ }
- /* Make addr relative to this channel and bounded to nr regs. */
- c = fs_channel(addr);
- addr &= 0xff;
- addr >>= 2;
- switch (addr)
- {
- case RW_STAT:
- r = ctrl->channels[c].state & 7;
- r |= ctrl->channels[c].eol << 5;
- r |= ctrl->channels[c].stream_cmd_src << 8;
- break;
+ /* Make addr relative to this channel and bounded to nr regs. */
+ c = fs_channel(addr);
+ addr &= 0xff;
+ addr >>= 2;
+ switch (addr)
+ {
+ case RW_STAT:
+ r = ctrl->channels[c].state & 7;
+ r |= ctrl->channels[c].eol << 5;
+ r |= ctrl->channels[c].stream_cmd_src << 8;
+ break;
- default:
- r = ctrl->channels[c].regs[addr];
- D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n",
- __func__, c, addr));
- break;
- }
- return r;
+ default:
+ r = ctrl->channels[c].regs[addr];
+ D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n",
+ __func__, c, addr));
+ break;
+ }
+ return r;
}
static void
@@ -619,133 +619,133 @@
static void
dma_update_state(struct fs_dma_ctrl *ctrl, int c)
{
- if (ctrl->channels[c].regs[RW_CFG] & 2)
- ctrl->channels[c].state = STOPPED;
- if (!(ctrl->channels[c].regs[RW_CFG] & 1))
- ctrl->channels[c].state = RST;
+ if (ctrl->channels[c].regs[RW_CFG] & 2)
+ ctrl->channels[c].state = STOPPED;
+ if (!(ctrl->channels[c].regs[RW_CFG] & 1))
+ ctrl->channels[c].state = RST;
}
static void
dma_write(void *opaque, hwaddr addr,
- uint64_t val64, unsigned int size)
+ uint64_t val64, unsigned int size)
{
- struct fs_dma_ctrl *ctrl = opaque;
- uint32_t value = val64;
- int c;
+ struct fs_dma_ctrl *ctrl = opaque;
+ uint32_t value = val64;
+ int c;
- if (size != 4) {
- dma_winvalid(opaque, addr, value);
- }
+ if (size != 4) {
+ dma_winvalid(opaque, addr, value);
+ }
/* Make addr relative to this channel and bounded to nr regs. */
- c = fs_channel(addr);
- addr &= 0xff;
- addr >>= 2;
- switch (addr)
- {
- case RW_DATA:
- ctrl->channels[c].regs[addr] = value;
- break;
+ c = fs_channel(addr);
+ addr &= 0xff;
+ addr >>= 2;
+ switch (addr)
+ {
+ case RW_DATA:
+ ctrl->channels[c].regs[addr] = value;
+ break;
- case RW_CFG:
- ctrl->channels[c].regs[addr] = value;
- dma_update_state(ctrl, c);
- break;
- case RW_CMD:
- /* continue. */
- if (value & ~1)
- printf("Invalid store to ch=%d RW_CMD %x\n",
- c, value);
- ctrl->channels[c].regs[addr] = value;
- channel_continue(ctrl, c);
- break;
+ case RW_CFG:
+ ctrl->channels[c].regs[addr] = value;
+ dma_update_state(ctrl, c);
+ break;
+ case RW_CMD:
+ /* continue. */
+ if (value & ~1)
+ printf("Invalid store to ch=%d RW_CMD %x\n",
+ c, value);
+ ctrl->channels[c].regs[addr] = value;
+ channel_continue(ctrl, c);
+ break;
- case RW_SAVED_DATA:
- case RW_SAVED_DATA_BUF:
- case RW_GROUP:
- case RW_GROUP_DOWN:
- ctrl->channels[c].regs[addr] = value;
- break;
+ case RW_SAVED_DATA:
+ case RW_SAVED_DATA_BUF:
+ case RW_GROUP:
+ case RW_GROUP_DOWN:
+ ctrl->channels[c].regs[addr] = value;
+ break;
- case RW_ACK_INTR:
- case RW_INTR_MASK:
- ctrl->channels[c].regs[addr] = value;
- channel_update_irq(ctrl, c);
- if (addr == RW_ACK_INTR)
- ctrl->channels[c].regs[RW_ACK_INTR] = 0;
- break;
+ case RW_ACK_INTR:
+ case RW_INTR_MASK:
+ ctrl->channels[c].regs[addr] = value;
+ channel_update_irq(ctrl, c);
+ if (addr == RW_ACK_INTR)
+ ctrl->channels[c].regs[RW_ACK_INTR] = 0;
+ break;
- case RW_STREAM_CMD:
- if (value & ~1023)
- printf("Invalid store to ch=%d "
- "RW_STREAMCMD %x\n",
- c, value);
- ctrl->channels[c].regs[addr] = value;
- D(printf("stream_cmd ch=%d\n", c));
- channel_stream_cmd(ctrl, c, value);
- break;
+ case RW_STREAM_CMD:
+ if (value & ~1023)
+ printf("Invalid store to ch=%d "
+ "RW_STREAMCMD %x\n",
+ c, value);
+ ctrl->channels[c].regs[addr] = value;
+ D(printf("stream_cmd ch=%d\n", c));
+ channel_stream_cmd(ctrl, c, value);
+ break;
- default:
- D(printf("%s c=%d " HWADDR_FMT_plx "\n",
- __func__, c, addr));
- break;
- }
+ default:
+ D(printf("%s c=%d " HWADDR_FMT_plx "\n",
+ __func__, c, addr));
+ break;
+ }
}
static const MemoryRegionOps dma_ops = {
- .read = dma_read,
- .write = dma_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 4
- }
+ .read = dma_read,
+ .write = dma_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4
+ }
};
static int etraxfs_dmac_run(void *opaque)
{
- struct fs_dma_ctrl *ctrl = opaque;
- int i;
- int p = 0;
+ struct fs_dma_ctrl *ctrl = opaque;
+ int i;
+ int p = 0;
- for (i = 0;
- i < ctrl->nr_channels;
- i++)
- {
- if (ctrl->channels[i].state == RUNNING)
- {
- if (ctrl->channels[i].input) {
- p += channel_in_run(ctrl, i);
- } else {
- p += channel_out_run(ctrl, i);
- }
- }
- }
- return p;
+ for (i = 0;
+ i < ctrl->nr_channels;
+ i++)
+ {
+ if (ctrl->channels[i].state == RUNNING)
+ {
+ if (ctrl->channels[i].input) {
+ p += channel_in_run(ctrl, i);
+ } else {
+ p += channel_out_run(ctrl, i);
+ }
+ }
+ }
+ return p;
}
int etraxfs_dmac_input(struct etraxfs_dma_client *client,
- void *buf, int len, int eop)
+ void *buf, int len, int eop)
{
- return channel_in_process(client->ctrl, client->channel,
- buf, len, eop);
+ return channel_in_process(client->ctrl, client->channel,
+ buf, len, eop);
}
/* Connect an IRQ line with a channel. */
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
{
- struct fs_dma_ctrl *ctrl = opaque;
- ctrl->channels[c].irq = *line;
- ctrl->channels[c].input = input;
+ struct fs_dma_ctrl *ctrl = opaque;
+ ctrl->channels[c].irq = *line;
+ ctrl->channels[c].input = input;
}
void etraxfs_dmac_connect_client(void *opaque, int c,
- struct etraxfs_dma_client *cl)
+ struct etraxfs_dma_client *cl)
{
- struct fs_dma_ctrl *ctrl = opaque;
- cl->ctrl = ctrl;
- cl->channel = c;
- ctrl->channels[c].client = cl;
+ struct fs_dma_ctrl *ctrl = opaque;
+ cl->ctrl = ctrl;
+ cl->channel = c;
+ ctrl->channels[c].client = cl;
}
@@ -763,18 +763,18 @@
void *etraxfs_dmac_init(hwaddr base, int nr_channels)
{
- struct fs_dma_ctrl *ctrl = NULL;
+ struct fs_dma_ctrl *ctrl = NULL;
- ctrl = g_malloc0(sizeof *ctrl);
+ ctrl = g_malloc0(sizeof *ctrl);
- ctrl->bh = qemu_bh_new(DMA_run, ctrl);
+ ctrl->bh = qemu_bh_new(DMA_run, ctrl);
- ctrl->nr_channels = nr_channels;
- ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);
+ ctrl->nr_channels = nr_channels;
+ ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);
- memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma",
- nr_channels * 0x2000);
- memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);
+ memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma",
+ nr_channels * 0x2000);
+ memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);
- return ctrl;
+ return ctrl;
}
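
Before moving on to the next file: the etraxfs hunks above are purely a whitespace re-indent, but the loop they touch is easier to follow once the descriptor walk is seen in isolation. channel_out_run() and channel_in_process() both consume [buf, after) of each descriptor in bounded chunks and then follow next until a descriptor with eol set is reached. A stripped-down, self-contained model of that walk (field names are taken from the code above, but the struct layout here is illustrative, not the real dma_descr_data):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for dma_descr_data: only the fields the walk uses. */
    struct demo_descr {
        struct demo_descr *next;  /* next descriptor in the chain */
        uint32_t buf;             /* start of this descriptor's data */
        uint32_t after;           /* one past the end of the data */
        unsigned eol:1;           /* set on the last descriptor */
    };

    /* Walk the chain the way channel_out_run() does: consume [buf, after)
     * in bounded chunks, then step to 'next' until a descriptor with 'eol'
     * set is reached. */
    static void walk_chain(const struct demo_descr *d, uint32_t chunk)
    {
        while (d) {
            uint32_t pos = d->buf;

            while (pos != d->after) {
                uint32_t len = d->after - pos;
                if (len > chunk) {
                    len = chunk;             /* bounce-buffer sized pushes */
                }
                printf("push %" PRIu32 " bytes at 0x%" PRIx32 "\n", len, pos);
                pos += len;
            }
            if (d->eol) {
                break;                       /* channel latches its eol flag */
            }
            d = d->next;                     /* RW_SAVED_DATA moves to 'next' */
        }
    }

    int main(void)
    {
        struct demo_descr d2 = { NULL, 0x2000, 0x2800, 1 };
        struct demo_descr d1 = { &d2,  0x1000, 0x1c00, 0 };

        walk_chain(&d1, 2 * 1024);   /* same 2 KiB chunking as the code above */
        return 0;
    }
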
diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c
index 031482d..4470cfe 100644
--- a/hw/gpio/max7310.c
+++ b/hw/gpio/max7310.c
@@ -49,22 +49,22 @@
MAX7310State *s = MAX7310(i2c);
switch (s->command) {
- case 0x00: /* Input port */
+ case 0x00: /* Input port */
return s->level ^ s->polarity;
- case 0x01: /* Output port */
+ case 0x01: /* Output port */
return s->level & ~s->direction;
- case 0x02: /* Polarity inversion */
+ case 0x02: /* Polarity inversion */
return s->polarity;
- case 0x03: /* Configuration */
+ case 0x03: /* Configuration */
return s->direction;
- case 0x04: /* Timeout */
+ case 0x04: /* Timeout */
return s->status;
- case 0xff: /* Reserved */
+ case 0xff: /* Reserved */
return 0xff;
default:
@@ -95,7 +95,7 @@
}
switch (s->command) {
- case 0x01: /* Output port */
+ case 0x01: /* Output port */
for (diff = (data ^ s->level) & ~s->direction; diff;
diff &= ~(1 << line)) {
line = ctz32(diff);
@@ -105,20 +105,20 @@
s->level = (s->level & s->direction) | (data & ~s->direction);
break;
- case 0x02: /* Polarity inversion */
+ case 0x02: /* Polarity inversion */
s->polarity = data;
break;
- case 0x03: /* Configuration */
+ case 0x03: /* Configuration */
s->level &= ~(s->direction ^ data);
s->direction = data;
break;
- case 0x04: /* Timeout */
+ case 0x04: /* Timeout */
s->status = data;
break;
- case 0x00: /* Input port - ignore writes */
+ case 0x00: /* Input port - ignore writes */
break;
default:
qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x02%" PRIx8 "\n",
diff --git a/hw/hyperv/syndbg.c b/hw/hyperv/syndbg.c
index 94fe1b5..065e12f 100644
--- a/hw/hyperv/syndbg.c
+++ b/hw/hyperv/syndbg.c
@@ -340,7 +340,7 @@
syndbg->servaddr.sin_family = AF_INET;
if (connect(syndbg->socket, (struct sockaddr *)&syndbg->servaddr,
sizeof(syndbg->servaddr)) < 0) {
- closesocket(syndbg->socket);
+ close(syndbg->socket);
error_setg(errp, "%s failed to connect to socket", TYPE_HV_SYNDBG);
return;
}
@@ -357,7 +357,7 @@
if (syndbg->socket > 0) {
qemu_set_fd_handler(syndbg->socket, NULL, NULL, NULL);
- closesocket(syndbg->socket);
+ close(syndbg->socket);
}
}
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index b19fb42..ec857a1 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -373,6 +373,104 @@
return method;
}
+static void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar)
+{
+ Aml *UUID, *ifctx1;
+ uint8_t byte_list[1] = { 0 }; /* nothing supported yet */
+
+ aml_append(ctx, aml_store(aml_buffer(1, byte_list), retvar));
+ /*
+ * PCI Firmware Specification 3.1
+ * 4.6. _DSM Definitions for PCI
+ */
+ UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
+ ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID)));
+ {
+ /* call is for unsupported UUID, bail out */
+ aml_append(ifctx1, aml_return(retvar));
+ }
+ aml_append(ctx, ifctx1);
+
+ ifctx1 = aml_if(aml_lless(aml_arg(1), aml_int(2)));
+ {
+ /* call is for unsupported REV, bail out */
+ aml_append(ifctx1, aml_return(retvar));
+ }
+ aml_append(ctx, ifctx1);
+}
+
+static Aml *aml_pci_edsm(void)
+{
+ Aml *method, *ifctx;
+ Aml *zero = aml_int(0);
+ Aml *func = aml_arg(2);
+ Aml *ret = aml_local(0);
+ Aml *aidx = aml_local(1);
+ Aml *params = aml_arg(4);
+
+ method = aml_method("EDSM", 5, AML_SERIALIZED);
+
+ /* get supported functions */
+ ifctx = aml_if(aml_equal(func, zero));
+ {
+ /* 1: have supported functions */
+ /* 7: support for function 7 */
+ const uint8_t caps = 1 | BIT(7);
+ build_append_pci_dsm_func0_common(ifctx, ret);
+ aml_append(ifctx, aml_store(aml_int(caps), aml_index(ret, zero)));
+ aml_append(ifctx, aml_return(ret));
+ }
+ aml_append(method, ifctx);
+
+ /* handle specific functions requests */
+ /*
+ * PCI Firmware Specification 3.1
+ * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under
+ * Operating Systems
+ */
+ ifctx = aml_if(aml_equal(func, aml_int(7)));
+ {
+ Aml *pkg = aml_package(2);
+ aml_append(pkg, zero);
+ /* optional, if not impl. should return null string */
+ aml_append(pkg, aml_string("%s", ""));
+ aml_append(ifctx, aml_store(pkg, ret));
+
+ /*
+ * IASL is fine when initializing Package with computational data,
+ * however it makes the guest unhappy (it fails to process such AML).
+ * So use runtime assignment to set acpi-index after the initializer
+ * to make OSPM happy.
+ */
+ aml_append(ifctx,
+ aml_store(aml_derefof(aml_index(params, aml_int(0))), aidx));
+ aml_append(ifctx, aml_store(aidx, aml_index(ret, zero)));
+ aml_append(ifctx, aml_return(ret));
+ }
+ aml_append(method, ifctx);
+
+ return method;
+}
+
+static Aml *aml_pci_static_endpoint_dsm(PCIDevice *pdev)
+{
+ Aml *method;
+
+ g_assert(pdev->acpi_index != 0);
+ method = aml_method("_DSM", 4, AML_SERIALIZED);
+ {
+ Aml *params = aml_local(0);
+ Aml *pkg = aml_package(1);
+ aml_append(pkg, aml_int(pdev->acpi_index));
+ aml_append(method, aml_store(pkg, params));
+ aml_append(method,
+ aml_return(aml_call5("EDSM", aml_arg(0), aml_arg(1),
+ aml_arg(2), aml_arg(3), params))
+ );
+ }
+ return method;
+}
+
static void build_append_pcihp_notify_entry(Aml *method, int slot)
{
Aml *if_ctx;
@@ -396,12 +494,6 @@
if (DEVICE(pdev)->hotplugged) {
return true;
}
- } else if (!get_dev_aml_func(DEVICE(pdev))) {
- /*
- * Ignore all other devices on !0 functions unless they
- * have AML description (i.e have get_dev_aml_func() != 0)
- */
- return true;
}
}
return false;
@@ -428,12 +520,14 @@
return false;
}
-static void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus,
- QObject *bsel)
+void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus)
{
int devfn;
Aml *dev, *notify_method = NULL, *method;
+ QObject *bsel = object_property_get_qobject(OBJECT(bus),
+ ACPI_PCIHP_PROP_BSEL, NULL);
uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel));
+ qobject_unref(bsel);
aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val)));
notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED);
@@ -478,12 +572,9 @@
void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus)
{
- QObject *bsel;
int devfn;
Aml *dev;
- bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
-
for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
/* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */
int adr = PCI_SLOT(devfn) << 16 | PCI_FUNC(devfn);
@@ -498,16 +589,16 @@
aml_append(dev, aml_name_decl("_ADR", aml_int(adr)));
call_dev_aml_func(DEVICE(bus->devices[devfn]), dev);
+ /* add _DSM if device has acpi-index set */
+ if (pdev->acpi_index &&
+ !object_property_get_bool(OBJECT(pdev), "hotpluggable",
+ &error_abort)) {
+ aml_append(dev, aml_pci_static_endpoint_dsm(pdev));
+ }
/* device descriptor has been composed, add it into parent context */
aml_append(parent_scope, dev);
}
-
- if (bsel) {
- build_append_pcihp_slots(parent_scope, bus, bsel);
- }
-
- qobject_unref(bsel);
}
static bool build_append_notfication_callback(Aml *parent_scope,
@@ -517,16 +608,24 @@
PCIBus *sec;
QObject *bsel;
int nr_notifiers = 0;
+ GQueue *pcnt_bus_list = g_queue_new();
QLIST_FOREACH(sec, &bus->child, sibling) {
Aml *br_scope = aml_scope("S%.02X", sec->parent_dev->devfn);
- if (pci_bus_is_root(sec) ||
- !object_property_find(OBJECT(sec), ACPI_PCIHP_PROP_BSEL)) {
+ if (pci_bus_is_root(sec)) {
continue;
}
nr_notifiers = nr_notifiers +
build_append_notfication_callback(br_scope, sec);
- aml_append(parent_scope, br_scope);
+ /*
+ * add new child scope to parent
+ * and keep track of buses that have a PCNT;
+ * the bus list is used later to call the children's PCNTs from this level's PCNT
+ */
+ if (nr_notifiers) {
+ g_queue_push_tail(pcnt_bus_list, sec);
+ aml_append(parent_scope, br_scope);
+ }
}
/*
@@ -550,30 +649,25 @@
}
/* Notify about child bus events in any case */
- QLIST_FOREACH(sec, &bus->child, sibling) {
- if (pci_bus_is_root(sec) ||
- !object_property_find(OBJECT(sec), ACPI_PCIHP_PROP_BSEL)) {
- continue;
- }
-
+ while ((sec = g_queue_pop_head(pcnt_bus_list))) {
aml_append(method, aml_name("^S%.02X.PCNT", sec->parent_dev->devfn));
}
aml_append(parent_scope, method);
qobject_unref(bsel);
+ g_queue_free(pcnt_bus_list);
return !!nr_notifiers;
}
static Aml *aml_pci_pdsm(void)
{
- Aml *method, *UUID, *ifctx, *ifctx1;
+ Aml *method, *ifctx, *ifctx1;
Aml *ret = aml_local(0);
Aml *caps = aml_local(1);
Aml *acpi_index = aml_local(2);
Aml *zero = aml_int(0);
Aml *one = aml_int(1);
Aml *func = aml_arg(2);
- Aml *rev = aml_arg(1);
Aml *params = aml_arg(4);
Aml *bnum = aml_derefof(aml_index(params, aml_int(0)));
Aml *sunum = aml_derefof(aml_index(params, aml_int(1)));
@@ -583,29 +677,9 @@
/* get supported functions */
ifctx = aml_if(aml_equal(func, zero));
{
- uint8_t byte_list[1] = { 0 }; /* nothing supported yet */
- aml_append(ifctx, aml_store(aml_buffer(1, byte_list), ret));
+ build_append_pci_dsm_func0_common(ifctx, ret);
+
aml_append(ifctx, aml_store(zero, caps));
-
- /*
- * PCI Firmware Specification 3.1
- * 4.6. _DSM Definitions for PCI
- */
- UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
- ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID)));
- {
- /* call is for unsupported UUID, bail out */
- aml_append(ifctx1, aml_return(ret));
- }
- aml_append(ifctx, ifctx1);
-
- ifctx1 = aml_if(aml_lless(rev, aml_int(2)));
- {
- /* call is for unsupported REV, bail out */
- aml_append(ifctx1, aml_return(ret));
- }
- aml_append(ifctx, ifctx1);
-
aml_append(ifctx,
aml_store(aml_call2("AIDX", bnum, sunum), acpi_index));
/*
@@ -1388,6 +1462,7 @@
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
+ aml_append(dev, aml_pci_edsm());
aml_append(sb_scope, dev);
aml_append(dsdt, sb_scope);
@@ -1403,6 +1478,7 @@
aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en));
+ aml_append(dev, aml_pci_edsm());
aml_append(sb_scope, dev);
if (mcfg_valid) {
aml_append(sb_scope, build_q35_dram_controller(&mcfg));
@@ -1710,6 +1786,9 @@
Aml *scope = aml_scope("PCI0");
/* Scan all PCI buses. Generate tables to support hotplug. */
build_append_pci_bus_devices(scope, bus);
+ if (object_property_find(OBJECT(bus), ACPI_PCIHP_PROP_BSEL)) {
+ build_append_pcihp_slots(scope, bus);
+ }
aml_append(sb_scope, scope);
}
}
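
One convention from aml_pci_edsm() above worth spelling out: _DSM Function 0 replies with a capability buffer whose first byte is a bitmap of supported functions, and this patch advertises exactly function 7 (device naming) on top of the "has supported functions" bit, while Function 7 returns a two-element package of the acpi-index and an empty instance-name string. A tiny self-contained illustration of those two shapes; the struct, the index value 42 and the printfs are mine, only the bit layout comes from the `caps = 1 | BIT(7)` line and the package built above.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Conceptual shape of the Function 7 reply built above:
     * Package() { acpi-index, "" }  -- the instance name is left empty. */
    struct dsm_func7_reply {
        uint32_t acpi_index;
        const char *instance_name;
    };

    int main(void)
    {
        /* bit 0: "have supported functions"; bit 7: Function 7 is supported */
        const uint8_t func0_caps = 1 | (1u << 7);
        /* hypothetical device with acpi-index 42 */
        struct dsm_func7_reply reply = { 42, "" };

        printf("_DSM func0 capability byte = 0x%02x\n", func0_caps); /* 0x81 */
        printf("_DSM func7 reply = { %" PRIu32 ", \"%s\" }\n",
               reply.acpi_index, reply.instance_name);
        return 0;
    }
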
diff --git a/hw/i386/kvm/meson.build b/hw/i386/kvm/meson.build
index 82dd6ae..6621ba5 100644
--- a/hw/i386/kvm/meson.build
+++ b/hw/i386/kvm/meson.build
@@ -9,6 +9,7 @@
'xen_evtchn.c',
'xen_gnttab.c',
'xen_xenstore.c',
+ 'xenstore_impl.c',
))
i386_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss)
diff --git a/hw/i386/kvm/trace-events b/hw/i386/kvm/trace-events
index b83c3eb..e4c82de 100644
--- a/hw/i386/kvm/trace-events
+++ b/hw/i386/kvm/trace-events
@@ -3,3 +3,18 @@
kvm_xen_get_free_pirq(int pirq, int type) "pirq %d type %d"
kvm_xen_bind_pirq(int pirq, int port) "pirq %d port %d"
kvm_xen_unmask_pirq(int pirq, char *dev, int vector) "pirq %d dev %s vector %d"
+xenstore_error(unsigned int id, unsigned int tx_id, const char *err) "req %u tx %u err %s"
+xenstore_read(unsigned int tx_id, const char *path) "tx %u path %s"
+xenstore_write(unsigned int tx_id, const char *path) "tx %u path %s"
+xenstore_mkdir(unsigned int tx_id, const char *path) "tx %u path %s"
+xenstore_directory(unsigned int tx_id, const char *path) "tx %u path %s"
+xenstore_directory_part(unsigned int tx_id, const char *path, unsigned int offset) "tx %u path %s offset %u"
+xenstore_transaction_start(unsigned int new_tx) "new_tx %u"
+xenstore_transaction_end(unsigned int tx_id, bool commit) "tx %u commit %d"
+xenstore_rm(unsigned int tx_id, const char *path) "tx %u path %s"
+xenstore_get_perms(unsigned int tx_id, const char *path) "tx %u path %s"
+xenstore_set_perms(unsigned int tx_id, const char *path) "tx %u path %s"
+xenstore_watch(const char *path, const char *token) "path %s token %s"
+xenstore_unwatch(const char *path, const char *token) "path %s token %s"
+xenstore_reset_watches(void) ""
+xenstore_watch_event(const char *path, const char *token) "path %s token %s"
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 886fbf6..3048329 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -15,6 +15,7 @@
#include "qemu/lockable.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
+#include "qemu/error-report.h"
#include "monitor/monitor.h"
#include "monitor/hmp.h"
#include "qapi/error.h"
@@ -34,6 +35,7 @@
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/irq.h"
+#include "hw/xen/xen_backend_ops.h"
#include "xen_evtchn.h"
#include "xen_overlay.h"
@@ -278,6 +280,17 @@
.class_init = xen_evtchn_class_init,
};
+static struct evtchn_backend_ops emu_evtchn_backend_ops = {
+ .open = xen_be_evtchn_open,
+ .bind_interdomain = xen_be_evtchn_bind_interdomain,
+ .unbind = xen_be_evtchn_unbind,
+ .close = xen_be_evtchn_close,
+ .get_fd = xen_be_evtchn_fd,
+ .notify = xen_be_evtchn_notify,
+ .unmask = xen_be_evtchn_unmask,
+ .pending = xen_be_evtchn_pending,
+};
+
static void gsi_assert_bh(void *opaque)
{
struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
@@ -318,6 +331,9 @@
s->nr_pirq_inuse_words = DIV_ROUND_UP(s->nr_pirqs, 64);
s->pirq_inuse_bitmap = g_new0(uint64_t, s->nr_pirq_inuse_words);
s->pirq = g_new0(struct pirq_info, s->nr_pirqs);
+
+ /* Set event channel functions for backend drivers to use */
+ xen_evtchn_ops = &emu_evtchn_backend_ops;
}
void xen_evtchn_connect_gsis(qemu_irq *system_gsis)
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
index 1e691de..21c30e3 100644
--- a/hw/i386/kvm/xen_gnttab.c
+++ b/hw/i386/kvm/xen_gnttab.c
@@ -22,6 +22,7 @@
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
+#include "hw/xen/xen_backend_ops.h"
#include "xen_overlay.h"
#include "xen_gnttab.h"
@@ -34,11 +35,10 @@
#define TYPE_XEN_GNTTAB "xen-gnttab"
OBJECT_DECLARE_SIMPLE_TYPE(XenGnttabState, XEN_GNTTAB)
-#define XEN_PAGE_SHIFT 12
-#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
-
#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
+static struct gnttab_backend_ops emu_gnttab_backend_ops;
+
struct XenGnttabState {
/*< private >*/
SysBusDevice busdev;
@@ -57,6 +57,8 @@
MemoryRegion gnt_frames;
MemoryRegion *gnt_aliases;
uint64_t *gnt_frame_gpas;
+
+ uint8_t *map_track;
};
struct XenGnttabState *xen_gnttab_singleton;
@@ -70,13 +72,11 @@
error_setg(errp, "Xen grant table support is for Xen emulation");
return;
}
- s->nr_frames = 0;
s->max_frames = kvm_xen_get_gnttab_max_frames();
memory_region_init_ram(&s->gnt_frames, OBJECT(dev), "xen:grant_table",
XEN_PAGE_SIZE * s->max_frames, &error_abort);
memory_region_set_enabled(&s->gnt_frames, true);
s->entries.v1 = memory_region_get_ram_ptr(&s->gnt_frames);
- memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames);
/* Create individual page-sizes aliases for overlays */
s->gnt_aliases = (void *)g_new0(MemoryRegion, s->max_frames);
@@ -88,9 +88,18 @@
s->gnt_frame_gpas[i] = INVALID_GPA;
}
+ s->nr_frames = 0;
+ memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames);
+ s->entries.v1[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access;
+ s->entries.v1[GNTTAB_RESERVED_XENSTORE].frame = XEN_SPECIAL_PFN(XENSTORE);
+
qemu_mutex_init(&s->gnt_lock);
xen_gnttab_singleton = s;
+
+ s->map_track = g_new0(uint8_t, s->max_frames * ENTRIES_PER_FRAME_V1);
+
+ xen_gnttab_ops = &emu_gnttab_backend_ops;
}
static int xen_gnttab_post_load(void *opaque, int version_id)
@@ -230,3 +239,309 @@
size->max_nr_frames = s->max_frames;
return 0;
}
+
+/* Track per-open refs, to allow close() to clean up. */
+struct active_ref {
+ MemoryRegionSection mrs;
+ void *virtaddr;
+ uint32_t refcnt;
+ int prot;
+};
+
+static void gnt_unref(XenGnttabState *s, grant_ref_t ref,
+ MemoryRegionSection *mrs, int prot)
+{
+ if (mrs && mrs->mr) {
+ if (prot & PROT_WRITE) {
+ memory_region_set_dirty(mrs->mr, mrs->offset_within_region,
+ XEN_PAGE_SIZE);
+ }
+ memory_region_unref(mrs->mr);
+ mrs->mr = NULL;
+ }
+ assert(s->map_track[ref] != 0);
+
+ if (--s->map_track[ref] == 0) {
+ grant_entry_v1_t *gnt_p = &s->entries.v1[ref];
+ qatomic_and(&gnt_p->flags, (uint16_t)~(GTF_reading | GTF_writing));
+ }
+}
+
+static uint64_t gnt_ref(XenGnttabState *s, grant_ref_t ref, int prot)
+{
+ uint16_t mask = GTF_type_mask | GTF_sub_page;
+ grant_entry_v1_t gnt, *gnt_p;
+ int retries = 0;
+
+ if (ref >= s->max_frames * ENTRIES_PER_FRAME_V1 ||
+ s->map_track[ref] == UINT8_MAX) {
+ return INVALID_GPA;
+ }
+
+ if (prot & PROT_WRITE) {
+ mask |= GTF_readonly;
+ }
+
+ gnt_p = &s->entries.v1[ref];
+
+ /*
+ * The guest can legitimately be changing the GTF_readonly flag. Allow
+ * that, but don't let a malicious guest cause a livelock.
+ */
+ for (retries = 0; retries < 5; retries++) {
+ uint16_t new_flags;
+
+ /* Read the entry before an atomic operation on its flags */
+ gnt = *(volatile grant_entry_v1_t *)gnt_p;
+
+ if ((gnt.flags & mask) != GTF_permit_access ||
+ gnt.domid != DOMID_QEMU) {
+ return INVALID_GPA;
+ }
+
+ new_flags = gnt.flags | GTF_reading;
+ if (prot & PROT_WRITE) {
+ new_flags |= GTF_writing;
+ }
+
+ if (qatomic_cmpxchg(&gnt_p->flags, gnt.flags, new_flags) == gnt.flags) {
+ return (uint64_t)gnt.frame << XEN_PAGE_SHIFT;
+ }
+ }
+
+ return INVALID_GPA;
+}
+
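
The retry loop in gnt_ref() above is a bounded compare-and-swap pattern: snapshot the flags, validate them, then try to set the "access in progress" bits atomically, giving up after a few attempts so a guest flipping GTF_readonly in a tight loop cannot livelock QEMU. A self-contained sketch of the same pattern with made-up flag values (the real GTF_* constants live in Xen's grant_table.h):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up stand-ins for the GTF_* bits used above. */
    #define DEMO_PERMIT_ACCESS  0x01u
    #define DEMO_READONLY       0x04u
    #define DEMO_READING        0x08u
    #define DEMO_WRITING        0x10u

    /* Try to mark a grant entry as in use for writing, retrying a bounded
     * number of times if the owner concurrently rewrites the flags. */
    static bool acquire_for_write(_Atomic uint16_t *flags)
    {
        for (int retries = 0; retries < 5; retries++) {
            uint16_t old = atomic_load(flags);

            if (!(old & DEMO_PERMIT_ACCESS) || (old & DEMO_READONLY)) {
                return false;                /* not grantable for writing */
            }
            uint16_t new = old | DEMO_READING | DEMO_WRITING;
            if (atomic_compare_exchange_strong(flags, &old, new)) {
                return true;                 /* we own the in-use bits now */
            }
            /* flags changed under us: re-validate and try again */
        }
        return false;                        /* treat livelock as failure */
    }

    int main(void)
    {
        _Atomic uint16_t flags = DEMO_PERMIT_ACCESS;

        printf("acquired: %s\n", acquire_for_write(&flags) ? "yes" : "no");
        printf("flags now 0x%02x\n", (unsigned)atomic_load(&flags));
        return 0;
    }
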
+struct xengntdev_handle {
+ GHashTable *active_maps;
+};
+
+static int xen_be_gnttab_set_max_grants(struct xengntdev_handle *xgt,
+ uint32_t nr_grants)
+{
+ return 0;
+}
+
+static void *xen_be_gnttab_map_refs(struct xengntdev_handle *xgt,
+ uint32_t count, uint32_t domid,
+ uint32_t *refs, int prot)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+ struct active_ref *act;
+
+ if (!s) {
+ errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (domid != xen_domid) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ if (!count || count > 4096) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ /*
+ * Making a contiguous mapping from potentially discontiguous grant
+ * references would be... distinctly non-trivial. We don't support it.
+ * Even changing the API to return an array of pointers, one per page,
+ * wouldn't be simple to use in PV backends because some structures
+ * actually cross page boundaries (e.g. 32-bit blkif_response ring
+ * entries are 12 bytes).
+ */
+ if (count != 1) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ QEMU_LOCK_GUARD(&s->gnt_lock);
+
+ act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0]));
+ if (act) {
+ if ((prot & PROT_WRITE) && !(act->prot & PROT_WRITE)) {
+ if (gnt_ref(s, refs[0], prot) == INVALID_GPA) {
+ return NULL;
+ }
+ act->prot |= PROT_WRITE;
+ }
+ act->refcnt++;
+ } else {
+ uint64_t gpa = gnt_ref(s, refs[0], prot);
+ if (gpa == INVALID_GPA) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ act = g_new0(struct active_ref, 1);
+ act->prot = prot;
+ act->refcnt = 1;
+ act->mrs = memory_region_find(get_system_memory(), gpa, XEN_PAGE_SIZE);
+
+ if (act->mrs.mr &&
+ !int128_lt(act->mrs.size, int128_make64(XEN_PAGE_SIZE)) &&
+ memory_region_get_ram_addr(act->mrs.mr) != RAM_ADDR_INVALID) {
+ act->virtaddr = qemu_map_ram_ptr(act->mrs.mr->ram_block,
+ act->mrs.offset_within_region);
+ }
+ if (!act->virtaddr) {
+ gnt_unref(s, refs[0], &act->mrs, 0);
+ g_free(act);
+ errno = EINVAL;
+ return NULL;
+ }
+
+ s->map_track[refs[0]]++;
+ g_hash_table_insert(xgt->active_maps, GINT_TO_POINTER(refs[0]), act);
+ }
+
+ return act->virtaddr;
+}
+
+static gboolean do_unmap(gpointer key, gpointer value, gpointer user_data)
+{
+ XenGnttabState *s = user_data;
+ grant_ref_t gref = GPOINTER_TO_INT(key);
+ struct active_ref *act = value;
+
+ gnt_unref(s, gref, &act->mrs, act->prot);
+ g_free(act);
+ return true;
+}
+
+static int xen_be_gnttab_unmap(struct xengntdev_handle *xgt,
+ void *start_address, uint32_t *refs,
+ uint32_t count)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+ struct active_ref *act;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (count != 1) {
+ return -EINVAL;
+ }
+
+ QEMU_LOCK_GUARD(&s->gnt_lock);
+
+ act = g_hash_table_lookup(xgt->active_maps, GINT_TO_POINTER(refs[0]));
+ if (!act) {
+ return -ENOENT;
+ }
+
+ if (act->virtaddr != start_address) {
+ return -EINVAL;
+ }
+
+ if (!--act->refcnt) {
+ do_unmap(GINT_TO_POINTER(refs[0]), act, s);
+ g_hash_table_remove(xgt->active_maps, GINT_TO_POINTER(refs[0]));
+ }
+
+ return 0;
+}
+
+/*
+ * This looks a bit like the one for true Xen in xen-operations.c but
+ * in emulation we don't support multi-page mappings. And under Xen we
+ * *want* the multi-page mappings so we have fewer bounces through the
+ * kernel and the hypervisor. So the code paths end up being similar,
+ * but different.
+ */
+static int xen_be_gnttab_copy(struct xengntdev_handle *xgt, bool to_domain,
+ uint32_t domid, XenGrantCopySegment *segs,
+ uint32_t nr_segs, Error **errp)
+{
+ int prot = to_domain ? PROT_WRITE : PROT_READ;
+ unsigned int i;
+
+ for (i = 0; i < nr_segs; i++) {
+ XenGrantCopySegment *seg = &segs[i];
+ void *page;
+ uint32_t ref = to_domain ? seg->dest.foreign.ref :
+ seg->source.foreign.ref;
+
+ page = xen_be_gnttab_map_refs(xgt, 1, domid, &ref, prot);
+ if (!page) {
+ if (errp) {
+ error_setg_errno(errp, errno,
+ "xen_be_gnttab_map_refs failed");
+ }
+ return -errno;
+ }
+
+ if (to_domain) {
+ memcpy(page + seg->dest.foreign.offset, seg->source.virt,
+ seg->len);
+ } else {
+ memcpy(seg->dest.virt, page + seg->source.foreign.offset,
+ seg->len);
+ }
+
+ if (xen_be_gnttab_unmap(xgt, page, &ref, 1)) {
+ if (errp) {
+ error_setg_errno(errp, errno, "xen_be_gnttab_unmap failed");
+ }
+ return -errno;
+ }
+ }
+
+ return 0;
+}
+
+static struct xengntdev_handle *xen_be_gnttab_open(void)
+{
+ struct xengntdev_handle *xgt = g_new0(struct xengntdev_handle, 1);
+
+ xgt->active_maps = g_hash_table_new(g_direct_hash, g_direct_equal);
+ return xgt;
+}
+
+static int xen_be_gnttab_close(struct xengntdev_handle *xgt)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ g_hash_table_foreach_remove(xgt->active_maps, do_unmap, s);
+ g_hash_table_destroy(xgt->active_maps);
+ g_free(xgt);
+ return 0;
+}
+
+static struct gnttab_backend_ops emu_gnttab_backend_ops = {
+ .open = xen_be_gnttab_open,
+ .close = xen_be_gnttab_close,
+ .grant_copy = xen_be_gnttab_copy,
+ .set_max_grants = xen_be_gnttab_set_max_grants,
+ .map_refs = xen_be_gnttab_map_refs,
+ .unmap = xen_be_gnttab_unmap,
+};
+
+int xen_gnttab_reset(void)
+{
+ XenGnttabState *s = xen_gnttab_singleton;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ QEMU_LOCK_GUARD(&s->gnt_lock);
+
+ s->nr_frames = 0;
+
+ memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames);
+
+ s->entries.v1[GNTTAB_RESERVED_XENSTORE].flags = GTF_permit_access;
+ s->entries.v1[GNTTAB_RESERVED_XENSTORE].frame = XEN_SPECIAL_PFN(XENSTORE);
+
+ memset(s->map_track, 0, s->max_frames * ENTRIES_PER_FRAME_V1);
+
+ return 0;
+}
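
For orientation, here is how a PV backend is expected to consume the ops table installed above. This is an illustrative fragment, not standalone-compilable: it uses only entry points and XenGrantCopySegment fields visible in this patch, and copy_reply_to_guest(), gref, reply and len are invented for the example.

    /* Illustrative fragment; assumes the declarations this series installs
     * into xen_gnttab_ops. */
    static int copy_reply_to_guest(uint32_t domid, uint32_t gref,
                                   const void *reply, uint32_t len, Error **errp)
    {
        struct xengntdev_handle *xgt = xen_gnttab_ops->open();
        XenGrantCopySegment seg = {
            .dest.foreign.ref = gref,      /* grant ref provided by the guest */
            .dest.foreign.offset = 0,
            .source.virt = (void *)reply,  /* local source buffer */
            .len = len,
        };
        int ret;

        if (!xgt) {
            error_setg(errp, "grant table backend not available");
            return -ENOTSUP;
        }

        /* to_domain=true: copy from our buffer into the guest's granted page */
        ret = xen_gnttab_ops->grant_copy(xgt, true, domid, &seg, 1, errp);
        xen_gnttab_ops->close(xgt);
        return ret;
    }

Passing to_domain=false with .source.foreign and .dest.virt filled in goes the other way, mirroring the two branches in xen_be_gnttab_copy() above.
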
diff --git a/hw/i386/kvm/xen_gnttab.h b/hw/i386/kvm/xen_gnttab.h
index 3bdbe96..ee21523 100644
--- a/hw/i386/kvm/xen_gnttab.h
+++ b/hw/i386/kvm/xen_gnttab.h
@@ -13,6 +13,7 @@
#define QEMU_XEN_GNTTAB_H
void xen_gnttab_create(void);
+int xen_gnttab_reset(void);
int xen_gnttab_map_page(uint64_t idx, uint64_t gfn);
struct gnttab_set_version;
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index 14193ef..900679a 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -15,12 +15,14 @@
#include "qemu/module.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "migration/vmstate.h"
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
+#include "hw/xen/xen_backend_ops.h"
#include "xen_overlay.h"
#include "xen_evtchn.h"
#include "xen_xenstore.h"
@@ -28,15 +30,17 @@
#include "sysemu/kvm.h"
#include "sysemu/kvm_xen.h"
+#include "trace.h"
+
+#include "xenstore_impl.h"
+
#include "hw/xen/interface/io/xs_wire.h"
#include "hw/xen/interface/event_channel.h"
+#include "hw/xen/interface/grant_table.h"
#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)
-#define XEN_PAGE_SHIFT 12
-#define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
-
#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))
@@ -47,6 +51,9 @@
SysBusDevice busdev;
/*< public >*/
+ XenstoreImplState *impl;
+ GList *watch_events; /* for the guest */
+
MemoryRegion xenstore_page;
struct xenstore_domain_interface *xs;
uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
@@ -59,15 +66,54 @@
evtchn_port_t guest_port;
evtchn_port_t be_port;
struct xenevtchn_handle *eh;
+
+ uint8_t *impl_state;
+ uint32_t impl_state_size;
+
+ struct xengntdev_handle *gt;
+ void *granted_xs;
};
struct XenXenstoreState *xen_xenstore_singleton;
static void xen_xenstore_event(void *opaque);
+static void fire_watch_cb(void *opaque, const char *path, const char *token);
+
+static struct xenstore_backend_ops emu_xenstore_backend_ops;
+
+static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
+ GList *perms,
+ const char *relpath,
+ const char *fmt, ...)
+{
+ gchar *abspath;
+ gchar *value;
+ va_list args;
+ GByteArray *data;
+ int err;
+
+ abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath);
+ va_start(args, fmt);
+ value = g_strdup_vprintf(fmt, args);
+ va_end(args);
+
+ data = g_byte_array_new_take((void *)value, strlen(value));
+
+ err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data);
+ assert(!err);
+
+ g_byte_array_unref(data);
+
+ err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms);
+ assert(!err);
+
+ g_free(abspath);
+}
static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
XenXenstoreState *s = XEN_XENSTORE(dev);
+ GList *perms;
if (xen_mode != XEN_EMULATE) {
error_setg(errp, "Xen xenstore support is for Xen emulation");
@@ -89,6 +135,50 @@
}
aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh), true,
xen_xenstore_event, NULL, NULL, NULL, s);
+
+ s->impl = xs_impl_create(xen_domid);
+
+ /* Populate the default nodes */
+
+ /* Nodes owned by 'dom0' but readable by the guest */
+ perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
+ perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));
+
+ relpath_printf(s, perms, "", "%s", "");
+
+ relpath_printf(s, perms, "domid", "%u", xen_domid);
+
+ relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1);
+ relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1);
+
+ relpath_printf(s, perms, "platform/acpi", "%u", 1);
+ relpath_printf(s, perms, "platform/acpi_s3", "%u", 1);
+ relpath_printf(s, perms, "platform/acpi_s4", "%u", 1);
+ relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0);
+
+ g_list_free_full(perms, g_free);
+
+ /* Nodes owned by the guest */
+ perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid));
+
+ relpath_printf(s, perms, "attr", "%s", "");
+
+ relpath_printf(s, perms, "control/shutdown", "%s", "");
+ relpath_printf(s, perms, "control/feature-poweroff", "%u", 1);
+ relpath_printf(s, perms, "control/feature-reboot", "%u", 1);
+ relpath_printf(s, perms, "control/feature-suspend", "%u", 1);
+ relpath_printf(s, perms, "control/feature-s3", "%u", 1);
+ relpath_printf(s, perms, "control/feature-s4", "%u", 1);
+
+ relpath_printf(s, perms, "data", "%s", "");
+ relpath_printf(s, perms, "device", "%s", "");
+ relpath_printf(s, perms, "drivers", "%s", "");
+ relpath_printf(s, perms, "error", "%s", "");
+ relpath_printf(s, perms, "feature", "%s", "");
+
+ g_list_free_full(perms, g_free);
+
+ xen_xenstore_ops = &emu_xenstore_backend_ops;
}
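
The perms lists built in realize above are presumably strings in the <perm-as-string> encoding that the protocol comment later in this file documents ("n<domid>", "r<domid>", "b<domid>", "w<domid>"), with the first entry naming the node's owner. As a self-contained illustration of just that encoding (format_perm() and the domids 0/1 are mine, not the helper this patch calls):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical formatter for the documented <perm-as-string> encoding:
     * 'n' no access, 'r' read only, 'w' write only, 'b' read+write,
     * followed by the domid in decimal. */
    static void format_perm(char *buf, size_t size, char kind, unsigned domid)
    {
        snprintf(buf, size, "%c%u", kind, domid);
    }

    int main(void)
    {
        char owner[16], guest[16];

        /* e.g. a node owned by the backend domain (domid 0 here), with the
         * guest (domid 1 here) allowed to read it */
        format_perm(owner, sizeof(owner), 'n', 0);
        format_perm(guest, sizeof(guest), 'r', 1);
        printf("perms = [\"%s\", \"%s\"]\n", owner, guest);
        return 0;
    }
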
static bool xen_xenstore_is_needed(void *opaque)
@@ -99,16 +189,26 @@
static int xen_xenstore_pre_save(void *opaque)
{
XenXenstoreState *s = opaque;
+ GByteArray *save;
if (s->eh) {
s->guest_port = xen_be_evtchn_get_guest_port(s->eh);
}
+
+ g_free(s->impl_state);
+ save = xs_impl_serialize(s->impl);
+ s->impl_state = save->data;
+ s->impl_state_size = save->len;
+ g_byte_array_free(save, false);
+
return 0;
}
static int xen_xenstore_post_load(void *opaque, int ver)
{
XenXenstoreState *s = opaque;
+ GByteArray *save;
+ int ret;
/*
* As qemu/dom0, rebind to the guest's port. The Windows drivers may
@@ -125,11 +225,18 @@
}
s->be_port = be_port;
}
- return 0;
+
+ save = g_byte_array_new_take(s->impl_state, s->impl_state_size);
+ s->impl_state = NULL;
+ s->impl_state_size = 0;
+
+ ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
+ return ret;
}
static const VMStateDescription xen_xenstore_vmstate = {
.name = "xen_xenstore",
+ .unmigratable = 1, /* The PV back ends don't migrate yet */
.version_id = 1,
.minimum_version_id = 1,
.needed = xen_xenstore_is_needed,
@@ -145,6 +252,10 @@
VMSTATE_BOOL(rsp_pending, XenXenstoreState),
VMSTATE_UINT32(guest_port, XenXenstoreState),
VMSTATE_BOOL(fatal_error, XenXenstoreState),
+ VMSTATE_UINT32(impl_state_size, XenXenstoreState),
+ VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState,
+ impl_state_size, 0,
+ vmstate_info_uint8, uint8_t),
VMSTATE_END_OF_LIST()
}
};
@@ -213,20 +324,761 @@
s->rsp_offset = 0;
}
+static void xs_error(XenXenstoreState *s, unsigned int id,
+ xs_transaction_t tx_id, int errnum)
+{
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ const char *errstr = NULL;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
+ struct xsd_errors *xsd_error = &xsd_errors[i];
+
+ if (xsd_error->errnum == errnum) {
+ errstr = xsd_error->errstring;
+ break;
+ }
+ }
+ assert(errstr);
+
+ trace_xenstore_error(id, tx_id, errstr);
+
+ rsp->type = XS_ERROR;
+ rsp->req_id = id;
+ rsp->tx_id = tx_id;
+ rsp->len = (uint32_t)strlen(errstr) + 1;
+
+ memcpy(&rsp[1], errstr, rsp->len);
+}
+
+static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
+ xs_transaction_t tx_id)
+{
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ const char *okstr = "OK";
+
+ rsp->type = type;
+ rsp->req_id = req_id;
+ rsp->tx_id = tx_id;
+ rsp->len = (uint32_t)strlen(okstr) + 1;
+
+ memcpy(&rsp[1], okstr, rsp->len);
+}
+
+/*
+ * The correct request and response formats are documented in xen.git:
+ * docs/misc/xenstore.txt. A summary is given below for convenience.
+ * The '|' symbol represents a NUL character.
+ *
+ * ---------- Database read, write and permissions operations ----------
+ *
+ * READ <path>| <value|>
+ * WRITE <path>|<value|>
+ * Store and read the octet string <value> at <path>.
+ * WRITE creates any missing parent paths, with empty values.
+ *
+ * MKDIR <path>|
+ * Ensures that the <path> exists, if necessary by creating
+ * it and any missing parents with empty values. If <path>
+ * or any parent already exists, its value is left unchanged.
+ *
+ * RM <path>|
+ * Ensures that the <path> does not exist, by deleting
+ * it and all of its children. It is not an error if <path> does
+ * not exist, but it _is_ an error if <path>'s immediate parent
+ * does not exist either.
+ *
+ * DIRECTORY <path>| <child-leaf-name>|*
+ * Gives a list of the immediate children of <path>, as only the
+ * leafnames. The resulting children are each named
+ * <path>/<child-leaf-name>.
+ *
+ * DIRECTORY_PART <path>|<offset> <gencnt>|<child-leaf-name>|*
+ * Same as DIRECTORY, but to be used for children lists longer than
+ * XENSTORE_PAYLOAD_MAX. Inputs are <path> and the byte offset into
+ * the list of children to return. Return values are the generation
+ * count <gencnt> of the node (to be used to ensure the node hasn't
+ * changed between two reads: <gencnt> being the same for multiple
+ * reads guarantees the node hasn't changed) and the list of children
+ * starting at the specified <offset> of the complete list.
+ *
+ * GET_PERMS <path>| <perm-as-string>|+
+ * SET_PERMS <path>|<perm-as-string>|+?
+ * <perm-as-string> is one of the following
+ * w<domid> write only
+ * r<domid> read only
+ * b<domid> both read and write
+ * n<domid> no access
+ * See https://wiki.xen.org/wiki/XenBus section
+ * `Permissions' for details of the permissions system.
+ * It is possible to set permissions for the special watch paths
+ * "@introduceDomain" and "@releaseDomain" to enable receiving those
+ * watches in unprivileged domains.
+ *
+ * ---------- Watches ----------
+ *
+ * WATCH <wpath>|<token>|?
+ * Adds a watch.
+ *
+ * When a <path> is modified (including path creation, removal,
+ * contents change or permissions change) this generates an event
+ * on the changed <path>. Changes made in transactions cause an
+ * event only if and when committed. Each occurring event is
+ * matched against all the watches currently set up, and each
+ * matching watch results in a WATCH_EVENT message (see below).
+ *
+ * The event's path matches the watch's <wpath> if it is a child
+ * of <wpath>.
+ *
+ * <wpath> can be a <path> to watch or @<wspecial>. In the
+ * latter case <wspecial> may have any syntax but it matches
+ * (according to the rules above) only the following special
+ * events which are invented by xenstored:
+ * @introduceDomain occurs on INTRODUCE
+ * @releaseDomain occurs on any domain crash or
+ * shutdown, and also on RELEASE
+ * and domain destruction
+ * <wspecial> events are sent only to privileged callers or to
+ * domains explicitly enabled via SET_PERMS.
+ *
+ * When a watch is first set up it is triggered once straight
+ * away, with <path> equal to <wpath>. Watches may be triggered
+ * spuriously. The tx_id in a WATCH request is ignored.
+ *
+ * Watches are supposed to be restricted by the permissions
+ * system but in practice the implementation is imperfect.
+ * Applications should not rely on being sent a notification for
+ * paths that they cannot read; however, an application may rely
+ * on being sent a watch when a path which it _is_ able to read
+ * is deleted even if that leaves only a nonexistent unreadable
+ * parent. A notification may be omitted if a node's permissions
+ * are changed so as to make it unreadable, in which case future
+ * notifications may be suppressed (and if the node is later made
+ * readable, some notifications may have been lost).
+ *
+ * WATCH_EVENT <epath>|<token>|
+ * Unsolicited `reply' generated for matching modification events
+ * as described above. req_id and tx_id are both 0.
+ *
+ * <epath> is the event's path, ie the actual path that was
+ * modified; however, if the event was the recursive removal of a
+ * parent of <wpath>, <epath> is just
+ * <wpath> (rather than the actual path which was removed). So
+ * <epath> is a child of <wpath>, regardless.
+ *
+ * Iff <wpath> for the watch was specified as a relative pathname,
+ * the <epath> path will also be relative (with the same base,
+ * obviously).
+ *
+ * UNWATCH <wpath>|<token>|?
+ *
+ * RESET_WATCHES |
+ * Reset all watches and transactions of the caller.
+ *
+ * ---------- Transactions ----------
+ *
+ * TRANSACTION_START | <transid>|
+ * <transid> is an opaque uint32_t allocated by xenstored
+ * represented as unsigned decimal. After this, the transaction may
+ * be referenced by using <transid> (as 32-bit binary) in the
+ * tx_id request header field. When a transaction is started the whole
+ * db is copied; reads and writes happen on the copy.
+ * It is not legal to send non-0 tx_id in TRANSACTION_START.
+ *
+ * TRANSACTION_END T|
+ * TRANSACTION_END F|
+ * tx_id must refer to existing transaction. After this
+ * request the tx_id is no longer valid and may be reused by
+ * xenstore. If F, the transaction is discarded. If T,
+ * it is committed: if there were any other intervening writes
+ * then our END gets get EAGAIN.
+ *
+ * The plan is that in the future only intervening `conflicting'
+ * writes cause EAGAIN, meaning only writes or other commits
+ * which changed paths which were read or written in the
+ * transaction at hand.
+ *
+ */
+
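
Tying the comment above to the handlers below: every request and reply is the fixed header (type, req_id, tx_id, len) followed by len payload bytes, which for READ is just the NUL-terminated path. A self-contained sketch of framing a READ request; demo_sockmsg mirrors the xsd_sockmsg layout used throughout this file, and DEMO_XS_READ is a placeholder rather than the real XS_READ constant from xs_wire.h:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the xsd_sockmsg header used above: four 32-bit fields. */
    struct demo_sockmsg {
        uint32_t type;    /* e.g. XS_READ */
        uint32_t req_id;  /* echoed back in the reply */
        uint32_t tx_id;   /* 0, or a transaction id from TRANSACTION_START */
        uint32_t len;     /* payload length in bytes */
    };

    #define DEMO_XS_READ 2u   /* placeholder; the real value comes from xs_wire.h */

    static size_t frame_read_req(uint8_t *out, uint32_t req_id,
                                 uint32_t tx_id, const char *path)
    {
        struct demo_sockmsg hdr = {
            .type = DEMO_XS_READ,
            .req_id = req_id,
            .tx_id = tx_id,
            .len = (uint32_t)strlen(path) + 1,   /* payload is "path\0" */
        };

        memcpy(out, &hdr, sizeof(hdr));
        memcpy(out + sizeof(hdr), path, hdr.len);
        return sizeof(hdr) + hdr.len;
    }

    int main(void)
    {
        uint8_t buf[128];
        size_t n = frame_read_req(buf, 1, 0, "domid");

        printf("framed %zu bytes (%zu header + payload)\n", n,
               sizeof(struct demo_sockmsg));
        return 0;
    }
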
+static void xs_read(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
+{
+ const char *path = (const char *)req_data;
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ uint8_t *rsp_data = (uint8_t *)&rsp[1];
+ g_autoptr(GByteArray) data = g_byte_array_new();
+ int err;
+
+ if (len == 0 || req_data[len - 1] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ trace_xenstore_read(tx_id, path);
+ err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ rsp->type = XS_READ;
+ rsp->req_id = req_id;
+ rsp->tx_id = tx_id;
+ rsp->len = 0;
+
+ len = data->len;
+ if (len > XENSTORE_PAYLOAD_MAX) {
+ xs_error(s, req_id, tx_id, E2BIG);
+ return;
+ }
+
+ memcpy(&rsp_data[rsp->len], data->data, len);
+ rsp->len += len;
+}
+
+static void xs_write(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ g_autoptr(GByteArray) data = g_byte_array_new();
+ const char *path;
+ int err;
+
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ path = (const char *)req_data;
+
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
+ g_byte_array_append(data, req_data, len);
+
+ trace_xenstore_write(tx_id, path);
+ err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ xs_ok(s, XS_WRITE, req_id, tx_id);
+}
+
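
One easy-to-miss detail that xs_write() above encodes: a WRITE payload is the NUL-terminated path immediately followed by the value bytes, and the value itself is not NUL-terminated; its length is simply whatever remains of the payload. A small self-contained sketch of splitting such a payload, mirroring the scan loop above (split_write_payload() is mine):

    #include <stdio.h>

    /* Split a WRITE payload into "path" and value bytes, the way xs_write()
     * does: scan to the first NUL; everything after it is the value. */
    static int split_write_payload(const unsigned char *payload, unsigned int len,
                                   const char **path,
                                   const unsigned char **value,
                                   unsigned int *vlen)
    {
        unsigned int i;

        for (i = 0; i < len; i++) {
            if (payload[i] == '\0') {
                *path = (const char *)payload;
                *value = payload + i + 1;
                *vlen = len - i - 1;
                return 0;
            }
        }
        return -1; /* no terminating NUL on the path: malformed request */
    }

    int main(void)
    {
        /* payload for: WRITE "control/shutdown" <- "poweroff" */
        const unsigned char payload[] = "control/shutdown\0poweroff";
        const char *path;
        const unsigned char *value;
        unsigned int vlen;

        if (!split_write_payload(payload, sizeof(payload) - 1, &path,
                                 &value, &vlen)) {
            printf("path=%s value_len=%u\n", path, vlen);   /* value_len=8 */
        }
        return 0;
    }
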
+static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ g_autoptr(GByteArray) data = g_byte_array_new();
+ const char *path;
+ int err;
+
+ if (len == 0 || req_data[len - 1] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ path = (const char *)req_data;
+
+ trace_xenstore_mkdir(tx_id, path);
+ err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
+ if (err == ENOENT) {
+ err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
+ }
+
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ xs_ok(s, XS_MKDIR, req_id, tx_id);
+}
+
+static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
+ GList *strings, unsigned int start, bool truncate)
+{
+ uint8_t *rsp_data = (uint8_t *)&rsp[1];
+ GList *l;
+
+ for (l = strings; l; l = l->next) {
+ size_t len = strlen(l->data) + 1; /* Including the NUL termination */
+ char *str = l->data;
+
+ if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
+ if (truncate) {
+ len = XENSTORE_PAYLOAD_MAX - rsp->len;
+ if (!len) {
+ return;
+ }
+ } else {
+ xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
+ return;
+ }
+ }
+
+ if (start) {
+ if (start >= len) {
+ start -= len;
+ continue;
+ }
+
+ str += start;
+ len -= start;
+ start = 0;
+ }
+
+ memcpy(&rsp_data[rsp->len], str, len);
+ rsp->len += len;
+ }
+ /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
+ if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
+ rsp_data[rsp->len++] = '\0';
+ }
+}
+
+static void xs_directory(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ GList *items = NULL;
+ const char *path;
+ int err;
+
+ if (len == 0 || req_data[len - 1] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ path = (const char *)req_data;
+
+ trace_xenstore_directory(tx_id, path);
+ err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items);
+ if (err != 0) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ rsp->type = XS_DIRECTORY;
+ rsp->req_id = req_id;
+ rsp->tx_id = tx_id;
+ rsp->len = 0;
+
+ xs_append_strings(s, rsp, items, 0, false);
+
+ g_list_free_full(items, g_free);
+}
+
+static void xs_directory_part(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ const char *offset_str, *path = (const char *)req_data;
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ char *rsp_data = (char *)&rsp[1];
+ uint64_t gencnt = 0;
+ unsigned int offset;
+ GList *items = NULL;
+ int err;
+
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
+ offset_str = (const char *)req_data;
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
+ if (len) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ trace_xenstore_directory_part(tx_id, path, offset);
+ err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items);
+ if (err != 0) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
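+ /* Response is the <gencnt> string, then as many child names as fit, starting at <offset> */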
+ rsp->type = XS_DIRECTORY_PART;
+ rsp->req_id = req_id;
+ rsp->tx_id = tx_id;
+ rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1;
+
+ xs_append_strings(s, rsp, items, offset, true);
+
+ g_list_free_full(items, g_free);
+}
+
+static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ char *rsp_data = (char *)&rsp[1];
+ int err;
+
+ if (len != 1 || req_data[0] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ rsp->type = XS_TRANSACTION_START;
+ rsp->req_id = req_id;
+ rsp->tx_id = tx_id;
+ rsp->len = 0;
+
+ err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ trace_xenstore_transaction_start(tx_id);
+
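+ /* The reply payload is the new transaction ID as a NUL-terminated decimal string */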
+ rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id);
+ assert(rsp->len < XENSTORE_PAYLOAD_MAX);
+ rsp->len++;
+}
+
+static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ bool commit;
+ int err;
+
+ if (len != 2 || req_data[1] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ switch (req_data[0]) {
+ case 'T':
+ commit = true;
+ break;
+ case 'F':
+ commit = false;
+ break;
+ default:
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ trace_xenstore_transaction_end(tx_id, commit);
+ err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ xs_ok(s, XS_TRANSACTION_END, req_id, tx_id);
+}
+
+static void xs_rm(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
+{
+ const char *path = (const char *)req_data;
+ int err;
+
+ if (len == 0 || req_data[len - 1] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ trace_xenstore_rm(tx_id, path);
+ err = xs_impl_rm(s->impl, xen_domid, tx_id, path);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ xs_ok(s, XS_RM, req_id, tx_id);
+}
+
+static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ const char *path = (const char *)req_data;
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ GList *perms = NULL;
+ int err;
+
+ if (len == 0 || req_data[len - 1] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ trace_xenstore_get_perms(tx_id, path);
+ err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ rsp->type = XS_GET_PERMS;
+ rsp->req_id = req_id;
+ rsp->tx_id = tx_id;
+ rsp->len = 0;
+
+ xs_append_strings(s, rsp, perms, 0, false);
+
+ g_list_free_full(perms, g_free);
+}
+
+static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ const char *path = (const char *)req_data;
+ uint8_t *perm;
+ GList *perms = NULL;
+ int err;
+
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
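+ /* The rest of the payload is one or more NUL-terminated <perm-as-string> entries */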
+ perm = req_data;
+ while (len--) {
+ if (*req_data++ == '\0') {
+ perms = g_list_append(perms, perm);
+ perm = req_data;
+ }
+ }
+
+ /*
+ * Note that there may be trailing garbage at the end of the buffer.
+ * This is explicitly permitted by the '?' at the end of the definition:
+ *
+ * SET_PERMS <path>|<perm-as-string>|+?
+ */
+
+ trace_xenstore_set_perms(tx_id, path);
+ err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
+ g_list_free(perms);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ xs_ok(s, XS_SET_PERMS, req_id, tx_id);
+}
+
+static void xs_watch(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ const char *token, *path = (const char *)req_data;
+ int err;
+
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
+ token = (const char *)req_data;
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
+ /*
+ * Note that there may be trailing garbage at the end of the buffer.
+ * This is explicitly permitted by the '?' at the end of the definition:
+ *
+ * WATCH <wpath>|<token>|?
+ */
+
+ trace_xenstore_watch(path, token);
+ err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ xs_ok(s, XS_WATCH, req_id, tx_id);
+}
+
+static void xs_unwatch(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ const char *token, *path = (const char *)req_data;
+ int err;
+
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
+ token = (const char *)req_data;
+ while (len--) {
+ if (*req_data++ == '\0') {
+ break;
+ }
+ if (len == 0) {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+ }
+
+ trace_xenstore_unwatch(path, token);
+ err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s);
+ if (err) {
+ xs_error(s, req_id, tx_id, err);
+ return;
+ }
+
+ xs_ok(s, XS_UNWATCH, req_id, tx_id);
+}
+
+static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *req_data,
+ unsigned int len)
+{
+ if (len == 0 || req_data[len - 1] != '\0') {
+ xs_error(s, req_id, tx_id, EINVAL);
+ return;
+ }
+
+ trace_xenstore_reset_watches();
+ xs_impl_reset_watches(s->impl, xen_domid);
+
+ xs_ok(s, XS_RESET_WATCHES, req_id, tx_id);
+}
+
+static void xs_priv(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *data,
+ unsigned int len)
+{
+ xs_error(s, req_id, tx_id, EACCES);
+}
+
+static void xs_unimpl(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *data,
+ unsigned int len)
+{
+ xs_error(s, req_id, tx_id, ENOSYS);
+}
+
+typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id,
+ xs_transaction_t tx_id, uint8_t *data,
+ unsigned int len);
+
+struct xsd_req {
+ const char *name;
+ xs_impl fn;
+};
+#define XSD_REQ(_type, _fn) \
+ [_type] = { .name = #_type, .fn = _fn }
+
+struct xsd_req xsd_reqs[] = {
+ XSD_REQ(XS_READ, xs_read),
+ XSD_REQ(XS_WRITE, xs_write),
+ XSD_REQ(XS_MKDIR, xs_mkdir),
+ XSD_REQ(XS_DIRECTORY, xs_directory),
+ XSD_REQ(XS_DIRECTORY_PART, xs_directory_part),
+ XSD_REQ(XS_TRANSACTION_START, xs_transaction_start),
+ XSD_REQ(XS_TRANSACTION_END, xs_transaction_end),
+ XSD_REQ(XS_RM, xs_rm),
+ XSD_REQ(XS_GET_PERMS, xs_get_perms),
+ XSD_REQ(XS_SET_PERMS, xs_set_perms),
+ XSD_REQ(XS_WATCH, xs_watch),
+ XSD_REQ(XS_UNWATCH, xs_unwatch),
+ XSD_REQ(XS_CONTROL, xs_priv),
+ XSD_REQ(XS_INTRODUCE, xs_priv),
+ XSD_REQ(XS_RELEASE, xs_priv),
+ XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv),
+ XSD_REQ(XS_RESUME, xs_priv),
+ XSD_REQ(XS_SET_TARGET, xs_priv),
+ XSD_REQ(XS_RESET_WATCHES, xs_reset_watches),
+};
+
static void process_req(XenXenstoreState *s)
{
struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
- struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
- const char enosys[] = "ENOSYS";
+ xs_impl handler = NULL;
assert(req_pending(s));
assert(!s->rsp_pending);
- rsp->type = XS_ERROR;
- rsp->req_id = req->req_id;
- rsp->tx_id = req->tx_id;
- rsp->len = sizeof(enosys);
- memcpy((void *)&rsp[1], enosys, sizeof(enosys));
+ if (req->type < ARRAY_SIZE(xsd_reqs)) {
+ handler = xsd_reqs[req->type].fn;
+ }
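+ /* Unknown or out-of-range request types fall back to xs_unimpl(), i.e. ENOSYS */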
+ if (!handler) {
+ handler = &xs_unimpl;
+ }
+
+ handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len);
s->rsp_pending = true;
reset_req(s);
@@ -415,6 +1267,113 @@
return copylen;
}
+static void deliver_watch(XenXenstoreState *s, const char *path,
+ const char *token)
+{
+ struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+ uint8_t *rsp_data = (uint8_t *)&rsp[1];
+ unsigned int len;
+
+ assert(!s->rsp_pending);
+
+ trace_xenstore_watch_event(path, token);
+
+ rsp->type = XS_WATCH_EVENT;
+ rsp->req_id = 0;
+ rsp->tx_id = 0;
+ rsp->len = 0;
+
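+ /* A watch event payload is <epath>|<token>|, each element NUL-terminated */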
+ len = strlen(path);
+
+ /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
+ assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);
+
+ memcpy(&rsp_data[rsp->len], path, len);
+ rsp->len += len;
+ rsp_data[rsp->len] = '\0';
+ rsp->len++;
+
+ len = strlen(token);
+ /*
+ * It is possible for the guest to have chosen a token that will
+ * not fit (along with the path) into a watch event. We have no
+ * choice but to drop the event if this is the case.
+ */
+ if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) {
+ return;
+ }
+
+ memcpy(&rsp_data[rsp->len], token, len);
+ rsp->len += len;
+ rsp_data[rsp->len] = '\0';
+ rsp->len++;
+
+ s->rsp_pending = true;
+}
+
+struct watch_event {
+ char *path;
+ char *token;
+};
+
+static void free_watch_event(struct watch_event *ev)
+{
+ if (ev) {
+ g_free(ev->path);
+ g_free(ev->token);
+ g_free(ev);
+ }
+}
+
+static void queue_watch(XenXenstoreState *s, const char *path,
+ const char *token)
+{
+ struct watch_event *ev = g_new0(struct watch_event, 1);
+
+ ev->path = g_strdup(path);
+ ev->token = g_strdup(token);
+
+ s->watch_events = g_list_append(s->watch_events, ev);
+}
+
+static void fire_watch_cb(void *opaque, const char *path, const char *token)
+{
+ XenXenstoreState *s = opaque;
+
+ assert(qemu_mutex_iothread_locked());
+
+ /*
+ * If there's a response pending, we obviously can't scribble over
+ * it. But if there's a request pending, it has dibs on the buffer
+ * too.
+ *
+ * In the common case of a watch firing due to backend activity
+ * when the ring was otherwise idle, we should be able to copy the
+ * strings directly into the rsp_data and thence the actual ring,
+ * without needing to perform any allocations and queue them.
+ */
+ if (s->rsp_pending || req_pending(s)) {
+ queue_watch(s, path, token);
+ } else {
+ deliver_watch(s, path, token);
+ /*
+ * If the message was queued because there was already ring activity,
+ * no need to wake the guest. But if not, we need to send the evtchn.
+ */
+ xen_be_evtchn_notify(s->eh, s->be_port);
+ }
+}
+
+static void process_watch_events(XenXenstoreState *s)
+{
+ struct watch_event *ev = s->watch_events->data;
+
+ deliver_watch(s, ev->path, ev->token);
+
+ s->watch_events = g_list_remove(s->watch_events, ev);
+ free_watch_event(ev);
+}
+
static void xen_xenstore_event(void *opaque)
{
XenXenstoreState *s = opaque;
@@ -433,6 +1392,10 @@
copied_to = copied_from = 0;
processed = false;
+ if (!s->rsp_pending && s->watch_events) {
+ process_watch_events(s);
+ }
+
if (s->rsp_pending) {
copied_to = put_rsp(s);
}
@@ -441,7 +1404,7 @@
copied_from = get_req(s);
}
- if (req_pending(s) && !s->rsp_pending) {
+ if (req_pending(s) && !s->rsp_pending && !s->watch_events) {
process_req(s);
processed = true;
}
@@ -496,5 +1459,270 @@
}
s->be_port = err;
+ /*
+ * We don't actually access the guest's page through the grant, because
+ * this isn't real Xen, and we can just use the page we gave it in the
+ * first place. Map the grant anyway, mostly for cosmetic purposes so
+ * it *looks* like it's in use in the guest-visible grant table.
+ */
+ s->gt = qemu_xen_gnttab_open();
+ uint32_t xs_gntref = GNTTAB_RESERVED_XENSTORE;
+ s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref,
+ PROT_READ | PROT_WRITE);
+
return 0;
}
+
+struct qemu_xs_handle {
+ XenstoreImplState *impl;
+ GList *watches;
+ QEMUBH *watch_bh;
+};
+
+struct qemu_xs_watch {
+ struct qemu_xs_handle *h;
+ char *path;
+ xs_watch_fn fn;
+ void *opaque;
+ GList *events;
+};
+
+static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid)
+{
+ return g_strdup_printf("/local/domain/%u", domid);
+}
+
+static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *num)
+{
+ GList *items = NULL, *l;
+ unsigned int i = 0;
+ char **items_ret;
+ int err;
+
+ err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items);
+ if (err) {
+ errno = err;
+ return NULL;
+ }
+
+ items_ret = g_new0(char *, g_list_length(items) + 1);
+ *num = 0;
+ for (l = items; l; l = l->next) {
+ items_ret[i++] = l->data;
+ (*num)++;
+ }
+ g_list_free(items);
+ return items_ret;
+}
+
+static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *len)
+{
+ GByteArray *data = g_byte_array_new();
+ bool free_segment = false;
+ int err;
+
+ err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
+ if (err) {
+ free_segment = true;
+ errno = err;
+ } else {
+ if (len) {
+ *len = data->len;
+ }
+ /* The xen-bus-helper code expects to get NUL terminated string! */
+ g_byte_array_append(data, (void *)"", 1);
+ }
+
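+ /* g_byte_array_free() returns the element data, or NULL if free_segment was set */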
+ return g_byte_array_free(data, free_segment);
+}
+
+static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, const void *data, unsigned int len)
+{
+ GByteArray *gdata = g_byte_array_new();
+ int err;
+
+ g_byte_array_append(gdata, data, len);
+ err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata);
+ g_byte_array_unref(gdata);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t,
+ unsigned int owner, unsigned int domid,
+ unsigned int perms, const char *path)
+{
+ g_autoptr(GByteArray) data = g_byte_array_new();
+ GList *perms_list = NULL;
+ int err;
+
+ /* mkdir does this */
+ err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
+ if (err == ENOENT) {
+ err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data);
+ }
+ if (err) {
+ errno = err;
+ return false;
+ }
+
+ perms_list = g_list_append(perms_list,
+ xs_perm_as_string(XS_PERM_NONE, owner));
+ perms_list = g_list_append(perms_list,
+ xs_perm_as_string(perms, domid));
+
+ err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list);
+ g_list_free_full(perms_list, g_free);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path)
+{
+ int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static void be_watch_bh(void *_h)
+{
+ struct qemu_xs_handle *h = _h;
+ GList *l;
+
+ for (l = h->watches; l; l = l->next) {
+ struct qemu_xs_watch *w = l->data;
+
+ while (w->events) {
+ struct watch_event *ev = w->events->data;
+
+ w->fn(w->opaque, ev->path);
+
+ w->events = g_list_remove(w->events, ev);
+ free_watch_event(ev);
+ }
+ }
+}
+
+static void xs_be_watch_cb(void *opaque, const char *path, const char *token)
+{
+ struct watch_event *ev = g_new0(struct watch_event, 1);
+ struct qemu_xs_watch *w = opaque;
+
+ /* We don't care about the token */
+ ev->path = g_strdup(path);
+ w->events = g_list_append(w->events, ev);
+
+ qemu_bh_schedule(w->h->watch_bh);
+}
+
+static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h,
+ const char *path, xs_watch_fn fn,
+ void *opaque)
+{
+ struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
+ int err;
+
+ w->h = h;
+ w->fn = fn;
+ w->opaque = opaque;
+
+ err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w);
+ if (err) {
+ errno = err;
+ g_free(w);
+ return NULL;
+ }
+
+ w->path = g_strdup(path);
+ h->watches = g_list_append(h->watches, w);
+ return w;
+}
+
+static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
+{
+ xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w);
+
+ h->watches = g_list_remove(h->watches, w);
+ g_list_free_full(w->events, (GDestroyNotify)free_watch_event);
+ g_free(w->path);
+ g_free(w);
+}
+
+static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h)
+{
+ unsigned int new_tx = XBT_NULL;
+ int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx);
+ if (err) {
+ errno = err;
+ return XBT_NULL;
+ }
+ return new_tx;
+}
+
+static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t,
+ bool abort)
+{
+ int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
+ if (err) {
+ errno = err;
+ return false;
+ }
+ return true;
+}
+
+static struct qemu_xs_handle *xs_be_open(void)
+{
+ XenXenstoreState *s = xen_xenstore_singleton;
+ struct qemu_xs_handle *h;
+
+ if (!s || !s->impl) {
+ errno = -ENOSYS;
+ return NULL;
+ }
+
+ h = g_new0(struct qemu_xs_handle, 1);
+ h->impl = s->impl;
+
+ h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);
+
+ return h;
+}
+
+static void xs_be_close(struct qemu_xs_handle *h)
+{
+ while (h->watches) {
+ struct qemu_xs_watch *w = h->watches->data;
+ xs_be_unwatch(h, w);
+ }
+
+ qemu_bh_delete(h->watch_bh);
+ g_free(h);
+}
+
+static struct xenstore_backend_ops emu_xenstore_backend_ops = {
+ .open = xs_be_open,
+ .close = xs_be_close,
+ .get_domain_path = xs_be_get_domain_path,
+ .directory = xs_be_directory,
+ .read = xs_be_read,
+ .write = xs_be_write,
+ .create = xs_be_create,
+ .destroy = xs_be_destroy,
+ .watch = xs_be_watch,
+ .unwatch = xs_be_unwatch,
+ .transaction_start = xs_be_transaction_start,
+ .transaction_end = xs_be_transaction_end,
+};
diff --git a/hw/i386/kvm/xenstore_impl.c b/hw/i386/kvm/xenstore_impl.c
new file mode 100644
index 0000000..305fe75
--- /dev/null
+++ b/hw/i386/kvm/xenstore_impl.c
@@ -0,0 +1,1927 @@
+/*
+ * QEMU Xen emulation: The actual implementation of XenStore
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>, Paul Durrant <paul@xen.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qom/object.h"
+
+#include "hw/xen/xen.h"
+
+#include "xen_xenstore.h"
+#include "xenstore_impl.h"
+
+#include "hw/xen/interface/io/xs_wire.h"
+
+#define XS_MAX_WATCHES 128
+#define XS_MAX_DOMAIN_NODES 1000
+#define XS_MAX_NODE_SIZE 2048
+#define XS_MAX_TRANSACTIONS 10
+#define XS_MAX_PERMS_PER_NODE 5
+
+#define XS_VALID_CHARS "abcdefghijklmnopqrstuvwxyz" \
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
+ "0123456789-/_"
+
+typedef struct XsNode {
+ uint32_t ref;
+ GByteArray *content;
+ GList *perms;
+ GHashTable *children;
+ uint64_t gencnt;
+ bool deleted_in_tx;
+ bool modified_in_tx;
+ unsigned int serialized_tx;
+#ifdef XS_NODE_UNIT_TEST
+ gchar *name; /* debug only */
+#endif
+} XsNode;
+
+typedef struct XsWatch {
+ struct XsWatch *next;
+ xs_impl_watch_fn *cb;
+ void *cb_opaque;
+ char *token;
+ unsigned int dom_id;
+ int rel_prefix;
+} XsWatch;
+
+typedef struct XsTransaction {
+ XsNode *root;
+ unsigned int nr_nodes;
+ unsigned int base_tx;
+ unsigned int tx_id;
+ unsigned int dom_id;
+} XsTransaction;
+
+struct XenstoreImplState {
+ XsNode *root;
+ unsigned int nr_nodes;
+ GHashTable *watches;
+ unsigned int nr_domu_watches;
+ GHashTable *transactions;
+ unsigned int nr_domu_transactions;
+ unsigned int root_tx;
+ unsigned int last_tx;
+ bool serialized;
+};
+
+
+static void nobble_tx(gpointer key, gpointer value, gpointer user_data)
+{
+ unsigned int *new_tx_id = user_data;
+ XsTransaction *tx = value;
+
+ if (tx->base_tx == *new_tx_id) {
+ /* Transactions based on XBT_NULL will always fail */
+ tx->base_tx = XBT_NULL;
+ }
+}
+
+static inline unsigned int next_tx(struct XenstoreImplState *s)
+{
+ unsigned int tx_id;
+
+ /* Find the next TX id which isn't either XBT_NULL or in use. */
+ do {
+ tx_id = ++s->last_tx;
+ } while (tx_id == XBT_NULL || tx_id == s->root_tx ||
+ g_hash_table_lookup(s->transactions, GINT_TO_POINTER(tx_id)));
+
+ /*
+ * It is vanishingly unlikely, but ensure that no outstanding transaction
+ * is based on the (previous incarnation of the) newly-allocated TX id.
+ */
+ g_hash_table_foreach(s->transactions, nobble_tx, &tx_id);
+
+ return tx_id;
+}
+
+static inline XsNode *xs_node_new(void)
+{
+ XsNode *n = g_new0(XsNode, 1);
+ n->ref = 1;
+
+#ifdef XS_NODE_UNIT_TEST
+ nr_xs_nodes++;
+ xs_node_list = g_list_prepend(xs_node_list, n);
+#endif
+ return n;
+}
+
+static inline XsNode *xs_node_ref(XsNode *n)
+{
+ /* With just 10 transactions, it can never get anywhere near this. */
+ g_assert(n->ref < INT_MAX);
+
+ g_assert(n->ref);
+ n->ref++;
+ return n;
+}
+
+static inline void xs_node_unref(XsNode *n)
+{
+ if (!n) {
+ return;
+ }
+ g_assert(n->ref);
+ if (--n->ref) {
+ return;
+ }
+
+ if (n->content) {
+ g_byte_array_unref(n->content);
+ }
+ if (n->perms) {
+ g_list_free_full(n->perms, g_free);
+ }
+ if (n->children) {
+ g_hash_table_unref(n->children);
+ }
+#ifdef XS_NODE_UNIT_TEST
+ g_free(n->name);
+ nr_xs_nodes--;
+ xs_node_list = g_list_remove(xs_node_list, n);
+#endif
+ g_free(n);
+}
+
+char *xs_perm_as_string(unsigned int perm, unsigned int domid)
+{
+ char letter;
+
+ switch (perm) {
+ case XS_PERM_READ | XS_PERM_WRITE:
+ letter = 'b';
+ break;
+ case XS_PERM_READ:
+ letter = 'r';
+ break;
+ case XS_PERM_WRITE:
+ letter = 'w';
+ break;
+ case XS_PERM_NONE:
+ default:
+ letter = 'n';
+ break;
+ }
+
+ return g_strdup_printf("%c%u", letter, domid);
+}
+
+static gpointer do_perm_copy(gconstpointer src, gpointer user_data)
+{
+ return g_strdup(src);
+}
+
+static XsNode *xs_node_create(const char *name, GList *perms)
+{
+ XsNode *n = xs_node_new();
+
+#ifdef XS_NODE_UNIT_TEST
+ if (name) {
+ n->name = g_strdup(name);
+ }
+#endif
+
+ n->perms = g_list_copy_deep(perms, do_perm_copy, NULL);
+
+ return n;
+}
+
+/* For copying from one hash table to another using g_hash_table_foreach() */
+static void do_child_insert(gpointer key, gpointer value, gpointer user_data)
+{
+ g_hash_table_insert(user_data, g_strdup(key), xs_node_ref(value));
+}
+
+static XsNode *xs_node_copy(XsNode *old)
+{
+ XsNode *n = xs_node_new();
+
+ n->gencnt = old->gencnt;
+
+#ifdef XS_NODE_UNIT_TEST
+ if (old->name) {
+ n->name = g_strdup(old->name);
+ }
+#endif
+
+ assert(old);
+ if (old->children) {
+ n->children = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
+ (GDestroyNotify)xs_node_unref);
+ g_hash_table_foreach(old->children, do_child_insert, n->children);
+ }
+ if (old->perms) {
+ n->perms = g_list_copy_deep(old->perms, do_perm_copy, NULL);
+ }
+ if (old->content) {
+ n->content = g_byte_array_ref(old->content);
+ }
+ return n;
+}
+
+/* Returns true if it made a change to the hash table */
+static bool xs_node_add_child(XsNode *n, const char *path_elem, XsNode *child)
+{
+ assert(!strchr(path_elem, '/'));
+
+ if (!child) {
+ assert(n->children);
+ return g_hash_table_remove(n->children, path_elem);
+ }
+
+#ifdef XS_NODE_UNIT_TEST
+ g_free(child->name);
+ child->name = g_strdup(path_elem);
+#endif
+ if (!n->children) {
+ n->children = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
+ (GDestroyNotify)xs_node_unref);
+ }
+
+ /*
+ * The documentation for g_hash_table_insert() says that it "returns a
+ * boolean value to indicate whether the newly added value was already
+ * in the hash table or not."
+ *
+ * It could perhaps be clearer that returning TRUE means it wasn't.
+ */
+ return g_hash_table_insert(n->children, g_strdup(path_elem), child);
+}
+
+struct walk_op {
+ struct XenstoreImplState *s;
+ char path[XENSTORE_ABS_PATH_MAX + 2]; /* Two NUL terminators */
+ int (*op_fn)(XsNode **n, struct walk_op *op);
+ void *op_opaque;
+ void *op_opaque2;
+
+ GList *watches;
+ unsigned int dom_id;
+ unsigned int tx_id;
+
+ /* The number of nodes which will exist in the tree if this op succeeds. */
+ unsigned int new_nr_nodes;
+
+ /*
+ * This is maintained on the way *down* the walk to indicate
+ * whether nodes can be modified in place or whether COW is
+ * required. It starts off being true, as we're always going to
+ * replace the root node. If we walk into a shared subtree it
+ * becomes false. If we start *creating* new nodes for a write,
+ * it becomes true again.
+ *
+ * Do not use it on the way back up.
+ */
+ bool inplace;
+ bool mutating;
+ bool create_dirs;
+ bool in_transaction;
+
+ /* Tracking during recursion so we know which is first. */
+ bool deleted_in_tx;
+};
+
+static void fire_watches(struct walk_op *op, bool parents)
+{
+ GList *l = NULL;
+ XsWatch *w;
+
+ if (!op->mutating || op->in_transaction) {
+ return;
+ }
+
+ if (parents) {
+ l = op->watches;
+ }
+
+ w = g_hash_table_lookup(op->s->watches, op->path);
+ while (w || l) {
+ if (!w) {
+ /* Fire the parent nodes from 'op' if asked to */
+ w = l->data;
+ l = l->next;
+ continue;
+ }
+
+ assert(strlen(op->path) > w->rel_prefix);
+ w->cb(w->cb_opaque, op->path + w->rel_prefix, w->token);
+
+ w = w->next;
+ }
+}
+
+static int xs_node_add_content(XsNode **n, struct walk_op *op)
+{
+ GByteArray *data = op->op_opaque;
+
+ if (op->dom_id) {
+ /*
+ * The real XenStored includes permissions and names of child nodes
+ * in the calculated datasize but life's too short. For a single
+ * tenant internal XenStore, we don't have to be quite as pedantic.
+ */
+ if (data->len > XS_MAX_NODE_SIZE) {
+ return E2BIG;
+ }
+ }
+ /* We *are* the node to be written. Either this or a copy. */
+ if (!op->inplace) {
+ XsNode *old = *n;
+ *n = xs_node_copy(old);
+ xs_node_unref(old);
+ }
+
+ if ((*n)->content) {
+ g_byte_array_unref((*n)->content);
+ }
+ (*n)->content = g_byte_array_ref(data);
+ if (op->tx_id != XBT_NULL) {
+ (*n)->modified_in_tx = true;
+ }
+ return 0;
+}
+
+static int xs_node_get_content(XsNode **n, struct walk_op *op)
+{
+ GByteArray *data = op->op_opaque;
+ GByteArray *node_data;
+
+ assert(op->inplace);
+ assert(*n);
+
+ node_data = (*n)->content;
+ if (node_data) {
+ g_byte_array_append(data, node_data->data, node_data->len);
+ }
+
+ return 0;
+}
+
+static int node_rm_recurse(gpointer key, gpointer value, gpointer user_data)
+{
+ struct walk_op *op = user_data;
+ int path_len = strlen(op->path);
+ int key_len = strlen(key);
+ XsNode *n = value;
+ bool this_inplace = op->inplace;
+
+ if (n->ref != 1) {
+ op->inplace = 0;
+ }
+
+ assert(key_len + path_len + 2 <= sizeof(op->path));
+ op->path[path_len] = '/';
+ memcpy(op->path + path_len + 1, key, key_len + 1);
+
+ if (n->children) {
+ g_hash_table_foreach_remove(n->children, node_rm_recurse, op);
+ }
+ op->new_nr_nodes--;
+
+ /*
+ * Fire watches on *this* node but not the parents because they are
+ * going to be deleted too, so the watch will fire for them anyway.
+ */
+ fire_watches(op, false);
+ op->path[path_len] = '\0';
+
+ /*
+ * Actually deleting the child here is just an optimisation; if we
+ * don't then the final unref on the topmost victim will just have
+ * to cascade down again repeating all the g_hash_table_foreach()
+ * calls.
+ */
+ return this_inplace;
+}
+
+static XsNode *xs_node_copy_deleted(XsNode *old, struct walk_op *op);
+static void copy_deleted_recurse(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ struct walk_op *op = user_data;
+ GHashTable *siblings = op->op_opaque2;
+ XsNode *n = xs_node_copy_deleted(value, op);
+
+ /*
+ * Reinsert the deleted_in_tx copy of the node into the parent's
+ * 'children' hash table, having stashed it from op->op_opaque2
+ * before the recursive call to xs_node_copy_deleted() scribbled
+ * over it.
+ */
+ g_hash_table_insert(siblings, g_strdup(key), n);
+}
+
+static XsNode *xs_node_copy_deleted(XsNode *old, struct walk_op *op)
+{
+ XsNode *n = xs_node_new();
+
+ n->gencnt = old->gencnt;
+
+#ifdef XS_NODE_UNIT_TEST
+ if (old->name) {
+ n->name = g_strdup(old->name);
+ }
+#endif
+
+ if (old->children) {
+ n->children = g_hash_table_new_full(g_str_hash, g_str_equal, g_free,
+ (GDestroyNotify)xs_node_unref);
+ op->op_opaque2 = n->children;
+ g_hash_table_foreach(old->children, copy_deleted_recurse, op);
+ }
+ if (old->perms) {
+ n->perms = g_list_copy_deep(old->perms, do_perm_copy, NULL);
+ }
+ n->deleted_in_tx = true;
+ /* If it gets resurrected we only fire a watch if it lost its content */
+ if (old->content) {
+ n->modified_in_tx = true;
+ }
+ op->new_nr_nodes--;
+ return n;
+}
+
+static int xs_node_rm(XsNode **n, struct walk_op *op)
+{
+ bool this_inplace = op->inplace;
+
+ if (op->tx_id != XBT_NULL) {
+ /* It's not trivial to do inplace handling for this one */
+ XsNode *old = *n;
+ *n = xs_node_copy_deleted(old, op);
+ xs_node_unref(old);
+ return 0;
+ }
+
+ /* Fire watches for, and count, nodes in the subtree which get deleted */
+ if ((*n)->children) {
+ g_hash_table_foreach_remove((*n)->children, node_rm_recurse, op);
+ }
+ op->new_nr_nodes--;
+
+ if (this_inplace) {
+ xs_node_unref(*n);
+ }
+ *n = NULL;
+ return 0;
+}
+
+static int xs_node_get_perms(XsNode **n, struct walk_op *op)
+{
+ GList **perms = op->op_opaque;
+
+ assert(op->inplace);
+ assert(*n);
+
+ *perms = g_list_copy_deep((*n)->perms, do_perm_copy, NULL);
+ return 0;
+}
+
+static void parse_perm(const char *perm, char *letter, unsigned int *dom_id)
+{
+ unsigned int n = sscanf(perm, "%c%u", letter, dom_id);
+
+ assert(n == 2);
+}
+
+static bool can_access(unsigned int dom_id, GList *perms, const char *letters)
+{
+ unsigned int i, n;
+ char perm_letter;
+ unsigned int perm_dom_id;
+ bool access;
+
+ if (dom_id == 0) {
+ return true;
+ }
+
+ n = g_list_length(perms);
+ assert(n >= 1);
+
+ /*
+ * The dom_id of the first perm is the owner, and the owner always has
+ * read-write access.
+ */
+ parse_perm(g_list_nth_data(perms, 0), &perm_letter, &perm_dom_id);
+ if (dom_id == perm_dom_id) {
+ return true;
+ }
+
+ /*
+ * The letter of the first perm specifies the default access for all other
+ * domains.
+ */
+ access = !!strchr(letters, perm_letter);
+ for (i = 1; i < n; i++) {
+ parse_perm(g_list_nth_data(perms, i), &perm_letter, &perm_dom_id);
+ if (dom_id != perm_dom_id) {
+ continue;
+ }
+ access = !!strchr(letters, perm_letter);
+ }
+
+ return access;
+}
+
+static int xs_node_set_perms(XsNode **n, struct walk_op *op)
+{
+ GList *perms = op->op_opaque;
+
+ if (op->dom_id) {
+ unsigned int perm_dom_id;
+ char perm_letter;
+
+ /* A guest may not change permissions on nodes it does not own */
+ if (!can_access(op->dom_id, (*n)->perms, "")) {
+ return EPERM;
+ }
+
+ /* A guest may not change the owner of a node it owns. */
+ parse_perm(perms->data, &perm_letter, &perm_dom_id);
+ if (perm_dom_id != op->dom_id) {
+ return EPERM;
+ }
+
+ if (g_list_length(perms) > XS_MAX_PERMS_PER_NODE) {
+ return ENOSPC;
+ }
+ }
+
+ /* We *are* the node to be written. Either this or a copy. */
+ if (!op->inplace) {
+ XsNode *old = *n;
+ *n = xs_node_copy(old);
+ xs_node_unref(old);
+ }
+
+ if ((*n)->perms) {
+ g_list_free_full((*n)->perms, g_free);
+ }
+ (*n)->perms = g_list_copy_deep(perms, do_perm_copy, NULL);
+ if (op->tx_id != XBT_NULL) {
+ (*n)->modified_in_tx = true;
+ }
+ return 0;
+}
+
+/*
+ * Passed a full reference in *n which it may free if it needs to COW.
+ *
+ * When changing the tree, the op->inplace flag indicates whether this
+ * node may be modified in place (i.e. it and all its parents had a
+ * refcount of one). If walking down the tree we find a node whose
+ * refcount is higher, we must clear op->inplace and COW from there
+ * down. Unless we are creating new nodes as scaffolding for a write
+ * (which works like 'mkdir -p' does). In which case those newly
+ * created nodes can (and must) be modified in place again.
+ */
+static int xs_node_walk(XsNode **n, struct walk_op *op)
+{
+ char *child_name = NULL;
+ size_t namelen;
+ XsNode *old = *n, *child = NULL;
+ bool stole_child = false;
+ bool this_inplace;
+ XsWatch *watch;
+ int err;
+
+ namelen = strlen(op->path);
+ watch = g_hash_table_lookup(op->s->watches, op->path);
+
+ /* Is there a child, or do we hit the double-NUL termination? */
+ if (op->path[namelen + 1]) {
+ char *slash;
+ child_name = op->path + namelen + 1;
+ slash = strchr(child_name, '/');
+ if (slash) {
+ *slash = '\0';
+ }
+ op->path[namelen] = '/';
+ }
+
+ /* If we walk into a subtree which is shared, we must COW */
+ if (op->mutating && old->ref != 1) {
+ op->inplace = false;
+ }
+
+ if (!child_name) {
+ const char *letters = op->mutating ? "wb" : "rb";
+
+ if (!can_access(op->dom_id, old->perms, letters)) {
+ err = EACCES;
+ goto out;
+ }
+
+ /* This is the actual node on which the operation shall be performed */
+ err = op->op_fn(n, op);
+ if (!err) {
+ fire_watches(op, true);
+ }
+ goto out;
+ }
+
+ /* op->inplace will be further modified during the recursion */
+ this_inplace = op->inplace;
+
+ if (old && old->children) {
+ child = g_hash_table_lookup(old->children, child_name);
+ /* This is a *weak* reference to 'child', owned by the hash table */
+ }
+
+ if (child) {
+ if (child->deleted_in_tx) {
+ assert(child->ref == 1);
+ /* Cannot actually set child->deleted_in_tx = false until later */
+ }
+ xs_node_ref(child);
+ /*
+ * Now we own it too. But if we can modify inplace, that's going to
+ * foil the check and force it to COW. We want to be the *only* owner
+ * so that it can be modified in place, so remove it from the hash
+ * table in that case. We'll add it (or its replacement) back later.
+ */
+ if (op->mutating && this_inplace) {
+ g_hash_table_remove(old->children, child_name);
+ stole_child = true;
+ }
+ } else if (op->create_dirs) {
+ assert(op->mutating);
+
+ if (!can_access(op->dom_id, old->perms, "wb")) {
+ err = EACCES;
+ goto out;
+ }
+
+ if (op->dom_id && op->new_nr_nodes >= XS_MAX_DOMAIN_NODES) {
+ err = ENOSPC;
+ goto out;
+ }
+
+ child = xs_node_create(child_name, old->perms);
+ op->new_nr_nodes++;
+
+ /*
+ * If we're creating a new child, we can clearly modify it (and its
+ * children) in place from here on down.
+ */
+ op->inplace = true;
+ } else {
+ err = ENOENT;
+ goto out;
+ }
+
+ /*
+ * If there's a watch on this node, add it to the list to be fired
+ * (with the correct full pathname for the modified node) at the end.
+ */
+ if (watch) {
+ op->watches = g_list_append(op->watches, watch);
+ }
+
+ /*
+ * Except for the temporary child-stealing as noted, our node has not
+ * changed yet. We don't yet know the overall operation will complete.
+ */
+ err = xs_node_walk(&child, op);
+
+ if (watch) {
+ op->watches = g_list_remove(op->watches, watch);
+ }
+
+ if (err || !op->mutating) {
+ if (stole_child) {
+ /* Put it back as it was. */
+ g_hash_table_replace(old->children, g_strdup(child_name), child);
+ } else {
+ xs_node_unref(child);
+ }
+ goto out;
+ }
+
+ /*
+ * Now we know the operation has completed successfully and we're on
+ * the way back up. Make the change, substituting 'child' in the
+ * node at our level.
+ */
+ if (!this_inplace) {
+ *n = xs_node_copy(old);
+ xs_node_unref(old);
+ }
+
+ /*
+ * If we resurrected a deleted_in_tx node, we can mark it as no longer
+ * deleted now that we know the overall operation has succeeded.
+ */
+ if (op->create_dirs && child && child->deleted_in_tx) {
+ op->new_nr_nodes++;
+ child->deleted_in_tx = false;
+ }
+
+ /*
+ * The child may be NULL here, for a remove operation. Either way,
+ * xs_node_add_child() will do the right thing and return a value
+ * indicating whether it changed the parent's hash table or not.
+ *
+ * We bump the parent gencnt if it adds a child that we *didn't*
+ * steal from it in the first place, or if child==NULL and was
+ * thus removed (whether we stole it earlier and didn't put it
+ * back, or xs_node_add_child() actually removed it now).
+ */
+ if ((xs_node_add_child(*n, child_name, child) && !stole_child) || !child) {
+ (*n)->gencnt++;
+ }
+
+ out:
+ op->path[namelen] = '\0';
+ if (!namelen) {
+ assert(!op->watches);
+ /*
+ * On completing the recursion back up the path walk and reaching the
+ * top, assign the new node count if the operation was successful. If
+ * the main tree was changed, bump its tx ID so that outstanding
+ * transactions correctly fail. But don't bump it every time; only
+ * if it makes a difference.
+ */
+ if (!err && op->mutating) {
+ if (!op->in_transaction) {
+ if (op->s->root_tx != op->s->last_tx) {
+ op->s->root_tx = next_tx(op->s);
+ }
+ op->s->nr_nodes = op->new_nr_nodes;
+ } else {
+ XsTransaction *tx = g_hash_table_lookup(op->s->transactions,
+ GINT_TO_POINTER(op->tx_id));
+ assert(tx);
+ tx->nr_nodes = op->new_nr_nodes;
+ }
+ }
+ }
+ return err;
+}
+
+static void append_directory_item(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ GList **items = user_data;
+
+ *items = g_list_insert_sorted(*items, g_strdup(key), (GCompareFunc)strcmp);
+}
+
+/* Populates items with char * names which caller must free. */
+static int xs_node_directory(XsNode **n, struct walk_op *op)
+{
+ GList **items = op->op_opaque;
+
+ assert(op->inplace);
+ assert(*n);
+
+ if ((*n)->children) {
+ g_hash_table_foreach((*n)->children, append_directory_item, items);
+ }
+
+ if (op->op_opaque2) {
+ *(uint64_t *)op->op_opaque2 = (*n)->gencnt;
+ }
+
+ return 0;
+}
+
+static int validate_path(char *outpath, const char *userpath,
+ unsigned int dom_id)
+{
+ size_t i, pathlen = strlen(userpath);
+
+ if (!pathlen || userpath[pathlen - 1] == '/' || strstr(userpath, "//")) {
+ return EINVAL;
+ }
+ for (i = 0; i < pathlen; i++) {
+ if (!strchr(XS_VALID_CHARS, userpath[i])) {
+ return EINVAL;
+ }
+ }
+ if (userpath[0] == '/') {
+ if (pathlen > XENSTORE_ABS_PATH_MAX) {
+ return E2BIG;
+ }
+ memcpy(outpath, userpath, pathlen + 1);
+ } else {
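+ /* A relative path is resolved against the caller's home, /local/domain/<dom_id> */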
+ if (pathlen > XENSTORE_REL_PATH_MAX) {
+ return E2BIG;
+ }
+ snprintf(outpath, XENSTORE_ABS_PATH_MAX, "/local/domain/%u/%s", dom_id,
+ userpath);
+ }
+ return 0;
+}
+
+
+static int init_walk_op(XenstoreImplState *s, struct walk_op *op,
+ xs_transaction_t tx_id, unsigned int dom_id,
+ const char *path, XsNode ***rootp)
+{
+ int ret = validate_path(op->path, path, dom_id);
+ if (ret) {
+ return ret;
+ }
+
+ /*
+ * We use *two* NUL terminators at the end of the path, as during the walk
+ * we will temporarily turn each '/' into a NUL to allow us to use that
+ * path element for the lookup.
+ */
+ op->path[strlen(op->path) + 1] = '\0';
+ op->watches = NULL;
+ op->path[0] = '\0';
+ op->inplace = true;
+ op->mutating = false;
+ op->create_dirs = false;
+ op->in_transaction = false;
+ op->dom_id = dom_id;
+ op->tx_id = tx_id;
+ op->s = s;
+
+ if (tx_id == XBT_NULL) {
+ *rootp = &s->root;
+ op->new_nr_nodes = s->nr_nodes;
+ } else {
+ XsTransaction *tx = g_hash_table_lookup(s->transactions,
+ GINT_TO_POINTER(tx_id));
+ if (!tx) {
+ return ENOENT;
+ }
+ *rootp = &tx->root;
+ op->new_nr_nodes = tx->nr_nodes;
+ op->in_transaction = true;
+ }
+
+ return 0;
+}
+
+int xs_impl_read(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GByteArray *data)
+{
+ /*
+ * The data GByteArray shall exist, and will be freed by caller.
+ * Just g_byte_array_append() to it.
+ */
+ struct walk_op op;
+ XsNode **n;
+ int ret;
+
+ ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+ if (ret) {
+ return ret;
+ }
+ op.op_fn = xs_node_get_content;
+ op.op_opaque = data;
+ return xs_node_walk(n, &op);
+}
+
+int xs_impl_write(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GByteArray *data)
+{
+ /*
+ * The data GByteArray shall exist, will be freed by caller. You are
+ * free to use g_byte_array_steal() and keep the data. Or just ref it.
+ */
+ struct walk_op op;
+ XsNode **n;
+ int ret;
+
+ ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+ if (ret) {
+ return ret;
+ }
+ op.op_fn = xs_node_add_content;
+ op.op_opaque = data;
+ op.mutating = true;
+ op.create_dirs = true;
+ return xs_node_walk(n, &op);
+}
+
+int xs_impl_directory(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path,
+ uint64_t *gencnt, GList **items)
+{
+ /*
+ * The items are (char *) to be freed by caller. Although it's consumed
+ * immediately so if you want to change it to (const char *) and keep
+ * them, go ahead and change the caller.
+ */
+ struct walk_op op;
+ XsNode **n;
+ int ret;
+
+ ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+ if (ret) {
+ return ret;
+ }
+ op.op_fn = xs_node_directory;
+ op.op_opaque = items;
+ op.op_opaque2 = gencnt;
+ return xs_node_walk(n, &op);
+}
+
+int xs_impl_transaction_start(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t *tx_id)
+{
+ XsTransaction *tx;
+
+ if (*tx_id != XBT_NULL) {
+ return EINVAL;
+ }
+
+ if (dom_id && s->nr_domu_transactions >= XS_MAX_TRANSACTIONS) {
+ return ENOSPC;
+ }
+
+ tx = g_new0(XsTransaction, 1);
+
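+ /* The transaction operates on a copy-on-write snapshot: just take a ref on the current root */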
+ tx->nr_nodes = s->nr_nodes;
+ tx->tx_id = next_tx(s);
+ tx->base_tx = s->root_tx;
+ tx->root = xs_node_ref(s->root);
+ tx->dom_id = dom_id;
+
+ g_hash_table_insert(s->transactions, GINT_TO_POINTER(tx->tx_id), tx);
+ if (dom_id) {
+ s->nr_domu_transactions++;
+ }
+ *tx_id = tx->tx_id;
+ return 0;
+}
+
+static gboolean tx_commit_walk(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ struct walk_op *op = user_data;
+ int path_len = strlen(op->path);
+ int key_len = strlen(key);
+ bool fire_parents = true;
+ XsWatch *watch;
+ XsNode *n = value;
+
+ if (n->ref != 1) {
+ return false;
+ }
+
+ if (n->deleted_in_tx) {
+ /*
+ * We fire watches on our parents if we are the *first* node
+ * to be deleted (the topmost one). This matches the behaviour
+ * when deleting in the live tree.
+ */
+ fire_parents = !op->deleted_in_tx;
+
+ /* Only used on the way down so no need to clear it later */
+ op->deleted_in_tx = true;
+ }
+
+ assert(key_len + path_len + 2 <= sizeof(op->path));
+ op->path[path_len] = '/';
+ memcpy(op->path + path_len + 1, key, key_len + 1);
+
+ watch = g_hash_table_lookup(op->s->watches, op->path);
+ if (watch) {
+ op->watches = g_list_append(op->watches, watch);
+ }
+
+ if (n->children) {
+ g_hash_table_foreach_remove(n->children, tx_commit_walk, op);
+ }
+
+ if (watch) {
+ op->watches = g_list_remove(op->watches, watch);
+ }
+
+ /*
+ * Don't fire watches if this node was only copied because a
+ * descendent was changed. The modified_in_tx flag indicates the
+ * ones which were really changed.
+ */
+ if (n->modified_in_tx || n->deleted_in_tx) {
+ fire_watches(op, fire_parents);
+ n->modified_in_tx = false;
+ }
+ op->path[path_len] = '\0';
+
+ /* Deleted nodes really do get expunged when we commit */
+ return n->deleted_in_tx;
+}
+
+static int transaction_commit(XenstoreImplState *s, XsTransaction *tx)
+{
+ struct walk_op op;
+ XsNode **n;
+
+ if (s->root_tx != tx->base_tx) {
+ return EAGAIN;
+ }
+ xs_node_unref(s->root);
+ s->root = tx->root;
+ tx->root = NULL;
+ s->root_tx = tx->tx_id;
+ s->nr_nodes = tx->nr_nodes;
+
+ init_walk_op(s, &op, XBT_NULL, tx->dom_id, "/", &n);
+ op.deleted_in_tx = false;
+ op.mutating = true;
+
+ /*
+ * Walk the new root and fire watches on any node which has a
+ * refcount of one (which is therefore unique to this transaction).
+ */
+ if (s->root->children) {
+ g_hash_table_foreach_remove(s->root->children, tx_commit_walk, &op);
+ }
+
+ return 0;
+}
+
+int xs_impl_transaction_end(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, bool commit)
+{
+ int ret = 0;
+ XsTransaction *tx = g_hash_table_lookup(s->transactions,
+ GINT_TO_POINTER(tx_id));
+
+ if (!tx || tx->dom_id != dom_id) {
+ return ENOENT;
+ }
+
+ if (commit) {
+ ret = transaction_commit(s, tx);
+ }
+
+ g_hash_table_remove(s->transactions, GINT_TO_POINTER(tx_id));
+ if (dom_id) {
+ assert(s->nr_domu_transactions);
+ s->nr_domu_transactions--;
+ }
+ return ret;
+}
+
+int xs_impl_rm(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path)
+{
+ struct walk_op op;
+ XsNode **n;
+ int ret;
+
+ ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+ if (ret) {
+ return ret;
+ }
+ op.op_fn = xs_node_rm;
+ op.mutating = true;
+ return xs_node_walk(n, &op);
+}
+
+int xs_impl_get_perms(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GList **perms)
+{
+ struct walk_op op;
+ XsNode **n;
+ int ret;
+
+ ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+ if (ret) {
+ return ret;
+ }
+ op.op_fn = xs_node_get_perms;
+ op.op_opaque = perms;
+ return xs_node_walk(n, &op);
+}
+
+static void is_valid_perm(gpointer data, gpointer user_data)
+{
+ char *perm = data;
+ bool *valid = user_data;
+ char letter;
+ unsigned int dom_id;
+
+ if (!*valid) {
+ return;
+ }
+
+ if (sscanf(perm, "%c%u", &letter, &dom_id) != 2) {
+ *valid = false;
+ return;
+ }
+
+ switch (letter) {
+ case 'n':
+ case 'r':
+ case 'w':
+ case 'b':
+ break;
+
+ default:
+ *valid = false;
+ break;
+ }
+}
+
+int xs_impl_set_perms(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GList *perms)
+{
+ struct walk_op op;
+ XsNode **n;
+ bool valid = true;
+ int ret;
+
+ if (!g_list_length(perms)) {
+ return EINVAL;
+ }
+
+ g_list_foreach(perms, is_valid_perm, &valid);
+ if (!valid) {
+ return EINVAL;
+ }
+
+ ret = init_walk_op(s, &op, tx_id, dom_id, path, &n);
+ if (ret) {
+ return ret;
+ }
+ op.op_fn = xs_node_set_perms;
+ op.op_opaque = perms;
+ op.mutating = true;
+ return xs_node_walk(n, &op);
+}
+
+static int do_xs_impl_watch(XenstoreImplState *s, unsigned int dom_id,
+ const char *path, const char *token,
+ xs_impl_watch_fn fn, void *opaque)
+{
+ char abspath[XENSTORE_ABS_PATH_MAX + 1];
+ XsWatch *w, *l;
+ int ret;
+
+ ret = validate_path(abspath, path, dom_id);
+ if (ret) {
+ return ret;
+ }
+
+ /* Check for duplicates */
+ l = w = g_hash_table_lookup(s->watches, abspath);
+ while (w) {
+ if (!g_strcmp0(token, w->token) && opaque == w->cb_opaque &&
+ fn == w->cb && dom_id == w->dom_id) {
+ return EEXIST;
+ }
+ w = w->next;
+ }
+
+ if (dom_id && s->nr_domu_watches >= XS_MAX_WATCHES) {
+ return E2BIG;
+ }
+
+ w = g_new0(XsWatch, 1);
+ w->token = g_strdup(token);
+ w->cb = fn;
+ w->cb_opaque = opaque;
+ w->dom_id = dom_id;
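+ /* rel_prefix strips the prepended /local/domain/<N> base, so events report paths in the same (relative or absolute) form the caller used */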
+ w->rel_prefix = strlen(abspath) - strlen(path);
+
+ /* l was looked up above when checking for duplicates */
+ if (l) {
+ w->next = l->next;
+ l->next = w;
+ } else {
+ g_hash_table_insert(s->watches, g_strdup(abspath), w);
+ }
+ if (dom_id) {
+ s->nr_domu_watches++;
+ }
+
+ return 0;
+}
+
+int xs_impl_watch(XenstoreImplState *s, unsigned int dom_id, const char *path,
+ const char *token, xs_impl_watch_fn fn, void *opaque)
+{
+ int ret = do_xs_impl_watch(s, dom_id, path, token, fn, opaque);
+
+ if (!ret) {
+ /* A new watch should fire immediately */
+ fn(opaque, path, token);
+ }
+
+ return ret;
+}
+
+static XsWatch *free_watch(XenstoreImplState *s, XsWatch *w)
+{
+ XsWatch *next = w->next;
+
+ if (w->dom_id) {
+ assert(s->nr_domu_watches);
+ s->nr_domu_watches--;
+ }
+
+ g_free(w->token);
+ g_free(w);
+
+ return next;
+}
+
+int xs_impl_unwatch(XenstoreImplState *s, unsigned int dom_id,
+ const char *path, const char *token,
+ xs_impl_watch_fn fn, void *opaque)
+{
+ char abspath[XENSTORE_ABS_PATH_MAX + 1];
+ XsWatch *w, **l;
+ int ret;
+
+ ret = validate_path(abspath, path, dom_id);
+ if (ret) {
+ return ret;
+ }
+
+ w = g_hash_table_lookup(s->watches, abspath);
+ if (!w) {
+ return ENOENT;
+ }
+
+ /*
+ * The hash table contains the first element of a list of
+ * watches. Removing the first element in the list is a
+ * special case because we have to update the hash table to
+ * point to the next (or remove it if there's nothing left).
+ */
+ if (!g_strcmp0(token, w->token) && fn == w->cb && opaque == w->cb_opaque &&
+ dom_id == w->dom_id) {
+ if (w->next) {
+ /* Insert the previous 'next' into the hash table */
+ g_hash_table_insert(s->watches, g_strdup(abspath), w->next);
+ } else {
+ /* Nothing left; remove from hash table */
+ g_hash_table_remove(s->watches, abspath);
+ }
+ free_watch(s, w);
+ return 0;
+ }
+
+ /*
+ * We're all done messing with the hash table because the element
+ * it points to has survived the cull. Now it's just a simple
+ * linked list removal operation.
+ */
+ for (l = &w->next; *l; l = &w->next) {
+ w = *l;
+
+ if (!g_strcmp0(token, w->token) && fn == w->cb &&
+ opaque == w->cb_opaque && dom_id == w->dom_id) {
+ *l = free_watch(s, w);
+ return 0;
+ }
+ }
+
+ return ENOENT;
+}
+
+int xs_impl_reset_watches(XenstoreImplState *s, unsigned int dom_id)
+{
+ char **watch_paths;
+ guint nr_watch_paths;
+ guint i;
+
+ watch_paths = (char **)g_hash_table_get_keys_as_array(s->watches,
+ &nr_watch_paths);
+
+ for (i = 0; i < nr_watch_paths; i++) {
+ XsWatch *w1 = g_hash_table_lookup(s->watches, watch_paths[i]);
+ XsWatch *w2, *w, **l;
+
+ /*
+ * w1 is the original list. The hash table has this pointer.
+ * w2 is the head of our newly-filtered list.
+ * w and l are temporary for processing. w is somewhat redundant
+ * with *l but makes my eyes bleed less.
+ */
+
+ w = w2 = w1;
+ l = &w;
+ while (w) {
+ if (w->dom_id == dom_id) {
+ /* If we're freeing the head of the list, bump w2 */
+ if (w2 == w) {
+ w2 = w->next;
+ }
+ *l = free_watch(s, w);
+ } else {
+ l = &w->next;
+ }
+ w = *l;
+ }
+ /*
+ * If the head of the list survived the cull, we don't need to
+ * touch the hash table and we're done with this path. Else...
+ */
+ if (w1 != w2) {
+ g_hash_table_steal(s->watches, watch_paths[i]);
+
+ /*
+ * It was already freed. (Don't worry, this whole thing is
+ * single-threaded and nobody saw it in the meantime). And
+ * having *stolen* it, we now own the watch_paths[i] string
+ * so if we don't give it back to the hash table, we need
+ * to free it.
+ */
+ if (w2) {
+ g_hash_table_insert(s->watches, watch_paths[i], w2);
+ } else {
+ g_free(watch_paths[i]);
+ }
+ }
+ }
+ g_free(watch_paths);
+ return 0;
+}
+
+static void xs_tx_free(void *_tx)
+{
+ XsTransaction *tx = _tx;
+ if (tx->root) {
+ xs_node_unref(tx->root);
+ }
+ g_free(tx);
+}
+
+XenstoreImplState *xs_impl_create(unsigned int dom_id)
+{
+ XenstoreImplState *s = g_new0(XenstoreImplState, 1);
+ GList *perms;
+
+ s->watches = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
+ s->transactions = g_hash_table_new_full(g_direct_hash, g_direct_equal,
+ NULL, xs_tx_free);
+
+ perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, 0));
+ s->root = xs_node_create("/", perms);
+ g_list_free_full(perms, g_free);
+ s->nr_nodes = 1;
+
+ s->root_tx = s->last_tx = 1;
+ return s;
+}
+
+
+static void clear_serialized_tx(gpointer key, gpointer value, gpointer opaque)
+{
+ XsNode *n = value;
+
+ n->serialized_tx = XBT_NULL;
+ if (n->children) {
+ g_hash_table_foreach(n->children, clear_serialized_tx, NULL);
+ }
+}
+
+static void clear_tx_serialized_tx(gpointer key, gpointer value,
+ gpointer opaque)
+{
+ XsTransaction *t = value;
+
+ clear_serialized_tx(NULL, t->root, NULL);
+}
+
+static void write_be32(GByteArray *save, uint32_t val)
+{
+ uint32_t be = htonl(val);
+ g_byte_array_append(save, (void *)&be, sizeof(be));
+}
+
+
+struct save_state {
+ GByteArray *bytes;
+ unsigned int tx_id;
+};
+
+#define MODIFIED_IN_TX (1U << 0)
+#define DELETED_IN_TX (1U << 1)
+#define NODE_REF (1U << 2)
+
+static void save_node(gpointer key, gpointer value, gpointer opaque)
+{
+ struct save_state *ss = opaque;
+ XsNode *n = value;
+ char *name = key;
+ uint8_t flag = 0;
+
+ /* Child nodes (i.e. anything but the root) have a name */
+ if (name) {
+ g_byte_array_append(ss->bytes, key, strlen(key) + 1);
+ }
+
+ /*
+ * If we already wrote this node, refer to the previous copy.
+ * There's no rename/move in XenStore, so all we need to find
+ * it is the tx_id of the transation in which it exists. Which
+ * may be the root tx.
+ */
+ if (n->serialized_tx != XBT_NULL) {
+ flag = NODE_REF;
+ g_byte_array_append(ss->bytes, &flag, 1);
+ write_be32(ss->bytes, n->serialized_tx);
+ } else {
+ GList *l;
+ n->serialized_tx = ss->tx_id;
+
+ if (n->modified_in_tx) {
+ flag |= MODIFIED_IN_TX;
+ }
+ if (n->deleted_in_tx) {
+ flag |= DELETED_IN_TX;
+ }
+ g_byte_array_append(ss->bytes, &flag, 1);
+
+ if (n->content) {
+ write_be32(ss->bytes, n->content->len);
+ g_byte_array_append(ss->bytes, n->content->data, n->content->len);
+ } else {
+ write_be32(ss->bytes, 0);
+ }
+
+ for (l = n->perms; l; l = l->next) {
+ g_byte_array_append(ss->bytes, l->data, strlen(l->data) + 1);
+ }
+ /* NUL termination after perms */
+ g_byte_array_append(ss->bytes, (void *)"", 1);
+
+ if (n->children) {
+ g_hash_table_foreach(n->children, save_node, ss);
+ }
+ /* NUL termination after children (child name is NUL) */
+ g_byte_array_append(ss->bytes, (void *)"", 1);
+ }
+}
+
+static void save_tree(struct save_state *ss, uint32_t tx_id, XsNode *root)
+{
+ write_be32(ss->bytes, tx_id);
+ ss->tx_id = tx_id;
+ save_node(NULL, root, ss);
+}
+
+static void save_tx(gpointer key, gpointer value, gpointer opaque)
+{
+ uint32_t tx_id = GPOINTER_TO_INT(key);
+ struct save_state *ss = opaque;
+ XsTransaction *n = value;
+
+ write_be32(ss->bytes, n->base_tx);
+ write_be32(ss->bytes, n->dom_id);
+
+ save_tree(ss, tx_id, n->root);
+}
+
+static void save_watch(gpointer key, gpointer value, gpointer opaque)
+{
+ struct save_state *ss = opaque;
+ XsWatch *w = value;
+
+ /* We only save the *guest* watches. */
+ if (w->dom_id) {
+ gpointer relpath = key + w->rel_prefix;
+ g_byte_array_append(ss->bytes, relpath, strlen(relpath) + 1);
+ g_byte_array_append(ss->bytes, (void *)w->token, strlen(w->token) + 1);
+ }
+}
+
+GByteArray *xs_impl_serialize(XenstoreImplState *s)
+{
+ struct save_state ss;
+
+ ss.bytes = g_byte_array_new();
+
+ /*
+ * node = flags [ real_node / node_ref ]
+ * flags = uint8_t (MODIFIED_IN_TX | DELETED_IN_TX | NODE_REF)
+ * node_ref = tx_id (in which the original version of this node exists)
+ * real_node = content perms child* NUL
+ * content = len data
+ * len = uint32_t
+ * data = uint8_t{len}
+ * perms = perm* NUL
+ * perm = asciiz
+ * child = name node
+ * name = asciiz
+ *
+ * tree = tx_id node
+ * tx_id = uint32_t
+ *
+ * transaction = base_tx_id dom_id tree
+ * base_tx_id = uint32_t
+ * dom_id = uint32_t
+ *
+ * tx_list = tree transaction* XBT_NULL
+ *
+ * watch = path token
+ * path = asciiz
+ * token = asciiz
+ *
+ * watch_list = watch* NUL
+ *
+ * xs_serialize_stream = last_tx tx_list watch_list
+ * last_tx = uint32_t
+ */
+
+ /* Clear serialized_tx in every node. */
+ if (s->serialized) {
+ clear_serialized_tx(NULL, s->root, NULL);
+ g_hash_table_foreach(s->transactions, clear_tx_serialized_tx, NULL);
+ }
+
+ s->serialized = true;
+
+ write_be32(ss.bytes, s->last_tx);
+ save_tree(&ss, s->root_tx, s->root);
+ g_hash_table_foreach(s->transactions, save_tx, &ss);
+
+ write_be32(ss.bytes, XBT_NULL);
+
+ g_hash_table_foreach(s->watches, save_watch, &ss);
+ g_byte_array_append(ss.bytes, (void *)"", 1);
+
+ return ss.bytes;
+}
+
+struct unsave_state {
+ char path[XENSTORE_ABS_PATH_MAX + 1];
+ XenstoreImplState *s;
+ GByteArray *bytes;
+ uint8_t *d;
+ size_t l;
+ bool root_walk;
+};
+
+static int consume_be32(struct unsave_state *us, unsigned int *val)
+{
+ uint32_t d;
+
+ if (us->l < sizeof(d)) {
+ return -EINVAL;
+ }
+ memcpy(&d, us->d, sizeof(d));
+ *val = ntohl(d);
+ us->d += sizeof(d);
+ us->l -= sizeof(d);
+ return 0;
+}
+
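+/*
+ * Consume a NUL-terminated string from the stream in place; *str points
+ * into the serialized buffer rather than to a copy.
+ */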
+static int consume_string(struct unsave_state *us, char **str, size_t *len)
+{
+ size_t l;
+
+ if (!us->l) {
+ return -EINVAL;
+ }
+
+ l = strnlen((void *)us->d, us->l);
+ if (l == us->l) {
+ return -EINVAL;
+ }
+
+ if (str) {
+ *str = (void *)us->d;
+ }
+ if (len) {
+ *len = l;
+ }
+
+ us->d += l + 1;
+ us->l -= l + 1;
+ return 0;
+}
+
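+/*
+ * Walk a '/'-separated relative path down from node n, temporarily
+ * NUL-terminating each path component in place while it is looked up.
+ */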
+static XsNode *lookup_node(XsNode *n, char *path)
+{
+ char *slash = strchr(path, '/');
+ XsNode *child;
+
+ if (path[0] == '\0') {
+ return n;
+ }
+
+ if (slash) {
+ *slash = '\0';
+ }
+
+ if (!n->children) {
+ return NULL;
+ }
+ child = g_hash_table_lookup(n->children, path);
+ if (!slash) {
+ return child;
+ }
+
+ *slash = '/';
+ if (!child) {
+ return NULL;
+ }
+ return lookup_node(child, slash + 1);
+}
+
+static XsNode *lookup_tx_node(struct unsave_state *us, unsigned int tx_id)
+{
+ XsTransaction *t;
+ if (tx_id == us->s->root_tx) {
+ return lookup_node(us->s->root, us->path + 1);
+ }
+
+ t = g_hash_table_lookup(us->s->transactions, GINT_TO_POINTER(tx_id));
+ if (!t) {
+ return NULL;
+ }
+ g_assert(t->root);
+ return lookup_node(t->root, us->path + 1);
+}
+
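+/* Count a node and all of its descendants, for nr_nodes accounting. */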
+static void count_child_nodes(gpointer key, gpointer value, gpointer user_data)
+{
+ unsigned int *nr_nodes = user_data;
+ XsNode *n = value;
+
+ (*nr_nodes)++;
+
+ if (n->children) {
+ g_hash_table_foreach(n->children, count_child_nodes, nr_nodes);
+ }
+}
+
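+/*
+ * Recursively reconstruct a node and its children from the stream. A
+ * NODE_REF flag means the node was already deserialized as part of an
+ * earlier tree, so take a reference to that existing copy instead.
+ */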
+static int consume_node(struct unsave_state *us, XsNode **nodep,
+ unsigned int *nr_nodes)
+{
+ XsNode *n = NULL;
+ uint8_t flags;
+ int ret;
+
+ if (us->l < 1) {
+ return -EINVAL;
+ }
+ flags = us->d[0];
+ us->d++;
+ us->l--;
+
+ if (flags == NODE_REF) {
+ unsigned int tx;
+
+ ret = consume_be32(us, &tx);
+ if (ret) {
+ return ret;
+ }
+
+ n = lookup_tx_node(us, tx);
+ if (!n) {
+ return -EINVAL;
+ }
+ n->ref++;
+ if (n->children) {
+ g_hash_table_foreach(n->children, count_child_nodes, nr_nodes);
+ }
+ } else {
+ uint32_t datalen;
+
+ if (flags & ~(DELETED_IN_TX | MODIFIED_IN_TX)) {
+ return -EINVAL;
+ }
+ n = xs_node_new();
+
+ if (flags & DELETED_IN_TX) {
+ n->deleted_in_tx = true;
+ }
+ if (flags & MODIFIED_IN_TX) {
+ n->modified_in_tx = true;
+ }
+ ret = consume_be32(us, &datalen);
+ if (ret) {
+ xs_node_unref(n);
+ return -EINVAL;
+ }
+ if (datalen) {
+ if (datalen > us->l) {
+ xs_node_unref(n);
+ return -EINVAL;
+ }
+
+ GByteArray *node_data = g_byte_array_new();
+ g_byte_array_append(node_data, us->d, datalen);
+ us->d += datalen;
+ us->l -= datalen;
+ n->content = node_data;
+
+ if (us->root_walk) {
+ n->modified_in_tx = true;
+ }
+ }
+ while (1) {
+ char *perm = NULL;
+ size_t permlen = 0;
+
+ ret = consume_string(us, &perm, &permlen);
+ if (ret) {
+ xs_node_unref(n);
+ return ret;
+ }
+
+ if (!permlen) {
+ break;
+ }
+
+ n->perms = g_list_append(n->perms, g_strdup(perm));
+ }
+
+ /* Now children */
+ while (1) {
+ size_t childlen;
+ char *childname;
+ char *pathend;
+ XsNode *child = NULL;
+
+ ret = consume_string(us, &childname, &childlen);
+ if (ret) {
+ xs_node_unref(n);
+ return ret;
+ }
+
+ if (!childlen) {
+ break;
+ }
+
+ pathend = us->path + strlen(us->path);
+ strncat(us->path, "/", sizeof(us->path) - 1);
+ strncat(us->path, childname, sizeof(us->path) - 1);
+
+ ret = consume_node(us, &child, nr_nodes);
+ *pathend = '\0';
+ if (ret) {
+ xs_node_unref(n);
+ return ret;
+ }
+ g_assert(child);
+ xs_node_add_child(n, childname, child);
+ }
+
+ /*
+         * If the node has no data and no children, we still want to fire
+         * a watch on it.
+ */
+ if (us->root_walk && !n->children) {
+ n->modified_in_tx = true;
+ }
+ }
+
+ if (!n->deleted_in_tx) {
+ (*nr_nodes)++;
+ }
+
+ *nodep = n;
+ return 0;
+}
+
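+/* Consume one serialized tree: its tx_id followed by its root node. */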
+static int consume_tree(struct unsave_state *us, XsTransaction *t)
+{
+ int ret;
+
+ ret = consume_be32(us, &t->tx_id);
+ if (ret) {
+ return ret;
+ }
+
+ if (t->tx_id > us->s->last_tx) {
+ return -EINVAL;
+ }
+
+ us->path[0] = '\0';
+
+ return consume_node(us, &t->root, &t->nr_nodes);
+}
+
+int xs_impl_deserialize(XenstoreImplState *s, GByteArray *bytes,
+ unsigned int dom_id, xs_impl_watch_fn watch_fn,
+ void *watch_opaque)
+{
+ struct unsave_state us;
+ XsTransaction base_t = { 0 };
+ int ret;
+
+ us.s = s;
+ us.bytes = bytes;
+ us.d = bytes->data;
+ us.l = bytes->len;
+
+ xs_impl_reset_watches(s, dom_id);
+ g_hash_table_remove_all(s->transactions);
+
+ xs_node_unref(s->root);
+ s->root = NULL;
+ s->root_tx = s->last_tx = XBT_NULL;
+
+ ret = consume_be32(&us, &s->last_tx);
+ if (ret) {
+ return ret;
+ }
+
+ /*
+ * Consume the base tree into a transaction so that watches can be
+ * fired as we commit it. By setting us.root_walk we cause the nodes
+ * to be marked as 'modified_in_tx' as they are created, so that the
+ * watches are triggered on them.
+ */
+ base_t.dom_id = dom_id;
+ base_t.base_tx = XBT_NULL;
+ us.root_walk = true;
+ ret = consume_tree(&us, &base_t);
+ if (ret) {
+ return ret;
+ }
+ us.root_walk = false;
+
+ /*
+ * Commit the transaction now while the refcount on all nodes is 1.
+ * Note that we haven't yet reinstated the *guest* watches but that's
+ * OK because we don't want the guest to see any changes. Even any
+ * backend nodes which get recreated should be *precisely* as they
+ * were before the migration. Back ends may have been instantiated
+ * already, and will see the frontend magically blink into existence
+ * now (well, from the aio_bh which fires the watches). It's their
+ * responsibility to rebuild everything precisely as it was before.
+ */
+ ret = transaction_commit(s, &base_t);
+ if (ret) {
+ return ret;
+ }
+
+ while (1) {
+ unsigned int base_tx;
+ XsTransaction *t;
+
+ ret = consume_be32(&us, &base_tx);
+ if (ret) {
+ return ret;
+ }
+ if (base_tx == XBT_NULL) {
+ break;
+ }
+
+ t = g_new0(XsTransaction, 1);
+ t->base_tx = base_tx;
+
+ ret = consume_be32(&us, &t->dom_id);
+ if (!ret) {
+ ret = consume_tree(&us, t);
+ }
+ if (ret) {
+ g_free(t);
+ return ret;
+ }
+ g_assert(t->root);
+ if (t->dom_id) {
+ s->nr_domu_transactions++;
+ }
+ g_hash_table_insert(s->transactions, GINT_TO_POINTER(t->tx_id), t);
+ }
+
+ while (1) {
+ char *path, *token;
+ size_t pathlen, toklen;
+
+ ret = consume_string(&us, &path, &pathlen);
+ if (ret) {
+ return ret;
+ }
+ if (!pathlen) {
+ break;
+ }
+
+ ret = consume_string(&us, &token, &toklen);
+ if (ret) {
+ return ret;
+ }
+
+ if (!watch_fn) {
+ continue;
+ }
+
+ ret = do_xs_impl_watch(s, dom_id, path, token, watch_fn, watch_opaque);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ if (us.l) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/hw/i386/kvm/xenstore_impl.h b/hw/i386/kvm/xenstore_impl.h
new file mode 100644
index 0000000..0df2a91
--- /dev/null
+++ b/hw/i386/kvm/xenstore_impl.h
@@ -0,0 +1,63 @@
+/*
+ * QEMU Xen emulation: The actual implementation of XenStore
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XENSTORE_IMPL_H
+#define QEMU_XENSTORE_IMPL_H
+
+#include "hw/xen/xen_backend_ops.h"
+
+typedef struct XenstoreImplState XenstoreImplState;
+
+XenstoreImplState *xs_impl_create(unsigned int dom_id);
+
+char *xs_perm_as_string(unsigned int perm, unsigned int domid);
+
+/*
+ * These functions return *positive* error numbers. This is a little
+ * unconventional but it helps to keep us honest because there is
+ * also a very limited set of error numbers that they are permitted
+ * to return (those in xsd_errors).
+ */
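+
+/*
+ * For example (illustrative only), callers are expected to check for
+ * specific errnos in the positive sense:
+ *
+ *     err = xs_impl_read(s, dom_id, tx_id, path, data);
+ *     if (err == ENOENT) {
+ *         ... the path does not exist ...
+ *     }
+ */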
+
+int xs_impl_read(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GByteArray *data);
+int xs_impl_write(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GByteArray *data);
+int xs_impl_directory(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path,
+ uint64_t *gencnt, GList **items);
+int xs_impl_transaction_start(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t *tx_id);
+int xs_impl_transaction_end(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, bool commit);
+int xs_impl_rm(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path);
+int xs_impl_get_perms(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GList **perms);
+int xs_impl_set_perms(XenstoreImplState *s, unsigned int dom_id,
+ xs_transaction_t tx_id, const char *path, GList *perms);
+
+/* This differs from xs_watch_fn because it has the token */
+typedef void(xs_impl_watch_fn)(void *opaque, const char *path,
+ const char *token);
+int xs_impl_watch(XenstoreImplState *s, unsigned int dom_id, const char *path,
+ const char *token, xs_impl_watch_fn fn, void *opaque);
+int xs_impl_unwatch(XenstoreImplState *s, unsigned int dom_id,
+ const char *path, const char *token, xs_impl_watch_fn fn,
+ void *opaque);
+int xs_impl_reset_watches(XenstoreImplState *s, unsigned int dom_id);
+
+GByteArray *xs_impl_serialize(XenstoreImplState *s);
+int xs_impl_deserialize(XenstoreImplState *s, GByteArray *bytes,
+ unsigned int dom_id, xs_impl_watch_fn watch_fn,
+ void *watch_opaque);
+
+#endif /* QEMU_XENSTORE_IMPL_H */
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 7bebea5..1489abf 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -102,6 +102,11 @@
#include "trace.h"
#include CONFIG_DEVICES
+#ifdef CONFIG_XEN_EMU
+#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen-bus.h"
+#endif
+
/*
* Helper for setting model-id for CPU models that changed model-id
* depending on QEMU versions up to QEMU 2.4.
@@ -1318,6 +1323,8 @@
if (pcms->bus) {
pci_create_simple(pcms->bus, -1, "xen-platform");
}
+ xen_bus_init();
+ xen_be_init();
}
#endif
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 4bf15f9..30eedd6 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -47,8 +47,6 @@
#include "hw/kvm/clock.h"
#include "hw/sysbus.h"
#include "hw/i2c/smbus_eeprom.h"
-#include "hw/xen/xen-x86.h"
-#include "hw/xen/xen.h"
#include "exec/memory.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/piix4.h"
@@ -60,6 +58,8 @@
#include <xen/hvm/hvm_info_table.h>
#include "hw/xen/xen_pt.h"
#endif
+#include "hw/xen/xen-x86.h"
+#include "hw/xen/xen.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "sysemu/numa.h"
diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c
index db004d1..7030554 100644
--- a/hw/i386/sgx.c
+++ b/hw/i386/sgx.c
@@ -18,6 +18,7 @@
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qapi/qapi-commands-misc-target.h"
#include "exec/address-spaces.h"
#include "sysemu/hw_accel.h"
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index e5a1dd1..56641a5 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -18,7 +18,7 @@
#include "hw/irq.h"
#include "hw/hw.h"
#include "hw/i386/apic-msidef.h"
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_native.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"
#include "hw/xen/xen-x86.h"
@@ -52,10 +52,11 @@
/* Compatibility with older version */
-/* This allows QEMU to build on a system that has Xen 4.5 or earlier
- * installed. This here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
- * needs to be included before this block and hw/xen/xen_common.h needs to
- * be included before xen/hvm/ioreq.h
+/*
+ * This allows QEMU to build on a system that has Xen 4.5 or earlier installed.
+ * This is here (not in hw/xen/xen_native.h) because xen/hvm/ioreq.h needs to
+ * be included before this block and hw/xen/xen_native.h needs to be included
+ * before xen/hvm/ioreq.h
*/
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT 3
@@ -761,7 +762,7 @@
int i;
evtchn_port_t port;
- port = xenevtchn_pending(state->xce_handle);
+ port = qemu_xen_evtchn_pending(state->xce_handle);
if (port == state->bufioreq_local_port) {
timer_mod(state->buffered_io_timer,
BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
@@ -780,7 +781,7 @@
}
/* unmask the wanted port again */
- xenevtchn_unmask(state->xce_handle, port);
+ qemu_xen_evtchn_unmask(state->xce_handle, port);
/* get the io packet from shared memory */
state->send_vcpu = i;
@@ -1147,7 +1148,7 @@
BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
} else {
timer_del(state->buffered_io_timer);
- xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+ qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
}
}
@@ -1196,8 +1197,8 @@
}
req->state = STATE_IORESP_READY;
- xenevtchn_notify(state->xce_handle,
- state->ioreq_local_port[state->send_vcpu]);
+ qemu_xen_evtchn_notify(state->xce_handle,
+ state->ioreq_local_port[state->send_vcpu]);
}
}
@@ -1206,7 +1207,7 @@
int evtchn_fd = -1;
if (state->xce_handle != NULL) {
- evtchn_fd = xenevtchn_fd(state->xce_handle);
+ evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle);
}
state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
@@ -1249,7 +1250,7 @@
xenforeignmemory_unmap_resource(xen_fmem, state->fres);
}
- xenevtchn_close(state->xce_handle);
+ qemu_xen_evtchn_close(state->xce_handle);
xs_daemon_close(state->xenstore);
}
@@ -1397,9 +1398,11 @@
xen_pfn_t ioreq_pfn;
XenIOState *state;
+ setup_xen_backend_ops();
+
state = g_new0(XenIOState, 1);
- state->xce_handle = xenevtchn_open(NULL, 0);
+ state->xce_handle = qemu_xen_evtchn_open();
if (state->xce_handle == NULL) {
perror("xen: event channel open");
goto err;
@@ -1463,8 +1466,9 @@
/* FIXME: how about if we overflow the page here? */
for (i = 0; i < max_cpus; i++) {
- rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
- xen_vcpu_eport(state->shared_page, i));
+ rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ xen_vcpu_eport(state->shared_page,
+ i));
if (rc == -1) {
error_report("shared evtchn %d bind error %d", i, errno);
goto err;
@@ -1472,8 +1476,8 @@
state->ioreq_local_port[i] = rc;
}
- rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
- state->bufioreq_remote_port);
+ rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ state->bufioreq_remote_port);
if (rc == -1) {
error_report("buffered evtchn bind error %d", errno);
goto err;
diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
index 1d0879d..f7d9746 100644
--- a/hw/i386/xen/xen-mapcache.c
+++ b/hw/i386/xen/xen-mapcache.c
@@ -14,7 +14,7 @@
#include <sys/resource.h>
-#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen_native.h"
#include "qemu/bitmap.h"
#include "sysemu/runstate.h"
diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
index 539f7da..57f1d74 100644
--- a/hw/i386/xen/xen_platform.c
+++ b/hw/i386/xen/xen_platform.c
@@ -28,7 +28,6 @@
#include "hw/ide/pci.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
-#include "hw/xen/xen.h"
#include "net/net.h"
#include "trace.h"
#include "sysemu/xen.h"
@@ -38,10 +37,12 @@
#include "qom/object.h"
#ifdef CONFIG_XEN
-#include "hw/xen/xen_common.h"
-#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen_native.h"
#endif
+/* The rule is that xen_native.h must come first */
+#include "hw/xen/xen.h"
+
//#define DEBUG_PLATFORM
#ifdef DEBUG_PLATFORM
diff --git a/hw/input/ads7846.c b/hw/input/ads7846.c
index 1d4e04a..dc0998a 100644
--- a/hw/input/ads7846.c
+++ b/hw/input/ads7846.c
@@ -34,28 +34,28 @@
OBJECT_DECLARE_SIMPLE_TYPE(ADS7846State, ADS7846)
/* Control-byte bitfields */
-#define CB_PD0 (1 << 0)
-#define CB_PD1 (1 << 1)
-#define CB_SER (1 << 2)
-#define CB_MODE (1 << 3)
-#define CB_A0 (1 << 4)
-#define CB_A1 (1 << 5)
-#define CB_A2 (1 << 6)
-#define CB_START (1 << 7)
+#define CB_PD0 (1 << 0)
+#define CB_PD1 (1 << 1)
+#define CB_SER (1 << 2)
+#define CB_MODE (1 << 3)
+#define CB_A0 (1 << 4)
+#define CB_A1 (1 << 5)
+#define CB_A2 (1 << 6)
+#define CB_START (1 << 7)
-#define X_AXIS_DMAX 3470
-#define X_AXIS_MIN 290
-#define Y_AXIS_DMAX 3450
-#define Y_AXIS_MIN 200
+#define X_AXIS_DMAX 3470
+#define X_AXIS_MIN 290
+#define Y_AXIS_DMAX 3450
+#define Y_AXIS_MIN 200
-#define ADS_VBAT 2000
-#define ADS_VAUX 2000
-#define ADS_TEMP0 2000
-#define ADS_TEMP1 3000
-#define ADS_XPOS(x, y) (X_AXIS_MIN + ((X_AXIS_DMAX * (x)) >> 15))
-#define ADS_YPOS(x, y) (Y_AXIS_MIN + ((Y_AXIS_DMAX * (y)) >> 15))
-#define ADS_Z1POS(x, y) 600
-#define ADS_Z2POS(x, y) (600 + 6000 / ADS_XPOS(x, y))
+#define ADS_VBAT 2000
+#define ADS_VAUX 2000
+#define ADS_TEMP0 2000
+#define ADS_TEMP1 3000
+#define ADS_XPOS(x, y) (X_AXIS_MIN + ((X_AXIS_DMAX * (x)) >> 15))
+#define ADS_YPOS(x, y) (Y_AXIS_MIN + ((Y_AXIS_DMAX * (y)) >> 15))
+#define ADS_Z1POS(x, y) 600
+#define ADS_Z2POS(x, y) (600 + 6000 / ADS_XPOS(x, y))
static void ads7846_int_update(ADS7846State *s)
{
@@ -86,7 +86,7 @@
}
if (value & CB_MODE)
- s->output >>= 4; /* 8 bits instead of 12 */
+ s->output >>= 4; /* 8 bits instead of 12 */
break;
case 1:
@@ -147,10 +147,10 @@
qdev_init_gpio_out(dev, &s->interrupt, 1);
- s->input[0] = ADS_TEMP0; /* TEMP0 */
- s->input[2] = ADS_VBAT; /* VBAT */
- s->input[6] = ADS_VAUX; /* VAUX */
- s->input[7] = ADS_TEMP1; /* TEMP1 */
+ s->input[0] = ADS_TEMP0; /* TEMP0 */
+ s->input[2] = ADS_VBAT; /* VBAT */
+ s->input[6] = ADS_VAUX; /* VAUX */
+ s->input[7] = ADS_TEMP1; /* TEMP1 */
/* We want absolute coordinates */
qemu_add_mouse_event_handler(ads7846_ts_event, s, 1,
diff --git a/hw/input/ps2.c b/hw/input/ps2.c
index 3253ab6..45af76a 100644
--- a/hw/input/ps2.c
+++ b/hw/input/ps2.c
@@ -402,6 +402,9 @@
ps2_put_keycode(s, 0xaa);
}
}
+ } else if ((qcode == Q_KEY_CODE_LANG1 || qcode == Q_KEY_CODE_LANG2)
+ && !key->down) {
+ /* Ignore release for these keys */
} else {
if (qcode < qemu_input_map_qcode_to_atset1_len) {
keycode = qemu_input_map_qcode_to_atset1[qcode];
@@ -497,6 +500,9 @@
ps2_put_keycode(s, 0x12);
}
}
+ } else if ((qcode == Q_KEY_CODE_LANG1 || qcode == Q_KEY_CODE_LANG2) &&
+ !key->down) {
+ /* Ignore release for these keys */
} else {
if (qcode < qemu_input_map_qcode_to_atset2_len) {
keycode = qemu_input_map_qcode_to_atset2[qcode];
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 0ff060f..20b5a94 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/thread.h"
+#include "qemu/error-report.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic.h"
#include "hw/intc/ioapic.h"
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
index 17910f3..bbae2d8 100644
--- a/hw/intc/i8259.c
+++ b/hw/intc/i8259.c
@@ -133,7 +133,7 @@
}
#endif
- if (s->elcr & mask) {
+ if (s->ltim || (s->elcr & mask)) {
/* level triggered */
if (level) {
s->irr |= mask;
@@ -167,7 +167,7 @@
s->isr |= (1 << irq);
}
/* We don't clear a level sensitive interrupt here */
- if (!(s->elcr & (1 << irq))) {
+ if (!s->ltim && !(s->elcr & (1 << irq))) {
s->irr &= ~(1 << irq);
}
pic_update_irq(s);
@@ -224,6 +224,7 @@
PICCommonState *s = PIC_COMMON(dev);
s->elcr = 0;
+ s->ltim = 0;
pic_init_reset(s);
}
@@ -243,10 +244,7 @@
s->init_state = 1;
s->init4 = val & 1;
s->single_mode = val & 2;
- if (val & 0x08) {
- qemu_log_mask(LOG_UNIMP,
- "i8259: level sensitive irq not supported\n");
- }
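+        /* ICW1 bit 3 (LTIM): select level-triggered mode for all inputs */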
+ s->ltim = val & 8;
} else if (val & 0x08) {
if (val & 0x04) {
s->poll = 1;
diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c
index af2e4a2..c931dc2 100644
--- a/hw/intc/i8259_common.c
+++ b/hw/intc/i8259_common.c
@@ -51,7 +51,7 @@
s->special_fully_nested_mode = 0;
s->init4 = 0;
s->single_mode = 0;
- /* Note: ELCR is not reset */
+ /* Note: ELCR and LTIM are not reset */
}
static int pic_dispatch_pre_save(void *opaque)
@@ -144,6 +144,24 @@
s->special_fully_nested_mode);
}
+static bool ltim_state_needed(void *opaque)
+{
+ PICCommonState *s = PIC_COMMON(opaque);
+
+ return !!s->ltim;
+}
+
+static const VMStateDescription vmstate_pic_ltim = {
+ .name = "i8259/ltim",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ltim_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(ltim, PICCommonState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_pic_common = {
.name = "i8259",
.version_id = 1,
@@ -168,6 +186,10 @@
VMSTATE_UINT8(single_mode, PICCommonState),
VMSTATE_UINT8(elcr, PICCommonState),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_pic_ltim,
+ NULL
}
};
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
index 6364eca..716ffc8 100644
--- a/hw/intc/ioapic.c
+++ b/hw/intc/ioapic.c
@@ -405,6 +405,7 @@
s->ioredtbl[index] |= ro_bits;
s->irq_eoi[index] = 0;
ioapic_fix_edge_remote_irr(&s->ioredtbl[index]);
+ ioapic_update_kvm_routes(s);
ioapic_service(s);
}
}
@@ -417,8 +418,6 @@
ioapic_eoi_broadcast(val);
break;
}
-
- ioapic_update_kvm_routes(s);
}
static const MemoryRegionOps ioapic_io_ops = {
diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c
index bda4549..4bdc3b1 100644
--- a/hw/intc/mips_gic.c
+++ b/hw/intc/mips_gic.c
@@ -439,8 +439,8 @@
}
static Property mips_gic_properties[] = {
- DEFINE_PROP_INT32("num-vp", MIPSGICState, num_vps, 1),
- DEFINE_PROP_INT32("num-irq", MIPSGICState, num_irq, 256),
+ DEFINE_PROP_UINT32("num-vp", MIPSGICState, num_vps, 1),
+ DEFINE_PROP_UINT32("num-irq", MIPSGICState, num_irq, 256),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c
index 233059c..5432ab5 100644
--- a/hw/isa/i82378.c
+++ b/hw/isa/i82378.c
@@ -47,6 +47,12 @@
},
};
+static void i82378_request_out0_irq(void *opaque, int irq, int level)
+{
+ I82378State *s = opaque;
+ qemu_set_irq(s->cpu_intr, level);
+}
+
static void i82378_request_pic_irq(void *opaque, int irq, int level)
{
DeviceState *dev = opaque;
@@ -88,7 +94,9 @@
*/
/* 2 82C59 (irq) */
- s->isa_irqs_in = i8259_init(isabus, s->cpu_intr);
+ s->isa_irqs_in = i8259_init(isabus,
+ qemu_allocate_irq(i82378_request_out0_irq,
+ s, 0));
isa_bus_register_input_irqs(isabus, s->isa_irqs_in);
/* 1 82C54 (pit) */
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index d8303d0..9714b00 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -865,6 +865,7 @@
hc->plug = ich9_pm_device_plug_cb;
hc->unplug_request = ich9_pm_device_unplug_request_cb;
hc->unplug = ich9_pm_device_unplug_cb;
+ hc->is_hotpluggable_bus = ich9_pm_is_hotpluggable_bus;
adevc->ospm_status = ich9_pm_ospm_status;
adevc->send_event = ich9_send_gpe;
adevc->madt_cpu = pc_madt_cpu_entry;
diff --git a/hw/isa/trace-events b/hw/isa/trace-events
index c4567a9..1816e83 100644
--- a/hw/isa/trace-events
+++ b/hw/isa/trace-events
@@ -16,6 +16,7 @@
# vt82c686.c
via_isa_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x"
+via_pm_read(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x"
via_pm_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x"
via_pm_io_read(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x"
via_pm_io_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x"
diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c
index f4c4096..ca89119 100644
--- a/hw/isa/vt82c686.c
+++ b/hw/isa/vt82c686.c
@@ -554,7 +554,7 @@
PCIIDEState ide;
UHCIState uhci[2];
ViaPMState pm;
- PCIDevice ac97;
+ ViaAC97State ac97;
PCIDevice mc97;
};
@@ -598,15 +598,63 @@
qemu_set_irq(s->isa_irqs_in[n], level);
}
+static void via_isa_request_i8259_irq(void *opaque, int irq, int level)
+{
+ ViaISAState *s = opaque;
+ qemu_set_irq(s->cpu_intr, level);
+}
+
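+/*
+ * Return the ISA IRQ that PCI interrupt pin irq_num is routed to, as
+ * programmed in the PIRQ routing nibbles of config registers 0x55-0x57.
+ */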
+static int via_isa_get_pci_irq(const ViaISAState *s, int irq_num)
+{
+ switch (irq_num) {
+ case 0:
+ return s->dev.config[0x55] >> 4;
+ case 1:
+ return s->dev.config[0x56] & 0xf;
+ case 2:
+ return s->dev.config[0x56] >> 4;
+ case 3:
+ return s->dev.config[0x57] >> 4;
+ }
+ return 0;
+}
+
+static void via_isa_set_pci_irq(void *opaque, int irq_num, int level)
+{
+ ViaISAState *s = opaque;
+ PCIBus *bus = pci_get_bus(&s->dev);
+ int i, pic_level, pic_irq = via_isa_get_pci_irq(s, irq_num);
+
+ /* IRQ 0: disabled, IRQ 2,8,13: reserved */
+ if (!pic_irq) {
+ return;
+ }
+ if (unlikely(pic_irq == 2 || pic_irq == 8 || pic_irq == 13)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid ISA IRQ routing");
+ }
+
+ /* The pic level is the logical OR of all the PCI irqs mapped to it. */
+ pic_level = 0;
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ if (pic_irq == via_isa_get_pci_irq(s, i)) {
+ pic_level |= pci_bus_get_irq_level(bus, i);
+ }
+ }
+ /* Now we change the pic irq level according to the via irq mappings. */
+ qemu_set_irq(s->isa_irqs_in[pic_irq], pic_level);
+}
+
static void via_isa_realize(PCIDevice *d, Error **errp)
{
ViaISAState *s = VIA_ISA(d);
DeviceState *dev = DEVICE(d);
PCIBus *pci_bus = pci_get_bus(d);
+ qemu_irq *isa_irq;
ISABus *isa_bus;
int i;
qdev_init_gpio_out(dev, &s->cpu_intr, 1);
+ isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1);
isa_bus = isa_bus_new(dev, pci_address_space(d), pci_address_space_io(d),
errp);
@@ -614,11 +662,13 @@
return;
}
- s->isa_irqs_in = i8259_init(isa_bus, s->cpu_intr);
+ s->isa_irqs_in = i8259_init(isa_bus, *isa_irq);
isa_bus_register_input_irqs(isa_bus, s->isa_irqs_in);
i8254_pit_init(isa_bus, 0x40, 0, NULL);
i8257_dma_init(isa_bus, 0);
+ qdev_init_gpio_in_named(dev, via_isa_set_pci_irq, "pirq", PCI_NUM_PINS);
+
/* RTC */
qdev_prop_set_int32(DEVICE(&s->rtc), "base_year", 2000);
if (!qdev_realize(DEVICE(&s->rtc), BUS(isa_bus), errp)) {
diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c
index 6cb2472..8e3ce07 100644
--- a/hw/loongarch/acpi-build.c
+++ b/hw/loongarch/acpi-build.c
@@ -7,6 +7,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qemu/bitmap.h"
#include "hw/pci/pci.h"
#include "hw/core/cpu.h"
diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c
index 38ef7cc..b702c3f 100644
--- a/hw/loongarch/virt.c
+++ b/hw/loongarch/virt.c
@@ -44,6 +44,8 @@
#include "sysemu/tpm.h"
#include "sysemu/block-backend.h"
#include "hw/block/flash.h"
+#include "qemu/error-report.h"
+
static void virt_flash_create(LoongArchMachineState *lams)
{
diff --git a/hw/m68k/next-cube.c b/hw/m68k/next-cube.c
index e0d4a94..ce8ee50 100644
--- a/hw/m68k/next-cube.c
+++ b/hw/m68k/next-cube.c
@@ -24,6 +24,7 @@
#include "hw/block/fdc.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "ui/console.h"
#include "target/m68k/cpu.h"
#include "migration/vmstate.h"
diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c
index 9d52ca6..b35ecaf 100644
--- a/hw/m68k/q800.c
+++ b/hw/m68k/q800.c
@@ -45,6 +45,7 @@
#include "hw/block/swim.h"
#include "net/net.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "sysemu/qtest.h"
#include "sysemu/runstate.h"
#include "sysemu/reset.h"
diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c
index 4cb5bee..754b9bd 100644
--- a/hw/m68k/virt.c
+++ b/hw/m68k/virt.c
@@ -23,6 +23,7 @@
#include "bootinfo.h"
#include "net/net.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "sysemu/qtest.h"
#include "sysemu/runstate.h"
#include "sysemu/reset.h"
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index 217a5e6..abe60b3 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -1,6 +1,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
+#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
@@ -250,6 +251,7 @@
pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
pci_default_write_config(pci_dev, addr, val, size);
+ pcie_aer_write_config(pci_dev, addr, val, size);
}
/*
@@ -322,6 +324,66 @@
ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
}
+static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
+{
+ switch (qmp_err) {
+ case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
+ return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
+ case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
+ return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
+ case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
+ return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
+ case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
+ return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
+ case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
+ return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
+ case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
+ return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
+ case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
+ return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
+ case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
+ return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
+ case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
+ return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
+ case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
+ return CXL_RAS_UNC_ERR_RSVD_ENCODING;
+ case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
+ return CXL_RAS_UNC_ERR_POISON_RECEIVED;
+ case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
+ return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
+ case CXL_UNCOR_ERROR_TYPE_INTERNAL:
+ return CXL_RAS_UNC_ERR_INTERNAL;
+ case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
+ return CXL_RAS_UNC_ERR_CXL_IDE_TX;
+ case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
+ return CXL_RAS_UNC_ERR_CXL_IDE_RX;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
+{
+ switch (qmp_err) {
+ case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
+ return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
+ case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
+ return CXL_RAS_COR_ERR_MEM_DATA_ECC;
+ case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
+ return CXL_RAS_COR_ERR_CRC_THRESHOLD;
+ case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
+ return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
+ case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
+ return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
+ case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
+ return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
+ case CXL_COR_ERROR_TYPE_PHYSICAL:
+ return CXL_RAS_COR_ERR_PHYSICAL;
+ default:
+ return -EINVAL;
+ }
+}
+
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
unsigned size)
{
@@ -340,6 +402,83 @@
should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
which_hdm = 0;
break;
+ case A_CXL_RAS_UNC_ERR_STATUS:
+ {
+ uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
+ uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER);
+ CXLError *cxl_err;
+ uint32_t unc_err;
+
+ /*
+         * If a single bit is written that corresponds to the first error
+         * pointer being cleared, update the status and header log.
+ */
+ if (!QTAILQ_EMPTY(&ct3d->error_list)) {
+ if ((1 << fe) ^ value) {
+ CXLError *cxl_next;
+ /*
+                 * Software is using the wrong flow for multiple header
+                 * recording. Follow the behavior in PCIe r6.0, assuming
+                 * multiple header support. Clearing all matching records
+                 * when more than one bit is set is an implementation
+                 * defined choice which corresponds most closely to the
+                 * behavior of hardware not capable of multiple header
+                 * recording.
+ */
+ QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node, cxl_next) {
+ if ((1 << cxl_err->type) & value) {
+ QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
+ g_free(cxl_err);
+ }
+ }
+ } else {
+ /* Done with previous FE, so drop from list */
+ cxl_err = QTAILQ_FIRST(&ct3d->error_list);
+ QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
+ g_free(cxl_err);
+ }
+
+ /*
+ * If there is another FE, then put that in place and update
+ * the header log
+ */
+ if (!QTAILQ_EMPTY(&ct3d->error_list)) {
+ uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
+ int i;
+
+ cxl_err = QTAILQ_FIRST(&ct3d->error_list);
+ for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
+ stl_le_p(header_log + i, cxl_err->header[i]);
+ }
+ capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
+ FIRST_ERROR_POINTER, cxl_err->type);
+ } else {
+ /*
+                 * If there are no more errors, follow the recommendation
+                 * of PCI spec r6.0 6.2.4.2 and set the first error pointer
+                 * to a status bit that will never be used.
+ */
+ capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
+ FIRST_ERROR_POINTER,
+ CXL_RAS_UNC_ERR_CXL_UNUSED);
+ }
+ stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
+ }
+ unc_err = 0;
+ QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
+ unc_err |= 1 << cxl_err->type;
+ }
+ stl_le_p((uint8_t *)cache_mem + offset, unc_err);
+
+ return;
+ }
+ case A_CXL_RAS_COR_ERR_STATUS:
+ {
+ uint32_t rw1c = value;
+ uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
+ temp &= ~rw1c;
+ stl_le_p((uint8_t *)cache_mem + offset, temp);
+ return;
+ }
default:
break;
}
@@ -403,6 +542,8 @@
unsigned short msix_num = 1;
int i, rc;
+ QTAILQ_INIT(&ct3d->error_list);
+
if (!cxl_setup_memory(ct3d, errp)) {
return;
}
@@ -452,8 +593,19 @@
cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
cxl_cstate->cdat.private = ct3d;
cxl_doe_cdat_init(cxl_cstate, errp);
+
+ pcie_cap_deverr_init(pci_dev);
+ /* Leave a bit of room for expansion */
+ rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
+ if (rc) {
+ goto err_release_cdat;
+ }
+
return;
+err_release_cdat:
+ cxl_doe_cdat_release(cxl_cstate);
+ g_free(regs->special_ops);
err_address_space_free:
address_space_destroy(&ct3d->hostmem_as);
return;
@@ -465,6 +617,7 @@
CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
ComponentRegisters *regs = &cxl_cstate->crb;
+ pcie_aer_exit(pci_dev);
cxl_doe_cdat_release(cxl_cstate);
g_free(regs->special_ops);
address_space_destroy(&ct3d->hostmem_as);
@@ -618,6 +771,147 @@
*/
}
+/* For uncorrectable errors include support for multiple header recording */
+void qmp_cxl_inject_uncorrectable_errors(const char *path,
+ CXLUncorErrorRecordList *errors,
+ Error **errp)
+{
+ Object *obj = object_resolve_path(path, NULL);
+ static PCIEAERErr err = {};
+ CXLType3Dev *ct3d;
+ CXLError *cxl_err;
+ uint32_t *reg_state;
+ uint32_t unc_err;
+ bool first;
+
+ if (!obj) {
+ error_setg(errp, "Unable to resolve path");
+ return;
+ }
+
+ if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
+ error_setg(errp, "Path does not point to a CXL type 3 device");
+ return;
+ }
+
+ err.status = PCI_ERR_UNC_INTN;
+ err.source_id = pci_requester_id(PCI_DEVICE(obj));
+ err.flags = 0;
+
+ ct3d = CXL_TYPE3(obj);
+
+ first = QTAILQ_EMPTY(&ct3d->error_list);
+ reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
+ while (errors) {
+ uint32List *header = errors->value->header;
+ uint8_t header_count = 0;
+ int cxl_err_code;
+
+ cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
+ if (cxl_err_code < 0) {
+ error_setg(errp, "Unknown error code");
+ return;
+ }
+
+ /* If the error is masked, nothing to do here */
+ if (!((1 << cxl_err_code) &
+ ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
+ errors = errors->next;
+ continue;
+ }
+
+ cxl_err = g_malloc0(sizeof(*cxl_err));
+ if (!cxl_err) {
+ return;
+ }
+
+ cxl_err->type = cxl_err_code;
+ while (header && header_count < 32) {
+ cxl_err->header[header_count++] = header->value;
+ header = header->next;
+ }
+ if (header_count > 32) {
+ error_setg(errp, "Header must be 32 DWORD or less");
+ return;
+ }
+ QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);
+
+ errors = errors->next;
+ }
+
+ if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
+ uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
+ uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
+ uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
+ int i;
+
+ cxl_err = QTAILQ_FIRST(&ct3d->error_list);
+ for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
+ stl_le_p(header_log + i, cxl_err->header[i]);
+ }
+
+ capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
+ FIRST_ERROR_POINTER, cxl_err->type);
+ stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
+ }
+
+ unc_err = 0;
+ QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
+ unc_err |= (1 << cxl_err->type);
+ }
+ if (!unc_err) {
+ return;
+ }
+
+ stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
+ pcie_aer_inject_error(PCI_DEVICE(obj), &err);
+
+ return;
+}
+
+void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
+ Error **errp)
+{
+ static PCIEAERErr err = {};
+ Object *obj = object_resolve_path(path, NULL);
+ CXLType3Dev *ct3d;
+ uint32_t *reg_state;
+ uint32_t cor_err;
+ int cxl_err_type;
+
+ if (!obj) {
+ error_setg(errp, "Unable to resolve path");
+ return;
+ }
+ if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
+ error_setg(errp, "Path does not point to a CXL type 3 device");
+ return;
+ }
+
+ err.status = PCI_ERR_COR_INTERNAL;
+ err.source_id = pci_requester_id(PCI_DEVICE(obj));
+ err.flags = PCIE_AER_ERR_IS_CORRECTABLE;
+
+ ct3d = CXL_TYPE3(obj);
+ reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
+ cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);
+
+ cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
+ if (cxl_err_type < 0) {
+ error_setg(errp, "Invalid COR error");
+ return;
+ }
+    /* If the error is masked, nothing to do here */
+ if (!((1 << cxl_err_type) & ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
+ return;
+ }
+
+ cor_err |= (1 << cxl_err_type);
+ stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);
+
+ pcie_aer_inject_error(PCI_DEVICE(obj), &err);
+}
+
static void ct3_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
diff --git a/hw/mem/cxl_type3_stubs.c b/hw/mem/cxl_type3_stubs.c
new file mode 100644
index 0000000..d574c58
--- /dev/null
+++ b/hw/mem/cxl_type3_stubs.c
@@ -0,0 +1,17 @@
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-cxl.h"
+
+void qmp_cxl_inject_uncorrectable_errors(const char *path,
+ CXLUncorErrorRecordList *errors,
+ Error **errp)
+{
+ error_setg(errp, "CXL Type 3 support is not compiled in");
+}
+
+void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
+ Error **errp)
+{
+ error_setg(errp, "CXL Type 3 support is not compiled in");
+}
diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
index d9f8301..1636db9 100644
--- a/hw/mem/memory-device.c
+++ b/hw/mem/memory-device.c
@@ -10,6 +10,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "hw/mem/memory-device.h"
#include "qapi/error.h"
#include "hw/boards.h"
diff --git a/hw/mem/meson.build b/hw/mem/meson.build
index 609b2b3..56c2618 100644
--- a/hw/mem/meson.build
+++ b/hw/mem/meson.build
@@ -4,6 +4,8 @@
mem_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_mc.c'))
mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c'))
mem_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_true: files('cxl_type3.c'))
+softmmu_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_false: files('cxl_type3_stubs.c'))
+softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('cxl_type3_stubs.c'))
softmmu_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss)
diff --git a/hw/mem/sparse-mem.c b/hw/mem/sparse-mem.c
index 72f038d..6e8f4f8 100644
--- a/hw/mem/sparse-mem.c
+++ b/hw/mem/sparse-mem.c
@@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
diff --git a/hw/mips/boston.c b/hw/mips/boston.c
index a9d87f3..21ad844 100644
--- a/hw/mips/boston.c
+++ b/hw/mips/boston.c
@@ -702,7 +702,7 @@
object_initialize_child(OBJECT(machine), "cps", &s->cps, TYPE_MIPS_CPS);
object_property_set_str(OBJECT(&s->cps), "cpu-type", machine->cpu_type,
&error_fatal);
- object_property_set_int(OBJECT(&s->cps), "num-vp", machine->smp.cpus,
+ object_property_set_uint(OBJECT(&s->cps), "num-vp", machine->smp.cpus,
&error_fatal);
qdev_connect_clock_in(DEVICE(&s->cps), "clk-in",
qdev_get_clock_out(dev, "cpu-refclk"));
diff --git a/hw/mips/cps.c b/hw/mips/cps.c
index 2b43670..2b5269e 100644
--- a/hw/mips/cps.c
+++ b/hw/mips/cps.c
@@ -66,20 +66,17 @@
static void mips_cps_realize(DeviceState *dev, Error **errp)
{
MIPSCPSState *s = MIPS_CPS(dev);
- CPUMIPSState *env;
- MIPSCPU *cpu;
- int i;
target_ulong gcr_base;
bool itu_present = false;
- bool saar_present = false;
if (!clock_get(s->clock)) {
error_setg(errp, "CPS input clock is not connected to an output clock");
return;
}
- for (i = 0; i < s->num_vp; i++) {
- cpu = MIPS_CPU(object_new(s->cpu_type));
+ for (int i = 0; i < s->num_vp; i++) {
+ MIPSCPU *cpu = MIPS_CPU(object_new(s->cpu_type));
+ CPUMIPSState *env = &cpu->env;
/* All VPs are halted on reset. Leave powering up to CPC. */
if (!object_property_set_bool(OBJECT(cpu), "start-powered-off", true,
@@ -97,7 +94,6 @@
cpu_mips_irq_init_cpu(cpu);
cpu_mips_clock_init(cpu);
- env = &cpu->env;
if (cpu_mips_itu_supported(env)) {
itu_present = true;
/* Attach ITC Tag to the VP */
@@ -107,22 +103,15 @@
qemu_register_reset(main_cpu_reset, cpu);
}
- cpu = MIPS_CPU(first_cpu);
- env = &cpu->env;
- saar_present = (bool)env->saarp;
-
/* Inter-Thread Communication Unit */
if (itu_present) {
object_initialize_child(OBJECT(dev), "itu", &s->itu, TYPE_MIPS_ITU);
- object_property_set_int(OBJECT(&s->itu), "num-fifo", 16,
+ object_property_set_link(OBJECT(&s->itu), "cpu[0]",
+ OBJECT(first_cpu), &error_abort);
+ object_property_set_uint(OBJECT(&s->itu), "num-fifo", 16,
&error_abort);
- object_property_set_int(OBJECT(&s->itu), "num-semaphores", 16,
+ object_property_set_uint(OBJECT(&s->itu), "num-semaphores", 16,
&error_abort);
- object_property_set_bool(OBJECT(&s->itu), "saar-present", saar_present,
- &error_abort);
- if (saar_present) {
- s->itu.saar = &env->CP0_SAAR;
- }
if (!sysbus_realize(SYS_BUS_DEVICE(&s->itu), errp)) {
return;
}
@@ -133,7 +122,7 @@
/* Cluster Power Controller */
object_initialize_child(OBJECT(dev), "cpc", &s->cpc, TYPE_MIPS_CPC);
- object_property_set_int(OBJECT(&s->cpc), "num-vp", s->num_vp,
+ object_property_set_uint(OBJECT(&s->cpc), "num-vp", s->num_vp,
&error_abort);
object_property_set_int(OBJECT(&s->cpc), "vp-start-running", 1,
&error_abort);
@@ -146,9 +135,9 @@
/* Global Interrupt Controller */
object_initialize_child(OBJECT(dev), "gic", &s->gic, TYPE_MIPS_GIC);
- object_property_set_int(OBJECT(&s->gic), "num-vp", s->num_vp,
+ object_property_set_uint(OBJECT(&s->gic), "num-vp", s->num_vp,
&error_abort);
- object_property_set_int(OBJECT(&s->gic), "num-irq", 128,
+ object_property_set_uint(OBJECT(&s->gic), "num-irq", 128,
&error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) {
return;
@@ -158,10 +147,10 @@
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gic), 0));
/* Global Configuration Registers */
- gcr_base = env->CP0_CMGCRBase << 4;
+ gcr_base = MIPS_CPU(first_cpu)->env.CP0_CMGCRBase << 4;
object_initialize_child(OBJECT(dev), "gcr", &s->gcr, TYPE_MIPS_GCR);
- object_property_set_int(OBJECT(&s->gcr), "num-vp", s->num_vp,
+ object_property_set_uint(OBJECT(&s->gcr), "num-vp", s->num_vp,
&error_abort);
object_property_set_int(OBJECT(&s->gcr), "gcr-rev", 0x800,
&error_abort);
diff --git a/hw/mips/malta.c b/hw/mips/malta.c
index ec172b1..af90213 100644
--- a/hw/mips/malta.c
+++ b/hw/mips/malta.c
@@ -1066,7 +1066,7 @@
object_initialize_child(OBJECT(s), "cps", &s->cps, TYPE_MIPS_CPS);
object_property_set_str(OBJECT(&s->cps), "cpu-type", ms->cpu_type,
&error_fatal);
- object_property_set_int(OBJECT(&s->cps), "num-vp", ms->smp.cpus,
+ object_property_set_uint(OBJECT(&s->cps), "num-vp", ms->smp.cpus,
&error_fatal);
qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", s->cpuclk);
sysbus_realize(SYS_BUS_DEVICE(&s->cps), &error_fatal);
diff --git a/hw/misc/edu.c b/hw/misc/edu.c
index e935c41..a1f8bc7 100644
--- a/hw/misc/edu.c
+++ b/hw/misc/edu.c
@@ -267,6 +267,8 @@
case 0x20:
if (val & EDU_STATUS_IRQFACT) {
qatomic_or(&edu->status, EDU_STATUS_IRQFACT);
+ /* Order check of the COMPUTING flag after setting IRQFACT. */
+ smp_mb__after_rmw();
} else {
qatomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
}
@@ -349,6 +351,9 @@
qemu_mutex_unlock(&edu->thr_mutex);
qatomic_and(&edu->status, ~EDU_STATUS_COMPUTING);
+ /* Clear COMPUTING flag before checking IRQFACT. */
+ smp_mb__after_rmw();
+
if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
qemu_mutex_lock_iothread();
edu_raise_irq(edu, FACT_IRQ);
diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c
index 3c8b37f..66eb116 100644
--- a/hw/misc/mips_cmgcr.c
+++ b/hw/misc/mips_cmgcr.c
@@ -212,7 +212,7 @@
};
static Property mips_gcr_properties[] = {
- DEFINE_PROP_INT32("num-vp", MIPSGCRState, num_vps, 1),
+ DEFINE_PROP_UINT32("num-vp", MIPSGCRState, num_vps, 1),
DEFINE_PROP_INT32("gcr-rev", MIPSGCRState, gcr_rev, 0x800),
DEFINE_PROP_UINT64("gcr-base", MIPSGCRState, gcr_base, GCR_BASE_ADDR),
DEFINE_PROP_LINK("gic", MIPSGCRState, gic_mr, TYPE_MEMORY_REGION,
diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c
index badef5c..0eda302 100644
--- a/hw/misc/mips_itu.c
+++ b/hw/misc/mips_itu.c
@@ -93,10 +93,10 @@
uint64_t size = (1 * KiB) + (am[1] & ITC_AM1_ADDR_MASK_MASK);
bool is_enabled = (am[0] & ITC_AM0_EN_MASK) != 0;
- if (tag->saar_present) {
- address = ((*(uint64_t *) tag->saar) & 0xFFFFFFFFE000ULL) << 4;
- size = 1ULL << ((*(uint64_t *) tag->saar >> 1) & 0x1f);
- is_enabled = *(uint64_t *) tag->saar & 1;
+ if (tag->saar) {
+ address = (tag->saar[0] & 0xFFFFFFFFE000ULL) << 4;
+ size = 1ULL << ((tag->saar[0] >> 1) & 0x1f);
+ is_enabled = tag->saar[0] & 1;
}
memory_region_transaction_begin();
@@ -157,7 +157,7 @@
static inline int get_cell_stride_shift(const MIPSITUState *s)
{
/* Minimum interval (for EntryGain = 0) is 128 B */
- if (s->saar_present) {
+ if (s->saar) {
return 7 + ((s->icr0 >> ITC_ICR0_BLK_GRAIN) &
ITC_ICR0_BLK_GRAIN_MASK);
} else {
@@ -515,6 +515,7 @@
static void mips_itu_realize(DeviceState *dev, Error **errp)
{
MIPSITUState *s = MIPS_ITU(dev);
+ CPUMIPSState *env;
if (s->num_fifo > ITC_FIFO_NUM_MAX) {
error_setg(errp, "Exceed maximum number of FIFO cells: %d",
@@ -526,6 +527,15 @@
s->num_semaphores);
return;
}
+ if (!s->cpu0) {
+ error_setg(errp, "Missing 'cpu[0]' property");
+ return;
+ }
+
+ env = &s->cpu0->env;
+ if (env->saarp) {
+ s->saar = env->CP0_SAAR;
+ }
s->cell = g_new(ITCStorageCell, get_num_cells(s));
}
@@ -534,8 +544,8 @@
{
MIPSITUState *s = MIPS_ITU(dev);
- if (s->saar_present) {
- *(uint64_t *) s->saar = 0x11 << 1;
+ if (s->saar) {
+ s->saar[0] = 0x11 << 1;
s->icr0 = get_num_cells(s) << ITC_ICR0_CELL_NUM;
} else {
s->ITCAddressMap[0] = 0;
@@ -549,11 +559,11 @@
}
static Property mips_itu_properties[] = {
- DEFINE_PROP_INT32("num-fifo", MIPSITUState, num_fifo,
+ DEFINE_PROP_UINT32("num-fifo", MIPSITUState, num_fifo,
ITC_FIFO_NUM_MAX),
- DEFINE_PROP_INT32("num-semaphores", MIPSITUState, num_semaphores,
+ DEFINE_PROP_UINT32("num-semaphores", MIPSITUState, num_semaphores,
ITC_SEMAPH_NUM_MAX),
- DEFINE_PROP_BOOL("saar-present", MIPSITUState, saar_present, false),
+ DEFINE_PROP_LINK("cpu[0]", MIPSITUState, cpu0, TYPE_MIPS_CPU, MIPSCPU *),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/net/Kconfig b/hw/net/Kconfig
index 1cc1c57..18c7851 100644
--- a/hw/net/Kconfig
+++ b/hw/net/Kconfig
@@ -44,6 +44,11 @@
default y if PCI_DEVICES
depends on PCI_EXPRESS && MSI_NONBROKEN
+config IGB_PCI_EXPRESS
+ bool
+ default y if PCI_DEVICES
+ depends on PCI_EXPRESS && MSI_NONBROKEN
+
config RTL8139_PCI
bool
default y if PCI_DEVICES
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 7efb8a4..23d6606 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -26,6 +26,7 @@
#include "qemu/osdep.h"
+#include "hw/net/mii.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
@@ -38,12 +39,11 @@
#include "qemu/module.h"
#include "qemu/range.h"
+#include "e1000_common.h"
#include "e1000x_common.h"
#include "trace.h"
#include "qom/object.h"
-static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
/* #define E1000_DEBUG */
#ifdef E1000_DEBUG
@@ -66,9 +66,8 @@
#define IOPORT_SIZE 0x40
#define PNPMMIO_SIZE 0x20000
-#define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */
-#define MAXIMUM_ETHERNET_HDR_LEN (14+4)
+#define MAXIMUM_ETHERNET_HDR_LEN (ETH_HLEN + 4)
/*
* HW models:
@@ -181,67 +180,73 @@
static bool
have_autoneg(E1000State *s)
{
- return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
+ return chkflag(AUTONEG) && (s->phy_reg[MII_BMCR] & MII_BMCR_AUTOEN);
}
static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
- /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
- s->phy_reg[PHY_CTRL] = val & ~(0x3f |
- MII_CR_RESET |
- MII_CR_RESTART_AUTO_NEG);
+ /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
+ s->phy_reg[MII_BMCR] = val & ~(0x3f |
+ MII_BMCR_RESET |
+ MII_BMCR_ANRESTART);
/*
* QEMU 1.3 does not support link auto-negotiation emulation, so if we
* migrate during auto negotiation, after migration the link will be
* down.
*/
- if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
+ if (have_autoneg(s) && (val & MII_BMCR_ANRESTART)) {
e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
}
}
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
- [PHY_CTRL] = set_phy_ctrl,
+ [MII_BMCR] = set_phy_ctrl,
};
enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
- [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
- [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
- [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
- [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
- [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
- [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
- [PHY_AUTONEG_EXP] = PHY_R,
+ [MII_BMSR] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
+ [MII_PHYID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
+ [MII_BMCR] = PHY_RW, [MII_CTRL1000] = PHY_RW,
+ [MII_ANLPAR] = PHY_R, [MII_STAT1000] = PHY_R,
+ [MII_ANAR] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
+ [MII_PHYID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
+ [MII_ANER] = PHY_R,
};
-/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
+/* MII_PHYID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
static const uint16_t phy_reg_init[] = {
- [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB |
- MII_CR_FULL_DUPLEX |
- MII_CR_AUTO_NEG_EN,
+ [MII_BMCR] = MII_BMCR_SPEED1000 |
+ MII_BMCR_FD |
+ MII_BMCR_AUTOEN,
- [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
- MII_SR_LINK_STATUS | /* link initially up */
- MII_SR_AUTONEG_CAPS |
- /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
- MII_SR_PREAMBLE_SUPPRESS |
- MII_SR_EXTENDED_STATUS |
- MII_SR_10T_HD_CAPS |
- MII_SR_10T_FD_CAPS |
- MII_SR_100X_HD_CAPS |
- MII_SR_100X_FD_CAPS,
+ [MII_BMSR] = MII_BMSR_EXTCAP |
+ MII_BMSR_LINK_ST | /* link initially up */
+ MII_BMSR_AUTONEG |
+ /* MII_BMSR_AN_COMP: initially NOT completed */
+ MII_BMSR_MFPS |
+ MII_BMSR_EXTSTAT |
+ MII_BMSR_10T_HD |
+ MII_BMSR_10T_FD |
+ MII_BMSR_100TX_HD |
+ MII_BMSR_100TX_FD,
- [PHY_ID1] = 0x141,
- /* [PHY_ID2] configured per DevId, from e1000_reset() */
- [PHY_AUTONEG_ADV] = 0xde1,
- [PHY_LP_ABILITY] = 0x1e0,
- [PHY_1000T_CTRL] = 0x0e00,
- [PHY_1000T_STATUS] = 0x3c00,
+ [MII_PHYID1] = 0x141,
+ /* [MII_PHYID2] configured per DevId, from e1000_reset() */
+ [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 |
+ MII_ANAR_10FD | MII_ANAR_TX |
+ MII_ANAR_TXFD | MII_ANAR_PAUSE |
+ MII_ANAR_PAUSE_ASYM,
+ [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD |
+ MII_ANLPAR_TX | MII_ANLPAR_TXFD,
+ [MII_CTRL1000] = MII_CTRL1000_FULL | MII_CTRL1000_PORT |
+ MII_CTRL1000_MASTER,
+ [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
+ MII_STAT1000_ROK | MII_STAT1000_LOK,
[M88E1000_PHY_SPEC_CTRL] = 0x360,
[M88E1000_PHY_SPEC_STATUS] = 0xac00,
[M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
@@ -373,9 +378,9 @@
return chkflag(VET);
}
-static void e1000_reset(void *opaque)
+static void e1000_reset_hold(Object *obj)
{
- E1000State *d = opaque;
+ E1000State *d = E1000(obj);
E1000BaseClass *edc = E1000_GET_CLASS(d);
uint8_t *macaddr = d->conf.macaddr.a;
@@ -386,10 +391,10 @@
d->mit_irq_level = 0;
d->mit_ide = 0;
memset(d->phy_reg, 0, sizeof d->phy_reg);
- memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
- d->phy_reg[PHY_ID2] = edc->phy_id2;
+ memcpy(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
+ d->phy_reg[MII_PHYID2] = edc->phy_id2;
memset(d->mac_reg, 0, sizeof d->mac_reg);
- memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
+ memcpy(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
d->rxbuf_min_shift = 1;
memset(&d->tx, 0, sizeof d->tx);
@@ -547,9 +552,9 @@
static inline void
inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr)
{
- if (!memcmp(arr, bcast, sizeof bcast)) {
+ if (is_broadcast_ether_addr(arr)) {
e1000x_inc_reg_if_not_full(s->mac_reg, BPTC);
- } else if (arr[0] & 1) {
+ } else if (is_multicast_ether_addr(arr)) {
e1000x_inc_reg_if_not_full(s->mac_reg, MPTC);
}
}
@@ -561,13 +566,13 @@
PTC1023, PTC1522 };
NetClientState *nc = qemu_get_queue(s->nic);
- if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
+ if (s->phy_reg[MII_BMCR] & MII_BMCR_LOOPBACK) {
qemu_receive_packet(nc, buf, size);
} else {
qemu_send_packet(nc, buf, size);
}
inc_tx_bcast_or_mcast_count(s, buf);
- e1000x_increase_size_stats(s->mac_reg, PTCregs, size);
+ e1000x_increase_size_stats(s->mac_reg, PTCregs, size + 4);
}
static void
@@ -631,7 +636,7 @@
}
e1000x_inc_reg_if_not_full(s->mac_reg, TPT);
- e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size);
+ e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4);
s->mac_reg[GPTC] = s->mac_reg[TPT];
s->mac_reg[GOTCL] = s->mac_reg[TOTL];
s->mac_reg[GOTCH] = s->mac_reg[TOTH];
@@ -803,15 +808,18 @@
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
uint32_t rctl = s->mac_reg[RCTL];
- int isbcast = !memcmp(buf, bcast, sizeof bcast), ismcast = (buf[0] & 1);
+ int isbcast = is_broadcast_ether_addr(buf);
+ int ismcast = is_multicast_ether_addr(buf);
if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) &&
e1000x_vlan_rx_filter_enabled(s->mac_reg)) {
- uint16_t vid = lduw_be_p(buf + 14);
- uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) +
- ((vid >> 5) & 0x7f));
- if ((vfta & (1 << (vid & 0x1f))) == 0)
+ uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci);
+ uint32_t vfta =
+ ldl_le_p((uint32_t *)(s->mac_reg + VFTA) +
+ ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK));
+ if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) {
return 0;
+ }
}
if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */
@@ -841,7 +849,7 @@
e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg);
} else {
if (have_autoneg(s) &&
- !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
+ !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) {
e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
} else {
e1000_link_up(s);
@@ -907,7 +915,7 @@
uint32_t rdh_start;
uint16_t vlan_special = 0;
uint8_t vlan_status = 0;
- uint8_t min_buf[MIN_BUF_SIZE];
+ uint8_t min_buf[ETH_ZLEN];
struct iovec min_iov;
uint8_t *filter_buf = iov->iov_base;
size_t size = iov_size(iov, iovcnt);
@@ -1061,30 +1069,6 @@
}
static uint32_t
-mac_low4_read(E1000State *s, int index)
-{
- return s->mac_reg[index] & 0xf;
-}
-
-static uint32_t
-mac_low11_read(E1000State *s, int index)
-{
- return s->mac_reg[index] & 0x7ff;
-}
-
-static uint32_t
-mac_low13_read(E1000State *s, int index)
-{
- return s->mac_reg[index] & 0x1fff;
-}
-
-static uint32_t
-mac_low16_read(E1000State *s, int index)
-{
- return s->mac_reg[index] & 0xffff;
-}
-
-static uint32_t
mac_icr_read(E1000State *s, int index)
{
uint32_t ret = s->mac_reg[ICR];
@@ -1136,11 +1120,17 @@
}
}
-static void
-set_16bit(E1000State *s, int index, uint32_t val)
-{
- s->mac_reg[index] = val & 0xffff;
-}
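+/* Generate setters which store only the low 'num' bits of a register write */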
+#define LOW_BITS_SET_FUNC(num) \
+ static void \
+ set_##num##bit(E1000State *s, int index, uint32_t val) \
+ { \
+ s->mac_reg[index] = val & (BIT(num) - 1); \
+ }
+
+LOW_BITS_SET_FUNC(4)
+LOW_BITS_SET_FUNC(11)
+LOW_BITS_SET_FUNC(13)
+LOW_BITS_SET_FUNC(16)
static void
set_dlen(E1000State *s, int index, uint32_t val)
@@ -1194,7 +1184,9 @@
getreg(XONRXC), getreg(XONTXC), getreg(XOFFRXC), getreg(XOFFTXC),
getreg(RFC), getreg(RJC), getreg(RNBC), getreg(TSCTFC),
getreg(MGTPRC), getreg(MGTPDC), getreg(MGTPTC), getreg(GORCL),
- getreg(GOTCL),
+ getreg(GOTCL), getreg(RDFH), getreg(RDFT), getreg(RDFHS),
+ getreg(RDFTS), getreg(RDFPC), getreg(TDFH), getreg(TDFT),
+ getreg(TDFHS), getreg(TDFTS), getreg(TDFPC), getreg(AIT),
[TOTH] = mac_read_clr8, [TORH] = mac_read_clr8,
[GOTCH] = mac_read_clr8, [GORCH] = mac_read_clr8,
@@ -1212,24 +1204,17 @@
[MPTC] = mac_read_clr4,
[ICR] = mac_icr_read, [EECD] = get_eecd,
[EERD] = flash_eerd_read,
- [RDFH] = mac_low13_read, [RDFT] = mac_low13_read,
- [RDFHS] = mac_low13_read, [RDFTS] = mac_low13_read,
- [RDFPC] = mac_low13_read,
- [TDFH] = mac_low11_read, [TDFT] = mac_low11_read,
- [TDFHS] = mac_low13_read, [TDFTS] = mac_low13_read,
- [TDFPC] = mac_low13_read,
- [AIT] = mac_low16_read,
- [CRCERRS ... MPC] = &mac_readreg,
- [IP6AT ... IP6AT+3] = &mac_readreg, [IP4AT ... IP4AT+6] = &mac_readreg,
- [FFLT ... FFLT+6] = &mac_low11_read,
- [RA ... RA+31] = &mac_readreg,
- [WUPM ... WUPM+31] = &mac_readreg,
- [MTA ... MTA+127] = &mac_readreg,
- [VFTA ... VFTA+127] = &mac_readreg,
- [FFMT ... FFMT+254] = &mac_low4_read,
- [FFVT ... FFVT+254] = &mac_readreg,
- [PBM ... PBM+16383] = &mac_readreg,
+ [CRCERRS ... MPC] = &mac_readreg,
+ [IP6AT ... IP6AT + 3] = &mac_readreg, [IP4AT ... IP4AT + 6] = &mac_readreg,
+ [FFLT ... FFLT + 6] = &mac_readreg,
+ [RA ... RA + 31] = &mac_readreg,
+ [WUPM ... WUPM + 31] = &mac_readreg,
+ [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = &mac_readreg,
+ [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_readreg,
+ [FFMT ... FFMT + 254] = &mac_readreg,
+ [FFVT ... FFVT + 254] = &mac_readreg,
+ [PBM ... PBM + 16383] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
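The readops table above (and the writeops table below) leans on GNU C range designators: [A ... B] = handler assigns the same handler to every index in the range, which is how whole register blocks such as MTA or VFTA share one accessor. A toy version of the idiom, which builds with gcc or clang:

    #include <stdio.h>

    typedef unsigned (*readop_t)(unsigned idx);

    static unsigned plain(unsigned idx) { return idx; }
    static unsigned low4(unsigned idx)  { return idx & 0xf; }

    /* GNU extension: every entry from 0..15 gets `plain`, 16..31 gets `low4`. */
    static readop_t ops[32] = {
        [0 ... 15]  = plain,
        [16 ... 31] = low4,
    };

    int main(void)
    {
        printf("%u %u\n", ops[3](3), ops[20](20));   /* prints: 3 4 */
        return 0;
    }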
@@ -1239,27 +1224,28 @@
putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
putreg(RDBAL), putreg(LEDCTL), putreg(VET), putreg(FCRUC),
- putreg(TDFH), putreg(TDFT), putreg(TDFHS), putreg(TDFTS),
- putreg(TDFPC), putreg(RDFH), putreg(RDFT), putreg(RDFHS),
- putreg(RDFTS), putreg(RDFPC), putreg(IPAV), putreg(WUC),
- putreg(WUS), putreg(AIT),
+ putreg(IPAV), putreg(WUC),
+ putreg(WUS),
- [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
- [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
- [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
- [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
- [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
- [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit,
- [ITR] = set_16bit,
+ [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
+ [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
+ [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
+ [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
+ [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
+ [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit,
+ [ITR] = set_16bit, [TDFH] = set_11bit, [TDFT] = set_11bit,
+ [TDFHS] = set_13bit, [TDFTS] = set_13bit, [TDFPC] = set_13bit,
+ [RDFH] = set_13bit, [RDFT] = set_13bit, [RDFHS] = set_13bit,
+ [RDFTS] = set_13bit, [RDFPC] = set_13bit, [AIT] = set_16bit,
- [IP6AT ... IP6AT+3] = &mac_writereg, [IP4AT ... IP4AT+6] = &mac_writereg,
- [FFLT ... FFLT+6] = &mac_writereg,
- [RA ... RA+31] = &mac_writereg,
- [WUPM ... WUPM+31] = &mac_writereg,
- [MTA ... MTA+127] = &mac_writereg,
- [VFTA ... VFTA+127] = &mac_writereg,
- [FFMT ... FFMT+254] = &mac_writereg, [FFVT ... FFVT+254] = &mac_writereg,
- [PBM ... PBM+16383] = &mac_writereg,
+ [IP6AT ... IP6AT + 3] = &mac_writereg, [IP4AT ... IP4AT + 6] = &mac_writereg,
+ [FFLT ... FFLT + 6] = &set_11bit,
+ [RA ... RA + 31] = &mac_writereg,
+ [WUPM ... WUPM + 31] = &mac_writereg,
+ [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = &mac_writereg,
+ [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_writereg,
+ [FFMT ... FFMT + 254] = &set_4bit, [FFVT ... FFVT + 254] = &mac_writereg,
+ [PBM ... PBM + 16383] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
@@ -1415,10 +1401,10 @@
/*
* If link is down and auto-negotiation is supported and ongoing,
* complete auto-negotiation immediately. This allows us to look
- * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
+ * at MII_BMSR_AN_COMP to infer link status on load.
*/
if (nc->link_down && have_autoneg(s)) {
- s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
+ s->phy_reg[MII_BMSR] |= MII_BMSR_AN_COMP;
}
/* Decide which set of props to migrate in the main structure */
@@ -1457,8 +1443,7 @@
* Alternatively, restart link negotiation if it was in progress. */
nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
- if (have_autoneg(s) &&
- !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
+ if (have_autoneg(s) && !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) {
nc->link_down = false;
timer_mod(s->autoneg_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
@@ -1624,8 +1609,9 @@
VMSTATE_UINT32(mac_reg[WUFC], E1000State),
VMSTATE_UINT32(mac_reg[VET], E1000State),
VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
- VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
- VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
+ VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, E1000_MC_TBL_SIZE),
+ VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA,
+ E1000_VLAN_FILTER_TBL_SIZE),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription*[]) {
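The named table sizes replace the literal 128s in the same sub-arrays, so the migration stream layout is unchanged as long as the shared e1000x definitions keep both tables at 128 entries. A compile-time restatement of that assumption (constant values assumed to match the shared header, per the replaced literals):

    #define E1000_MC_TBL_SIZE           128   /* assumed */
    #define E1000_VLAN_FILTER_TBL_SIZE  128   /* assumed */

    _Static_assert(E1000_MC_TBL_SIZE == 128,
                   "MTA sub-array size must stay 128 for migration compatibility");
    _Static_assert(E1000_VLAN_FILTER_TBL_SIZE == 128,
                   "VFTA sub-array size must stay 128 for migration compatibility");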
@@ -1746,12 +1732,6 @@
e1000_flush_queue_timer, d);
}
-static void qdev_e1000_reset(DeviceState *dev)
-{
- E1000State *d = E1000(dev);
- e1000_reset(d);
-}
-
static Property e1000_properties[] = {
DEFINE_NIC_PROPERTIES(E1000State, conf),
DEFINE_PROP_BIT("autonegotiation", E1000State,
@@ -1777,6 +1757,7 @@
static void e1000_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
E1000BaseClass *e = E1000_CLASS(klass);
const E1000Info *info = data;
@@ -1789,9 +1770,9 @@
k->revision = info->revision;
e->phy_id2 = info->phy_id2;
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
+ rc->phases.hold = e1000_reset_hold;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
dc->desc = "Intel Gigabit Ethernet";
- dc->reset = qdev_e1000_reset;
dc->vmsd = &vmstate_e1000;
device_class_set_props(dc, e1000_properties);
}
diff --git a/hw/net/e1000_common.h b/hw/net/e1000_common.h
new file mode 100644
index 0000000..48feda7
--- /dev/null
+++ b/hw/net/e1000_common.h
@@ -0,0 +1,102 @@
+/*
+ * QEMU e1000(e) emulation - shared definitions
+ *
+ * Copyright (c) 2008 Qumranet
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_NET_E1000_COMMON_H
+#define HW_NET_E1000_COMMON_H
+
+#include "e1000_regs.h"
+
+#define defreg(x) x = (E1000_##x >> 2)
+enum {
+ defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
+ defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
+ defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
+ defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH0),
+ defreg(RDBAL0), defreg(RDH0), defreg(RDLEN0), defreg(RDT0),
+ defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
+ defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
+ defreg(TDLEN1), defreg(TDBAL1), defreg(TDBAH1), defreg(TDH1),
+ defreg(TDT1), defreg(TORH), defreg(TORL), defreg(TOTH),
+ defreg(TOTL), defreg(TPR), defreg(TPT), defreg(TXDCTL),
+ defreg(WUFC), defreg(RA), defreg(MTA), defreg(CRCERRS),
+ defreg(VFTA), defreg(VET), defreg(RDTR), defreg(RADV),
+ defreg(TADV), defreg(ITR), defreg(SCC), defreg(ECOL),
+ defreg(MCC), defreg(LATECOL), defreg(COLC), defreg(DC),
+ defreg(TNCRS), defreg(SEQEC), defreg(CEXTERR), defreg(RLEC),
+ defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC), defreg(XOFFTXC),
+ defreg(FCRUC), defreg(AIT), defreg(TDFH), defreg(TDFT),
+ defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(WUC),
+ defreg(WUS), defreg(POEMB), defreg(PBS), defreg(RDFH),
+ defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC),
+ defreg(PBM), defreg(IPAV), defreg(IP4AT), defreg(IP6AT),
+ defreg(WUPM), defreg(FFLT), defreg(FFMT), defreg(FFVT),
+ defreg(TARC0), defreg(TARC1), defreg(IAM), defreg(EXTCNF_CTRL),
+ defreg(GCR), defreg(TIMINCA), defreg(EIAC), defreg(CTRL_EXT),
+ defreg(IVAR), defreg(MFUTP01), defreg(MFUTP23), defreg(MANC2H),
+ defreg(MFVAL), defreg(MDEF), defreg(FACTPS), defreg(FTFT),
+ defreg(RUC), defreg(ROC), defreg(RFC), defreg(RJC),
+ defreg(PRC64), defreg(PRC127), defreg(PRC255), defreg(PRC511),
+ defreg(PRC1023), defreg(PRC1522), defreg(PTC64), defreg(PTC127),
+ defreg(PTC255), defreg(PTC511), defreg(PTC1023), defreg(PTC1522),
+ defreg(GORCL), defreg(GORCH), defreg(GOTCL), defreg(GOTCH),
+ defreg(RNBC), defreg(BPRC), defreg(MPRC), defreg(RFCTL),
+ defreg(PSRCTL), defreg(MPTC), defreg(BPTC), defreg(TSCTFC),
+ defreg(IAC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC),
+ defreg(TSCTC), defreg(RXCSUM), defreg(FUNCTAG), defreg(GSCL_1),
+ defreg(GSCL_2), defreg(GSCL_3), defreg(GSCL_4), defreg(GSCN_0),
+ defreg(GSCN_1), defreg(GSCN_2), defreg(GSCN_3), defreg(GCR2),
+ defreg(RAID), defreg(RSRPD), defreg(TIDV), defreg(EITR),
+ defreg(MRQC), defreg(RETA), defreg(RSSRK), defreg(RDBAH1),
+ defreg(RDBAL1), defreg(RDLEN1), defreg(RDH1), defreg(RDT1),
+ defreg(PBACLR), defreg(FCAL), defreg(FCAH), defreg(FCT),
+ defreg(FCRTH), defreg(FCRTL), defreg(FCTTV), defreg(FCRTV),
+ defreg(FLA), defreg(EEWR), defreg(FLOP), defreg(FLOL),
+ defreg(FLSWCTL), defreg(FLSWCNT), defreg(RXDCTL), defreg(RXDCTL1),
+ defreg(MAVTV0), defreg(MAVTV1), defreg(MAVTV2), defreg(MAVTV3),
+ defreg(TXSTMPL), defreg(TXSTMPH), defreg(SYSTIML), defreg(SYSTIMH),
+ defreg(RXCFGL), defreg(RXUDP), defreg(TIMADJL), defreg(TIMADJH),
+ defreg(RXSTMPH), defreg(RXSTMPL), defreg(RXSATRL), defreg(RXSATRH),
+ defreg(FLASHT), defreg(TIPG), defreg(RDH), defreg(RDT),
+ defreg(RDLEN), defreg(RDBAH), defreg(RDBAL),
+ defreg(TXDCTL1),
+ defreg(FLSWDATA),
+ defreg(CTRL_DUP),
+ defreg(EXTCNF_SIZE),
+ defreg(EEMNGCTL),
+ defreg(EEMNGDATA),
+ defreg(FLMNGCTL),
+ defreg(FLMNGDATA),
+ defreg(FLMNGCNT),
+ defreg(TSYNCRXCTL),
+ defreg(TSYNCTXCTL),
+
+ /* Aliases */
+ defreg(RDH0_A), defreg(RDT0_A), defreg(RDTR_A), defreg(RDFH_A),
+ defreg(RDFT_A), defreg(TDH_A), defreg(TDT_A), defreg(TIDV_A),
+ defreg(TDFH_A), defreg(TDFT_A), defreg(RA_A), defreg(RDBAL0_A),
+ defreg(TDBAL_A), defreg(TDLEN_A), defreg(VFTA_A), defreg(RDLEN0_A),
+ defreg(FCRTL_A), defreg(FCRTH_A)
+};
+
+#endif
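defreg() turns each register's byte offset into an index into the 32-bit mac_reg[] array: the MMIO handlers address registers as words, so the offset is divided by four. A tiny standalone check of the idiom, using the E1000_CTRL/E1000_STATUS offsets visible in the e1000_regs.h hunks below (which this series presumably relocates to the shared e1000x_regs.h):

    #include <assert.h>

    #define E1000_CTRL   0x00000   /* Device Control - byte offset */
    #define E1000_STATUS 0x00008   /* Device Status  - byte offset */

    #define defreg(x) x = (E1000_##x >> 2)
    enum { defreg(CTRL), defreg(STATUS) };

    int main(void)
    {
        assert(CTRL == 0);
        assert(STATUS == 2);   /* word index = byte offset / 4 */
        return 0;
    }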
diff --git a/hw/net/e1000_regs.h b/hw/net/e1000_regs.h
index 59e0507..8a4ce82 100644
--- a/hw/net/e1000_regs.h
+++ b/hw/net/e1000_regs.h
@@ -32,157 +32,35 @@
#ifndef HW_E1000_REGS_H
#define HW_E1000_REGS_H
-/* PCI Device IDs */
-#define E1000_DEV_ID_82542 0x1000
-#define E1000_DEV_ID_82543GC_FIBER 0x1001
-#define E1000_DEV_ID_82543GC_COPPER 0x1004
-#define E1000_DEV_ID_82544EI_COPPER 0x1008
-#define E1000_DEV_ID_82544EI_FIBER 0x1009
-#define E1000_DEV_ID_82544GC_COPPER 0x100C
-#define E1000_DEV_ID_82544GC_LOM 0x100D
-#define E1000_DEV_ID_82540EM 0x100E
-#define E1000_DEV_ID_82540EM_LOM 0x1015
-#define E1000_DEV_ID_82540EP_LOM 0x1016
-#define E1000_DEV_ID_82540EP 0x1017
-#define E1000_DEV_ID_82540EP_LP 0x101E
-#define E1000_DEV_ID_82545EM_COPPER 0x100F
-#define E1000_DEV_ID_82545EM_FIBER 0x1011
-#define E1000_DEV_ID_82545GM_COPPER 0x1026
-#define E1000_DEV_ID_82545GM_FIBER 0x1027
-#define E1000_DEV_ID_82545GM_SERDES 0x1028
-#define E1000_DEV_ID_82546EB_COPPER 0x1010
-#define E1000_DEV_ID_82546EB_FIBER 0x1012
-#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
-#define E1000_DEV_ID_82541EI 0x1013
-#define E1000_DEV_ID_82541EI_MOBILE 0x1018
-#define E1000_DEV_ID_82541ER_LOM 0x1014
-#define E1000_DEV_ID_82541ER 0x1078
-#define E1000_DEV_ID_82547GI 0x1075
-#define E1000_DEV_ID_82541GI 0x1076
-#define E1000_DEV_ID_82541GI_MOBILE 0x1077
-#define E1000_DEV_ID_82541GI_LF 0x107C
-#define E1000_DEV_ID_82546GB_COPPER 0x1079
-#define E1000_DEV_ID_82546GB_FIBER 0x107A
-#define E1000_DEV_ID_82546GB_SERDES 0x107B
-#define E1000_DEV_ID_82546GB_PCIE 0x108A
-#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
-#define E1000_DEV_ID_82547EI 0x1019
-#define E1000_DEV_ID_82547EI_MOBILE 0x101A
-#define E1000_DEV_ID_82571EB_COPPER 0x105E
-#define E1000_DEV_ID_82571EB_FIBER 0x105F
-#define E1000_DEV_ID_82571EB_SERDES 0x1060
-#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
-#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
-#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
-#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
-#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
-#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
-#define E1000_DEV_ID_82572EI_COPPER 0x107D
-#define E1000_DEV_ID_82572EI_FIBER 0x107E
-#define E1000_DEV_ID_82572EI_SERDES 0x107F
-#define E1000_DEV_ID_82572EI 0x10B9
-#define E1000_DEV_ID_82573E 0x108B
-#define E1000_DEV_ID_82573E_IAMT 0x108C
-#define E1000_DEV_ID_82573L 0x109A
-#define E1000_DEV_ID_82574L 0x10D3
-#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
-#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
-#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
-#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
-#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+#include "e1000x_regs.h"
-#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
-#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
-#define E1000_DEV_ID_ICH8_IGP_C 0x104B
-#define E1000_DEV_ID_ICH8_IFE 0x104C
-#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
-#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
-#define E1000_DEV_ID_ICH8_IGP_M 0x104D
-
-/* Device Specific Register Defaults */
-#define E1000_PHY_ID2_82541x 0x380
-#define E1000_PHY_ID2_82544x 0xC30
-#define E1000_PHY_ID2_8254xx_DEFAULT 0xC20 /* 82540x, 82545x, and 82546x */
-#define E1000_PHY_ID2_82573x 0xCC0
-#define E1000_PHY_ID2_82574x 0xCB1
-
-/* Register Set. (82543, 82544)
- *
- * Registers are defined to be 32 bits and should be accessed as 32 bit values.
- * These registers are physically located on the NIC, but are mapped into the
- * host memory address space.
- *
- * RW - register is both readable and writable
- * RO - register is read only
- * WO - register is write only
- * R/clr - register is read only and is cleared when read
- * A - register array
- */
-#define E1000_CTRL 0x00000 /* Device Control - RW */
-#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
-#define E1000_STATUS 0x00008 /* Device Status - RO */
-#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
-#define E1000_EERD 0x00014 /* EEPROM Read - RW */
-#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
-#define E1000_FLA 0x0001C /* Flash Access - RW */
-#define E1000_MDIC 0x00020 /* MDI Control - RW */
-#define E1000_SCTL 0x00024 /* SerDes Control - RW */
-#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
-#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
-#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
-#define E1000_FCT 0x00030 /* Flow Control Type - RW */
-#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
-#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
-#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
-#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
#define E1000_EIAC 0x000DC /* Ext. Interrupt Auto Clear - RW */
-#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
-#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
#define E1000_EITR 0x000E8 /* Extended Interrupt Throttling Rate - RW */
-#define E1000_RCTL 0x00100 /* RX Control - RW */
-#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
-#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_FCRTV 0x05F40 /* Flow Control Refresh Timer Value - RW */
#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
-#define E1000_TCTL 0x00400 /* TX Control - RW */
-#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
-#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
-#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
-#define FEXTNVM_SW_CONFIG 0x0001
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBM 0x10000 /* Packet Buffer Memory - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size - RW */
-#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
-#define E1000_EEMNGDATA 0x01014 /* MNG EEPROM Read/Write data */
-#define E1000_FLMNGCTL 0x01018 /* MNG Flash Control */
-#define E1000_FLMNGDATA 0x0101C /* MNG FLASH Read data */
-#define E1000_FLMNGCNT 0x01020 /* MNG FLASH Read Counter */
-#define E1000_FLASH_UPDATES 1000
-#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_FLSWCTL 0x01030 /* FLASH control register */
#define E1000_FLSWDATA 0x01034 /* FLASH data register */
#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
-#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
#define E1000_FLOL 0x01050 /* FEEP Auto Load */
#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
-#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
-#define E1000_FCRTL_A 0x00168 /* Alias to FCRTL */
-#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_FCRTH_A 0x00160 /* Alias to FCRTH */
#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
@@ -208,23 +86,7 @@
#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
-#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
-#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
#define E1000_POEMB 0x00F10 /* PHY OEM Bits Register - RW */
-#define E1000_RDFH 0x02410 /* Receive Data FIFO Head Register - RW */
-#define E1000_RDFH_A 0x08000 /* Alias to RDFH */
-#define E1000_RDFT 0x02418 /* Receive Data FIFO Tail Register - RW */
-#define E1000_RDFT_A 0x08008 /* Alias to RDFT */
-#define E1000_RDFHS 0x02420 /* Receive Data FIFO Head Saved Register - RW */
-#define E1000_RDFTS 0x02428 /* Receive Data FIFO Tail Saved Register - RW */
-#define E1000_RDFPC 0x02430 /* Receive Data FIFO Packet Count - RW */
-#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
-#define E1000_TDFH_A 0x08010 /* Alias to TDFH */
-#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
-#define E1000_TDFT_A 0x08018 /* Alias to TDFT */
-#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
#define E1000_TDBAL_A 0x00420 /* Alias to TDBAL */
#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
@@ -248,174 +110,40 @@
#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
-#define E1000_COLC 0x04028 /* Collision Count - R/clr */
-#define E1000_DC 0x04030 /* Defer Count - R/clr */
-#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
#define E1000_SEQEC 0x04038 /* Sequence Error Count - R/clr */
#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
-#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
-#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
-#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
-#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
-#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
-#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
-#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
-#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
-#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
-#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
-#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
-#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
-#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
-#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
-#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
-#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
-#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
-#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
-#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
-#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
-#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
-#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
-#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
-#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
-#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
-#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
-#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
-#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
-#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
-#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
-#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
-#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
-#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
-#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
-#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
-#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
-#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
-#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
-#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
-#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
-#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
-#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
-#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
-#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
-#define E1000_MAVTV0 0x05010 /* Management VLAN TAG Value 0 */
-#define E1000_MAVTV1 0x05014 /* Management VLAN TAG Value 1 */
-#define E1000_MAVTV2 0x05018 /* Management VLAN TAG Value 2 */
-#define E1000_MAVTV3 0x0501c /* Management VLAN TAG Value 3 */
-#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
-#define E1000_RA 0x05400 /* Receive Address - RW Array */
-#define E1000_RA_A 0x00040 /* Alias to RA */
-#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
-#define E1000_VFTA_A 0x00600 /* Alias to VFTA */
-#define E1000_WUC 0x05800 /* Wakeup Control - RW */
-#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
-#define E1000_WUS 0x05810 /* Wakeup Status - RO */
-#define E1000_MANC 0x05820 /* Management Control - RW */
-#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
-#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
-#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
-#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
-#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
#define E1000_MFUTP01 0x05828 /* Management Flex UDP/TCP Ports 0/1 - RW */
#define E1000_MFUTP23 0x05830 /* Management Flex UDP/TCP Ports 2/3 - RW */
-#define E1000_MFVAL 0x05824 /* Manageability Filters Valid - RW */
-#define E1000_MDEF 0x05890 /* Manageability Decision Filters - RW Array */
#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
#define E1000_HOST_IF 0x08800 /* Host Interface */
-#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
-#define E1000_FTFT 0x09400 /* Flexible TCO Filter Table - RW Array */
#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
#define E1000_MDPHYA 0x0003C /* PHY address - RW */
-#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
-#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
-#define E1000_GCR 0x05B00 /* PCI-Ex Control */
-#define E1000_FUNCTAG 0x05B08 /* Function-Tag Register */
-#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
-#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
-#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
-#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
-#define E1000_GSCN_0 0x05B20 /* 3GIO Statistic Counter Register #0 */
-#define E1000_GSCN_1 0x05B24 /* 3GIO Statistic Counter Register #1 */
-#define E1000_GSCN_2 0x05B28 /* 3GIO Statistic Counter Register #2 */
-#define E1000_GSCN_3 0x05B2C /* 3GIO Statistic Counter Register #3 */
-#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
-#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_GCR2 0x05B64 /* 3GIO Control Register 2 */
-#define E1000_FWSM 0x05B54 /* FW Semaphore */
-#define E1000_PBACLR 0x05B68 /* MSI-X PBA Clear */
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
#define E1000_HICR 0x08F00 /* Host Inteface Control */
-#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
-#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
-#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
-#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
-#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
-#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
-#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
-#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
-#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
-#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
-#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
-#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
-#define E1000_TIMADJL 0x0B60C /* Time Adjustment Offset register Low - RW */
-#define E1000_TIMADJH 0x0B610 /* Time Adjustment Offset register High - RW */
#define E1000_RXCFGL 0x0B634 /* RX Ethertype and Message Type - RW*/
-/* RSS registers */
-#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
-#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
-#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
-#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
-#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
-#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
-
#define E1000_MRQC_ENABLED(mrqc) (((mrqc) & (BIT(0) | BIT(1))) == BIT(0))
-#define E1000_RETA_IDX(hash) ((hash) & (BIT(7) - 1))
-#define E1000_RETA_VAL(reta, hash) (((uint8_t *)(reta))[E1000_RETA_IDX(hash)])
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+
#define E1000_RSS_QUEUE(reta, hash) ((E1000_RETA_VAL(reta, hash) & BIT(7)) >> 7)
-#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16))
-#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17))
-#define E1000_MRQC_EN_TCPIPV6(mrqc) ((mrqc) & BIT(18))
-#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19))
-#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20))
-
-#define E1000_MRQ_RSS_TYPE_NONE (0)
-#define E1000_MRQ_RSS_TYPE_IPV4TCP (1)
-#define E1000_MRQ_RSS_TYPE_IPV4 (2)
-#define E1000_MRQ_RSS_TYPE_IPV6TCP (3)
-#define E1000_MRQ_RSS_TYPE_IPV6EX (4)
-#define E1000_MRQ_RSS_TYPE_IPV6 (5)
-
-#define E1000_ICR_ASSERTED BIT(31)
-#define E1000_EIAC_MASK 0x01F00000
-
/* [TR]DBAL and [TR]DLEN masks */
#define E1000_XDBAL_MASK (~(BIT(4) - 1))
#define E1000_XDLEN_MASK ((BIT(20) - 1) & (~(BIT(7) - 1)))
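E1000_RSS_QUEUE, kept a few lines up together with E1000_MRQC_ENABLED, encodes the 82574's two-queue RSS redirection: the low seven bits of the RSS hash index a 128-byte redirection table (RETA) and bit 7 of the selected byte picks RX queue 0 or 1. A standalone sketch using the RETA macros that this hunk removes from the file (they presumably move to the shared e1000x_regs.h):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define E1000_RETA_IDX(hash)        ((hash) & (BIT(7) - 1))
    #define E1000_RETA_VAL(reta, hash)  (((uint8_t *)(reta))[E1000_RETA_IDX(hash)])
    #define E1000_RSS_QUEUE(reta, hash) ((E1000_RETA_VAL(reta, hash) & BIT(7)) >> 7)

    int main(void)
    {
        uint8_t reta[128] = { [5] = 0x80 };   /* redirection entry 5 -> queue 1 */
        uint32_t hash = 0xabcd0085;           /* low 7 bits == 5 */

        printf("queue %u\n", E1000_RSS_QUEUE(reta, hash));   /* prints: queue 1 */
        return 0;
    }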
@@ -444,18 +172,8 @@
#define E1000_IVAR_TX_INT_EVERY_WB BIT(31)
-/* RFCTL register bits */
-#define E1000_RFCTL_ISCSI_DIS 0x00000001
-#define E1000_RFCTL_NFSW_DIS 0x00000040
-#define E1000_RFCTL_NFSR_DIS 0x00000080
-#define E1000_RFCTL_IPV6_DIS 0x00000400
-#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
#define E1000_RFCTL_ACK_DIS 0x00001000
#define E1000_RFCTL_ACK_DATA_DIS 0x00002000
-#define E1000_RFCTL_IPFRSP_DIS 0x00004000
-#define E1000_RFCTL_EXTEN 0x00008000
-#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
-#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
/* PSRCTL parsing */
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
@@ -470,24 +188,7 @@
#define E1000_PSRCTL_BUFFS_PER_DESC 4
-/* TARC* parsing */
-#define E1000_TARC_ENABLE BIT(10)
-
/* PHY 1000 MII Register/Bit Definitions */
-/* PHY Registers defined by IEEE */
-#define PHY_CTRL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Regiser */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
-#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
-#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
-
/* 82574-specific registers */
#define PHY_COPPER_CTRL1 0x10 /* Copper Specific Control Register 1 */
#define PHY_COPPER_STAT1 0x11 /* Copper Specific Status Register 1 */
@@ -539,287 +240,6 @@
#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-/* PHY Link Partner Ability Register */
-#define MII_LPAR_LPACK 0x4000 /* Acked by link partner */
-
-/* Interrupt Cause Read */
-#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
-#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
-#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
-#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
-#define E1000_ICR_RXO 0x00000040 /* rx overrun */
-#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
-#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
-#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
-#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
-#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
-#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
-#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
-#define E1000_ICR_TXD_LOW 0x00008000
-#define E1000_ICR_SRPD 0x00010000
-#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
-#define E1000_ICR_MNG 0x00040000 /* Manageability event */
-#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
-#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
-#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
-#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
-#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
-#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
-#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
-#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
-#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */
-#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
-#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
-#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
-#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
-#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
-
-#define E1000_ICR_OTHER_CAUSES (E1000_ICR_LSC | \
- E1000_ICR_RXO | \
- E1000_ICR_MDAC | \
- E1000_ICR_SRPD | \
- E1000_ICR_ACK | \
- E1000_ICR_MNG)
-
-/* Interrupt Cause Set */
-#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
-#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
-#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
-#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
-#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
-#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
-#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
-#define E1000_ICS_SRPD E1000_ICR_SRPD
-#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
-#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
-#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
-#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
-#define E1000_ICS_DSW E1000_ICR_DSW
-#define E1000_ICS_PHYINT E1000_ICR_PHYINT
-#define E1000_ICS_EPRST E1000_ICR_EPRST
-
-/* Interrupt Mask Set */
-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
-#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
-#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
-#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
-#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
-#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
-#define E1000_IMS_SRPD E1000_ICR_SRPD
-#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_IMS_RXQ0 E1000_ICR_RXQ0
-#define E1000_IMS_RXQ1 E1000_ICR_RXQ1
-#define E1000_IMS_TXQ0 E1000_ICR_TXQ0
-#define E1000_IMS_TXQ1 E1000_ICR_TXQ1
-#define E1000_IMS_OTHER E1000_ICR_OTHER
-#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
-#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
-#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
-#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
-#define E1000_IMS_DSW E1000_ICR_DSW
-#define E1000_IMS_PHYINT E1000_ICR_PHYINT
-#define E1000_IMS_EPRST E1000_ICR_EPRST
-
-/* Interrupt Mask Clear */
-#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
-#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
-#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
-#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
-#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
-#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
-#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
-#define E1000_IMC_SRPD E1000_ICR_SRPD
-#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
-#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
-#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
-#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
-#define E1000_IMC_DSW E1000_ICR_DSW
-#define E1000_IMC_PHYINT E1000_ICR_PHYINT
-#define E1000_IMC_EPRST E1000_ICR_EPRST
-
-/* Receive Control */
-#define E1000_RCTL_RST 0x00000001 /* Software reset */
-#define E1000_RCTL_EN 0x00000002 /* enable */
-#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
-#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
-#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
-#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
-#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
-#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
-#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
-#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
-#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
-#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
-#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
-#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
-#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
-#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
-#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
-#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
-#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
-#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
-#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
-#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
-/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
-#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
-#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
-#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
-#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
-#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
-#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
-
-
-#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
-#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
-#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
-#define E1000_EEPROM_RW_REG_DONE 0x10 /* Offset to READ/WRITE done bit */
-#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
-#define E1000_EEPROM_RW_ADDR_SHIFT 8 /* Shift to the address bits */
-#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
-
-/* 82574 EERD/EEWR registers layout */
-#define E1000_EERW_START BIT(0)
-#define E1000_EERW_DONE BIT(1)
-#define E1000_EERW_ADDR_SHIFT 2
-#define E1000_EERW_ADDR_MASK ((1L << 14) - 1)
-#define E1000_EERW_DATA_SHIFT 16
-#define E1000_EERW_DATA_MASK ((1L << 16) - 1)
-
-/* Register Bit Masks */
-/* Device Control */
-#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
-#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
-#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
-#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
-#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
-#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
-#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
-#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
-#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
-#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
-#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
-#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
-#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
-#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
-#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
-#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
-#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
-#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
-#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
-#define E1000_CTRL_SPD_SHIFT 8 /* Speed Select Shift */
-
-#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* auto speed detection check */
-#define E1000_CTRL_EXT_EE_RST 0x00002000 /* EEPROM reset */
-#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
-#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_EIAME 0x01000000
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
-#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
-#define E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA 0x20000000
-#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
-
-#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
-#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
-#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
-#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
-#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
-#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
-#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
-#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
-#define E1000_CTRL_RST 0x04000000 /* Global reset */
-#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
-#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
-#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
-#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
-
-/* Device Status */
-#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
@@ -827,9 +247,6 @@
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
#define E1000_STATUS_SPEED_MASK 0x000000C0
-#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
by EEPROM/Flash */
#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
@@ -837,9 +254,7 @@
#define E1000_STATUS_ASDV_100 0x00000100 /* ASDV 100Mb */
#define E1000_STATUS_ASDV_1000 0x00000200 /* ASDV 1Gb */
#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
-#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
@@ -857,111 +272,6 @@
#define E1000_STATUS_SPEED_SHIFT 6
#define E1000_STATUS_ASDV_SHIFT 8
-/* EEPROM/Flash Control */
-#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
-#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
-#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
-#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
-#define E1000_EECD_FWE_MASK 0x00000030
-#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
-#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
-#define E1000_EECD_FWE_SHIFT 4
-#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
-#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
-#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
-#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
-#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
- * (0-small, 1-large) */
-#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
-#ifndef E1000_EEPROM_GRANT_ATTEMPTS
-#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
-#endif
-#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */
-#define E1000_EECD_SIZE_EX_SHIFT 11
-#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
-#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
-#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
-#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
-#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
-#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
-#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
-
-
-#define E1000_EECD_SECVAL_SHIFT 22
-#define E1000_STM_OPCODE 0xDB00
-#define E1000_HICR_FW_RESET 0xC0
-
-#define E1000_SHADOW_RAM_WORDS 2048
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC0
-
-/* MDI Control */
-#define E1000_MDIC_DATA_MASK 0x0000FFFF
-#define E1000_MDIC_REG_MASK 0x001F0000
-#define E1000_MDIC_REG_SHIFT 16
-#define E1000_MDIC_PHY_MASK 0x03E00000
-#define E1000_MDIC_PHY_SHIFT 21
-#define E1000_MDIC_OP_WRITE 0x04000000
-#define E1000_MDIC_OP_READ 0x08000000
-#define E1000_MDIC_READY 0x10000000
-#define E1000_MDIC_INT_EN 0x20000000
-#define E1000_MDIC_ERROR 0x40000000
-
-/* Rx Interrupt Delay Timer */
-#define E1000_RDTR_FPD BIT(31)
-
-/* Tx Interrupt Delay Timer */
-#define E1000_TIDV_FPD BIT(31)
-
-/* Delay increments in nanoseconds for delayed interrupts registers */
-#define E1000_INTR_DELAY_NS_RES (1024)
-
-/* Delay increments in nanoseconds for interrupt throttling registers */
-#define E1000_INTR_THROTTLING_NS_RES (256)
-
-/* EEPROM Commands - Microwire */
-#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
-#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
-#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
-#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erast/write disable */
-
-/* EEPROM Word Offsets */
-#define EEPROM_COMPAT 0x0003
-#define EEPROM_ID_LED_SETTINGS 0x0004
-#define EEPROM_VERSION 0x0005
-#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
-#define EEPROM_PHY_CLASS_WORD 0x0007
-#define EEPROM_INIT_CONTROL1_REG 0x000A
-#define EEPROM_INIT_CONTROL2_REG 0x000F
-#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
-#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
-#define EEPROM_INIT_3GIO_3 0x001A
-#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
-#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
-#define EEPROM_CFG 0x0012
-#define EEPROM_FLASH_VERSION 0x0032
-#define EEPROM_CHECKSUM_REG 0x003F
-
-#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
-#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
-
-/* PCI Express Control */
-/* 3GIO Control Register - GCR (0x05B00; RW) */
-#define E1000_L0S_ADJUST (1 << 9)
-#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23)
-#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26)
-
-#define E1000_L0S_ADJUST (1 << 9)
-#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23)
-#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26)
-
-#define E1000_GCR_RO_BITS (1 << 23 | 1 << 25 | 1 << 26)
-
-/* MSI-X PBA Clear register */
-#define E1000_PBACLR_VALID_MASK (BIT(5) - 1)
-
/* Transmit Descriptor */
struct e1000_tx_desc {
uint64_t buffer_addr; /* Address of the descriptor's data buffer */
@@ -983,269 +293,7 @@
} upper;
};
-/* Transmit Descriptor bit definitions */
-#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
-#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define E1000_TXD_CMD_SNAP 0x40000000 /* Update SNAP header */
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
-#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
-
-/* Transmit Control */
-#define E1000_TCTL_RST 0x00000001 /* software reset */
-#define E1000_TCTL_EN 0x00000002 /* enable tx */
-#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
-#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
-#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
-#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
-
-/* Legacy Receive Descriptor */
-struct e1000_rx_desc {
- uint64_t buffer_addr; /* Address of the descriptor's data buffer */
- uint16_t length; /* Length of data DMAed into data buffer */
- uint16_t csum; /* Packet checksum */
- uint8_t status; /* Descriptor status */
- uint8_t errors; /* Descriptor Errors */
- uint16_t special;
-};
-
-/* Extended Receive Descriptor */
-union e1000_rx_desc_extended {
- struct {
- uint64_t buffer_addr;
- uint64_t reserved;
- } read;
- struct {
- struct {
- uint32_t mrq; /* Multiple Rx Queues */
- union {
- uint32_t rss; /* RSS Hash */
- struct {
- uint16_t ip_id; /* IP id */
- uint16_t csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- uint32_t status_error; /* ext status/error */
- uint16_t length;
- uint16_t vlan; /* VLAN tag */
- } upper;
- } wb; /* writeback */
-};
-
-#define MAX_PS_BUFFERS 4
-
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
-
-/* Receive Descriptor - Packet Split */
-union e1000_rx_desc_packet_split {
- struct {
- /* one buffer for protocol header(s), three data buffers */
- uint64_t buffer_addr[MAX_PS_BUFFERS];
- } read;
- struct {
- struct {
- uint32_t mrq; /* Multiple Rx Queues */
- union {
- uint32_t rss; /* RSS Hash */
- struct {
- uint16_t ip_id; /* IP id */
- uint16_t csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- uint32_t status_error; /* ext status/error */
- uint16_t length0; /* length of buffer 0 */
- uint16_t vlan; /* VLAN tag */
- } middle;
- struct {
- uint16_t header_status;
- /* length of buffers 1-3 */
- uint16_t length[PS_PAGE_BUFFERS];
- } upper;
- uint64_t reserved;
- } wb; /* writeback */
-};
-
-/* Receive Checksum Control bits */
-#define E1000_RXCSUM_IPOFLD 0x100 /* IP Checksum Offload Enable */
-#define E1000_RXCSUM_TUOFLD 0x200 /* TCP/UDP Checksum Offload Enable */
-#define E1000_RXCSUM_PCSD 0x2000 /* Packet Checksum Disable */
-
-#define E1000_RING_DESC_LEN (16)
-#define E1000_RING_DESC_LEN_SHIFT (4)
-
-#define E1000_MIN_RX_DESC_LEN E1000_RING_DESC_LEN
-#define E1000_MAX_RX_DESC_LEN (sizeof(union e1000_rx_desc_packet_split))
-
-/* Receive Descriptor bit definitions */
-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */
-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
-#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
-#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
-#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
-#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
-#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
-#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
-#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
-#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
-#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
-#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
-#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define E1000_RXD_SPC_PRI_SHIFT 13
-#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
-#define E1000_RXD_SPC_CFI_SHIFT 12
-
-/* RX packet types */
-#define E1000_RXD_PKT_MAC (0)
-#define E1000_RXD_PKT_IP4 (1)
-#define E1000_RXD_PKT_IP4_XDP (2)
-#define E1000_RXD_PKT_IP6 (5)
-#define E1000_RXD_PKT_IP6_XDP (6)
-
-#define E1000_RXD_PKT_TYPE(t) ((t) << 16)
-
-#define E1000_RXDEXT_STATERR_CE 0x01000000
-#define E1000_RXDEXT_STATERR_SE 0x02000000
-#define E1000_RXDEXT_STATERR_SEQ 0x04000000
-#define E1000_RXDEXT_STATERR_CXE 0x10000000
-#define E1000_RXDEXT_STATERR_TCPE 0x20000000
-#define E1000_RXDEXT_STATERR_IPE 0x40000000
-#define E1000_RXDEXT_STATERR_RXE 0x80000000
-
-#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
-#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
-
-/* Receive Address */
-#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
-
-/* Offload Context Descriptor */
-struct e1000_context_desc {
- union {
- uint32_t ip_config;
- struct {
- uint8_t ipcss; /* IP checksum start */
- uint8_t ipcso; /* IP checksum offset */
- uint16_t ipcse; /* IP checksum end */
- } ip_fields;
- } lower_setup;
- union {
- uint32_t tcp_config;
- struct {
- uint8_t tucss; /* TCP checksum start */
- uint8_t tucso; /* TCP checksum offset */
- uint16_t tucse; /* TCP checksum end */
- } tcp_fields;
- } upper_setup;
- uint32_t cmd_and_length; /* */
- union {
- uint32_t data;
- struct {
- uint8_t status; /* Descriptor status */
- uint8_t hdr_len; /* Header length */
- uint16_t mss; /* Maximum segment size */
- } fields;
- } tcp_seg_setup;
-};
-
-/* Offload data descriptor */
-struct e1000_data_desc {
- uint64_t buffer_addr; /* Address of the descriptor's buffer address */
- union {
- uint32_t data;
- struct {
- uint16_t length; /* Data buffer length */
- uint8_t typ_len_ext; /* */
- uint8_t cmd; /* */
- } flags;
- } lower;
- union {
- uint32_t data;
- struct {
- uint8_t status; /* Descriptor status */
- uint8_t popts; /* Packet Options */
- uint16_t special; /* */
- } fields;
- } upper;
-};
-
-/* Management Control */
-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
-#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
-#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
-#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
-#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
-#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
-#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
-#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
- * Filtering */
-#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
-#define E1000_MANC_DIS_IP_CHK_ARP 0x10000000 /* Disable IP address chacking */
- /*for ARP packets - in 82574 */
-#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
-#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
- * filtering */
-#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
- * memory */
-#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
- * filtering */
-#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
-#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
-#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
-#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
-#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
-#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
-#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
-#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
-
-#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
-#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
-
-/* FACTPS Control */
-#define E1000_FACTPS_LAN0_ON 0x00000004 /* Lan 0 enable */
-
-/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
-#define EEPROM_SUM 0xBABA
-
-/* I/O-Mapped Access to Internal Registers, Memories, and Flash */
-#define E1000_IOADDR 0x00
-#define E1000_IODATA 0x04
#endif /* HW_E1000_REGS_H */
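
The two POPTS flags retained above are the per-packet options a transmit data descriptor uses to request checksum insertion. A minimal sketch of how a driver-side writer would set them, using the e1000_data_desc layout from the block removed above (the helper name is illustrative, not part of this patch):

    static void example_request_tx_checksums(struct e1000_data_desc *d)
    {
        /* ask the NIC to insert both the IP and the TCP/UDP checksum */
        d->upper.fields.popts = E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM;
    }
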
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
index 7523e9f..c384879 100644
--- a/hw/net/e1000e.c
+++ b/hw/net/e1000e.c
@@ -1,37 +1,37 @@
/*
-* QEMU INTEL 82574 GbE NIC emulation
-*
-* Software developer's manuals:
-* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
-*
-* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
-* Developed by Daynix Computing LTD (http://www.daynix.com)
-*
-* Authors:
-* Dmitry Fleytman <dmitry@daynix.com>
-* Leonid Bloch <leonid@daynix.com>
-* Yan Vugenfirer <yan@daynix.com>
-*
-* Based on work done by:
-* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
-* Copyright (c) 2008 Qumranet
-* Based on work done by:
-* Copyright (c) 2007 Dan Aloni
-* Copyright (c) 2004 Antony T Curtis
-*
-* This library is free software; you can redistribute it and/or
-* modify it under the terms of the GNU Lesser General Public
-* License as published by the Free Software Foundation; either
-* version 2.1 of the License, or (at your option) any later version.
-*
-* This library is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* Lesser General Public License for more details.
-*
-* You should have received a copy of the GNU Lesser General Public
-* License along with this library; if not, see <http://www.gnu.org/licenses/>.
-*/
+ * QEMU INTEL 82574 GbE NIC emulation
+ *
+ * Software developer's manuals:
+ * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
+ *
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2008 Qumranet
+ * Based on work done by:
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
#include "qemu/osdep.h"
#include "qemu/units.h"
@@ -42,13 +42,13 @@
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "hw/hw.h"
+#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
-#include "e1000_regs.h"
-
+#include "e1000_common.h"
#include "e1000x_common.h"
#include "e1000e_core.h"
@@ -81,6 +81,7 @@
E1000ECore core;
bool init_vet;
+ bool timadj;
};
#define E1000E_MMIO_IDX 0
@@ -239,9 +240,9 @@
};
/*
-* EEPROM (NVM) contents documented in Table 36, section 6.1
-* and generally 6.1.2 Software accessed words.
-*/
+ * EEPROM (NVM) contents documented in Table 36, section 6.1
+ * and generally 6.1.2 Software accessed words.
+ */
static const uint16_t e1000e_eeprom_template[64] = {
/* Address | Compat. | ImVer | Compat. */
0x0000, 0x0000, 0x0000, 0x0420, 0xf746, 0x2010, 0xffff, 0xffff,
@@ -512,11 +513,11 @@
msi_uninit(pci_dev);
}
-static void e1000e_qdev_reset(DeviceState *dev)
+static void e1000e_qdev_reset_hold(Object *obj)
{
- E1000EState *s = E1000E(dev);
+ E1000EState *s = E1000E(obj);
- trace_e1000e_cb_qdev_reset();
+ trace_e1000e_cb_qdev_reset_hold();
e1000e_core_reset(&s->core);
@@ -553,6 +554,12 @@
return e1000e_core_post_load(&s->core);
}
+static bool e1000e_migrate_timadj(void *opaque, int version_id)
+{
+ E1000EState *s = opaque;
+ return s->timadj;
+}
+
static const VMStateDescription e1000e_vmstate_tx = {
.name = "e1000e-tx",
.version_id = 1,
@@ -630,12 +637,11 @@
VMSTATE_E1000E_INTR_DELAY_TIMER(core.tidv, E1000EState),
VMSTATE_E1000E_INTR_DELAY_TIMER(core.itr, E1000EState),
- VMSTATE_BOOL(core.itr_intr_pending, E1000EState),
+ VMSTATE_UNUSED(1),
VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(core.eitr, E1000EState,
E1000E_MSIX_VEC_NUM),
- VMSTATE_BOOL_ARRAY(core.eitr_intr_pending, E1000EState,
- E1000E_MSIX_VEC_NUM),
+ VMSTATE_UNUSED(E1000E_MSIX_VEC_NUM),
VMSTATE_UINT32(core.itr_guest_value, E1000EState),
VMSTATE_UINT32_ARRAY(core.eitr_guest_value, E1000EState,
@@ -645,6 +651,9 @@
VMSTATE_STRUCT_ARRAY(core.tx, E1000EState, E1000E_NUM_QUEUES, 0,
e1000e_vmstate_tx, struct e1000e_tx),
+
+ VMSTATE_INT64_TEST(core.timadj, E1000EState, e1000e_migrate_timadj),
+
VMSTATE_END_OF_LIST()
}
};
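
The new core.timadj field above is guarded by a vmstate field test, so it only goes on the wire when the migrate-timadj property added below is left enabled. A condensed sketch of that pattern on a hypothetical device (FooState, migrate_counter and counter are illustrative names, not part of this patch):

    typedef struct FooState {
        DeviceState parent_obj;
        bool migrate_counter;   /* qdev property, default true */
        int64_t counter;        /* new state to migrate */
    } FooState;

    static bool foo_migrate_counter(void *opaque, int version_id)
    {
        FooState *s = opaque;
        return s->migrate_counter;
    }

    static const VMStateDescription foo_vmstate = {
        .name = "foo",
        .version_id = 1,
        .fields = (VMStateField[]) {
            /* skipped entirely when the test function returns false */
            VMSTATE_INT64_TEST(counter, FooState, foo_migrate_counter),
            VMSTATE_END_OF_LIST()
        }
    };
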
@@ -663,12 +672,14 @@
DEFINE_PROP_SIGNED("subsys", E1000EState, subsys, 0,
e1000e_prop_subsys, uint16_t),
DEFINE_PROP_BOOL("init-vet", E1000EState, init_vet, true),
+ DEFINE_PROP_BOOL("migrate-timadj", E1000EState, timadj, true),
DEFINE_PROP_END_OF_LIST(),
};
static void e1000e_class_init(ObjectClass *class, void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
+ ResettableClass *rc = RESETTABLE_CLASS(class);
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
c->realize = e1000e_pci_realize;
@@ -679,8 +690,9 @@
c->romfile = "efi-e1000e.rom";
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
+ rc->phases.hold = e1000e_qdev_reset_hold;
+
dc->desc = "Intel 82574L GbE Controller";
- dc->reset = e1000e_qdev_reset;
dc->vmsd = &e1000e_vmstate;
e1000e_prop_disable_vnet = qdev_prop_uint8;
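
The class_init hunk above moves reset handling from the legacy DeviceClass::reset hook to the Resettable hold phase. Condensed to its essentials (the real handler also emits a trace event, and the real class_init sets many other fields), the conversion looks like this:

    static void e1000e_qdev_reset_hold(Object *obj)
    {
        E1000EState *s = E1000E(obj);

        e1000e_core_reset(&s->core);
    }

    static void e1000e_class_init(ObjectClass *class, void *data)
    {
        ResettableClass *rc = RESETTABLE_CLASS(class);

        rc->phases.hold = e1000e_qdev_reset_hold;   /* replaces dc->reset */
    }
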
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index fc9cdb4..4d9679c 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -1,42 +1,43 @@
/*
-* Core code for QEMU e1000e emulation
-*
-* Software developer's manuals:
-* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
-*
-* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
-* Developed by Daynix Computing LTD (http://www.daynix.com)
-*
-* Authors:
-* Dmitry Fleytman <dmitry@daynix.com>
-* Leonid Bloch <leonid@daynix.com>
-* Yan Vugenfirer <yan@daynix.com>
-*
-* Based on work done by:
-* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
-* Copyright (c) 2008 Qumranet
-* Based on work done by:
-* Copyright (c) 2007 Dan Aloni
-* Copyright (c) 2004 Antony T Curtis
-*
-* This library is free software; you can redistribute it and/or
-* modify it under the terms of the GNU Lesser General Public
-* License as published by the Free Software Foundation; either
-* version 2.1 of the License, or (at your option) any later version.
-*
-* This library is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* Lesser General Public License for more details.
-*
-* You should have received a copy of the GNU Lesser General Public
-* License along with this library; if not, see <http://www.gnu.org/licenses/>.
-*/
+ * Core code for QEMU e1000e emulation
+ *
+ * Software developer's manuals:
+ * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
+ *
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2008 Qumranet
+ * Based on work done by:
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/tap.h"
+#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"
@@ -44,18 +45,32 @@
#include "net_tx_pkt.h"
#include "net_rx_pkt.h"
+#include "e1000_common.h"
#include "e1000x_common.h"
#include "e1000e_core.h"
#include "trace.h"
-#define E1000E_MIN_XITR (500) /* No more then 7813 interrupts per
- second according to spec 10.2.4.2 */
+/* No more than 7813 interrupts per second according to spec 10.2.4.2 */

+#define E1000E_MIN_XITR (500)
+
#define E1000E_MAX_TX_FRAGS (64)
+union e1000_rx_desc_union {
+ struct e1000_rx_desc legacy;
+ union e1000_rx_desc_extended extended;
+ union e1000_rx_desc_packet_split packet_split;
+};
+
+static ssize_t
+e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
+ bool has_vnet);
+
static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);
+static void e1000e_reset(E1000ECore *core, bool sw);
+
static inline void
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
@@ -148,15 +163,8 @@
{
E1000IntrDelayTimer *timer = opaque;
- assert(!msix_enabled(timer->core->owner));
-
timer->running = false;
- if (!timer->core->itr_intr_pending) {
- trace_e1000e_irq_throttling_no_pending_interrupts();
- return;
- }
-
if (msi_enabled(timer->core->owner)) {
trace_e1000e_irq_msi_notify_postponed();
/* Clear msi_causes_pending to fire MSI eventually */
@@ -174,15 +182,8 @@
E1000IntrDelayTimer *timer = opaque;
int idx = timer - &timer->core->eitr[0];
- assert(msix_enabled(timer->core->owner));
-
timer->running = false;
- if (!timer->core->eitr_intr_pending[idx]) {
- trace_e1000e_irq_throttling_no_pending_vec(idx);
- return;
- }
-
trace_e1000e_irq_msix_notify_postponed_vec(idx);
msix_notify(timer->core->owner, idx);
}
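
With the pending-interrupt bookkeeping dropped in the two hunks above, an expired ITR/EITR throttling timer now always delivers the postponed MSI or MSI-X notification. As a sanity check on the E1000E_MIN_XITR comment near the top of this file, and assuming the usual 256 ns ITR granularity (which is what makes the spec figure line up):

    minimum interval = 500 * 256 ns = 128 us
    maximum rate     = 1 / 128 us   ~ 7812.5 interrupts/s, i.e. the "7813" bound
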
@@ -282,14 +283,18 @@
core->delayed_causes |= *causes & delayable_causes;
*causes &= ~delayable_causes;
- /* Check if delayed RX interrupts disabled by client
- or if there are causes that cannot be delayed */
+ /*
+     * Check if delayed RX interrupts are disabled by the client
+ * or if there are causes that cannot be delayed
+ */
if ((rdtr == 0) || (*causes != 0)) {
return false;
}
- /* Check if delayed RX ACK interrupts disabled by client
- and there is an ACK packet received */
+ /*
+     * Check if delayed RX ACK interrupts are disabled by the client
+ * and there is an ACK packet received
+ */
if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
return false;
}
@@ -493,27 +498,27 @@
static uint32_t
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
- bool isip4, isip6, isudp, istcp;
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
assert(e1000e_rss_enabled(core));
- net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
- if (isip4) {
- bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
-
- trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC],
+ if (hasip4) {
+ trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
E1000_MRQC_EN_IPV4(core->mac[MRQC]));
- if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
+ E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV4TCP;
}
if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV4;
}
- } else if (isip6) {
+ } else if (hasip6) {
eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);
bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
@@ -527,7 +532,7 @@
* backends like these.
*/
trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
- trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, istcp,
+ trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
ip6info->has_ext_hdrs,
ip6info->rss_ex_dst_valid,
ip6info->rss_ex_src_valid,
@@ -540,7 +545,7 @@
(!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
ip6info->rss_ex_src_valid))) {
- if (istcp && !ip6info->fragment &&
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV6TCP;
}
@@ -625,23 +630,39 @@
info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}
-static void
+static bool
e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx)
{
if (tx->props.tse && tx->cptse) {
- net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss);
+ if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss)) {
+ return false;
+ }
+
net_tx_pkt_update_ip_checksums(tx->tx_pkt);
e1000x_inc_reg_if_not_full(core->mac, TSCTC);
- return;
+ return true;
}
if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
- net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0);
+ if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) {
+ return false;
+ }
}
if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
}
+
+ return true;
+}
+
+static void e1000e_tx_pkt_callback(void *core,
+ const struct iovec *iov,
+ int iovcnt,
+ const struct iovec *virt_iov,
+ int virt_iovcnt)
+{
+ e1000e_receive_internal(core, virt_iov, virt_iovcnt, true);
}
static bool
@@ -650,13 +671,16 @@
int target_queue = MIN(core->max_queue_num, queue_index);
NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);
- e1000e_setup_tx_offloads(core, tx);
+ if (!e1000e_setup_tx_offloads(core, tx)) {
+ return false;
+ }
net_tx_pkt_dump(tx->tx_pkt);
- if ((core->phy[0][PHY_CTRL] & MII_CR_LOOPBACK) ||
+ if ((core->phy[0][MII_BMCR] & MII_BMCR_LOOPBACK) ||
((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
- return net_tx_pkt_send_loopback(tx->tx_pkt, queue);
+ return net_tx_pkt_send_custom(tx->tx_pkt, false,
+ e1000e_tx_pkt_callback, core);
} else {
return net_tx_pkt_send(tx->tx_pkt, queue);
}
@@ -668,7 +692,7 @@
static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
PTC1023, PTC1522 };
- size_t tot_len = net_tx_pkt_get_total_len(tx_pkt);
+ size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;
e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
e1000x_inc_reg_if_not_full(core->mac, TPT);
@@ -1016,10 +1040,11 @@
if (e1000x_is_vlan_packet(buf, core->mac[VET]) &&
e1000x_vlan_rx_filter_enabled(core->mac)) {
- uint16_t vid = lduw_be_p(buf + 14);
- uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) +
- ((vid >> 5) & 0x7f));
- if ((vfta & (1 << (vid & 0x1f))) == 0) {
+ uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci);
+ uint32_t vfta =
+ ldl_le_p((uint32_t *)(core->mac + VFTA) +
+ ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK));
+ if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) {
trace_e1000e_rx_flt_vlan_mismatch(vid);
return false;
} else {
@@ -1054,48 +1079,47 @@
}
static inline void
-e1000e_read_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
+e1000e_read_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc,
+ hwaddr *buff_addr)
{
- struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
- *buff_addr = le64_to_cpu(d->buffer_addr);
+ *buff_addr = le64_to_cpu(desc->buffer_addr);
}
static inline void
-e1000e_read_ext_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
+e1000e_read_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc,
+ hwaddr *buff_addr)
{
- union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;
- *buff_addr = le64_to_cpu(d->read.buffer_addr);
+ *buff_addr = le64_to_cpu(desc->read.buffer_addr);
}
static inline void
-e1000e_read_ps_rx_descr(E1000ECore *core, uint8_t *desc,
- hwaddr (*buff_addr)[MAX_PS_BUFFERS])
+e1000e_read_ps_rx_descr(E1000ECore *core,
+ union e1000_rx_desc_packet_split *desc,
+ hwaddr buff_addr[MAX_PS_BUFFERS])
{
int i;
- union e1000_rx_desc_packet_split *d =
- (union e1000_rx_desc_packet_split *) desc;
for (i = 0; i < MAX_PS_BUFFERS; i++) {
- (*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]);
+ buff_addr[i] = le64_to_cpu(desc->read.buffer_addr[i]);
}
- trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1],
- (*buff_addr)[2], (*buff_addr)[3]);
+ trace_e1000e_rx_desc_ps_read(buff_addr[0], buff_addr[1],
+ buff_addr[2], buff_addr[3]);
}
static inline void
-e1000e_read_rx_descr(E1000ECore *core, uint8_t *desc,
- hwaddr (*buff_addr)[MAX_PS_BUFFERS])
+e1000e_read_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc,
+ hwaddr buff_addr[MAX_PS_BUFFERS])
{
if (e1000e_rx_use_legacy_descriptor(core)) {
- e1000e_read_lgcy_rx_descr(core, desc, &(*buff_addr)[0]);
- (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
+ e1000e_read_lgcy_rx_descr(core, &desc->legacy, &buff_addr[0]);
+ buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
} else {
if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
- e1000e_read_ps_rx_descr(core, desc, buff_addr);
+ e1000e_read_ps_rx_descr(core, &desc->packet_split, buff_addr);
} else {
- e1000e_read_ext_rx_descr(core, desc, &(*buff_addr)[0]);
- (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
+ e1000e_read_ext_rx_descr(core, &desc->extended, &buff_addr[0]);
+ buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
}
}
}
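
For the VLAN filter rewrite a few hunks above: the new E1000_VFTA_ENTRY_* constants should expand to the same 5-bit split the replaced literals used, i.e. 128 words of 32 bits covering the 12-bit VLAN ID space. A worked example with illustrative names:

    /* vid = 0x123 (291): word (291 >> 5) & 0x7f = 9, bit 291 & 0x1f = 3 */
    static bool example_vlan_id_accepted(const uint32_t *vfta, uint16_t vid)
    {
        return vfta[(vid >> 5) & 0x7f] & (1u << (vid & 0x1f));
    }
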
@@ -1104,7 +1128,7 @@
e1000e_verify_csum_in_sw(E1000ECore *core,
struct NetRxPkt *pkt,
uint32_t *status_flags,
- bool istcp, bool isudp)
+ EthL4HdrProto l4hdr_proto)
{
bool csum_valid;
uint32_t csum_error;
@@ -1131,14 +1155,10 @@
}
csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
+ *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;
- if (istcp) {
- *status_flags |= E1000_RXD_STAT_TCPCS |
- csum_error;
- } else if (isudp) {
- *status_flags |= E1000_RXD_STAT_TCPCS |
- E1000_RXD_STAT_UDPCS |
- csum_error;
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
+ *status_flags |= E1000_RXD_STAT_UDPCS;
}
}
@@ -1167,7 +1187,8 @@
uint16_t *vlan_tag)
{
struct virtio_net_hdr *vhdr;
- bool isip4, isip6, istcp, isudp;
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
uint32_t pkt_type;
*status_flags = E1000_RXD_STAT_DD;
@@ -1179,8 +1200,8 @@
*status_flags |= E1000_RXD_STAT_EOP;
- net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
- trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp);
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
+ trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);
/* VLAN state */
if (net_rx_pkt_is_vlan_stripped(pkt)) {
@@ -1196,24 +1217,25 @@
*mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8));
trace_e1000e_rx_metadata_rss(*rss, *mrq);
}
- } else if (isip4) {
+ } else if (hasip4) {
*status_flags |= E1000_RXD_STAT_IPIDV;
*ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
trace_e1000e_rx_metadata_ip_id(*ip_id);
}
- if (istcp && e1000e_is_tcp_ack(core, pkt)) {
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && e1000e_is_tcp_ack(core, pkt)) {
*status_flags |= E1000_RXD_STAT_ACK;
trace_e1000e_rx_metadata_ack();
}
- if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
+ if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
trace_e1000e_rx_metadata_ipv6_filtering_disabled();
pkt_type = E1000_RXD_PKT_MAC;
- } else if (istcp || isudp) {
- pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
- } else if (isip4 || isip6) {
- pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
+ } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
+ l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
+ pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
+ } else if (hasip4 || hasip6) {
+ pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
} else {
pkt_type = E1000_RXD_PKT_MAC;
}
@@ -1222,37 +1244,38 @@
trace_e1000e_rx_metadata_pkt_type(pkt_type);
/* RX CSO information */
- if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
+ if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
trace_e1000e_rx_metadata_ipv6_sum_disabled();
goto func_exit;
}
- if (!net_rx_pkt_has_virt_hdr(pkt)) {
- trace_e1000e_rx_metadata_no_virthdr();
- e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
- goto func_exit;
- }
-
vhdr = net_rx_pkt_get_vhdr(pkt);
if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
!(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
trace_e1000e_rx_metadata_virthdr_no_csum_info();
- e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
+ e1000e_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
goto func_exit;
}
if (e1000e_rx_l3_cso_enabled(core)) {
- *status_flags |= isip4 ? E1000_RXD_STAT_IPCS : 0;
+ *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
} else {
trace_e1000e_rx_metadata_l3_cso_disabled();
}
if (e1000e_rx_l4_cso_enabled(core)) {
- if (istcp) {
+ switch (l4hdr_proto) {
+ case ETH_L4_HDR_PROTO_TCP:
*status_flags |= E1000_RXD_STAT_TCPCS;
- } else if (isudp) {
+ break;
+
+ case ETH_L4_HDR_PROTO_UDP:
*status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
+ break;
+
+ default:
+ break;
}
} else {
trace_e1000e_rx_metadata_l4_cso_disabled();
@@ -1265,7 +1288,7 @@
}
static inline void
-e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc,
+e1000e_write_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc,
struct NetRxPkt *pkt,
const E1000E_RSSInfo *rss_info,
uint16_t length)
@@ -1273,71 +1296,66 @@
uint32_t status_flags, rss, mrq;
uint16_t ip_id;
- struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
-
assert(!rss_info->enabled);
- d->length = cpu_to_le16(length);
- d->csum = 0;
+ desc->length = cpu_to_le16(length);
+ desc->csum = 0;
e1000e_build_rx_metadata(core, pkt, pkt != NULL,
rss_info,
&rss, &mrq,
&status_flags, &ip_id,
- &d->special);
- d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
- d->status = (uint8_t) le32_to_cpu(status_flags);
+ &desc->special);
+ desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
+ desc->status = (uint8_t) le32_to_cpu(status_flags);
}
static inline void
-e1000e_write_ext_rx_descr(E1000ECore *core, uint8_t *desc,
+e1000e_write_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc,
struct NetRxPkt *pkt,
const E1000E_RSSInfo *rss_info,
uint16_t length)
{
- union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;
+ memset(&desc->wb, 0, sizeof(desc->wb));
- memset(&d->wb, 0, sizeof(d->wb));
-
- d->wb.upper.length = cpu_to_le16(length);
+ desc->wb.upper.length = cpu_to_le16(length);
e1000e_build_rx_metadata(core, pkt, pkt != NULL,
rss_info,
- &d->wb.lower.hi_dword.rss,
- &d->wb.lower.mrq,
- &d->wb.upper.status_error,
- &d->wb.lower.hi_dword.csum_ip.ip_id,
- &d->wb.upper.vlan);
+ &desc->wb.lower.hi_dword.rss,
+ &desc->wb.lower.mrq,
+ &desc->wb.upper.status_error,
+ &desc->wb.lower.hi_dword.csum_ip.ip_id,
+ &desc->wb.upper.vlan);
}
static inline void
-e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc,
+e1000e_write_ps_rx_descr(E1000ECore *core,
+ union e1000_rx_desc_packet_split *desc,
struct NetRxPkt *pkt,
const E1000E_RSSInfo *rss_info,
size_t ps_hdr_len,
uint16_t(*written)[MAX_PS_BUFFERS])
{
int i;
- union e1000_rx_desc_packet_split *d =
- (union e1000_rx_desc_packet_split *) desc;
- memset(&d->wb, 0, sizeof(d->wb));
+ memset(&desc->wb, 0, sizeof(desc->wb));
- d->wb.middle.length0 = cpu_to_le16((*written)[0]);
+ desc->wb.middle.length0 = cpu_to_le16((*written)[0]);
for (i = 0; i < PS_PAGE_BUFFERS; i++) {
- d->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]);
+ desc->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]);
}
e1000e_build_rx_metadata(core, pkt, pkt != NULL,
rss_info,
- &d->wb.lower.hi_dword.rss,
- &d->wb.lower.mrq,
- &d->wb.middle.status_error,
- &d->wb.lower.hi_dword.csum_ip.ip_id,
- &d->wb.middle.vlan);
+ &desc->wb.lower.hi_dword.rss,
+ &desc->wb.lower.mrq,
+ &desc->wb.middle.status_error,
+ &desc->wb.lower.hi_dword.csum_ip.ip_id,
+ &desc->wb.middle.vlan);
- d->wb.upper.header_status =
+ desc->wb.upper.header_status =
cpu_to_le16(ps_hdr_len | (ps_hdr_len ? E1000_RXDPS_HDRSTAT_HDRSP : 0));
trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1],
@@ -1345,20 +1363,21 @@
}
static inline void
-e1000e_write_rx_descr(E1000ECore *core, uint8_t *desc,
+e1000e_write_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc,
struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info,
size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS])
{
if (e1000e_rx_use_legacy_descriptor(core)) {
assert(ps_hdr_len == 0);
- e1000e_write_lgcy_rx_descr(core, desc, pkt, rss_info, (*written)[0]);
+ e1000e_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info,
+ (*written)[0]);
} else {
if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
- e1000e_write_ps_rx_descr(core, desc, pkt, rss_info,
+ e1000e_write_ps_rx_descr(core, &desc->packet_split, pkt, rss_info,
ps_hdr_len, written);
} else {
assert(ps_hdr_len == 0);
- e1000e_write_ext_rx_descr(core, desc, pkt, rss_info,
+ e1000e_write_ext_rx_descr(core, &desc->extended, pkt, rss_info,
(*written)[0]);
}
}
@@ -1366,12 +1385,12 @@
static inline void
e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr,
- uint8_t *desc, dma_addr_t len)
+ union e1000_rx_desc_union *desc, dma_addr_t len)
{
PCIDevice *dev = core->owner;
if (e1000e_rx_use_legacy_descriptor(core)) {
- struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
+ struct e1000_rx_desc *d = &desc->legacy;
size_t offset = offsetof(struct e1000_rx_desc, status);
uint8_t status = d->status;
@@ -1384,8 +1403,7 @@
}
} else {
if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
- union e1000_rx_desc_packet_split *d =
- (union e1000_rx_desc_packet_split *) desc;
+ union e1000_rx_desc_packet_split *d = &desc->packet_split;
size_t offset = offsetof(union e1000_rx_desc_packet_split,
wb.middle.status_error);
uint32_t status = d->wb.middle.status_error;
@@ -1398,8 +1416,7 @@
pci_dma_write(dev, addr + offset, &status, sizeof(status));
}
} else {
- union e1000_rx_desc_extended *d =
- (union e1000_rx_desc_extended *) desc;
+ union e1000_rx_desc_extended *d = &desc->extended;
size_t offset = offsetof(union e1000_rx_desc_extended,
wb.upper.status_error);
uint32_t status = d->wb.upper.status_error;
@@ -1422,14 +1439,14 @@
static inline void
e1000e_write_hdr_to_rx_buffers(E1000ECore *core,
- hwaddr (*ba)[MAX_PS_BUFFERS],
+ hwaddr ba[MAX_PS_BUFFERS],
e1000e_ba_state *bastate,
const char *data,
dma_addr_t data_len)
{
assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]);
- pci_dma_write(core->owner, (*ba)[0] + bastate->written[0], data, data_len);
+ pci_dma_write(core->owner, ba[0] + bastate->written[0], data, data_len);
bastate->written[0] += data_len;
bastate->cur_idx = 1;
@@ -1437,7 +1454,7 @@
static void
e1000e_write_to_rx_buffers(E1000ECore *core,
- hwaddr (*ba)[MAX_PS_BUFFERS],
+ hwaddr ba[MAX_PS_BUFFERS],
e1000e_ba_state *bastate,
const char *data,
dma_addr_t data_len)
@@ -1449,13 +1466,13 @@
uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);
trace_e1000e_rx_desc_buff_write(bastate->cur_idx,
- (*ba)[bastate->cur_idx],
+ ba[bastate->cur_idx],
bastate->written[bastate->cur_idx],
data,
bytes_to_write);
pci_dma_write(core->owner,
- (*ba)[bastate->cur_idx] + bastate->written[bastate->cur_idx],
+ ba[bastate->cur_idx] + bastate->written[bastate->cur_idx],
data, bytes_to_write);
bastate->written[bastate->cur_idx] += bytes_to_write;
@@ -1501,18 +1518,19 @@
static bool
e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len)
{
- bool isip4, isip6, isudp, istcp;
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
bool fragment;
if (!e1000e_rx_use_ps_descriptor(core)) {
return false;
}
- net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
- if (isip4) {
+ if (hasip4) {
fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
- } else if (isip6) {
+ } else if (hasip6) {
fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
} else {
return false;
@@ -1522,7 +1540,8 @@
return false;
}
- if (!fragment && (isudp || istcp)) {
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
+ l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
*hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
} else {
*hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
@@ -1543,7 +1562,7 @@
{
PCIDevice *d = core->owner;
dma_addr_t base;
- uint8_t desc[E1000_MAX_RX_DESC_LEN];
+ union e1000_rx_desc_union desc;
size_t desc_size;
size_t desc_offset = 0;
size_t iov_ofs = 0;
@@ -1579,7 +1598,7 @@
trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len);
- e1000e_read_rx_descr(core, desc, &ba);
+ e1000e_read_rx_descr(core, &desc, ba);
if (ba[0]) {
if (desc_offset < size) {
@@ -1598,7 +1617,7 @@
iov_copy = MIN(ps_hdr_len - ps_hdr_copied,
iov->iov_len - iov_ofs);
- e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate,
+ e1000e_write_hdr_to_rx_buffers(core, ba, &bastate,
iov->iov_base, iov_copy);
copy_size -= iov_copy;
@@ -1615,7 +1634,7 @@
} else {
/* Leave buffer 0 of each descriptor except first */
/* empty as per spec 7.1.5.1 */
- e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate,
+ e1000e_write_hdr_to_rx_buffers(core, ba, &bastate,
NULL, 0);
}
}
@@ -1624,7 +1643,7 @@
while (copy_size) {
iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
- e1000e_write_to_rx_buffers(core, &ba, &bastate,
+ e1000e_write_to_rx_buffers(core, ba, &bastate,
iov->iov_base + iov_ofs, iov_copy);
copy_size -= iov_copy;
@@ -1637,7 +1656,7 @@
if (desc_offset + desc_size >= total_size) {
/* Simulate FCS checksum presence in the last descriptor */
- e1000e_write_to_rx_buffers(core, &ba, &bastate,
+ e1000e_write_to_rx_buffers(core, ba, &bastate,
(const char *) &fcs_pad, e1000x_fcs_len(core->mac));
}
}
@@ -1649,9 +1668,9 @@
is_last = true;
}
- e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL,
+ e1000e_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL,
rss_info, do_ps ? ps_hdr_len : 0, &bastate.written);
- e1000e_pci_dma_write_rx_desc(core, base, desc, core->rx_desc_len);
+ e1000e_pci_dma_write_rx_desc(core, base, &desc, core->rx_desc_len);
e1000e_ring_advance(core, rxi,
core->rx_desc_len / E1000_MIN_RX_DESC_LEN);
@@ -1664,25 +1683,27 @@
static inline void
e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt)
{
- if (net_rx_pkt_has_virt_hdr(pkt)) {
- struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
+ struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
- if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- net_rx_pkt_fix_l4_csum(pkt);
- }
+ if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ net_rx_pkt_fix_l4_csum(pkt);
}
}
-/* Min. octets in an ethernet frame sans FCS */
-#define MIN_BUF_SIZE 60
-
ssize_t
e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
{
- static const int maximum_ethernet_hdr_len = (14 + 4);
+ return e1000e_receive_internal(core, iov, iovcnt, core->has_vnet);
+}
+
+static ssize_t
+e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
+ bool has_vnet)
+{
+ static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
uint32_t n = 0;
- uint8_t min_buf[MIN_BUF_SIZE];
+ uint8_t min_buf[ETH_ZLEN];
struct iovec min_iov;
uint8_t *filter_buf;
size_t size, orig_size;
@@ -1700,9 +1721,11 @@
}
/* Pull virtio header in */
- if (core->has_vnet) {
+ if (has_vnet) {
net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
iov_ofs = sizeof(struct virtio_net_hdr);
+ } else {
+ net_rx_pkt_unset_vhdr(core->rx_pkt);
}
filter_buf = iov->iov_base + iov_ofs;
@@ -1744,8 +1767,6 @@
e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info);
e1000e_rx_ring_init(core, &rxr, rss_info.queue);
- trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
-
total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
e1000x_fcs_len(core->mac);
@@ -1771,12 +1792,12 @@
rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i);
n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit);
- trace_e1000e_rx_written_to_guest(n);
+ trace_e1000e_rx_written_to_guest(rxr.i->idx);
} else {
n |= E1000_ICS_RXO;
retval = 0;
- trace_e1000e_rx_not_written_to_guest(n);
+ trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
}
if (!e1000e_intrmgr_delay_rx_causes(core, &n)) {
@@ -1792,13 +1813,13 @@
static inline bool
e1000e_have_autoneg(E1000ECore *core)
{
- return core->phy[0][PHY_CTRL] & MII_CR_AUTO_NEG_EN;
+ return core->phy[0][MII_BMCR] & MII_BMCR_AUTOEN;
}
static void e1000e_update_flowctl_status(E1000ECore *core)
{
if (e1000e_have_autoneg(core) &&
- core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE) {
+ core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP) {
trace_e1000e_link_autoneg_flowctl(true);
core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
} else {
@@ -1816,12 +1837,12 @@
static inline void
e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val)
{
- /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
- core->phy[0][PHY_CTRL] = val & ~(0x3f |
- MII_CR_RESET |
- MII_CR_RESTART_AUTO_NEG);
+ /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
+ core->phy[0][MII_BMCR] = val & ~(0x3f |
+ MII_BMCR_RESET |
+ MII_BMCR_ANRESTART);
- if ((val & MII_CR_RESTART_AUTO_NEG) &&
+ if ((val & MII_BMCR_ANRESTART) &&
e1000e_have_autoneg(core)) {
e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer);
}
@@ -1855,7 +1876,7 @@
e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
} else {
if (e1000e_have_autoneg(core) &&
- !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
+ !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) {
e1000x_restart_autoneg(core->mac, core->phy[0],
core->autoneg_timer);
} else {
@@ -1888,7 +1909,7 @@
if (val & E1000_CTRL_RST) {
trace_e1000e_core_ctrl_sw_reset();
- e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
+ e1000e_reset(core, true);
}
if (val & E1000_CTRL_PHY_RST) {
@@ -1997,7 +2018,7 @@
void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE])
(E1000ECore *, int, uint16_t) = {
[0] = {
- [PHY_CTRL] = e1000e_set_phy_ctrl,
+ [MII_BMCR] = e1000e_set_phy_ctrl,
[PHY_PAGE] = e1000e_set_phy_page,
[PHY_OEM_BITS] = e1000e_set_phy_oem_bits
}
@@ -2011,13 +2032,11 @@
}
static inline bool
-e1000e_postpone_interrupt(bool *interrupt_pending,
- E1000IntrDelayTimer *timer)
+e1000e_postpone_interrupt(E1000IntrDelayTimer *timer)
{
if (timer->running) {
trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
- *interrupt_pending = true;
return true;
}
@@ -2031,14 +2050,13 @@
static inline bool
e1000e_itr_should_postpone(E1000ECore *core)
{
- return e1000e_postpone_interrupt(&core->itr_intr_pending, &core->itr);
+ return e1000e_postpone_interrupt(&core->itr);
}
static inline bool
e1000e_eitr_should_postpone(E1000ECore *core, int idx)
{
- return e1000e_postpone_interrupt(&core->eitr_intr_pending[idx],
- &core->eitr[idx]);
+ return e1000e_postpone_interrupt(&core->eitr[idx]);
}
static void
@@ -2269,19 +2287,19 @@
static const char e1000e_phy_regcap[E1000E_PHY_PAGES][0x20] = {
[0] = {
- [PHY_CTRL] = PHY_ANYPAGE | PHY_RW,
- [PHY_STATUS] = PHY_ANYPAGE | PHY_R,
- [PHY_ID1] = PHY_ANYPAGE | PHY_R,
- [PHY_ID2] = PHY_ANYPAGE | PHY_R,
- [PHY_AUTONEG_ADV] = PHY_ANYPAGE | PHY_RW,
- [PHY_LP_ABILITY] = PHY_ANYPAGE | PHY_R,
- [PHY_AUTONEG_EXP] = PHY_ANYPAGE | PHY_R,
- [PHY_NEXT_PAGE_TX] = PHY_ANYPAGE | PHY_RW,
- [PHY_LP_NEXT_PAGE] = PHY_ANYPAGE | PHY_R,
- [PHY_1000T_CTRL] = PHY_ANYPAGE | PHY_RW,
- [PHY_1000T_STATUS] = PHY_ANYPAGE | PHY_R,
- [PHY_EXT_STATUS] = PHY_ANYPAGE | PHY_R,
- [PHY_PAGE] = PHY_ANYPAGE | PHY_RW,
+ [MII_BMCR] = PHY_ANYPAGE | PHY_RW,
+ [MII_BMSR] = PHY_ANYPAGE | PHY_R,
+ [MII_PHYID1] = PHY_ANYPAGE | PHY_R,
+ [MII_PHYID2] = PHY_ANYPAGE | PHY_R,
+ [MII_ANAR] = PHY_ANYPAGE | PHY_RW,
+ [MII_ANLPAR] = PHY_ANYPAGE | PHY_R,
+ [MII_ANER] = PHY_ANYPAGE | PHY_R,
+ [MII_ANNP] = PHY_ANYPAGE | PHY_RW,
+ [MII_ANLPRNP] = PHY_ANYPAGE | PHY_R,
+ [MII_CTRL1000] = PHY_ANYPAGE | PHY_RW,
+ [MII_STAT1000] = PHY_ANYPAGE | PHY_R,
+ [MII_EXTSTAT] = PHY_ANYPAGE | PHY_R,
+ [PHY_PAGE] = PHY_ANYPAGE | PHY_RW,
[PHY_COPPER_CTRL1] = PHY_RW,
[PHY_COPPER_STAT1] = PHY_R,
@@ -2434,17 +2452,19 @@
core->mac[FCRTL] = val & 0x8000FFF8;
}
-static inline void
-e1000e_set_16bit(E1000ECore *core, int index, uint32_t val)
-{
- core->mac[index] = val & 0xffff;
-}
+#define E1000E_LOW_BITS_SET_FUNC(num) \
+ static void \
+ e1000e_set_##num##bit(E1000ECore *core, int index, uint32_t val) \
+ { \
+ core->mac[index] = val & (BIT(num) - 1); \
+ }
-static void
-e1000e_set_12bit(E1000ECore *core, int index, uint32_t val)
-{
- core->mac[index] = val & 0xfff;
-}
+E1000E_LOW_BITS_SET_FUNC(4)
+E1000E_LOW_BITS_SET_FUNC(6)
+E1000E_LOW_BITS_SET_FUNC(11)
+E1000E_LOW_BITS_SET_FUNC(12)
+E1000E_LOW_BITS_SET_FUNC(13)
+E1000E_LOW_BITS_SET_FUNC(16)
static void
e1000e_set_vet(E1000ECore *core, int index, uint32_t val)
@@ -2515,7 +2535,8 @@
}
icr = core->mac[ICR] & ~val;
- /* Windows driver expects that the "receive overrun" bit and other
+ /*
+     * Windows driver expects the "receive overrun" bit and other
* ones to be cleared when the "Other" bit (#24) is cleared.
*/
icr = (val & E1000_ICR_OTHER) ? (icr & ~E1000_ICR_OTHER_CAUSES) : icr;
@@ -2614,27 +2635,11 @@
return core->mac[IMS];
}
-#define E1000E_LOW_BITS_READ_FUNC(num) \
- static uint32_t \
- e1000e_mac_low##num##_read(E1000ECore *core, int index) \
- { \
- return core->mac[index] & (BIT(num) - 1); \
- } \
-
-#define E1000E_LOW_BITS_READ(num) \
- e1000e_mac_low##num##_read
-
-E1000E_LOW_BITS_READ_FUNC(4);
-E1000E_LOW_BITS_READ_FUNC(6);
-E1000E_LOW_BITS_READ_FUNC(11);
-E1000E_LOW_BITS_READ_FUNC(13);
-E1000E_LOW_BITS_READ_FUNC(16);
-
static uint32_t
e1000e_mac_swsm_read(E1000ECore *core, int index)
{
uint32_t val = core->mac[SWSM];
- core->mac[SWSM] = val | 1;
+ core->mac[SWSM] = val | E1000_SWSM_SMBI;
return val;
}
@@ -2908,6 +2913,35 @@
core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
}
+static uint32_t e1000e_get_systiml(E1000ECore *core, int index)
+{
+ e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
+ return core->mac[SYSTIML];
+}
+
+static uint32_t e1000e_get_rxsatrh(E1000ECore *core, int index)
+{
+ core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
+ return core->mac[RXSATRH];
+}
+
+static uint32_t e1000e_get_txstmph(E1000ECore *core, int index)
+{
+ core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
+ return core->mac[TXSTMPH];
+}
+
+static void e1000e_set_timinca(E1000ECore *core, int index, uint32_t val)
+{
+ e1000x_set_timinca(core->mac, &core->timadj, val);
+}
+
+static void e1000e_set_timadjh(E1000ECore *core, int index, uint32_t val)
+{
+ core->mac[TIMADJH] = val;
+ core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
+}
+
#define e1000e_getreg(x) [x] = e1000e_mac_readreg
typedef uint32_t (*readops)(E1000ECore *, int);
static const readops e1000e_macreg_readops[] = {
@@ -2923,7 +2957,19 @@
e1000e_getreg(LATECOL),
e1000e_getreg(SEQEC),
e1000e_getreg(XONTXC),
+ e1000e_getreg(AIT),
+ e1000e_getreg(TDFH),
+ e1000e_getreg(TDFT),
+ e1000e_getreg(TDFHS),
+ e1000e_getreg(TDFTS),
+ e1000e_getreg(TDFPC),
e1000e_getreg(WUS),
+ e1000e_getreg(PBS),
+ e1000e_getreg(RDFH),
+ e1000e_getreg(RDFT),
+ e1000e_getreg(RDFHS),
+ e1000e_getreg(RDFTS),
+ e1000e_getreg(RDFPC),
e1000e_getreg(GORCL),
e1000e_getreg(MGTPRC),
e1000e_getreg(EERD),
@@ -2951,7 +2997,6 @@
e1000e_getreg(GSCL_2),
e1000e_getreg(RDBAH1),
e1000e_getreg(FLSWDATA),
- e1000e_getreg(RXSATRH),
e1000e_getreg(TIPG),
e1000e_getreg(FLMNGCTL),
e1000e_getreg(FLMNGCNT),
@@ -2992,7 +3037,6 @@
e1000e_getreg(FLSWCTL),
e1000e_getreg(RXDCTL1),
e1000e_getreg(RXSATRL),
- e1000e_getreg(SYSTIML),
e1000e_getreg(RXUDP),
e1000e_getreg(TORL),
e1000e_getreg(TDLEN1),
@@ -3032,7 +3076,6 @@
e1000e_getreg(FLOL),
e1000e_getreg(RXDCTL),
e1000e_getreg(RXSTMPL),
- e1000e_getreg(TXSTMPH),
e1000e_getreg(TIMADJH),
e1000e_getreg(FCRTL),
e1000e_getreg(TDBAH),
@@ -3059,16 +3102,9 @@
[MPTC] = e1000e_mac_read_clr4,
[IAC] = e1000e_mac_read_clr4,
[ICR] = e1000e_mac_icr_read,
- [RDFH] = E1000E_LOW_BITS_READ(13),
- [RDFHS] = E1000E_LOW_BITS_READ(13),
- [RDFPC] = E1000E_LOW_BITS_READ(13),
- [TDFH] = E1000E_LOW_BITS_READ(13),
- [TDFHS] = E1000E_LOW_BITS_READ(13),
[STATUS] = e1000e_get_status,
[TARC0] = e1000e_get_tarc,
- [PBS] = E1000E_LOW_BITS_READ(6),
[ICS] = e1000e_mac_ics_read,
- [AIT] = E1000E_LOW_BITS_READ(16),
[TORH] = e1000e_mac_read_clr8,
[GORCH] = e1000e_mac_read_clr8,
[PRC127] = e1000e_mac_read_clr4,
@@ -3084,27 +3120,25 @@
[BPTC] = e1000e_mac_read_clr4,
[TSCTC] = e1000e_mac_read_clr4,
[ITR] = e1000e_mac_itr_read,
- [RDFT] = E1000E_LOW_BITS_READ(13),
- [RDFTS] = E1000E_LOW_BITS_READ(13),
- [TDFPC] = E1000E_LOW_BITS_READ(13),
- [TDFT] = E1000E_LOW_BITS_READ(13),
- [TDFTS] = E1000E_LOW_BITS_READ(13),
[CTRL] = e1000e_get_ctrl,
[TARC1] = e1000e_get_tarc,
[SWSM] = e1000e_mac_swsm_read,
[IMS] = e1000e_mac_ims_read,
+ [SYSTIML] = e1000e_get_systiml,
+ [RXSATRH] = e1000e_get_rxsatrh,
+ [TXSTMPH] = e1000e_get_txstmph,
[CRCERRS ... MPC] = e1000e_mac_readreg,
[IP6AT ... IP6AT + 3] = e1000e_mac_readreg,
[IP4AT ... IP4AT + 6] = e1000e_mac_readreg,
[RA ... RA + 31] = e1000e_mac_readreg,
[WUPM ... WUPM + 31] = e1000e_mac_readreg,
- [MTA ... MTA + 127] = e1000e_mac_readreg,
- [VFTA ... VFTA + 127] = e1000e_mac_readreg,
- [FFMT ... FFMT + 254] = E1000E_LOW_BITS_READ(4),
+ [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_readreg,
+ [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_readreg,
+ [FFMT ... FFMT + 254] = e1000e_mac_readreg,
[FFVT ... FFVT + 254] = e1000e_mac_readreg,
[MDEF ... MDEF + 7] = e1000e_mac_readreg,
- [FFLT ... FFLT + 10] = E1000E_LOW_BITS_READ(11),
+ [FFLT ... FFLT + 10] = e1000e_mac_readreg,
[FTFT ... FTFT + 254] = e1000e_mac_readreg,
[PBM ... PBM + 10239] = e1000e_mac_readreg,
[RETA ... RETA + 31] = e1000e_mac_readreg,
@@ -3127,22 +3161,10 @@
e1000e_putreg(LEDCTL),
e1000e_putreg(FCAL),
e1000e_putreg(FCRUC),
- e1000e_putreg(AIT),
- e1000e_putreg(TDFH),
- e1000e_putreg(TDFT),
- e1000e_putreg(TDFHS),
- e1000e_putreg(TDFTS),
- e1000e_putreg(TDFPC),
e1000e_putreg(WUC),
e1000e_putreg(WUS),
- e1000e_putreg(RDFH),
- e1000e_putreg(RDFT),
- e1000e_putreg(RDFHS),
- e1000e_putreg(RDFTS),
- e1000e_putreg(RDFPC),
e1000e_putreg(IPAV),
e1000e_putreg(TDBAH1),
- e1000e_putreg(TIMINCA),
e1000e_putreg(IAM),
e1000e_putreg(EIAC),
e1000e_putreg(IVAR),
@@ -3150,7 +3172,6 @@
e1000e_putreg(TARC1),
e1000e_putreg(FLSWDATA),
e1000e_putreg(POEMB),
- e1000e_putreg(PBS),
e1000e_putreg(MFUTP01),
e1000e_putreg(MFUTP23),
e1000e_putreg(MANC),
@@ -3186,7 +3207,6 @@
e1000e_putreg(SYSTIML),
e1000e_putreg(SYSTIMH),
e1000e_putreg(TIMADJL),
- e1000e_putreg(TIMADJH),
e1000e_putreg(RXUDP),
e1000e_putreg(RXCFGL),
e1000e_putreg(TSYNCRXCTL),
@@ -3215,6 +3235,18 @@
[TADV] = e1000e_set_16bit,
[ITR] = e1000e_set_itr,
[EERD] = e1000e_set_eerd,
+ [AIT] = e1000e_set_16bit,
+ [TDFH] = e1000e_set_13bit,
+ [TDFT] = e1000e_set_13bit,
+ [TDFHS] = e1000e_set_13bit,
+ [TDFTS] = e1000e_set_13bit,
+ [TDFPC] = e1000e_set_13bit,
+ [RDFH] = e1000e_set_13bit,
+ [RDFHS] = e1000e_set_13bit,
+ [RDFT] = e1000e_set_13bit,
+ [RDFTS] = e1000e_set_13bit,
+ [RDFPC] = e1000e_set_13bit,
+ [PBS] = e1000e_set_6bit,
[GCR] = e1000e_set_gcr,
[PSRCTL] = e1000e_set_psrctl,
[RXCSUM] = e1000e_set_rxcsum,
@@ -3247,18 +3279,20 @@
[CTRL_DUP] = e1000e_set_ctrl,
[RFCTL] = e1000e_set_rfctl,
[RA + 1] = e1000e_mac_setmacaddr,
+ [TIMINCA] = e1000e_set_timinca,
+ [TIMADJH] = e1000e_set_timadjh,
[IP6AT ... IP6AT + 3] = e1000e_mac_writereg,
[IP4AT ... IP4AT + 6] = e1000e_mac_writereg,
[RA + 2 ... RA + 31] = e1000e_mac_writereg,
[WUPM ... WUPM + 31] = e1000e_mac_writereg,
- [MTA ... MTA + 127] = e1000e_mac_writereg,
- [VFTA ... VFTA + 127] = e1000e_mac_writereg,
- [FFMT ... FFMT + 254] = e1000e_mac_writereg,
+ [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_writereg,
+ [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_writereg,
+ [FFMT ... FFMT + 254] = e1000e_set_4bit,
[FFVT ... FFVT + 254] = e1000e_mac_writereg,
[PBM ... PBM + 10239] = e1000e_mac_writereg,
[MDEF ... MDEF + 7] = e1000e_mac_writereg,
- [FFLT ... FFLT + 10] = e1000e_mac_writereg,
+ [FFLT ... FFLT + 10] = e1000e_set_11bit,
[FTFT ... FTFT + 254] = e1000e_mac_writereg,
[RETA ... RETA + 31] = e1000e_mac_writereg,
[RSSRK ... RSSRK + 31] = e1000e_mac_writereg,
@@ -3269,10 +3303,12 @@
enum { MAC_ACCESS_PARTIAL = 1 };
-/* The array below combines alias offsets of the index values for the
+/*
+ * The array below combines alias offsets of the index values for the
* MAC registers that have aliases, with the indication of not fully
* implemented registers (lowest bit). This combination is possible
- * because all of the offsets are even. */
+ * because all of the offsets are even.
+ */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
/* Alias index offsets */
[FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802,
@@ -3281,7 +3317,7 @@
[TDH_A] = 0x0cf8, [TDT_A] = 0x0cf8, [TIDV_A] = 0x0cf8,
[TDFH_A] = 0xed00, [TDFT_A] = 0xed00,
[RA_A ... RA_A + 31] = 0x14f0,
- [VFTA_A ... VFTA_A + 127] = 0x1400,
+ [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,
[RDBAL0_A ... RDLEN0_A] = 0x09bc,
[TDBAL_A ... TDLEN_A] = 0x0cf8,
/* Access options */
@@ -3347,7 +3383,7 @@
e1000e_autoneg_resume(E1000ECore *core)
{
if (e1000e_have_autoneg(core) &&
- !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
+ !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) {
qemu_get_queue(core->owner_nic)->link_down = false;
timer_mod(core->autoneg_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
@@ -3386,11 +3422,10 @@
qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
- net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner,
- E1000E_MAX_TX_FRAGS, core->has_vnet);
+ net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
}
- net_rx_pkt_init(&core->rx_pkt, core->has_vnet);
+ net_rx_pkt_init(&core->rx_pkt);
e1000x_core_prepare_eeprom(core->eeprom,
eeprom_templ,
@@ -3422,29 +3457,36 @@
static const uint16_t
e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = {
[0] = {
- [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB |
- MII_CR_FULL_DUPLEX |
- MII_CR_AUTO_NEG_EN,
+ [MII_BMCR] = MII_BMCR_SPEED1000 |
+ MII_BMCR_FD |
+ MII_BMCR_AUTOEN,
- [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
- MII_SR_LINK_STATUS |
- MII_SR_AUTONEG_CAPS |
- MII_SR_PREAMBLE_SUPPRESS |
- MII_SR_EXTENDED_STATUS |
- MII_SR_10T_HD_CAPS |
- MII_SR_10T_FD_CAPS |
- MII_SR_100X_HD_CAPS |
- MII_SR_100X_FD_CAPS,
+ [MII_BMSR] = MII_BMSR_EXTCAP |
+ MII_BMSR_LINK_ST |
+ MII_BMSR_AUTONEG |
+ MII_BMSR_MFPS |
+ MII_BMSR_EXTSTAT |
+ MII_BMSR_10T_HD |
+ MII_BMSR_10T_FD |
+ MII_BMSR_100TX_HD |
+ MII_BMSR_100TX_FD,
- [PHY_ID1] = 0x141,
- [PHY_ID2] = E1000_PHY_ID2_82574x,
- [PHY_AUTONEG_ADV] = 0xde1,
- [PHY_LP_ABILITY] = 0x7e0,
- [PHY_AUTONEG_EXP] = BIT(2),
- [PHY_NEXT_PAGE_TX] = BIT(0) | BIT(13),
- [PHY_1000T_CTRL] = BIT(8) | BIT(9) | BIT(10) | BIT(11),
- [PHY_1000T_STATUS] = 0x3c00,
- [PHY_EXT_STATUS] = BIT(12) | BIT(13),
+ [MII_PHYID1] = 0x141,
+ [MII_PHYID2] = E1000_PHY_ID2_82574x,
+ [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 |
+ MII_ANAR_10FD | MII_ANAR_TX |
+ MII_ANAR_TXFD | MII_ANAR_PAUSE |
+ MII_ANAR_PAUSE_ASYM,
+ [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD |
+ MII_ANLPAR_TX | MII_ANLPAR_TXFD |
+ MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
+ [MII_ANER] = MII_ANER_NP | MII_ANER_NWAY,
+ [MII_ANNP] = 1 | MII_ANNP_MP,
+ [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
+ MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
+ [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
+ MII_STAT1000_ROK | MII_STAT1000_LOK,
+ [MII_EXTSTAT] = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,
[PHY_COPPER_CTRL1] = BIT(5) | BIT(6) | BIT(8) | BIT(9) |
BIT(12) | BIT(13),
@@ -3501,8 +3543,7 @@
[EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR,
};
-void
-e1000e_core_reset(E1000ECore *core)
+static void e1000e_reset(E1000ECore *core, bool sw)
{
int i;
@@ -3511,9 +3552,16 @@
e1000e_intrmgr_reset(core);
memset(core->phy, 0, sizeof core->phy);
- memmove(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);
- memset(core->mac, 0, sizeof core->mac);
- memmove(core->mac, e1000e_mac_reg_init, sizeof e1000e_mac_reg_init);
+ memcpy(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);
+
+ for (i = 0; i < E1000E_MAC_SIZE; i++) {
+ if (sw && (i == PBA || i == PBS || i == FLA)) {
+ continue;
+ }
+
+ core->mac[i] = i < ARRAY_SIZE(e1000e_mac_reg_init) ?
+ e1000e_mac_reg_init[i] : 0;
+ }
core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;
@@ -3530,18 +3578,24 @@
}
}
+void
+e1000e_core_reset(E1000ECore *core)
+{
+ e1000e_reset(core, false);
+}
+
void e1000e_core_pre_save(E1000ECore *core)
{
int i;
NetClientState *nc = qemu_get_queue(core->owner_nic);
/*
- * If link is down and auto-negotiation is supported and ongoing,
- * complete auto-negotiation immediately. This allows us to look
- * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
- */
+ * If link is down and auto-negotiation is supported and ongoing,
+ * complete auto-negotiation immediately. This allows us to look
+ * at MII_BMSR_AN_COMP to infer link status on load.
+ */
if (nc->link_down && e1000e_have_autoneg(core)) {
- core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
+ core->phy[0][MII_BMSR] |= MII_BMSR_AN_COMP;
e1000e_update_flowctl_status(core);
}
@@ -3557,7 +3611,8 @@
{
NetClientState *nc = qemu_get_queue(core->owner_nic);
- /* nc.link_down can't be migrated, so infer link_down according
+ /*
+ * nc.link_down can't be migrated, so infer link_down according
* to link status bit in core.mac[STATUS].
*/
nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;
diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h
index 4ddb4d2..213a705 100644
--- a/hw/net/e1000e_core.h
+++ b/hw/net/e1000e_core.h
@@ -1,37 +1,37 @@
/*
-* Core code for QEMU e1000e emulation
-*
-* Software developer's manuals:
-* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
-*
-* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
-* Developed by Daynix Computing LTD (http://www.daynix.com)
-*
-* Authors:
-* Dmitry Fleytman <dmitry@daynix.com>
-* Leonid Bloch <leonid@daynix.com>
-* Yan Vugenfirer <yan@daynix.com>
-*
-* Based on work done by:
-* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
-* Copyright (c) 2008 Qumranet
-* Based on work done by:
-* Copyright (c) 2007 Dan Aloni
-* Copyright (c) 2004 Antony T Curtis
-*
-* This library is free software; you can redistribute it and/or
-* modify it under the terms of the GNU Lesser General Public
-* License as published by the Free Software Foundation; either
-* version 2.1 of the License, or (at your option) any later version.
-*
-* This library is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* Lesser General Public License for more details.
-*
-* You should have received a copy of the GNU Lesser General Public
-* License along with this library; if not, see <http://www.gnu.org/licenses/>.
-*/
+ * Core code for QEMU e1000e emulation
+ *
+ * Software developer's manuals:
+ * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
+ *
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2008 Qumranet
+ * Based on work done by:
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
#ifndef HW_NET_E1000E_CORE_H
#define HW_NET_E1000E_CORE_H
@@ -95,10 +95,8 @@
E1000IntrDelayTimer tidv;
E1000IntrDelayTimer itr;
- bool itr_intr_pending;
E1000IntrDelayTimer eitr[E1000E_MSIX_VEC_NUM];
- bool eitr_intr_pending[E1000E_MSIX_VEC_NUM];
VMChangeStateEntry *vmstate;
@@ -114,6 +112,8 @@
void (*owner_start_recv)(PCIDevice *d);
uint32_t msi_causes_pending;
+
+ int64_t timadj;
};
void
diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c
index 2f43e8c..b844af5 100644
--- a/hw/net/e1000x_common.c
+++ b/hw/net/e1000x_common.c
@@ -24,9 +24,12 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "hw/net/mii.h"
#include "hw/pci/pci_device.h"
+#include "net/eth.h"
#include "net/net.h"
+#include "e1000_common.h"
#include "e1000x_common.h"
#include "trace.h"
@@ -45,9 +48,9 @@
return true;
}
-bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet)
+bool e1000x_is_vlan_packet(const void *buf, uint16_t vet)
{
- uint16_t eth_proto = lduw_be_p(buf + 12);
+ uint16_t eth_proto = lduw_be_p(&PKT_GET_ETH_HDR(buf)->h_proto);
bool res = (eth_proto == vet);
trace_e1000x_vlan_is_vlan_pkt(res, eth_proto, vet);
@@ -66,7 +69,7 @@
}
ra[0] = cpu_to_le32(rp[0]);
ra[1] = cpu_to_le32(rp[1]);
- if (!memcmp(buf, (uint8_t *)ra, 6)) {
+ if (!memcmp(buf, (uint8_t *)ra, ETH_ALEN)) {
trace_e1000x_rx_flt_ucast_match((int)(rp - mac - RA) / 2,
MAC_ARG(buf));
return true;
@@ -152,8 +155,8 @@
void e1000x_update_regs_on_autoneg_done(uint32_t *mac, uint16_t *phy)
{
e1000x_update_regs_on_link_up(mac, phy);
- phy[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
- phy[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
+ phy[MII_ANLPAR] |= MII_ANLPAR_ACK;
+ phy[MII_BMSR] |= MII_BMSR_AN_COMP;
trace_e1000x_link_negotiation_done();
}
@@ -265,3 +268,28 @@
props->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
props->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
}
+
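+/*
+ * Latch the current SYSTIM value into the mac[lo]/mac[hi] register pair.
+ * As written below, the 64-bit timestamp is computed as
+ * timadj + ns * incvalue / (incperiod * 16), where ns is the current
+ * QEMU_CLOCK_VIRTUAL reading and incvalue/incperiod come from TIMINCA.
+ */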
+void e1000x_timestamp(uint32_t *mac, int64_t timadj, size_t lo, size_t hi)
+{
+ int64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint32_t timinca = mac[TIMINCA];
+ uint32_t incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
+ uint32_t incperiod = MAX(timinca >> E1000_TIMINCA_INCPERIOD_SHIFT, 1);
+ int64_t timestamp = timadj + muldiv64(ns, incvalue, incperiod * 16);
+
+ mac[lo] = timestamp & 0xffffffff;
+ mac[hi] = timestamp >> 32;
+}
+
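+/*
+ * Record a new TIMINCA value. The helper also folds the difference between
+ * the new and old scaling of the current virtual-clock reading into
+ * *timadj, presumably so that guest-visible SYSTIM values remain consistent
+ * across TIMINCA updates.
+ */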
+void e1000x_set_timinca(uint32_t *mac, int64_t *timadj, uint32_t val)
+{
+ int64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint32_t old_val = mac[TIMINCA];
+ uint32_t old_incvalue = old_val & E1000_TIMINCA_INCVALUE_MASK;
+ uint32_t old_incperiod = MAX(old_val >> E1000_TIMINCA_INCPERIOD_SHIFT, 1);
+ uint32_t incvalue = val & E1000_TIMINCA_INCVALUE_MASK;
+ uint32_t incperiod = MAX(val >> E1000_TIMINCA_INCPERIOD_SHIFT, 1);
+
+ mac[TIMINCA] = val;
+ *timadj += (muldiv64(ns, incvalue, incperiod) - muldiv64(ns, old_incvalue, old_incperiod)) / 16;
+}
diff --git a/hw/net/e1000x_common.h b/hw/net/e1000x_common.h
index b774277..911abd8 100644
--- a/hw/net/e1000x_common.h
+++ b/hw/net/e1000x_common.h
@@ -1,108 +1,34 @@
/*
-* QEMU e1000(e) emulation - shared code
-*
-* Copyright (c) 2008 Qumranet
-*
-* Based on work done by:
-* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
-* Copyright (c) 2007 Dan Aloni
-* Copyright (c) 2004 Antony T Curtis
-*
-* This library is free software; you can redistribute it and/or
-* modify it under the terms of the GNU Lesser General Public
-* License as published by the Free Software Foundation; either
-* version 2.1 of the License, or (at your option) any later version.
-*
-* This library is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* Lesser General Public License for more details.
-*
-* You should have received a copy of the GNU Lesser General Public
-* License along with this library; if not, see <http://www.gnu.org/licenses/>.
-*/
+ * QEMU e1000(e) emulation - shared code
+ *
+ * Copyright (c) 2008 Qumranet
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
#ifndef HW_NET_E1000X_COMMON_H
#define HW_NET_E1000X_COMMON_H
-#include "e1000_regs.h"
-
-#define defreg(x) x = (E1000_##x >> 2)
-enum {
- defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
- defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
- defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
- defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH0),
- defreg(RDBAL0), defreg(RDH0), defreg(RDLEN0), defreg(RDT0),
- defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
- defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
- defreg(TDLEN1), defreg(TDBAL1), defreg(TDBAH1), defreg(TDH1),
- defreg(TDT1), defreg(TORH), defreg(TORL), defreg(TOTH),
- defreg(TOTL), defreg(TPR), defreg(TPT), defreg(TXDCTL),
- defreg(WUFC), defreg(RA), defreg(MTA), defreg(CRCERRS),
- defreg(VFTA), defreg(VET), defreg(RDTR), defreg(RADV),
- defreg(TADV), defreg(ITR), defreg(SCC), defreg(ECOL),
- defreg(MCC), defreg(LATECOL), defreg(COLC), defreg(DC),
- defreg(TNCRS), defreg(SEQEC), defreg(CEXTERR), defreg(RLEC),
- defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC), defreg(XOFFTXC),
- defreg(FCRUC), defreg(AIT), defreg(TDFH), defreg(TDFT),
- defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(WUC),
- defreg(WUS), defreg(POEMB), defreg(PBS), defreg(RDFH),
- defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC),
- defreg(PBM), defreg(IPAV), defreg(IP4AT), defreg(IP6AT),
- defreg(WUPM), defreg(FFLT), defreg(FFMT), defreg(FFVT),
- defreg(TARC0), defreg(TARC1), defreg(IAM), defreg(EXTCNF_CTRL),
- defreg(GCR), defreg(TIMINCA), defreg(EIAC), defreg(CTRL_EXT),
- defreg(IVAR), defreg(MFUTP01), defreg(MFUTP23), defreg(MANC2H),
- defreg(MFVAL), defreg(MDEF), defreg(FACTPS), defreg(FTFT),
- defreg(RUC), defreg(ROC), defreg(RFC), defreg(RJC),
- defreg(PRC64), defreg(PRC127), defreg(PRC255), defreg(PRC511),
- defreg(PRC1023), defreg(PRC1522), defreg(PTC64), defreg(PTC127),
- defreg(PTC255), defreg(PTC511), defreg(PTC1023), defreg(PTC1522),
- defreg(GORCL), defreg(GORCH), defreg(GOTCL), defreg(GOTCH),
- defreg(RNBC), defreg(BPRC), defreg(MPRC), defreg(RFCTL),
- defreg(PSRCTL), defreg(MPTC), defreg(BPTC), defreg(TSCTFC),
- defreg(IAC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC),
- defreg(TSCTC), defreg(RXCSUM), defreg(FUNCTAG), defreg(GSCL_1),
- defreg(GSCL_2), defreg(GSCL_3), defreg(GSCL_4), defreg(GSCN_0),
- defreg(GSCN_1), defreg(GSCN_2), defreg(GSCN_3), defreg(GCR2),
- defreg(RAID), defreg(RSRPD), defreg(TIDV), defreg(EITR),
- defreg(MRQC), defreg(RETA), defreg(RSSRK), defreg(RDBAH1),
- defreg(RDBAL1), defreg(RDLEN1), defreg(RDH1), defreg(RDT1),
- defreg(PBACLR), defreg(FCAL), defreg(FCAH), defreg(FCT),
- defreg(FCRTH), defreg(FCRTL), defreg(FCTTV), defreg(FCRTV),
- defreg(FLA), defreg(EEWR), defreg(FLOP), defreg(FLOL),
- defreg(FLSWCTL), defreg(FLSWCNT), defreg(RXDCTL), defreg(RXDCTL1),
- defreg(MAVTV0), defreg(MAVTV1), defreg(MAVTV2), defreg(MAVTV3),
- defreg(TXSTMPL), defreg(TXSTMPH), defreg(SYSTIML), defreg(SYSTIMH),
- defreg(RXCFGL), defreg(RXUDP), defreg(TIMADJL), defreg(TIMADJH),
- defreg(RXSTMPH), defreg(RXSTMPL), defreg(RXSATRL), defreg(RXSATRH),
- defreg(FLASHT), defreg(TIPG), defreg(RDH), defreg(RDT),
- defreg(RDLEN), defreg(RDBAH), defreg(RDBAL),
- defreg(TXDCTL1),
- defreg(FLSWDATA),
- defreg(CTRL_DUP),
- defreg(EXTCNF_SIZE),
- defreg(EEMNGCTL),
- defreg(EEMNGDATA),
- defreg(FLMNGCTL),
- defreg(FLMNGDATA),
- defreg(FLMNGCNT),
- defreg(TSYNCRXCTL),
- defreg(TSYNCTXCTL),
-
- /* Aliases */
- defreg(RDH0_A), defreg(RDT0_A), defreg(RDTR_A), defreg(RDFH_A),
- defreg(RDFT_A), defreg(TDH_A), defreg(TDT_A), defreg(TIDV_A),
- defreg(TDFH_A), defreg(TDFT_A), defreg(RA_A), defreg(RDBAL0_A),
- defreg(TDBAL_A), defreg(TDLEN_A), defreg(VFTA_A), defreg(RDLEN0_A),
- defreg(FCRTL_A), defreg(FCRTH_A)
-};
-
static inline void
e1000x_inc_reg_if_not_full(uint32_t *mac, int index)
{
- if (mac[index] != 0xffffffff) {
+ if (mac[index] != UINT32_MAX) {
mac[index]++;
}
}
@@ -152,16 +78,16 @@
e1000x_update_regs_on_link_down(uint32_t *mac, uint16_t *phy)
{
mac[STATUS] &= ~E1000_STATUS_LU;
- phy[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
- phy[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
- phy[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
+ phy[MII_BMSR] &= ~MII_BMSR_LINK_ST;
+ phy[MII_BMSR] &= ~MII_BMSR_AN_COMP;
+ phy[MII_ANLPAR] &= ~MII_ANLPAR_ACK;
}
static inline void
e1000x_update_regs_on_link_up(uint32_t *mac, uint16_t *phy)
{
mac[STATUS] |= E1000_STATUS_LU;
- phy[PHY_STATUS] |= MII_SR_LINK_STATUS;
+ phy[MII_BMSR] |= MII_BMSR_LINK_ST;
}
void e1000x_update_rx_total_stats(uint32_t *mac,
@@ -178,7 +104,7 @@
bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac);
-bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet);
+bool e1000x_is_vlan_packet(const void *buf, uint16_t vet);
bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf);
@@ -213,4 +139,7 @@
void e1000x_read_tx_ctx_descr(struct e1000_context_desc *d,
e1000x_txd_props *props);
+void e1000x_timestamp(uint32_t *mac, int64_t timadj, size_t lo, size_t hi);
+void e1000x_set_timinca(uint32_t *mac, int64_t *timadj, uint32_t val);
+
#endif
diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h
new file mode 100644
index 0000000..c0832fa
--- /dev/null
+++ b/hw/net/e1000x_regs.h
@@ -0,0 +1,967 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, see <http://www.gnu.org/licenses/>.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef HW_E1000X_REGS_H
+#define HW_E1000X_REGS_H
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82574L 0x10D3
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+
+/* Device Specific Register Defaults */
+#define E1000_PHY_ID2_82541x 0x380
+#define E1000_PHY_ID2_82544x 0xC30
+#define E1000_PHY_ID2_8254xx_DEFAULT 0xC20 /* 82540x, 82545x, and 82546x */
+#define E1000_PHY_ID2_82573x 0xCC0
+#define E1000_PHY_ID2_82574x 0xCB1
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32 bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TCTL 0x00400 /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEMNGDATA 0x01014 /* MNG EEPROM Read/Write data */
+#define E1000_FLMNGCTL 0x01018 /* MNG Flash Control */
+#define E1000_FLMNGDATA 0x0101C /* MNG FLASH Read data */
+#define E1000_FLMNGCNT 0x01020 /* MNG FLASH Read Counter */
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTL_A 0x00168 /* Alias to FCRTL */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_RDFH 0x02410 /* Receive Data FIFO Head Register - RW */
+#define E1000_RDFH_A 0x08000 /* Alias to RDFH */
+#define E1000_RDFT 0x02418 /* Receive Data FIFO Tail Register - RW */
+#define E1000_RDFT_A 0x08008 /* Alias to RDFT */
+#define E1000_RDFHS 0x02420 /* Receive Data FIFO Head Saved Register - RW */
+#define E1000_RDFTS 0x02428 /* Receive Data FIFO Tail Saved Register - RW */
+#define E1000_RDFPC 0x02430 /* Receive Data FIFO Packet Count - RW */
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFH_A 0x08010 /* Alias to TDFH */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFT_A 0x08018 /* Alias to TDFT */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
+#define E1000_MAVTV0 0x05010 /* Management VLAN TAG Value 0 */
+#define E1000_MAVTV1 0x05014 /* Management VLAN TAG Value 1 */
+#define E1000_MAVTV2 0x05018 /* Management VLAN TAG Value 2 */
+#define E1000_MAVTV3 0x0501c /* Management VLAN TAG Value 3 */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_RA_A 0x00040 /* Alias to RA */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_VFTA_A 0x00600 /* Alias to VFTA */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_MFVAL 0x05824 /* Manageability Filters Valid - RW */
+#define E1000_MDEF 0x05890 /* Manageability Decision Filters - RW Array */
+#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FTFT 0x09400 /* Flexible TCO Filter Table - RW Array */
+
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_FUNCTAG 0x05B08 /* Function-Tag Register */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_GSCN_0 0x05B20 /* 3GIO Statistic Counter Register #0 */
+#define E1000_GSCN_1 0x05B24 /* 3GIO Statistic Counter Register #1 */
+#define E1000_GSCN_2 0x05B28 /* 3GIO Statistic Counter Register #2 */
+#define E1000_GSCN_3 0x05B2C /* 3GIO Statistic Counter Register #3 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_PBACLR 0x05B68 /* MSI-X PBA Clear */
+
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TIMADJL 0x0B60C /* Time Adjustment Offset register Low - RW */
+#define E1000_TIMADJH 0x0B610 /* Time Adjustment Offset register High - RW */
+
+/* RSS registers */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
+
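+/*
+ * Helper macros used by the device models (not hardware registers): the low
+ * 7 bits of the RSS hash index the 128-byte redirection table, and
+ * E1000_RETA_VAL fetches the queue byte for that index.
+ */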
+#define E1000_RETA_IDX(hash) ((hash) & (BIT(7) - 1))
+#define E1000_RETA_VAL(reta, hash) (((uint8_t *)(reta))[E1000_RETA_IDX(hash)])
+
+#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16))
+#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17))
+#define E1000_MRQC_EN_TCPIPV6(mrqc) ((mrqc) & BIT(18))
+#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19))
+#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20))
+
+#define E1000_MRQ_RSS_TYPE_NONE (0)
+#define E1000_MRQ_RSS_TYPE_IPV4TCP (1)
+#define E1000_MRQ_RSS_TYPE_IPV4 (2)
+#define E1000_MRQ_RSS_TYPE_IPV6TCP (3)
+#define E1000_MRQ_RSS_TYPE_IPV6EX (4)
+#define E1000_MRQ_RSS_TYPE_IPV6 (5)
+
+#define E1000_ICR_ASSERTED BIT(31)
+#define E1000_EIAC_MASK 0x01F00000
+
+/* RFCTL register bits */
+#define E1000_RFCTL_ISCSI_DIS 0x00000001
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_IPV6_DIS 0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define E1000_RFCTL_IPFRSP_DIS 0x00004000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* TARC* parsing */
+#define E1000_TARC_ENABLE BIT(10)
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_SRPD 0x00010000
+#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
+#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
+
+#define E1000_ICR_OTHER_CAUSES (E1000_ICR_LSC | \
+ E1000_ICR_RXO | \
+ E1000_ICR_MDAC | \
+ E1000_ICR_SRPD | \
+ E1000_ICR_ACK | \
+ E1000_ICR_MNG)
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD E1000_ICR_SRPD
+#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW E1000_ICR_DSW
+#define E1000_ICS_PHYINT E1000_ICR_PHYINT
+#define E1000_ICS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD E1000_ICR_SRPD
+#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMS_RXQ0 E1000_ICR_RXQ0
+#define E1000_IMS_RXQ1 E1000_ICR_RXQ1
+#define E1000_IMS_TXQ0 E1000_ICR_TXQ0
+#define E1000_IMS_TXQ1 E1000_ICR_TXQ1
+#define E1000_IMS_OTHER E1000_ICR_OTHER
+#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW E1000_ICR_DSW
+#define E1000_IMS_PHYINT E1000_ICR_PHYINT
+#define E1000_IMS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD E1000_ICR_SRPD
+#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW E1000_ICR_DSW
+#define E1000_IMC_PHYINT E1000_ICR_PHYINT
+#define E1000_IMC_EPRST E1000_ICR_EPRST
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
+
+
+#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE 0x10 /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 8 /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
+
+/* 82574 EERD/EEWR registers layout */
+#define E1000_EERW_START BIT(0)
+#define E1000_EERW_DONE BIT(1)
+#define E1000_EERW_ADDR_SHIFT 2
+#define E1000_EERW_ADDR_MASK ((1L << 14) - 1)
+#define E1000_EERW_DATA_SHIFT 16
+#define E1000_EERW_DATA_MASK ((1L << 16) - 1)
+
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_SPD_SHIFT 8 /* Speed Select Shift */
+
+#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* auto speed detection check */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* EEPROM reset */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA 0x20000000
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK 0x00000030
+#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+ * (0-small, 1-large) */
+#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+
+
+#define E1000_EECD_SECVAL_SHIFT 22
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_HICR_FW_RESET 0xC0
+
+#define E1000_SHADOW_RAM_WORDS 2048
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC0
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_INT_EN 0x20000000
+#define E1000_MDIC_ERROR 0x40000000
+
+/* Rx Interrupt Delay Timer */
+#define E1000_RDTR_FPD BIT(31)
+
+/* Tx Interrupt Delay Timer */
+#define E1000_TIDV_FPD BIT(31)
+
+/* Delay increments in nanoseconds for delayed interrupts registers */
+#define E1000_INTR_DELAY_NS_RES (1024)
+
+/* Delay increments in nanoseconds for interrupt throttling registers */
+#define E1000_INTR_THROTTLING_NS_RES (256)
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT 0x0003
+#define EEPROM_ID_LED_SETTINGS 0x0004
+#define EEPROM_VERSION 0x0005
+#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD 0x0007
+#define EEPROM_INIT_CONTROL1_REG 0x000A
+#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
+#define EEPROM_INIT_3GIO_3 0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
+#define EEPROM_CFG 0x0012
+#define EEPROM_FLASH_VERSION 0x0032
+#define EEPROM_CHECKSUM_REG 0x003F
+
+#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
+
+/* HH Time Sync */
+#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
+
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
+
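+/* TIMINCA layout: bits 31:24 hold the increment period, bits 23:0 the increment value */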
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+/* PCI Express Control */
+/* 3GIO Control Register - GCR (0x05B00; RW) */
+#define E1000_L0S_ADJUST (1 << 9)
+#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23)
+#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26)
+
+#define E1000_GCR_RO_BITS (1 << 23 | 1 << 25 | 1 << 26)
+
+/* MSI-X PBA Clear register */
+#define E1000_PBACLR_VALID_MASK (BIT(5) - 1)
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_CMD_SNAP 0x40000000 /* Update SNAP header */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define E1000_TCTL_RST 0x00000001 /* software reset */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
+#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* Legacy Receive Descriptor */
+struct e1000_rx_desc {
+ uint64_t buffer_addr; /* Address of the descriptor's data buffer */
+ uint16_t length; /* Length of data DMAed into data buffer */
+ uint16_t csum; /* Packet checksum */
+ uint8_t status; /* Descriptor status */
+ uint8_t errors; /* Descriptor Errors */
+ uint16_t special;
+};
+
+/* Extended Receive Descriptor */
+union e1000_rx_desc_extended {
+ struct {
+ uint64_t buffer_addr;
+ uint64_t reserved;
+ } read;
+ struct {
+ struct {
+ uint32_t mrq; /* Multiple Rx Queues */
+ union {
+ uint32_t rss; /* RSS Hash */
+ struct {
+ uint16_t ip_id; /* IP id */
+ uint16_t csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ uint32_t status_error; /* ext status/error */
+ uint16_t length;
+ uint16_t vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ uint64_t buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ uint32_t mrq; /* Multiple Rx Queues */
+ union {
+ uint32_t rss; /* RSS Hash */
+ struct {
+ uint16_t ip_id; /* IP id */
+ uint16_t csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ uint32_t status_error; /* ext status/error */
+ uint16_t length0; /* length of buffer 0 */
+ uint16_t vlan; /* VLAN tag */
+ } middle;
+ struct {
+ uint16_t header_status;
+ /* length of buffers 1-3 */
+ uint16_t length[PS_PAGE_BUFFERS];
+ } upper;
+ uint64_t reserved;
+ } wb; /* writeback */
+};
+
+/* Receive Checksum Control bits */
+#define E1000_RXCSUM_IPOFLD 0x100 /* IP Checksum Offload Enable */
+#define E1000_RXCSUM_TUOFLD 0x200 /* TCP/UDP Checksum Offload Enable */
+#define E1000_RXCSUM_PCSD 0x2000 /* Packet Checksum Disable */
+
+#define E1000_RING_DESC_LEN (16)
+#define E1000_RING_DESC_LEN_SHIFT (4)
+
+#define E1000_MIN_RX_DESC_LEN E1000_RING_DESC_LEN
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+/* RX packet types */
+#define E1000_RXD_PKT_MAC (0)
+#define E1000_RXD_PKT_IP4 (1)
+#define E1000_RXD_PKT_IP4_XDP (2)
+#define E1000_RXD_PKT_IP6 (5)
+#define E1000_RXD_PKT_IP6_XDP (6)
+
+#define E1000_RXD_PKT_TYPE(t) ((t) << 16)
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+/* Receive Address */
+#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ uint32_t ip_config;
+ struct {
+ uint8_t ipcss; /* IP checksum start */
+ uint8_t ipcso; /* IP checksum offset */
+ uint16_t ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ uint32_t tcp_config;
+ struct {
+ uint8_t tucss; /* TCP checksum start */
+ uint8_t tucso; /* TCP checksum offset */
+ uint16_t tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+    uint32_t cmd_and_length;    /* Command and length */
+ union {
+ uint32_t data;
+ struct {
+ uint8_t status; /* Descriptor status */
+ uint8_t hdr_len; /* Header length */
+ uint16_t mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
+#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
+#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
+ * Filtering */
+#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_DIS_IP_CHK_ARP 0x10000000 /* Disable IP address checking */
+                                             /* for ARP packets - in 82574 */
+#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
+ * filtering */
+#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
+ * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
+ * filtering */
+#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
+
+/* FACTPS Control */
+#define E1000_FACTPS_LAN0_ON 0x00000004 /* Lan 0 enable */
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
+
+/* I/O-Mapped Access to Internal Registers, Memories, and Flash */
+#define E1000_IOADDR 0x00
+#define E1000_IODATA 0x04
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#endif /* HW_E1000_REGS_H */
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
index c753bfb..798ea33 100644
--- a/hw/net/fsl_etsec/etsec.c
+++ b/hw/net/fsl_etsec/etsec.c
@@ -29,6 +29,7 @@
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
+#include "hw/net/mii.h"
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
#include "etsec.h"
@@ -339,11 +340,11 @@
etsec->rx_buffer_len = 0;
etsec->phy_status =
- MII_SR_EXTENDED_CAPS | MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
- MII_SR_AUTONEG_COMPLETE | MII_SR_PREAMBLE_SUPPRESS |
- MII_SR_EXTENDED_STATUS | MII_SR_100T2_HD_CAPS | MII_SR_100T2_FD_CAPS |
- MII_SR_10T_HD_CAPS | MII_SR_10T_FD_CAPS | MII_SR_100X_HD_CAPS |
- MII_SR_100X_FD_CAPS | MII_SR_100T4_CAPS;
+ MII_BMSR_EXTCAP | MII_BMSR_LINK_ST | MII_BMSR_AUTONEG |
+ MII_BMSR_AN_COMP | MII_BMSR_MFPS | MII_BMSR_EXTSTAT |
+ MII_BMSR_100T2_HD | MII_BMSR_100T2_FD |
+ MII_BMSR_10T_HD | MII_BMSR_10T_FD |
+ MII_BMSR_100TX_HD | MII_BMSR_100TX_FD | MII_BMSR_100T4;
etsec_update_irq(etsec);
}
diff --git a/hw/net/fsl_etsec/etsec.h b/hw/net/fsl_etsec/etsec.h
index 3c625c9..3860864 100644
--- a/hw/net/fsl_etsec/etsec.h
+++ b/hw/net/fsl_etsec/etsec.h
@@ -76,23 +76,6 @@
#define FCB_TX_CTU (1 << 1)
#define FCB_TX_NPH (1 << 0)
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
/* eTSEC */
/* Number of register in the device */
diff --git a/hw/net/fsl_etsec/miim.c b/hw/net/fsl_etsec/miim.c
index 6bba01c..b48d2cb 100644
--- a/hw/net/fsl_etsec/miim.c
+++ b/hw/net/fsl_etsec/miim.c
@@ -23,6 +23,7 @@
*/
#include "qemu/osdep.h"
+#include "hw/net/mii.h"
#include "etsec.h"
#include "registers.h"
@@ -140,8 +141,8 @@
{
/* Set link status */
if (nc->link_down) {
- etsec->phy_status &= ~MII_SR_LINK_STATUS;
+ etsec->phy_status &= ~MII_BMSR_LINK_ST;
} else {
- etsec->phy_status |= MII_SR_LINK_STATUS;
+ etsec->phy_status |= MII_BMSR_LINK_ST;
}
}
diff --git a/hw/net/igb.c b/hw/net/igb.c
new file mode 100644
index 0000000..c6d753d
--- /dev/null
+++ b/hw/net/igb.c
@@ -0,0 +1,623 @@
+/*
+ * QEMU Intel 82576 SR/IOV Ethernet Controller Emulation
+ *
+ * Datasheet:
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
+ *
+ * Copyright (c) 2020-2023 Red Hat, Inc.
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Akihiko Odaki <akihiko.odaki@daynix.com>
+ * Gal Hammmer <gal.hammer@sap.com>
+ * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2008 Qumranet
+ * Based on work done by:
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "net/eth.h"
+#include "net/net.h"
+#include "net/tap.h"
+#include "qemu/module.h"
+#include "qemu/range.h"
+#include "sysemu/sysemu.h"
+#include "hw/hw.h"
+#include "hw/net/mii.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pcie.h"
+#include "hw/pci/pcie_sriov.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+#include "igb_common.h"
+#include "igb_core.h"
+
+#include "trace.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+
+#define TYPE_IGB "igb"
+OBJECT_DECLARE_SIMPLE_TYPE(IGBState, IGB)
+
+struct IGBState {
+ PCIDevice parent_obj;
+ NICState *nic;
+ NICConf conf;
+
+ MemoryRegion mmio;
+ MemoryRegion flash;
+ MemoryRegion io;
+ MemoryRegion msix;
+
+ uint32_t ioaddr;
+
+ IGBCore core;
+};
+
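+/*
+ * SR-IOV layout handed to pcie_sriov_pf_init() in igb_pci_realize(): the
+ * capability sits at config offset 0x160, the first VF is 0x80 routing IDs
+ * after the PF and VFs are spaced 2 apart.  The BAR indexes and sizes below
+ * keep their e1000e-style names.
+ */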
+#define IGB_CAP_SRIOV_OFFSET (0x160)
+#define IGB_VF_OFFSET (0x80)
+#define IGB_VF_STRIDE (2)
+
+#define E1000E_MMIO_IDX 0
+#define E1000E_FLASH_IDX 1
+#define E1000E_IO_IDX 2
+#define E1000E_MSIX_IDX 3
+
+#define E1000E_MMIO_SIZE (128 * KiB)
+#define E1000E_FLASH_SIZE (128 * KiB)
+#define E1000E_IO_SIZE (32)
+#define E1000E_MSIX_SIZE (16 * KiB)
+
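+/*
+ * Forward config-space writes to the default handler, and kick the receive
+ * path once the guest enables bus mastering, since packets can only be
+ * DMA'd into the rings with PCI_COMMAND_MASTER set.
+ */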
+static void igb_write_config(PCIDevice *dev, uint32_t addr,
+ uint32_t val, int len)
+{
+ IGBState *s = IGB(dev);
+
+ trace_igb_write_config(addr, val, len);
+ pci_default_write_config(dev, addr, val, len);
+
+ if (range_covers_byte(addr, len, PCI_COMMAND) &&
+ (dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
+ igb_start_recv(&s->core);
+ }
+}
+
+uint64_t
+igb_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+ IGBState *s = opaque;
+ return igb_core_read(&s->core, addr, size);
+}
+
+void
+igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ IGBState *s = opaque;
+ igb_core_write(&s->core, addr, val, size);
+}
+
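+/*
+ * Decode the IOADDR window: addresses below 0x1FFFF select a device register
+ * (returned in *idx); the undefined, flash and unknown ranges above that are
+ * only traced and rejected.
+ */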
+static bool
+igb_io_get_reg_index(IGBState *s, uint32_t *idx)
+{
+ if (s->ioaddr < 0x1FFFF) {
+ *idx = s->ioaddr;
+ return true;
+ }
+
+ if (s->ioaddr < 0x7FFFF) {
+ trace_e1000e_wrn_io_addr_undefined(s->ioaddr);
+ return false;
+ }
+
+ if (s->ioaddr < 0xFFFFF) {
+ trace_e1000e_wrn_io_addr_flash(s->ioaddr);
+ return false;
+ }
+
+ trace_e1000e_wrn_io_addr_unknown(s->ioaddr);
+ return false;
+}
+
+static uint64_t
+igb_io_read(void *opaque, hwaddr addr, unsigned size)
+{
+ IGBState *s = opaque;
+ uint32_t idx = 0;
+ uint64_t val;
+
+ switch (addr) {
+ case E1000_IOADDR:
+ trace_e1000e_io_read_addr(s->ioaddr);
+ return s->ioaddr;
+ case E1000_IODATA:
+ if (igb_io_get_reg_index(s, &idx)) {
+ val = igb_core_read(&s->core, idx, sizeof(val));
+ trace_e1000e_io_read_data(idx, val);
+ return val;
+ }
+ return 0;
+ default:
+ trace_e1000e_wrn_io_read_unknown(addr);
+ return 0;
+ }
+}
+
+static void
+igb_io_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ IGBState *s = opaque;
+ uint32_t idx = 0;
+
+ switch (addr) {
+ case E1000_IOADDR:
+ trace_e1000e_io_write_addr(val);
+ s->ioaddr = (uint32_t) val;
+ return;
+ case E1000_IODATA:
+ if (igb_io_get_reg_index(s, &idx)) {
+ trace_e1000e_io_write_data(idx, val);
+ igb_core_write(&s->core, idx, val, sizeof(val));
+ }
+ return;
+ default:
+ trace_e1000e_wrn_io_write_unknown(addr);
+ return;
+ }
+}
+
+static const MemoryRegionOps mmio_ops = {
+ .read = igb_mmio_read,
+ .write = igb_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const MemoryRegionOps io_ops = {
+ .read = igb_io_read,
+ .write = igb_io_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static bool
+igb_nc_can_receive(NetClientState *nc)
+{
+ IGBState *s = qemu_get_nic_opaque(nc);
+ return igb_can_receive(&s->core);
+}
+
+static ssize_t
+igb_nc_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
+{
+ IGBState *s = qemu_get_nic_opaque(nc);
+ return igb_receive_iov(&s->core, iov, iovcnt);
+}
+
+static ssize_t
+igb_nc_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+{
+ IGBState *s = qemu_get_nic_opaque(nc);
+ return igb_receive(&s->core, buf, size);
+}
+
+static void
+igb_set_link_status(NetClientState *nc)
+{
+ IGBState *s = qemu_get_nic_opaque(nc);
+ igb_core_set_link_status(&s->core);
+}
+
+static NetClientInfo net_igb_info = {
+ .type = NET_CLIENT_DRIVER_NIC,
+ .size = sizeof(NICState),
+ .can_receive = igb_nc_can_receive,
+ .receive = igb_nc_receive,
+ .receive_iov = igb_nc_receive_iov,
+ .link_status_changed = igb_set_link_status,
+};
+
+/*
+ * EEPROM (NVM) contents documented in section 6.1, table 6-1, and in
+ * section 6.10, "Software accessed words".
+ */
+static const uint16_t igb_eeprom_template[] = {
+ /* Address |Compat.|OEM sp.| ImRev | OEM sp. */
+ 0x0000, 0x0000, 0x0000, 0x0d34, 0xffff, 0x2010, 0xffff, 0xffff,
+ /* PBA |ICtrl1 | SSID | SVID | DevID |-------|ICtrl2 */
+ 0x1040, 0xffff, 0x002b, 0x0000, 0x8086, 0x10c9, 0x0000, 0x70c3,
+ /* SwPin0| DevID | EESZ |-------|ICtrl3 |PCI-tc | MSIX | APtr */
+ 0x0004, 0x10c9, 0x5c00, 0x0000, 0x2880, 0x0014, 0x4a40, 0x0060,
+ /* PCIe Init. Conf 1,2,3 |PCICtrl| LD1,3 |DDevID |DevRev | LD0,2 */
+ 0x6cfb, 0xc7b0, 0x0abe, 0x0403, 0x0783, 0x10a6, 0x0001, 0x0602,
+ /* SwPin1| FunC |LAN-PWR|ManHwC |ICtrl3 | IOVct |VDevID |-------*/
+ 0x0004, 0x0020, 0x0000, 0x004a, 0x2080, 0x00f5, 0x10ca, 0x0000,
+ /*---------------| LD1,3 | LD0,2 | ROEnd | ROSta | Wdog | VPD */
+ 0x0000, 0x0000, 0x4784, 0x4602, 0x0000, 0x0000, 0x1000, 0xffff,
+ /* PCSet0| Ccfg0 |PXEver |IBAcap |PCSet1 | Ccfg1 |iSCVer | ?? */
+ 0x0100, 0x4000, 0x131f, 0x4013, 0x0100, 0x4000, 0xffff, 0xffff,
+ /* PCSet2| Ccfg2 |PCSet3 | Ccfg3 | ?? |AltMacP| ?? |CHKSUM */
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x00e0, 0xffff, 0x0000,
+ /* NC-SIC */
+ 0x0003,
+};
+
+static void igb_core_realize(IGBState *s)
+{
+ s->core.owner = &s->parent_obj;
+ s->core.owner_nic = s->nic;
+}
+
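+/*
+ * MSI-X setup: the vector table (offset 0) and the PBA (offset 0x2000) both
+ * live in the dedicated MSI-X BAR, the capability is placed at config offset
+ * 0x70, and all IGB_MSIX_VEC_NUM vectors are marked in use up front.
+ */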
+static void
+igb_init_msix(IGBState *s)
+{
+ int i, res;
+
+ res = msix_init(PCI_DEVICE(s), IGB_MSIX_VEC_NUM,
+ &s->msix,
+ E1000E_MSIX_IDX, 0,
+ &s->msix,
+ E1000E_MSIX_IDX, 0x2000,
+ 0x70, NULL);
+
+ if (res < 0) {
+ trace_e1000e_msix_init_fail(res);
+ } else {
+ for (i = 0; i < IGB_MSIX_VEC_NUM; i++) {
+ msix_vector_use(PCI_DEVICE(s), i);
+ }
+ }
+}
+
+static void
+igb_cleanup_msix(IGBState *s)
+{
+ msix_unuse_all_vectors(PCI_DEVICE(s));
+ msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix);
+}
+
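+/*
+ * Create the NIC backend and decide whether virtio-net headers can be used:
+ * has_vnet is only set if every peer queue supports them, otherwise the
+ * device falls back to plain Ethernet frames.
+ */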
+static void
+igb_init_net_peer(IGBState *s, PCIDevice *pci_dev, uint8_t *macaddr)
+{
+ DeviceState *dev = DEVICE(pci_dev);
+ NetClientState *nc;
+ int i;
+
+ s->nic = qemu_new_nic(&net_igb_info, &s->conf,
+ object_get_typename(OBJECT(s)), dev->id, s);
+
+ s->core.max_queue_num = s->conf.peers.queues ? s->conf.peers.queues - 1 : 0;
+
+ trace_e1000e_mac_set_permanent(MAC_ARG(macaddr));
+ memcpy(s->core.permanent_mac, macaddr, sizeof(s->core.permanent_mac));
+
+ qemu_format_nic_info_str(qemu_get_queue(s->nic), macaddr);
+
+ /* Setup virtio headers */
+ for (i = 0; i < s->conf.peers.queues; i++) {
+ nc = qemu_get_subqueue(s->nic, i);
+ if (!nc->peer || !qemu_has_vnet_hdr(nc->peer)) {
+ trace_e1000e_cfg_support_virtio(false);
+ return;
+ }
+ }
+
+ trace_e1000e_cfg_support_virtio(true);
+ s->core.has_vnet = true;
+
+ for (i = 0; i < s->conf.peers.queues; i++) {
+ nc = qemu_get_subqueue(s->nic, i);
+ qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr));
+ qemu_using_vnet_hdr(nc->peer, true);
+ }
+}
+
+static int
+igb_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc)
+{
+ Error *local_err = NULL;
+ int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset,
+ PCI_PM_SIZEOF, &local_err);
+
+ if (local_err) {
+ error_report_err(local_err);
+ return ret;
+ }
+
+ pci_set_word(pdev->config + offset + PCI_PM_PMC,
+ PCI_PM_CAP_VER_1_1 |
+ pmc);
+
+ pci_set_word(pdev->wmask + offset + PCI_PM_CTRL,
+ PCI_PM_CTRL_STATE_MASK |
+ PCI_PM_CTRL_PME_ENABLE |
+ PCI_PM_CTRL_DATA_SEL_MASK);
+
+ pci_set_word(pdev->w1cmask + offset + PCI_PM_CTRL,
+ PCI_PM_CTRL_PME_STATUS);
+
+ return ret;
+}
+
+static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)
+{
+ IGBState *s = IGB(pci_dev);
+ uint8_t *macaddr;
+ int ret;
+
+ trace_e1000e_cb_pci_realize();
+
+ pci_dev->config_write = igb_write_config;
+
+ pci_dev->config[PCI_CACHE_LINE_SIZE] = 0x10;
+ pci_dev->config[PCI_INTERRUPT_PIN] = 1;
+
+ /* Define IO/MMIO regions */
+ memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s,
+ "igb-mmio", E1000E_MMIO_SIZE);
+ pci_register_bar(pci_dev, E1000E_MMIO_IDX,
+ PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);
+
+ /*
+ * We provide a dummy implementation for the flash BAR
+ * for drivers that may theoretically probe for its presence.
+ */
+ memory_region_init(&s->flash, OBJECT(s),
+ "igb-flash", E1000E_FLASH_SIZE);
+ pci_register_bar(pci_dev, E1000E_FLASH_IDX,
+ PCI_BASE_ADDRESS_SPACE_MEMORY, &s->flash);
+
+ memory_region_init_io(&s->io, OBJECT(s), &io_ops, s,
+ "igb-io", E1000E_IO_SIZE);
+ pci_register_bar(pci_dev, E1000E_IO_IDX,
+ PCI_BASE_ADDRESS_SPACE_IO, &s->io);
+
+ memory_region_init(&s->msix, OBJECT(s), "igb-msix",
+ E1000E_MSIX_SIZE);
+ pci_register_bar(pci_dev, E1000E_MSIX_IDX,
+ PCI_BASE_ADDRESS_MEM_TYPE_64, &s->msix);
+
+ /* Create networking backend */
+ qemu_macaddr_default_if_unset(&s->conf.macaddr);
+ macaddr = s->conf.macaddr.a;
+
+ /* Add PCI capabilities in reverse order */
+ assert(pcie_endpoint_cap_init(pci_dev, 0xa0) > 0);
+
+ igb_init_msix(s);
+
+ ret = msi_init(pci_dev, 0x50, 1, true, true, NULL);
+ if (ret) {
+ trace_e1000e_msi_init_fail(ret);
+ }
+
+ if (igb_add_pm_capability(pci_dev, 0x40, PCI_PM_CAP_DSI) < 0) {
+ hw_error("Failed to initialize PM capability");
+ }
+
+ /* PCIe extended capabilities (in order) */
+ if (pcie_aer_init(pci_dev, 1, 0x100, 0x40, errp) < 0) {
+ hw_error("Failed to initialize AER capability");
+ }
+
+ pcie_ari_init(pci_dev, 0x150, 1);
+
+ pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, "igbvf",
+ IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
+ IGB_VF_OFFSET, IGB_VF_STRIDE);
+
+ pcie_sriov_pf_init_vf_bar(pci_dev, 0,
+ PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH,
+ 16 * KiB);
+ pcie_sriov_pf_init_vf_bar(pci_dev, 3,
+ PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH,
+ 16 * KiB);
+
+ igb_init_net_peer(s, pci_dev, macaddr);
+
+ /* Initialize core */
+ igb_core_realize(s);
+
+ igb_core_pci_realize(&s->core,
+ igb_eeprom_template,
+ sizeof(igb_eeprom_template),
+ macaddr);
+}
+
+static void igb_pci_uninit(PCIDevice *pci_dev)
+{
+ IGBState *s = IGB(pci_dev);
+
+ trace_e1000e_cb_pci_uninit();
+
+ igb_core_pci_uninit(&s->core);
+
+ pcie_sriov_pf_exit(pci_dev);
+ pcie_cap_exit(pci_dev);
+
+ qemu_del_nic(s->nic);
+
+ igb_cleanup_msix(s);
+ msi_uninit(pci_dev);
+}
+
+static void igb_qdev_reset_hold(Object *obj)
+{
+ PCIDevice *d = PCI_DEVICE(obj);
+ IGBState *s = IGB(obj);
+
+ trace_e1000e_cb_qdev_reset_hold();
+
+ pcie_sriov_pf_disable_vfs(d);
+ igb_core_reset(&s->core);
+}
+
+static int igb_pre_save(void *opaque)
+{
+ IGBState *s = opaque;
+
+ trace_e1000e_cb_pre_save();
+
+ igb_core_pre_save(&s->core);
+
+ return 0;
+}
+
+static int igb_post_load(void *opaque, int version_id)
+{
+ IGBState *s = opaque;
+
+ trace_e1000e_cb_post_load();
+ return igb_core_post_load(&s->core);
+}
+
+static const VMStateDescription igb_vmstate_tx = {
+ .name = "igb-tx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(vlan, struct igb_tx),
+ VMSTATE_UINT16(mss, struct igb_tx),
+ VMSTATE_BOOL(tse, struct igb_tx),
+ VMSTATE_BOOL(ixsm, struct igb_tx),
+ VMSTATE_BOOL(txsm, struct igb_tx),
+ VMSTATE_BOOL(first, struct igb_tx),
+ VMSTATE_BOOL(skip_cp, struct igb_tx),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription igb_vmstate_intr_timer = {
+ .name = "igb-intr-timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER_PTR(timer, IGBIntrDelayTimer),
+ VMSTATE_BOOL(running, IGBIntrDelayTimer),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_IGB_INTR_DELAY_TIMER(_f, _s) \
+ VMSTATE_STRUCT(_f, _s, 0, \
+ igb_vmstate_intr_timer, IGBIntrDelayTimer)
+
+#define VMSTATE_IGB_INTR_DELAY_TIMER_ARRAY(_f, _s, _num) \
+ VMSTATE_STRUCT_ARRAY(_f, _s, _num, 0, \
+ igb_vmstate_intr_timer, IGBIntrDelayTimer)
+
+static const VMStateDescription igb_vmstate = {
+ .name = "igb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = igb_pre_save,
+ .post_load = igb_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, IGBState),
+ VMSTATE_MSIX(parent_obj, IGBState),
+
+ VMSTATE_UINT32(ioaddr, IGBState),
+ VMSTATE_UINT8(core.rx_desc_len, IGBState),
+ VMSTATE_UINT16_ARRAY(core.eeprom, IGBState, IGB_EEPROM_SIZE),
+ VMSTATE_UINT16_ARRAY(core.phy, IGBState, MAX_PHY_REG_ADDRESS + 1),
+ VMSTATE_UINT32_ARRAY(core.mac, IGBState, E1000E_MAC_SIZE),
+ VMSTATE_UINT8_ARRAY(core.permanent_mac, IGBState, ETH_ALEN),
+
+ VMSTATE_IGB_INTR_DELAY_TIMER_ARRAY(core.eitr, IGBState,
+ IGB_INTR_NUM),
+
+ VMSTATE_UINT32_ARRAY(core.eitr_guest_value, IGBState, IGB_INTR_NUM),
+
+ VMSTATE_STRUCT_ARRAY(core.tx, IGBState, IGB_NUM_QUEUES, 0,
+ igb_vmstate_tx, struct igb_tx),
+
+ VMSTATE_INT64(core.timadj, IGBState),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property igb_properties[] = {
+ DEFINE_NIC_PROPERTIES(IGBState, conf),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void igb_class_init(ObjectClass *class, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(class);
+ ResettableClass *rc = RESETTABLE_CLASS(class);
+ PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
+
+ c->realize = igb_pci_realize;
+ c->exit = igb_pci_uninit;
+ c->vendor_id = PCI_VENDOR_ID_INTEL;
+ c->device_id = E1000_DEV_ID_82576;
+ c->revision = 1;
+ c->class_id = PCI_CLASS_NETWORK_ETHERNET;
+
+ rc->phases.hold = igb_qdev_reset_hold;
+
+ dc->desc = "Intel 82576 Gigabit Ethernet Controller";
+ dc->vmsd = &igb_vmstate;
+
+ device_class_set_props(dc, igb_properties);
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+}
+
+static void igb_instance_init(Object *obj)
+{
+ IGBState *s = IGB(obj);
+ device_add_bootindex_property(obj, &s->conf.bootindex,
+ "bootindex", "/ethernet-phy@0",
+ DEVICE(obj));
+}
+
+static const TypeInfo igb_info = {
+ .name = TYPE_IGB,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(IGBState),
+ .class_init = igb_class_init,
+ .instance_init = igb_instance_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { }
+ },
+};
+
+static void igb_register_types(void)
+{
+ type_register_static(&igb_info);
+}
+
+type_init(igb_register_types)
diff --git a/hw/net/igb_common.h b/hw/net/igb_common.h
new file mode 100644
index 0000000..69ac490
--- /dev/null
+++ b/hw/net/igb_common.h
@@ -0,0 +1,146 @@
+/*
+ * QEMU igb emulation - shared definitions
+ *
+ * Copyright (c) 2020-2023 Red Hat, Inc.
+ * Copyright (c) 2008 Qumranet
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_NET_IGB_COMMON_H
+#define HW_NET_IGB_COMMON_H
+
+#include "igb_regs.h"
+
+#define defreg(x) x = (E1000_##x >> 2)
+#define defreg_indexed(x, i) x##i = (E1000_##x(i) >> 2)
+#define defreg_indexeda(x, i) x##i##_A = (E1000_##x##_A(i) >> 2)
+
+#define defregd(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \
+ defreg_indexed(x, 2), defreg_indexed(x, 3), \
+ defreg_indexed(x, 4), defreg_indexed(x, 5), \
+ defreg_indexed(x, 6), defreg_indexed(x, 7), \
+ defreg_indexed(x, 8), defreg_indexed(x, 9), \
+ defreg_indexed(x, 10), defreg_indexed(x, 11), \
+ defreg_indexed(x, 12), defreg_indexed(x, 13), \
+ defreg_indexed(x, 14), defreg_indexed(x, 15), \
+ defreg_indexeda(x, 0), defreg_indexeda(x, 1), \
+ defreg_indexeda(x, 2), defreg_indexeda(x, 3)
+
+#define defregv(x) defreg_indexed(x, 0), defreg_indexed(x, 1), \
+ defreg_indexed(x, 2), defreg_indexed(x, 3), \
+ defreg_indexed(x, 4), defreg_indexed(x, 5), \
+ defreg_indexed(x, 6), defreg_indexed(x, 7)
+
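+/*
+ * Register indexes into the core's mac[] array: defreg() turns a register's
+ * byte offset (E1000_x) into a word index, defregd() expands the 16 per-queue
+ * copies of a register plus its four "_A" alias addresses, and defregv()
+ * expands the eight per-VF copies.
+ */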
+enum {
+ defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
+ defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
+ defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
+ defreg(MPC), defreg(RCTL),
+ defreg(STATUS), defreg(SWSM), defreg(TCTL),
+ defreg(TORH), defreg(TORL), defreg(TOTH),
+ defreg(TOTL), defreg(TPR), defreg(TPT),
+ defreg(WUFC), defreg(RA), defreg(MTA), defreg(CRCERRS),
+ defreg(VFTA), defreg(VET),
+ defreg(SCC), defreg(ECOL),
+ defreg(MCC), defreg(LATECOL), defreg(COLC), defreg(DC),
+ defreg(TNCRS), defreg(RLEC),
+ defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC), defreg(XOFFTXC),
+ defreg(FCRUC), defreg(TDFH), defreg(TDFT),
+ defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(WUC),
+ defreg(WUS), defreg(RDFH),
+ defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC),
+ defreg(IPAV), defreg(IP4AT), defreg(IP6AT),
+ defreg(WUPM), defreg(FFMT),
+ defreg(IAM),
+ defreg(GCR), defreg(TIMINCA), defreg(EIAC), defreg(CTRL_EXT),
+ defreg(IVAR0), defreg(MANC2H),
+ defreg(MFVAL), defreg(MDEF), defreg(FACTPS), defreg(FTFT),
+ defreg(RUC), defreg(ROC), defreg(RFC), defreg(RJC),
+ defreg(PRC64), defreg(PRC127), defreg(PRC255), defreg(PRC511),
+ defreg(PRC1023), defreg(PRC1522), defreg(PTC64), defreg(PTC127),
+ defreg(PTC255), defreg(PTC511), defreg(PTC1023), defreg(PTC1522),
+ defreg(GORCL), defreg(GORCH), defreg(GOTCL), defreg(GOTCH),
+ defreg(RNBC), defreg(BPRC), defreg(MPRC), defreg(RFCTL),
+ defreg(MPTC), defreg(BPTC),
+ defreg(IAC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC),
+ defreg(TSCTC), defreg(RXCSUM), defreg(FUNCTAG), defreg(GSCL_1),
+ defreg(GSCL_2), defreg(GSCL_3), defreg(GSCL_4), defreg(GSCN_0),
+ defreg(GSCN_1), defreg(GSCN_2), defreg(GSCN_3),
+ defreg_indexed(EITR, 0),
+ defreg(MRQC), defreg(RETA), defreg(RSSRK),
+ defreg(PBACLR), defreg(FCAL), defreg(FCAH), defreg(FCT),
+ defreg(FCRTH), defreg(FCRTL), defreg(FCTTV), defreg(FCRTV),
+ defreg(FLA), defreg(FLOP),
+ defreg(MAVTV0), defreg(MAVTV1), defreg(MAVTV2), defreg(MAVTV3),
+ defreg(TXSTMPL), defreg(TXSTMPH), defreg(SYSTIML), defreg(SYSTIMH),
+ defreg(TIMADJL), defreg(TIMADJH),
+ defreg(RXSTMPH), defreg(RXSTMPL), defreg(RXSATRL), defreg(RXSATRH),
+ defreg(TIPG),
+ defreg(CTRL_DUP),
+ defreg(EEMNGCTL),
+ defreg(EEMNGDATA),
+ defreg(FLMNGCTL),
+ defreg(FLMNGDATA),
+ defreg(FLMNGCNT),
+ defreg(TSYNCRXCTL),
+ defreg(TSYNCTXCTL),
+ defreg(RLPML),
+ defreg(UTA),
+
+ /* Aliases */
+ defreg(RDFH_A), defreg(RDFT_A), defreg(TDFH_A), defreg(TDFT_A),
+ defreg(RA_A), defreg(VFTA_A), defreg(FCRTL_A),
+
+ /* Additional regs used by IGB */
+ defreg(FWSM), defreg(SW_FW_SYNC),
+
+ defreg(EICS), defreg(EIMS), defreg(EIMC), defreg(EIAM),
+ defreg(EICR), defreg(IVAR_MISC), defreg(GPIE),
+
+ defreg(RXPBS), defregd(RDBAL), defregd(RDBAH), defregd(RDLEN),
+ defregd(SRRCTL), defregd(RDH), defregd(RDT),
+ defregd(RXDCTL), defregd(RXCTL), defregd(RQDPC), defreg(RA2),
+
+ defreg(TXPBS), defreg(TCTL_EXT), defreg(DTXCTL), defreg(HTCBDPC),
+ defregd(TDBAL), defregd(TDBAH), defregd(TDLEN), defregd(TDH),
+ defregd(TDT), defregd(TXDCTL), defregd(TXCTL),
+ defregd(TDWBAL), defregd(TDWBAH),
+
+ defreg(VT_CTL),
+
+ defregv(P2VMAILBOX), defregv(V2PMAILBOX), defreg(MBVFICR), defreg(MBVFIMR),
+ defreg(VFLRE), defreg(VFRE), defreg(VFTE), defreg(WVBR),
+ defreg(QDE), defreg(DTXSWC), defreg_indexed(VLVF, 0),
+ defregv(VMOLR), defreg(RPLOLR), defregv(VMBMEM), defregv(VMVIR),
+
+ defregv(PVTCTRL), defregv(PVTEICS), defregv(PVTEIMS), defregv(PVTEIMC),
+ defregv(PVTEIAC), defregv(PVTEIAM), defregv(PVTEICR), defregv(PVFGPRC),
+ defregv(PVFGPTC), defregv(PVFGORC), defregv(PVFGOTC), defregv(PVFMPRC),
+ defregv(PVFGPRLBC), defregv(PVFGPTLBC), defregv(PVFGORLBC), defregv(PVFGOTLBC),
+
+ defreg(MTA_A),
+
+ defreg(VTIVAR), defreg(VTIVAR_MISC),
+};
+
+uint64_t igb_mmio_read(void *opaque, hwaddr addr, unsigned size);
+void igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size);
+
+#endif
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
new file mode 100644
index 0000000..a7c7bfd
--- /dev/null
+++ b/hw/net/igb_core.c
@@ -0,0 +1,4077 @@
+/*
+ * Core code for QEMU igb emulation
+ *
+ * Datasheet:
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
+ *
+ * Copyright (c) 2020-2023 Red Hat, Inc.
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Akihiko Odaki <akihiko.odaki@daynix.com>
+ * Gal Hammmer <gal.hammer@sap.com>
+ * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2008 Qumranet
+ * Based on work done by:
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "net/net.h"
+#include "net/tap.h"
+#include "hw/net/mii.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "sysemu/runstate.h"
+
+#include "net_tx_pkt.h"
+#include "net_rx_pkt.h"
+
+#include "igb_common.h"
+#include "e1000x_common.h"
+#include "igb_core.h"
+
+#include "trace.h"
+
+#define E1000E_MAX_TX_FRAGS (64)
+
+union e1000_rx_desc_union {
+ struct e1000_rx_desc legacy;
+ union e1000_adv_rx_desc adv;
+};
+
+typedef struct IGBTxPktVmdqCallbackContext {
+ IGBCore *core;
+ NetClientState *nc;
+} IGBTxPktVmdqCallbackContext;
+
+static ssize_t
+igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
+ bool has_vnet, bool *external_tx);
+
+static inline void
+igb_set_interrupt_cause(IGBCore *core, uint32_t val);
+
+static void igb_update_interrupt_state(IGBCore *core);
+static void igb_reset(IGBCore *core, bool sw);
+
+static inline void
+igb_raise_legacy_irq(IGBCore *core)
+{
+ trace_e1000e_irq_legacy_notify(true);
+ e1000x_inc_reg_if_not_full(core->mac, IAC);
+ pci_set_irq(core->owner, 1);
+}
+
+static inline void
+igb_lower_legacy_irq(IGBCore *core)
+{
+ trace_e1000e_irq_legacy_notify(false);
+ pci_set_irq(core->owner, 0);
+}
+
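+/*
+ * Deliver an MSI-X interrupt for a flat core vector number: vectors that map
+ * onto an existing VF (IGBVF_MSIX_VEC_NUM per VF) are rerouted to that VF's
+ * MSI-X table with a VF-local vector index; anything else must fit within the
+ * PF's IGB_MSIX_VEC_NUM vectors.
+ */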
+static void igb_msix_notify(IGBCore *core, unsigned int vector)
+{
+ PCIDevice *dev = core->owner;
+ uint16_t vfn;
+
+ vfn = 8 - (vector + 2) / IGBVF_MSIX_VEC_NUM;
+ if (vfn < pcie_sriov_num_vfs(core->owner)) {
+ dev = pcie_sriov_get_vf_at_index(core->owner, vfn);
+ assert(dev);
+ vector = (vector + 2) % IGBVF_MSIX_VEC_NUM;
+ } else if (vector >= IGB_MSIX_VEC_NUM) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+                      "igb: Tried to use a vector unavailable to the PF");
+ return;
+ }
+
+ msix_notify(dev, vector);
+}
+
+static inline void
+igb_intrmgr_rearm_timer(IGBIntrDelayTimer *timer)
+{
+ int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
+ timer->delay_resolution_ns;
+
+ trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);
+
+ timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
+
+ timer->running = true;
+}
+
+static void
+igb_intmgr_timer_resume(IGBIntrDelayTimer *timer)
+{
+ if (timer->running) {
+ igb_intrmgr_rearm_timer(timer);
+ }
+}
+
+static void
+igb_intmgr_timer_pause(IGBIntrDelayTimer *timer)
+{
+ if (timer->running) {
+ timer_del(timer->timer);
+ }
+}
+
+static void
+igb_intrmgr_on_msix_throttling_timer(void *opaque)
+{
+ IGBIntrDelayTimer *timer = opaque;
+ int idx = timer - &timer->core->eitr[0];
+
+ timer->running = false;
+
+ trace_e1000e_irq_msix_notify_postponed_vec(idx);
+ igb_msix_notify(timer->core, idx);
+}
+
+static void
+igb_intrmgr_initialize_all_timers(IGBCore *core, bool create)
+{
+ int i;
+
+ for (i = 0; i < IGB_INTR_NUM; i++) {
+ core->eitr[i].core = core;
+ core->eitr[i].delay_reg = EITR0 + i;
+ core->eitr[i].delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
+ }
+
+ if (!create) {
+ return;
+ }
+
+ for (i = 0; i < IGB_INTR_NUM; i++) {
+ core->eitr[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ igb_intrmgr_on_msix_throttling_timer,
+ &core->eitr[i]);
+ }
+}
+
+static void
+igb_intrmgr_resume(IGBCore *core)
+{
+ int i;
+
+ for (i = 0; i < IGB_INTR_NUM; i++) {
+ igb_intmgr_timer_resume(&core->eitr[i]);
+ }
+}
+
+static void
+igb_intrmgr_pause(IGBCore *core)
+{
+ int i;
+
+ for (i = 0; i < IGB_INTR_NUM; i++) {
+ igb_intmgr_timer_pause(&core->eitr[i]);
+ }
+}
+
+static void
+igb_intrmgr_reset(IGBCore *core)
+{
+ int i;
+
+ for (i = 0; i < IGB_INTR_NUM; i++) {
+ if (core->eitr[i].running) {
+ timer_del(core->eitr[i].timer);
+ igb_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
+ }
+ }
+}
+
+static void
+igb_intrmgr_pci_unint(IGBCore *core)
+{
+ int i;
+
+ for (i = 0; i < IGB_INTR_NUM; i++) {
+ timer_free(core->eitr[i].timer);
+ }
+}
+
+static void
+igb_intrmgr_pci_realize(IGBCore *core)
+{
+ igb_intrmgr_initialize_all_timers(core, true);
+}
+
+static inline bool
+igb_rx_csum_enabled(IGBCore *core)
+{
+ return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
+}
+
+static inline bool
+igb_rx_use_legacy_descriptor(IGBCore *core)
+{
+ /*
+ * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
+ * descriptor.
+ */
+ return false;
+}
+
+static inline bool
+igb_rss_enabled(IGBCore *core)
+{
+ return (core->mac[MRQC] & 3) == E1000_MRQC_ENABLE_RSS_MQ &&
+ !igb_rx_csum_enabled(core) &&
+ !igb_rx_use_legacy_descriptor(core);
+}
+
+typedef struct E1000E_RSSInfo_st {
+ bool enabled;
+ uint32_t hash;
+ uint32_t queue;
+ uint32_t type;
+} E1000E_RSSInfo;
+
+static uint32_t
+igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt)
+{
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
+
+ assert(igb_rss_enabled(core));
+
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
+
+ if (hasip4) {
+ trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
+ E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
+ E1000_MRQC_EN_IPV4(core->mac[MRQC]));
+
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
+ E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
+ return E1000_MRQ_RSS_TYPE_IPV4TCP;
+ }
+
+ if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
+ return E1000_MRQ_RSS_TYPE_IPV4;
+ }
+ } else if (hasip6) {
+ eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);
+
+ bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
+ bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;
+
+        /*
+         * The following two traces must not be combined, because the
+         * resulting event would have 11 arguments in total and some trace
+         * backends (at least "ust") are limited to 10 arguments per event;
+         * events with more arguments fail to compile for such backends.
+         */
+ trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
+ trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
+ ip6info->has_ext_hdrs,
+ ip6info->rss_ex_dst_valid,
+ ip6info->rss_ex_src_valid,
+ core->mac[MRQC],
+ E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
+ E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
+ E1000_MRQC_EN_IPV6(core->mac[MRQC]));
+
+ if ((!ex_dis || !ip6info->has_ext_hdrs) &&
+ (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
+ ip6info->rss_ex_src_valid))) {
+
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
+ E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
+ return E1000_MRQ_RSS_TYPE_IPV6TCP;
+ }
+
+ if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
+ return E1000_MRQ_RSS_TYPE_IPV6EX;
+ }
+
+ }
+
+ if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
+ return E1000_MRQ_RSS_TYPE_IPV6;
+ }
+
+ }
+
+ return E1000_MRQ_RSS_TYPE_NONE;
+}
+
+static uint32_t
+igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info)
+{
+ NetRxPktRssType type;
+
+ assert(igb_rss_enabled(core));
+
+ switch (info->type) {
+ case E1000_MRQ_RSS_TYPE_IPV4:
+ type = NetPktRssIpV4;
+ break;
+ case E1000_MRQ_RSS_TYPE_IPV4TCP:
+ type = NetPktRssIpV4Tcp;
+ break;
+ case E1000_MRQ_RSS_TYPE_IPV6TCP:
+ type = NetPktRssIpV6TcpEx;
+ break;
+ case E1000_MRQ_RSS_TYPE_IPV6:
+ type = NetPktRssIpV6;
+ break;
+ case E1000_MRQ_RSS_TYPE_IPV6EX:
+ type = NetPktRssIpV6Ex;
+ break;
+ default:
+ assert(false);
+ return 0;
+ }
+
+ return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
+}
+
+static void
+igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
+ E1000E_RSSInfo *info)
+{
+ trace_e1000e_rx_rss_started();
+
+ if (tx || !igb_rss_enabled(core)) {
+ info->enabled = false;
+ info->hash = 0;
+ info->queue = 0;
+ info->type = 0;
+ trace_e1000e_rx_rss_disabled();
+ return;
+ }
+
+ info->enabled = true;
+
+ info->type = igb_rss_get_hash_type(core, pkt);
+
+ trace_e1000e_rx_rss_type(info->type);
+
+ if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
+ info->hash = 0;
+ info->queue = 0;
+ return;
+ }
+
+ info->hash = igb_rss_calc_hash(core, pkt, info);
+ info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
+}
+
+static bool
+igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
+{
+ if (tx->tse) {
+ if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->mss)) {
+ return false;
+ }
+
+ net_tx_pkt_update_ip_checksums(tx->tx_pkt);
+ e1000x_inc_reg_if_not_full(core->mac, TSCTC);
+ return true;
+ }
+
+ if (tx->txsm) {
+ if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) {
+ return false;
+ }
+ }
+
+ if (tx->ixsm) {
+ net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
+ }
+
+ return true;
+}
+
+static void igb_tx_pkt_mac_callback(void *core,
+ const struct iovec *iov,
+ int iovcnt,
+ const struct iovec *virt_iov,
+ int virt_iovcnt)
+{
+ igb_receive_internal(core, virt_iov, virt_iovcnt, true, NULL);
+}
+
+static void igb_tx_pkt_vmdq_callback(void *opaque,
+ const struct iovec *iov,
+ int iovcnt,
+ const struct iovec *virt_iov,
+ int virt_iovcnt)
+{
+ IGBTxPktVmdqCallbackContext *context = opaque;
+ bool external_tx;
+
+ igb_receive_internal(context->core, virt_iov, virt_iovcnt, true,
+ &external_tx);
+
+ if (external_tx) {
+ if (context->core->has_vnet) {
+ qemu_sendv_packet(context->nc, virt_iov, virt_iovcnt);
+ } else {
+ qemu_sendv_packet(context->nc, iov, iovcnt);
+ }
+ }
+}
+
+/* TX Packets Switching (7.10.3.6) */
+static bool igb_tx_pkt_switch(IGBCore *core, struct igb_tx *tx,
+ NetClientState *nc)
+{
+ IGBTxPktVmdqCallbackContext context;
+
+ /* TX switching is only used to serve VM to VM traffic. */
+ if (!(core->mac[MRQC] & 1)) {
+ goto send_out;
+ }
+
+ /* TX switching requires DTXSWC.Loopback_en bit enabled. */
+ if (!(core->mac[DTXSWC] & E1000_DTXSWC_VMDQ_LOOPBACK_EN)) {
+ goto send_out;
+ }
+
+ context.core = core;
+ context.nc = nc;
+
+ return net_tx_pkt_send_custom(tx->tx_pkt, false,
+ igb_tx_pkt_vmdq_callback, &context);
+
+send_out:
+ return net_tx_pkt_send(tx->tx_pkt, nc);
+}
+
+static bool
+igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index)
+{
+ int target_queue = MIN(core->max_queue_num, queue_index);
+ NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);
+
+ if (!igb_setup_tx_offloads(core, tx)) {
+ return false;
+ }
+
+ net_tx_pkt_dump(tx->tx_pkt);
+
+ if ((core->phy[MII_BMCR] & MII_BMCR_LOOPBACK) ||
+ ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
+ return net_tx_pkt_send_custom(tx->tx_pkt, false,
+ igb_tx_pkt_mac_callback, core);
+ } else {
+ return igb_tx_pkt_switch(core, tx, queue);
+ }
+}
+
+static void
+igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt)
+{
+ static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
+ PTC1023, PTC1522 };
+
+ size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;
+
+ e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
+ e1000x_inc_reg_if_not_full(core->mac, TPT);
+ e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);
+
+ switch (net_tx_pkt_get_packet_type(tx_pkt)) {
+ case ETH_PKT_BCAST:
+ e1000x_inc_reg_if_not_full(core->mac, BPTC);
+ break;
+ case ETH_PKT_MCAST:
+ e1000x_inc_reg_if_not_full(core->mac, MPTC);
+ break;
+ case ETH_PKT_UCAST:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ core->mac[GPTC] = core->mac[TPT];
+ core->mac[GOTCL] = core->mac[TOTL];
+ core->mac[GOTCH] = core->mac[TOTH];
+}
+
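+/*
+ * Handle one advanced Tx descriptor: a context descriptor only records VLAN
+ * and MSS for the packet that follows, a data descriptor contributes a buffer
+ * fragment, and once the EOP bit is seen the accumulated packet is parsed,
+ * optionally VLAN-tagged and transmitted.
+ */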
+static void
+igb_process_tx_desc(IGBCore *core,
+ struct igb_tx *tx,
+ union e1000_adv_tx_desc *tx_desc,
+ int queue_index)
+{
+ struct e1000_adv_tx_context_desc *tx_ctx_desc;
+ uint32_t cmd_type_len;
+ uint32_t olinfo_status;
+ uint64_t buffer_addr;
+ uint16_t length;
+
+ cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);
+
+ if (cmd_type_len & E1000_ADVTXD_DCMD_DEXT) {
+ if ((cmd_type_len & E1000_ADVTXD_DTYP_DATA) ==
+ E1000_ADVTXD_DTYP_DATA) {
+ /* advanced transmit data descriptor */
+ if (tx->first) {
+ olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
+
+ tx->tse = !!(cmd_type_len & E1000_ADVTXD_DCMD_TSE);
+ tx->ixsm = !!(olinfo_status & E1000_ADVTXD_POTS_IXSM);
+ tx->txsm = !!(olinfo_status & E1000_ADVTXD_POTS_TXSM);
+
+ tx->first = false;
+ }
+ } else if ((cmd_type_len & E1000_ADVTXD_DTYP_CTXT) ==
+ E1000_ADVTXD_DTYP_CTXT) {
+ /* advanced transmit context descriptor */
+ tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc;
+ tx->vlan = le32_to_cpu(tx_ctx_desc->vlan_macip_lens) >> 16;
+ tx->mss = le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 16;
+ return;
+ } else {
+ /* unknown descriptor type */
+ return;
+ }
+ } else {
+ /* legacy descriptor */
+
+        /* TODO: Implement support for legacy descriptors (7.2.2.1). */
+ }
+
+ buffer_addr = le64_to_cpu(tx_desc->read.buffer_addr);
+ length = cmd_type_len & 0xFFFF;
+
+ if (!tx->skip_cp) {
+ if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, buffer_addr, length)) {
+ tx->skip_cp = true;
+ }
+ }
+
+ if (cmd_type_len & E1000_TXD_CMD_EOP) {
+ if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
+ if (cmd_type_len & E1000_TXD_CMD_VLE) {
+ net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, tx->vlan,
+ core->mac[VET] & 0xffff);
+ }
+ if (igb_tx_pkt_send(core, tx, queue_index)) {
+ igb_on_tx_done_update_stats(core, tx->tx_pkt);
+ }
+ }
+
+ tx->first = true;
+ tx->skip_cp = false;
+ net_tx_pkt_reset(tx->tx_pkt);
+ }
+}
+
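+/*
+ * Look up the EICR bit for a queue's IVAR entry (used by both the Tx and Rx
+ * variants below): each 32-bit IVAR register packs four 8-bit entries, where
+ * E1000_IVAR_VALID marks an entry as programmed and the low five bits select
+ * the extended interrupt cause bit.
+ */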
+static uint32_t igb_tx_wb_eic(IGBCore *core, int queue_idx)
+{
+ uint32_t n, ent = 0;
+
+ n = igb_ivar_entry_tx(queue_idx);
+ ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;
+
+ return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
+}
+
+static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx)
+{
+ uint32_t n, ent = 0;
+
+ n = igb_ivar_entry_rx(queue_idx);
+ ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;
+
+ return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
+}
+
+typedef struct E1000E_RingInfo_st {
+ int dbah;
+ int dbal;
+ int dlen;
+ int dh;
+ int dt;
+ int idx;
+} E1000E_RingInfo;
+
+static inline bool
+igb_ring_empty(IGBCore *core, const E1000E_RingInfo *r)
+{
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
+}
+
+static inline uint64_t
+igb_ring_base(IGBCore *core, const E1000E_RingInfo *r)
+{
+ uint64_t bah = core->mac[r->dbah];
+ uint64_t bal = core->mac[r->dbal];
+
+ return (bah << 32) + bal;
+}
+
+static inline uint64_t
+igb_ring_head_descr(IGBCore *core, const E1000E_RingInfo *r)
+{
+ return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
+}
+
+static inline void
+igb_ring_advance(IGBCore *core, const E1000E_RingInfo *r, uint32_t count)
+{
+ core->mac[r->dh] += count;
+
+ if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
+ core->mac[r->dh] = 0;
+ }
+}
+
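+/*
+ * Number of free descriptor slots between head and tail, taking ring
+ * wrap-around into account.
+ */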
+static inline uint32_t
+igb_ring_free_descr_num(IGBCore *core, const E1000E_RingInfo *r)
+{
+ trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
+ core->mac[r->dh], core->mac[r->dt]);
+
+ if (core->mac[r->dh] <= core->mac[r->dt]) {
+ return core->mac[r->dt] - core->mac[r->dh];
+ }
+
+ if (core->mac[r->dh] > core->mac[r->dt]) {
+ return core->mac[r->dlen] / E1000_RING_DESC_LEN +
+ core->mac[r->dt] - core->mac[r->dh];
+ }
+
+ g_assert_not_reached();
+ return 0;
+}
+
+static inline bool
+igb_ring_enabled(IGBCore *core, const E1000E_RingInfo *r)
+{
+ return core->mac[r->dlen] > 0;
+}
+
+typedef struct IGB_TxRing_st {
+ const E1000E_RingInfo *i;
+ struct igb_tx *tx;
+} IGB_TxRing;
+
+static inline int
+igb_mq_queue_idx(int base_reg_idx, int reg_idx)
+{
+ return (reg_idx - base_reg_idx) / 16;
+}
+
+static inline void
+igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx)
+{
+ static const E1000E_RingInfo i[IGB_NUM_QUEUES] = {
+ { TDBAH0, TDBAL0, TDLEN0, TDH0, TDT0, 0 },
+ { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 },
+ { TDBAH2, TDBAL2, TDLEN2, TDH2, TDT2, 2 },
+ { TDBAH3, TDBAL3, TDLEN3, TDH3, TDT3, 3 },
+ { TDBAH4, TDBAL4, TDLEN4, TDH4, TDT4, 4 },
+ { TDBAH5, TDBAL5, TDLEN5, TDH5, TDT5, 5 },
+ { TDBAH6, TDBAL6, TDLEN6, TDH6, TDT6, 6 },
+ { TDBAH7, TDBAL7, TDLEN7, TDH7, TDT7, 7 },
+ { TDBAH8, TDBAL8, TDLEN8, TDH8, TDT8, 8 },
+ { TDBAH9, TDBAL9, TDLEN9, TDH9, TDT9, 9 },
+ { TDBAH10, TDBAL10, TDLEN10, TDH10, TDT10, 10 },
+ { TDBAH11, TDBAL11, TDLEN11, TDH11, TDT11, 11 },
+ { TDBAH12, TDBAL12, TDLEN12, TDH12, TDT12, 12 },
+ { TDBAH13, TDBAL13, TDLEN13, TDH13, TDT13, 13 },
+ { TDBAH14, TDBAL14, TDLEN14, TDH14, TDT14, 14 },
+ { TDBAH15, TDBAL15, TDLEN15, TDH15, TDT15, 15 }
+ };
+
+ assert(idx < ARRAY_SIZE(i));
+
+ txr->i = &i[idx];
+ txr->tx = &core->tx[idx];
+}
+
+typedef struct E1000E_RxRing_st {
+ const E1000E_RingInfo *i;
+} E1000E_RxRing;
+
+static inline void
+igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx)
+{
+ static const E1000E_RingInfo i[IGB_NUM_QUEUES] = {
+ { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
+ { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 },
+ { RDBAH2, RDBAL2, RDLEN2, RDH2, RDT2, 2 },
+ { RDBAH3, RDBAL3, RDLEN3, RDH3, RDT3, 3 },
+ { RDBAH4, RDBAL4, RDLEN4, RDH4, RDT4, 4 },
+ { RDBAH5, RDBAL5, RDLEN5, RDH5, RDT5, 5 },
+ { RDBAH6, RDBAL6, RDLEN6, RDH6, RDT6, 6 },
+ { RDBAH7, RDBAL7, RDLEN7, RDH7, RDT7, 7 },
+ { RDBAH8, RDBAL8, RDLEN8, RDH8, RDT8, 8 },
+ { RDBAH9, RDBAL9, RDLEN9, RDH9, RDT9, 9 },
+ { RDBAH10, RDBAL10, RDLEN10, RDH10, RDT10, 10 },
+ { RDBAH11, RDBAL11, RDLEN11, RDH11, RDT11, 11 },
+ { RDBAH12, RDBAL12, RDLEN12, RDH12, RDT12, 12 },
+ { RDBAH13, RDBAL13, RDLEN13, RDH13, RDT13, 13 },
+ { RDBAH14, RDBAL14, RDLEN14, RDH14, RDT14, 14 },
+ { RDBAH15, RDBAL15, RDLEN15, RDH15, RDT15, 15 }
+ };
+
+ assert(idx < ARRAY_SIZE(i));
+
+ rxr->i = &i[idx];
+}
+
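+/*
+ * Tx descriptor write-back, only done when the RS bit is set: with head
+ * write-back enabled (bit 0 of TDWBAL) the current head pointer is written to
+ * the address in TDWBAL/TDWBAH, otherwise DD is set in the descriptor's
+ * status field in place.  Returns the EICR bits to raise, if any.
+ */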
+static uint32_t
+igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
+ union e1000_adv_tx_desc *tx_desc,
+ const E1000E_RingInfo *txi)
+{
+ PCIDevice *d;
+ uint32_t cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);
+ uint64_t tdwba;
+
+ tdwba = core->mac[E1000_TDWBAL(txi->idx) >> 2];
+ tdwba |= (uint64_t)core->mac[E1000_TDWBAH(txi->idx) >> 2] << 32;
+
+ if (!(cmd_type_len & E1000_TXD_CMD_RS)) {
+ return 0;
+ }
+
+ d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
+ if (!d) {
+ d = core->owner;
+ }
+
+ if (tdwba & 1) {
+ uint32_t buffer = cpu_to_le32(core->mac[txi->dh]);
+ pci_dma_write(d, tdwba & ~3, &buffer, sizeof(buffer));
+ } else {
+ uint32_t status = le32_to_cpu(tx_desc->wb.status) | E1000_TXD_STAT_DD;
+
+ tx_desc->wb.status = cpu_to_le32(status);
+ pci_dma_write(d, base + offsetof(union e1000_adv_tx_desc, wb),
+ &tx_desc->wb, sizeof(tx_desc->wb));
+ }
+
+ return igb_tx_wb_eic(core, txi->idx);
+}
+
+static void
+igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
+{
+ PCIDevice *d;
+ dma_addr_t base;
+ union e1000_adv_tx_desc desc;
+ const E1000E_RingInfo *txi = txr->i;
+ uint32_t eic = 0;
+
+ /* TODO: check if the queue itself is enabled too. */
+ if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
+ trace_e1000e_tx_disabled();
+ return;
+ }
+
+ d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
+ if (!d) {
+ d = core->owner;
+ }
+
+ while (!igb_ring_empty(core, txi)) {
+ base = igb_ring_head_descr(core, txi);
+
+ pci_dma_read(d, base, &desc, sizeof(desc));
+
+ trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
+ desc.read.cmd_type_len, desc.wb.status);
+
+ igb_process_tx_desc(core, txr->tx, &desc, txi->idx);
+ igb_ring_advance(core, txi, 1);
+ eic |= igb_txdesc_writeback(core, base, &desc, txi);
+ }
+
+ if (eic) {
+ core->mac[EICR] |= eic;
+ igb_set_interrupt_cause(core, E1000_ICR_TXDW);
+ }
+}
+
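+/*
+ * Per-queue Rx buffer size: taken from SRRCTL.BSIZEPKT (shifted up by
+ * E1000_SRRCTL_BSIZEPKT_SHIFT to get bytes) when programmed, falling back to
+ * the global RCTL-derived size otherwise.
+ */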
+static uint32_t
+igb_rxbufsize(IGBCore *core, const E1000E_RingInfo *r)
+{
+ uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
+ uint32_t bsizepkt = srrctl & E1000_SRRCTL_BSIZEPKT_MASK;
+ if (bsizepkt) {
+ return bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT;
+ }
+
+ return e1000x_rxbufsize(core->mac[RCTL]);
+}
+
+static bool
+igb_has_rxbufs(IGBCore *core, const E1000E_RingInfo *r, size_t total_size)
+{
+ uint32_t bufs = igb_ring_free_descr_num(core, r);
+ uint32_t bufsize = igb_rxbufsize(core, r);
+
+ trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, bufsize);
+
+ return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
+ bufsize;
+}
+
+void
+igb_start_recv(IGBCore *core)
+{
+ int i;
+
+ trace_e1000e_rx_start_recv();
+
+ for (i = 0; i <= core->max_queue_num; i++) {
+ qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
+ }
+}
+
+bool
+igb_can_receive(IGBCore *core)
+{
+ int i;
+
+ if (!e1000x_rx_ready(core->owner, core->mac)) {
+ return false;
+ }
+
+ for (i = 0; i < IGB_NUM_QUEUES; i++) {
+ E1000E_RxRing rxr;
+
+ igb_rx_ring_init(core, &rxr, i);
+ if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) {
+ trace_e1000e_rx_can_recv();
+ return true;
+ }
+ }
+
+ trace_e1000e_rx_can_recv_rings_full();
+ return false;
+}
+
+ssize_t
+igb_receive(IGBCore *core, const uint8_t *buf, size_t size)
+{
+ const struct iovec iov = {
+ .iov_base = (uint8_t *)buf,
+ .iov_len = size
+ };
+
+ return igb_receive_iov(core, &iov, 1);
+}
+
+static inline bool
+igb_rx_l3_cso_enabled(IGBCore *core)
+{
+ return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
+}
+
+static inline bool
+igb_rx_l4_cso_enabled(IGBCore *core)
+{
+ return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
+}
+
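+/*
+ * Decide which Rx queues should receive the packet.  In VMDq mode (MRQC bit
+ * 0) a bitmap of pools is built from the VLAN filter, the RA/RA2 receive
+ * address registers, the MTA/UTA hash tables and the per-pool VMOLR/VLVF
+ * controls, with unmatched unicast going to the default pool; the low bit of
+ * the RSS result then selects between a pool's two queues (n and n + 8).
+ * Without VMDq the classic e1000-style filtering applies and RSS picks a
+ * single queue.
+ */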
+static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr,
+ E1000E_RSSInfo *rss_info, bool *external_tx)
+{
+ static const int ta_shift[] = { 4, 3, 2, 0 };
+ uint32_t f, ra[2], *macp, rctl = core->mac[RCTL];
+ uint16_t queues = 0;
+ uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(ehdr)->h_tci) & VLAN_VID_MASK;
+ bool accepted = false;
+ int i;
+
+ memset(rss_info, 0, sizeof(E1000E_RSSInfo));
+
+ if (external_tx) {
+ *external_tx = true;
+ }
+
+ if (e1000x_is_vlan_packet(ehdr, core->mac[VET] & 0xffff) &&
+ e1000x_vlan_rx_filter_enabled(core->mac)) {
+ uint32_t vfta =
+ ldl_le_p((uint32_t *)(core->mac + VFTA) +
+ ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK));
+ if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) {
+ trace_e1000e_rx_flt_vlan_mismatch(vid);
+ return queues;
+ } else {
+ trace_e1000e_rx_flt_vlan_match(vid);
+ }
+ }
+
+ if (core->mac[MRQC] & 1) {
+ if (is_broadcast_ether_addr(ehdr->h_dest)) {
+ for (i = 0; i < 8; i++) {
+ if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) {
+ queues |= BIT(i);
+ }
+ }
+ } else {
+ for (macp = core->mac + RA; macp < core->mac + RA + 32; macp += 2) {
+ if (!(macp[1] & E1000_RAH_AV)) {
+ continue;
+ }
+ ra[0] = cpu_to_le32(macp[0]);
+ ra[1] = cpu_to_le32(macp[1]);
+ if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
+ queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
+ }
+ }
+
+ for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
+ if (!(macp[1] & E1000_RAH_AV)) {
+ continue;
+ }
+ ra[0] = cpu_to_le32(macp[0]);
+ ra[1] = cpu_to_le32(macp[1]);
+ if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
+ queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
+ }
+ }
+
+ if (!queues) {
+ macp = core->mac + (is_multicast_ether_addr(ehdr->h_dest) ? MTA : UTA);
+
+ f = ta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
+ f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff;
+ if (macp[f >> 5] & (1 << (f & 0x1f))) {
+ for (i = 0; i < 8; i++) {
+ if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) {
+ queues |= BIT(i);
+ }
+ }
+ }
+ } else if (is_unicast_ether_addr(ehdr->h_dest) && external_tx) {
+ *external_tx = false;
+ }
+ }
+
+ if (e1000x_vlan_rx_filter_enabled(core->mac)) {
+ uint16_t mask = 0;
+
+ if (e1000x_is_vlan_packet(ehdr, core->mac[VET] & 0xffff)) {
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid &&
+ (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) {
+ uint32_t poolsel = core->mac[VLVF0 + i] & E1000_VLVF_POOLSEL_MASK;
+ mask |= poolsel >> E1000_VLVF_POOLSEL_SHIFT;
+ }
+ }
+ } else {
+ for (i = 0; i < 8; i++) {
+ if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) {
+ mask |= BIT(i);
+ }
+ }
+ }
+
+ queues &= mask;
+ }
+
+ if (is_unicast_ether_addr(ehdr->h_dest) && !queues && !external_tx &&
+ !(core->mac[VT_CTL] & E1000_VT_CTL_DISABLE_DEF_POOL)) {
+ uint32_t def_pl = core->mac[VT_CTL] & E1000_VT_CTL_DEFAULT_POOL_MASK;
+ queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
+ }
+
+ igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL, rss_info);
+ if (rss_info->queue & 1) {
+ queues <<= 8;
+ }
+ } else {
+ switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
+ case ETH_PKT_UCAST:
+ if (rctl & E1000_RCTL_UPE) {
+ accepted = true; /* promiscuous ucast */
+ }
+ break;
+
+ case ETH_PKT_BCAST:
+ if (rctl & E1000_RCTL_BAM) {
+ accepted = true; /* broadcast enabled */
+ }
+ break;
+
+ case ETH_PKT_MCAST:
+ if (rctl & E1000_RCTL_MPE) {
+ accepted = true; /* promiscuous mcast */
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!accepted) {
+ accepted = e1000x_rx_group_filter(core->mac, ehdr->h_dest);
+ }
+
+ if (!accepted) {
+ for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
+ if (!(macp[1] & E1000_RAH_AV)) {
+ continue;
+ }
+ ra[0] = cpu_to_le32(macp[0]);
+ ra[1] = cpu_to_le32(macp[1]);
+ if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
+ trace_e1000x_rx_flt_ucast_match((int)(macp - core->mac - RA2) / 2,
+ MAC_ARG(ehdr->h_dest));
+
+ accepted = true;
+ break;
+ }
+ }
+ }
+
+ if (accepted) {
+ igb_rss_parse_packet(core, core->rx_pkt, false, rss_info);
+ queues = BIT(rss_info->queue);
+ }
+ }
+
+ return queues;
+}
+
+static inline void
+igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
+ hwaddr *buff_addr)
+{
+ *buff_addr = le64_to_cpu(desc->buffer_addr);
+}
+
+static inline void
+igb_read_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
+ hwaddr *buff_addr)
+{
+ *buff_addr = le64_to_cpu(desc->read.pkt_addr);
+}
+
+static inline void
+igb_read_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc,
+ hwaddr *buff_addr)
+{
+ if (igb_rx_use_legacy_descriptor(core)) {
+ igb_read_lgcy_rx_descr(core, &desc->legacy, buff_addr);
+ } else {
+ igb_read_adv_rx_descr(core, &desc->adv, buff_addr);
+ }
+}
+
+static void
+igb_verify_csum_in_sw(IGBCore *core,
+ struct NetRxPkt *pkt,
+ uint32_t *status_flags,
+ EthL4HdrProto l4hdr_proto)
+{
+ bool csum_valid;
+ uint32_t csum_error;
+
+ if (igb_rx_l3_cso_enabled(core)) {
+ if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
+ trace_e1000e_rx_metadata_l3_csum_validation_failed();
+ } else {
+ csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
+ *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
+ }
+ } else {
+ trace_e1000e_rx_metadata_l3_cso_disabled();
+ }
+
+ if (!igb_rx_l4_cso_enabled(core)) {
+ trace_e1000e_rx_metadata_l4_cso_disabled();
+ return;
+ }
+
+ if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
+ trace_e1000e_rx_metadata_l4_csum_validation_failed();
+ return;
+ }
+
+ csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
+ *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;
+
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
+ *status_flags |= E1000_RXD_STAT_UDPCS;
+ }
+}
+
+static void
+igb_build_rx_metadata(IGBCore *core,
+ struct NetRxPkt *pkt,
+ bool is_eop,
+ const E1000E_RSSInfo *rss_info,
+ uint16_t *pkt_info, uint16_t *hdr_info,
+ uint32_t *rss,
+ uint32_t *status_flags,
+ uint16_t *ip_id,
+ uint16_t *vlan_tag)
+{
+ struct virtio_net_hdr *vhdr;
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
+ uint32_t pkt_type;
+
+ *status_flags = E1000_RXD_STAT_DD;
+
+ /* No additional metadata needed for non-EOP descriptors */
+    /* TODO: EOP applies only to status, so don't skip the whole function. */
+ if (!is_eop) {
+ goto func_exit;
+ }
+
+ *status_flags |= E1000_RXD_STAT_EOP;
+
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
+ trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);
+
+ /* VLAN state */
+ if (net_rx_pkt_is_vlan_stripped(pkt)) {
+ *status_flags |= E1000_RXD_STAT_VP;
+ *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
+ trace_e1000e_rx_metadata_vlan(*vlan_tag);
+ }
+
+ /* Packet parsing results */
+ if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
+ if (rss_info->enabled) {
+ *rss = cpu_to_le32(rss_info->hash);
+ trace_igb_rx_metadata_rss(*rss);
+ }
+ } else if (hasip4) {
+ *status_flags |= E1000_RXD_STAT_IPIDV;
+ *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
+ trace_e1000e_rx_metadata_ip_id(*ip_id);
+ }
+
+ if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && net_rx_pkt_is_tcp_ack(pkt)) {
+ *status_flags |= E1000_RXD_STAT_ACK;
+ trace_e1000e_rx_metadata_ack();
+ }
+
+ if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
+ trace_e1000e_rx_metadata_ipv6_filtering_disabled();
+ pkt_type = E1000_RXD_PKT_MAC;
+ } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
+ l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
+ pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
+ } else if (hasip4 || hasip6) {
+ pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
+ } else {
+ pkt_type = E1000_RXD_PKT_MAC;
+ }
+
+ trace_e1000e_rx_metadata_pkt_type(pkt_type);
+
+ if (pkt_info) {
+ if (rss_info->enabled) {
+ *pkt_info = rss_info->type;
+ }
+
+ *pkt_info |= (pkt_type << 4);
+ } else {
+ *status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
+ }
+
+ if (hdr_info) {
+ *hdr_info = 0;
+ }
+
+ /* RX CSO information */
+ if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
+ trace_e1000e_rx_metadata_ipv6_sum_disabled();
+ goto func_exit;
+ }
+
+ vhdr = net_rx_pkt_get_vhdr(pkt);
+
+ if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
+ !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
+ trace_e1000e_rx_metadata_virthdr_no_csum_info();
+ igb_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
+ goto func_exit;
+ }
+
+ if (igb_rx_l3_cso_enabled(core)) {
+ *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
+ } else {
+ trace_e1000e_rx_metadata_l3_cso_disabled();
+ }
+
+ if (igb_rx_l4_cso_enabled(core)) {
+ switch (l4hdr_proto) {
+ case ETH_L4_HDR_PROTO_TCP:
+ *status_flags |= E1000_RXD_STAT_TCPCS;
+ break;
+
+ case ETH_L4_HDR_PROTO_UDP:
+ *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
+ break;
+
+ default:
+ goto func_exit;
+ }
+ } else {
+ trace_e1000e_rx_metadata_l4_cso_disabled();
+ }
+
+ trace_e1000e_rx_metadata_status_flags(*status_flags);
+
+func_exit:
+ *status_flags = cpu_to_le32(*status_flags);
+}
+
+static inline void
+igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
+ struct NetRxPkt *pkt,
+ const E1000E_RSSInfo *rss_info,
+ uint16_t length)
+{
+ uint32_t status_flags, rss;
+ uint16_t ip_id;
+
+ assert(!rss_info->enabled);
+ desc->length = cpu_to_le16(length);
+ desc->csum = 0;
+
+ igb_build_rx_metadata(core, pkt, pkt != NULL,
+ rss_info,
+ NULL, NULL, &rss,
+ &status_flags, &ip_id,
+ &desc->special);
+ desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
+ desc->status = (uint8_t) le32_to_cpu(status_flags);
+}
+
+static inline void
+igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
+ struct NetRxPkt *pkt,
+ const E1000E_RSSInfo *rss_info,
+ uint16_t length)
+{
+ memset(&desc->wb, 0, sizeof(desc->wb));
+
+ desc->wb.upper.length = cpu_to_le16(length);
+
+ igb_build_rx_metadata(core, pkt, pkt != NULL,
+ rss_info,
+ &desc->wb.lower.lo_dword.pkt_info,
+ &desc->wb.lower.lo_dword.hdr_info,
+ &desc->wb.lower.hi_dword.rss,
+ &desc->wb.upper.status_error,
+ &desc->wb.lower.hi_dword.csum_ip.ip_id,
+ &desc->wb.upper.vlan);
+}
+
+static inline void
+igb_write_rx_descr(IGBCore *core, union e1000_rx_desc_union *desc,
+                   struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info,
+                   uint16_t length)
+{
+ if (igb_rx_use_legacy_descriptor(core)) {
+ igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, length);
+ } else {
+ igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, length);
+ }
+}
+
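+/*
+ * Write a receive descriptor back to guest memory. The DD bit is cleared
+ * for the initial DMA write and only set by a second, status-only write,
+ * so the guest never sees a "done" descriptor whose other fields are stale.
+ */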
+static inline void
+igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr,
+ union e1000_rx_desc_union *desc, dma_addr_t len)
+{
+ if (igb_rx_use_legacy_descriptor(core)) {
+ struct e1000_rx_desc *d = &desc->legacy;
+ size_t offset = offsetof(struct e1000_rx_desc, status);
+ uint8_t status = d->status;
+
+ d->status &= ~E1000_RXD_STAT_DD;
+ pci_dma_write(dev, addr, desc, len);
+
+ if (status & E1000_RXD_STAT_DD) {
+ d->status = status;
+ pci_dma_write(dev, addr + offset, &status, sizeof(status));
+ }
+ } else {
+ union e1000_adv_rx_desc *d = &desc->adv;
+ size_t offset =
+ offsetof(union e1000_adv_rx_desc, wb.upper.status_error);
+ uint32_t status = d->wb.upper.status_error;
+
+ d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
+ pci_dma_write(dev, addr, desc, len);
+
+ if (status & E1000_RXD_STAT_DD) {
+ d->wb.upper.status_error = status;
+ pci_dma_write(dev, addr + offset, &status, sizeof(status));
+ }
+ }
+}
+
+static void
+igb_write_to_rx_buffers(IGBCore *core,
+ PCIDevice *d,
+ hwaddr ba,
+ uint16_t *written,
+ const char *data,
+ dma_addr_t data_len)
+{
+ trace_igb_rx_desc_buff_write(ba, *written, data, data_len);
+ pci_dma_write(d, ba + *written, data, data_len);
+ *written += data_len;
+}
+
+static void
+igb_update_rx_stats(IGBCore *core, size_t data_size, size_t data_fcs_size)
+{
+ e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size);
+
+ switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
+ case ETH_PKT_BCAST:
+ e1000x_inc_reg_if_not_full(core->mac, BPRC);
+ break;
+
+ case ETH_PKT_MCAST:
+ e1000x_inc_reg_if_not_full(core->mac, MPRC);
+ break;
+
+ default:
+ break;
+ }
+}
+
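+/*
+ * True when the number of free descriptors equals the minimum threshold
+ * programmed in SRRCTL[24:20], which is expressed in units of 16 descriptors.
+ */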
+static inline bool
+igb_rx_descr_threshold_hit(IGBCore *core, const E1000E_RingInfo *rxi)
+{
+ return igb_ring_free_descr_num(core, rxi) ==
+ ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16;
+}
+
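+/*
+ * Copy the packet into the ring's guest buffers, one descriptor per buffer,
+ * appending the simulated FCS bytes at the end, and write each descriptor
+ * back with its final status.
+ */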
+static void
+igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
+ const E1000E_RxRing *rxr,
+ const E1000E_RSSInfo *rss_info)
+{
+ PCIDevice *d;
+ dma_addr_t base;
+ union e1000_rx_desc_union desc;
+ size_t desc_size;
+ size_t desc_offset = 0;
+ size_t iov_ofs = 0;
+
+ struct iovec *iov = net_rx_pkt_get_iovec(pkt);
+ size_t size = net_rx_pkt_get_total_len(pkt);
+ size_t total_size = size + e1000x_fcs_len(core->mac);
+ const E1000E_RingInfo *rxi = rxr->i;
+ size_t bufsize = igb_rxbufsize(core, rxi);
+
+ d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8);
+ if (!d) {
+ d = core->owner;
+ }
+
+ do {
+ hwaddr ba;
+ uint16_t written = 0;
+ bool is_last = false;
+
+ desc_size = total_size - desc_offset;
+
+ if (desc_size > bufsize) {
+ desc_size = bufsize;
+ }
+
+ if (igb_ring_empty(core, rxi)) {
+ return;
+ }
+
+ base = igb_ring_head_descr(core, rxi);
+
+ pci_dma_read(d, base, &desc, core->rx_desc_len);
+
+ trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len);
+
+ igb_read_rx_descr(core, &desc, &ba);
+
+ if (ba) {
+ if (desc_offset < size) {
+ static const uint32_t fcs_pad;
+ size_t iov_copy;
+ size_t copy_size = size - desc_offset;
+ if (copy_size > bufsize) {
+ copy_size = bufsize;
+ }
+
+ /* Copy packet payload */
+ while (copy_size) {
+ iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
+
+ igb_write_to_rx_buffers(core, d, ba, &written,
+ iov->iov_base + iov_ofs, iov_copy);
+
+ copy_size -= iov_copy;
+ iov_ofs += iov_copy;
+ if (iov_ofs == iov->iov_len) {
+ iov++;
+ iov_ofs = 0;
+ }
+ }
+
+ if (desc_offset + desc_size >= total_size) {
+ /* Simulate FCS checksum presence in the last descriptor */
+ igb_write_to_rx_buffers(core, d, ba, &written,
+ (const char *) &fcs_pad, e1000x_fcs_len(core->mac));
+ }
+ }
+        } else { /* as per Intel docs; skip descriptors with null buf addr */
+ trace_e1000e_rx_null_descriptor();
+ }
+ desc_offset += desc_size;
+ if (desc_offset >= total_size) {
+ is_last = true;
+ }
+
+ igb_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL,
+ rss_info, written);
+ igb_pci_dma_write_rx_desc(core, d, base, &desc, core->rx_desc_len);
+
+ igb_ring_advance(core, rxi, core->rx_desc_len / E1000_MIN_RX_DESC_LEN);
+
+ } while (desc_offset < total_size);
+
+ igb_update_rx_stats(core, size, total_size);
+}
+
+static inline void
+igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
+{
+ struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
+
+ if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ net_rx_pkt_fix_l4_csum(pkt);
+ }
+}
+
+ssize_t
+igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt)
+{
+ return igb_receive_internal(core, iov, iovcnt, core->has_vnet, NULL);
+}
+
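+/*
+ * Main receive path: strip the optional virtio header, pad runt frames,
+ * select the destination queues (VMDq pools and RSS) and deliver a copy of
+ * the packet to every selected ring that has buffers available.
+ */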
+static ssize_t
+igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
+ bool has_vnet, bool *external_tx)
+{
+ static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
+
+ uint16_t queues = 0;
+ uint32_t n = 0;
+ uint8_t min_buf[ETH_ZLEN];
+ struct iovec min_iov;
+ struct eth_header *ehdr;
+ uint8_t *filter_buf;
+ size_t size, orig_size;
+ size_t iov_ofs = 0;
+ E1000E_RxRing rxr;
+ E1000E_RSSInfo rss_info;
+ size_t total_size;
+ int i;
+
+ trace_e1000e_rx_receive_iov(iovcnt);
+
+ if (external_tx) {
+ *external_tx = true;
+ }
+
+ if (!e1000x_hw_rx_enabled(core->mac)) {
+ return -1;
+ }
+
+ /* Pull virtio header in */
+ if (has_vnet) {
+ net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
+ iov_ofs = sizeof(struct virtio_net_hdr);
+ } else {
+ net_rx_pkt_unset_vhdr(core->rx_pkt);
+ }
+
+ filter_buf = iov->iov_base + iov_ofs;
+ orig_size = iov_size(iov, iovcnt);
+ size = orig_size - iov_ofs;
+
+ /* Pad to minimum Ethernet frame length */
+ if (size < sizeof(min_buf)) {
+ iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size);
+ memset(&min_buf[size], 0, sizeof(min_buf) - size);
+ e1000x_inc_reg_if_not_full(core->mac, RUC);
+ min_iov.iov_base = filter_buf = min_buf;
+ min_iov.iov_len = size = sizeof(min_buf);
+ iovcnt = 1;
+ iov = &min_iov;
+ iov_ofs = 0;
+ } else if (iov->iov_len < maximum_ethernet_hdr_len) {
+ /* This is very unlikely, but may happen. */
+ iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len);
+ filter_buf = min_buf;
+ }
+
+ /* Discard oversized packets if !LPE and !SBP. */
+ if (e1000x_is_oversized(core->mac, size)) {
+ return orig_size;
+ }
+
+ ehdr = PKT_GET_ETH_HDR(filter_buf);
+ net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr));
+
+ net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
+ e1000x_vlan_enabled(core->mac),
+ core->mac[VET] & 0xffff);
+
+ queues = igb_receive_assign(core, ehdr, &rss_info, external_tx);
+ if (!queues) {
+ trace_e1000e_rx_flt_dropped();
+ return orig_size;
+ }
+
+ total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
+ e1000x_fcs_len(core->mac);
+
+ for (i = 0; i < IGB_NUM_QUEUES; i++) {
+ if (!(queues & BIT(i))) {
+ continue;
+ }
+
+ igb_rx_ring_init(core, &rxr, i);
+
+ if (!igb_has_rxbufs(core, rxr.i, total_size)) {
+ n |= E1000_ICS_RXO;
+ trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
+ continue;
+ }
+
+ n |= E1000_ICR_RXT0;
+
+ igb_rx_fix_l4_csum(core, core->rx_pkt);
+ igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
+
+ /* Check if receive descriptor minimum threshold hit */
+ if (igb_rx_descr_threshold_hit(core, rxr.i)) {
+ n |= E1000_ICS_RXDMT0;
+ }
+
+ core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
+
+ trace_e1000e_rx_written_to_guest(rxr.i->idx);
+ }
+
+ trace_e1000e_rx_interrupt_set(n);
+ igb_set_interrupt_cause(core, n);
+
+ return orig_size;
+}
+
+static inline bool
+igb_have_autoneg(IGBCore *core)
+{
+ return core->phy[MII_BMCR] & MII_BMCR_AUTOEN;
+}
+
+static void igb_update_flowctl_status(IGBCore *core)
+{
+ if (igb_have_autoneg(core) && core->phy[MII_BMSR] & MII_BMSR_AN_COMP) {
+ trace_e1000e_link_autoneg_flowctl(true);
+ core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
+ } else {
+ trace_e1000e_link_autoneg_flowctl(false);
+ }
+}
+
+static inline void
+igb_link_down(IGBCore *core)
+{
+ e1000x_update_regs_on_link_down(core->mac, core->phy);
+ igb_update_flowctl_status(core);
+}
+
+static inline void
+igb_set_phy_ctrl(IGBCore *core, uint16_t val)
+{
+    /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self-clearing */
+ core->phy[MII_BMCR] = val & ~(0x3f | MII_BMCR_RESET | MII_BMCR_ANRESTART);
+
+ if ((val & MII_BMCR_ANRESTART) && igb_have_autoneg(core)) {
+ e1000x_restart_autoneg(core->mac, core->phy, core->autoneg_timer);
+ }
+}
+
+void igb_core_set_link_status(IGBCore *core)
+{
+ NetClientState *nc = qemu_get_queue(core->owner_nic);
+ uint32_t old_status = core->mac[STATUS];
+
+    trace_e1000e_link_status_changed(!nc->link_down);
+
+ if (nc->link_down) {
+ e1000x_update_regs_on_link_down(core->mac, core->phy);
+ } else {
+ if (igb_have_autoneg(core) &&
+ !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
+ e1000x_restart_autoneg(core->mac, core->phy,
+ core->autoneg_timer);
+ } else {
+ e1000x_update_regs_on_link_up(core->mac, core->phy);
+ igb_start_recv(core);
+ }
+ }
+
+ if (core->mac[STATUS] != old_status) {
+ igb_set_interrupt_cause(core, E1000_ICR_LSC);
+ }
+}
+
+static void
+igb_set_ctrl(IGBCore *core, int index, uint32_t val)
+{
+ trace_e1000e_core_ctrl_write(index, val);
+
+    /* RST is self-clearing */
+ core->mac[CTRL] = val & ~E1000_CTRL_RST;
+ core->mac[CTRL_DUP] = core->mac[CTRL];
+
+ trace_e1000e_link_set_params(
+ !!(val & E1000_CTRL_ASDE),
+ (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
+ !!(val & E1000_CTRL_FRCSPD),
+ !!(val & E1000_CTRL_FRCDPX),
+ !!(val & E1000_CTRL_RFCE),
+ !!(val & E1000_CTRL_TFCE));
+
+ if (val & E1000_CTRL_RST) {
+ trace_e1000e_core_ctrl_sw_reset();
+ igb_reset(core, true);
+ }
+
+ if (val & E1000_CTRL_PHY_RST) {
+ trace_e1000e_core_ctrl_phy_reset();
+ core->mac[STATUS] |= E1000_STATUS_PHYRA;
+ }
+}
+
+static void
+igb_set_rfctl(IGBCore *core, int index, uint32_t val)
+{
+ trace_e1000e_rx_set_rfctl(val);
+
+ if (!(val & E1000_RFCTL_ISCSI_DIS)) {
+ trace_e1000e_wrn_iscsi_filtering_not_supported();
+ }
+
+ if (!(val & E1000_RFCTL_NFSW_DIS)) {
+ trace_e1000e_wrn_nfsw_filtering_not_supported();
+ }
+
+ if (!(val & E1000_RFCTL_NFSR_DIS)) {
+ trace_e1000e_wrn_nfsr_filtering_not_supported();
+ }
+
+ core->mac[RFCTL] = val;
+}
+
+static void
+igb_calc_rxdesclen(IGBCore *core)
+{
+ if (igb_rx_use_legacy_descriptor(core)) {
+ core->rx_desc_len = sizeof(struct e1000_rx_desc);
+ } else {
+ core->rx_desc_len = sizeof(union e1000_adv_rx_desc);
+ }
+ trace_e1000e_rx_desc_len(core->rx_desc_len);
+}
+
+static void
+igb_set_rx_control(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[RCTL] = val;
+ trace_e1000e_rx_set_rctl(core->mac[RCTL]);
+
+ if (val & E1000_RCTL_DTYP_MASK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "igb: RCTL.DTYP must be zero for compatibility");
+ }
+
+ if (val & E1000_RCTL_EN) {
+ igb_calc_rxdesclen(core);
+ igb_start_recv(core);
+ }
+}
+
+static inline void
+igb_clear_ims_bits(IGBCore *core, uint32_t bits)
+{
+ trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits);
+ core->mac[IMS] &= ~bits;
+}
+
+static inline bool
+igb_postpone_interrupt(IGBIntrDelayTimer *timer)
+{
+ if (timer->running) {
+ trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
+
+ return true;
+ }
+
+ if (timer->core->mac[timer->delay_reg] != 0) {
+ igb_intrmgr_rearm_timer(timer);
+ }
+
+ return false;
+}
+
+static inline bool
+igb_eitr_should_postpone(IGBCore *core, int idx)
+{
+ return igb_postpone_interrupt(&core->eitr[idx]);
+}
+
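+/*
+ * Fire an MSI-X message for every cause that is pending in EICR, enabled in
+ * EIMS and not throttled by its EITR timer; causes listed in EIAC are
+ * auto-cleared once the message has been sent.
+ */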
+static void igb_send_msix(IGBCore *core)
+{
+ uint32_t causes = core->mac[EICR] & core->mac[EIMS];
+ uint32_t effective_eiac;
+ int vector;
+
+ for (vector = 0; vector < IGB_INTR_NUM; ++vector) {
+ if ((causes & BIT(vector)) && !igb_eitr_should_postpone(core, vector)) {
+
+ trace_e1000e_irq_msix_notify_vec(vector);
+ igb_msix_notify(core, vector);
+
+ trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]);
+ effective_eiac = core->mac[EIAC] & BIT(vector);
+ core->mac[EICR] &= ~effective_eiac;
+ }
+ }
+}
+
+static inline void
+igb_fix_icr_asserted(IGBCore *core)
+{
+ core->mac[ICR] &= ~E1000_ICR_ASSERTED;
+ if (core->mac[ICR]) {
+ core->mac[ICR] |= E1000_ICR_ASSERTED;
+ }
+
+ trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
+}
+
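+/*
+ * In MSI-X mode, route pending legacy causes (ICR & IMS) through IVAR_MISC
+ * into EICR and deliver them as MSI-X vectors. Otherwise reflect them in
+ * EICR and raise either an MSI or the legacy INTx line.
+ */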
+static void
+igb_update_interrupt_state(IGBCore *core)
+{
+ uint32_t icr;
+ uint32_t causes;
+ uint32_t int_alloc;
+
+ icr = core->mac[ICR] & core->mac[IMS];
+
+ if (msix_enabled(core->owner)) {
+ if (icr) {
+ causes = 0;
+ if (icr & E1000_ICR_DRSTA) {
+ int_alloc = core->mac[IVAR_MISC] & 0xff;
+ if (int_alloc & E1000_IVAR_VALID) {
+ causes |= BIT(int_alloc & 0x1f);
+ }
+ }
+ /* Check if other bits (excluding the TCP Timer) are enabled. */
+ if (icr & ~E1000_ICR_DRSTA) {
+ int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff;
+ if (int_alloc & E1000_IVAR_VALID) {
+ causes |= BIT(int_alloc & 0x1f);
+ }
+ trace_e1000e_irq_add_msi_other(core->mac[EICR]);
+ }
+ core->mac[EICR] |= causes;
+ }
+
+ if ((core->mac[EICR] & core->mac[EIMS])) {
+ igb_send_msix(core);
+ }
+ } else {
+ igb_fix_icr_asserted(core);
+
+ if (icr) {
+ core->mac[EICR] |= (icr & E1000_ICR_DRSTA) | E1000_EICR_OTHER;
+ } else {
+ core->mac[EICR] &= ~E1000_EICR_OTHER;
+ }
+
+ trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
+ core->mac[ICR], core->mac[IMS]);
+
+ if (msi_enabled(core->owner)) {
+ if (icr) {
+ msi_notify(core->owner, 0);
+ }
+ } else {
+ if (icr) {
+ igb_raise_legacy_irq(core);
+ } else {
+ igb_lower_legacy_irq(core);
+ }
+ }
+ }
+}
+
+static void
+igb_set_interrupt_cause(IGBCore *core, uint32_t val)
+{
+ trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]);
+
+ core->mac[ICR] |= val;
+
+ trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]);
+
+ igb_update_interrupt_state(core);
+}
+
+static void igb_set_eics(IGBCore *core, int index, uint32_t val)
+{
+ bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
+
+ trace_igb_irq_write_eics(val, msix);
+
+ core->mac[EICS] |=
+ val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK);
+
+ /*
+ * TODO: Move to igb_update_interrupt_state if EICS is modified in other
+ * places.
+ */
+ core->mac[EICR] = core->mac[EICS];
+
+ igb_update_interrupt_state(core);
+}
+
+static void igb_set_eims(IGBCore *core, int index, uint32_t val)
+{
+ bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
+
+ trace_igb_irq_write_eims(val, msix);
+
+ core->mac[EIMS] |=
+ val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK);
+
+ igb_update_interrupt_state(core);
+}
+
+static void igb_vf_reset(IGBCore *core, uint16_t vfn)
+{
+    /* TODO: Reset the queue enable and interrupt registers of the VF. */
+
+ core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI;
+ core->mac[V2PMAILBOX0 + vfn] = E1000_V2PMAILBOX_RSTD;
+}
+
+static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn)
+{
+ uint32_t ent = core->mac[VTIVAR_MISC + vfn];
+
+ if ((ent & E1000_IVAR_VALID)) {
+ core->mac[EICR] |= (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM);
+ igb_update_interrupt_state(core);
+ }
+}
+
+static void mailbox_interrupt_to_pf(IGBCore *core)
+{
+ igb_set_interrupt_cause(core, E1000_ICR_VMMB);
+}
+
+static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = index - P2VMAILBOX0;
+
+ trace_igb_set_pfmailbox(vfn, val);
+
+ if (val & E1000_P2VMAILBOX_STS) {
+ core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS;
+ mailbox_interrupt_to_vf(core, vfn);
+ }
+
+ if (val & E1000_P2VMAILBOX_ACK) {
+ core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK;
+ mailbox_interrupt_to_vf(core, vfn);
+ }
+
+ /* Buffer Taken by PF (can be set only if the VFU is cleared). */
+ if (val & E1000_P2VMAILBOX_PFU) {
+ if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) {
+ core->mac[index] |= E1000_P2VMAILBOX_PFU;
+ core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU;
+ }
+ } else {
+ core->mac[index] &= ~E1000_P2VMAILBOX_PFU;
+ core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU;
+ }
+
+ if (val & E1000_P2VMAILBOX_RVFU) {
+ core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU;
+ core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) |
+ (E1000_MBVFICR_VFREQ_VF1 << vfn));
+ }
+}
+
+static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = index - V2PMAILBOX0;
+
+ trace_igb_set_vfmailbox(vfn, val);
+
+ if (val & E1000_V2PMAILBOX_REQ) {
+ core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn;
+ mailbox_interrupt_to_pf(core);
+ }
+
+ if (val & E1000_V2PMAILBOX_ACK) {
+ core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn;
+ mailbox_interrupt_to_pf(core);
+ }
+
+ /* Buffer Taken by VF (can be set only if the PFU is cleared). */
+ if (val & E1000_V2PMAILBOX_VFU) {
+ if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) {
+ core->mac[index] |= E1000_V2PMAILBOX_VFU;
+ core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU;
+ }
+ } else {
+ core->mac[index] &= ~E1000_V2PMAILBOX_VFU;
+ core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU;
+ }
+}
+
+static void igb_w1c(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[index] &= ~val;
+}
+
+static void igb_set_eimc(IGBCore *core, int index, uint32_t val)
+{
+ bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
+
+ /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
+ core->mac[EIMS] &=
+ ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK));
+
+ trace_igb_irq_write_eimc(val, core->mac[EIMS], msix);
+ igb_update_interrupt_state(core);
+}
+
+static void igb_set_eiac(IGBCore *core, int index, uint32_t val)
+{
+ bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
+
+ if (msix) {
+ trace_igb_irq_write_eiac(val);
+
+ /*
+ * TODO: When using IOV, the bits that correspond to MSI-X vectors
+ * that are assigned to a VF are read-only.
+ */
+ core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK);
+ }
+}
+
+static void igb_set_eiam(IGBCore *core, int index, uint32_t val)
+{
+ bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
+
+ /*
+ * TODO: When using IOV, the bits that correspond to MSI-X vectors that
+ * are assigned to a VF are read-only.
+ */
+ core->mac[EIAM] |=
+ ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK));
+
+ trace_igb_irq_write_eiam(val, msix);
+}
+
+static void igb_set_eicr(IGBCore *core, int index, uint32_t val)
+{
+ bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
+
+ /*
+ * TODO: In IOV mode, only bit zero of this vector is available for the PF
+ * function.
+ */
+ core->mac[EICR] &=
+ ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK));
+
+ trace_igb_irq_write_eicr(val, msix);
+ igb_update_interrupt_state(core);
+}
+
+static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn;
+
+ if (val & E1000_CTRL_RST) {
+ vfn = (index - PVTCTRL0) / 0x40;
+ igb_vf_reset(core, vfn);
+ }
+}
+
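+/*
+ * PVTEICS/PVTEIMS/PVTEIMC/PVTEIAC/PVTEIAM/PVTEICR below are the per-VF views
+ * of the EIC* registers: a VF write is forwarded to the common PF handler
+ * with the VF's three MSI-X cause bits shifted into that VF's EICR positions.
+ */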
+static void igb_set_vteics(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = (index - PVTEICS0) / 0x40;
+
+ core->mac[index] = val;
+ igb_set_eics(core, EICS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
+}
+
+static void igb_set_vteims(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = (index - PVTEIMS0) / 0x40;
+
+ core->mac[index] = val;
+ igb_set_eims(core, EIMS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
+}
+
+static void igb_set_vteimc(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = (index - PVTEIMC0) / 0x40;
+
+ core->mac[index] = val;
+ igb_set_eimc(core, EIMC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
+}
+
+static void igb_set_vteiac(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = (index - PVTEIAC0) / 0x40;
+
+ core->mac[index] = val;
+ igb_set_eiac(core, EIAC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
+}
+
+static void igb_set_vteiam(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = (index - PVTEIAM0) / 0x40;
+
+ core->mac[index] = val;
+ igb_set_eiam(core, EIAM, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
+}
+
+static void igb_set_vteicr(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = (index - PVTEICR0) / 0x40;
+
+ core->mac[index] = val;
+ igb_set_eicr(core, EICR, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
+}
+
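+/*
+ * Mirror the VF's IVAR programming for its Rx#0 and Tx#0 queues into the PF
+ * IVAR registers, translating the VF-relative vector number into the EICR
+ * bit index assigned to that VF.
+ */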
+static void igb_set_vtivar(IGBCore *core, int index, uint32_t val)
+{
+ uint16_t vfn = (index - VTIVAR);
+ uint16_t qn = vfn;
+ uint8_t ent;
+ int n;
+
+ core->mac[index] = val;
+
+ /* Get assigned vector associated with queue Rx#0. */
+ if ((val & E1000_IVAR_VALID)) {
+ n = igb_ivar_entry_rx(qn);
+ ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (val & 0x7)));
+ core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
+ }
+
+    /* Get assigned vector associated with queue Tx#0. */
+ ent = val >> 8;
+ if ((ent & E1000_IVAR_VALID)) {
+ n = igb_ivar_entry_tx(qn);
+ ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (ent & 0x7)));
+ core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
+ }
+
+ /*
+ * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now.
+ */
+}
+
+static inline void
+igb_autoneg_timer(void *opaque)
+{
+ IGBCore *core = opaque;
+ if (!qemu_get_queue(core->owner_nic)->link_down) {
+ e1000x_update_regs_on_autoneg_done(core->mac, core->phy);
+ igb_start_recv(core);
+
+ igb_update_flowctl_status(core);
+ /* signal link status change to the guest */
+ igb_set_interrupt_cause(core, E1000_ICR_LSC);
+ }
+}
+
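+/*
+ * Convert an MMIO offset into a register index, following the alias offset
+ * stored in mac_reg_access[] (the low bit of each entry is a flag, not part
+ * of the offset).
+ */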
+static inline uint16_t
+igb_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
+{
+ uint16_t index = (addr & 0x1ffff) >> 2;
+ return index + (mac_reg_access[index] & 0xfffe);
+}
+
+static const char igb_phy_regcap[MAX_PHY_REG_ADDRESS + 1] = {
+ [MII_BMCR] = PHY_RW,
+ [MII_BMSR] = PHY_R,
+ [MII_PHYID1] = PHY_R,
+ [MII_PHYID2] = PHY_R,
+ [MII_ANAR] = PHY_RW,
+ [MII_ANLPAR] = PHY_R,
+ [MII_ANER] = PHY_R,
+ [MII_ANNP] = PHY_RW,
+ [MII_ANLPRNP] = PHY_R,
+ [MII_CTRL1000] = PHY_RW,
+ [MII_STAT1000] = PHY_R,
+ [MII_EXTSTAT] = PHY_R,
+
+ [IGP01E1000_PHY_PORT_CONFIG] = PHY_RW,
+ [IGP01E1000_PHY_PORT_STATUS] = PHY_R,
+ [IGP01E1000_PHY_PORT_CTRL] = PHY_RW,
+ [IGP01E1000_PHY_LINK_HEALTH] = PHY_R,
+ [IGP02E1000_PHY_POWER_MGMT] = PHY_RW,
+ [IGP01E1000_PHY_PAGE_SELECT] = PHY_W
+};
+
+static void
+igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data)
+{
+ assert(addr <= MAX_PHY_REG_ADDRESS);
+
+ if (addr == MII_BMCR) {
+ igb_set_phy_ctrl(core, data);
+ } else {
+ core->phy[addr] = data;
+ }
+}
+
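+/*
+ * Emulate an MDIO access through the MDIC register. Only PHY address 1 is
+ * backed; accesses are checked against igb_phy_regcap[] and READY (plus an
+ * optional MDAC interrupt) is reported on completion.
+ */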
+static void
+igb_set_mdic(IGBCore *core, int index, uint32_t val)
+{
+ uint32_t data = val & E1000_MDIC_DATA_MASK;
+ uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+
+ if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
+ val = core->mac[MDIC] | E1000_MDIC_ERROR;
+ } else if (val & E1000_MDIC_OP_READ) {
+ if (!(igb_phy_regcap[addr] & PHY_R)) {
+ trace_igb_core_mdic_read_unhandled(addr);
+ val |= E1000_MDIC_ERROR;
+ } else {
+ val = (val ^ data) | core->phy[addr];
+ trace_igb_core_mdic_read(addr, val);
+ }
+ } else if (val & E1000_MDIC_OP_WRITE) {
+ if (!(igb_phy_regcap[addr] & PHY_W)) {
+ trace_igb_core_mdic_write_unhandled(addr);
+ val |= E1000_MDIC_ERROR;
+ } else {
+ trace_igb_core_mdic_write(addr, data);
+ igb_phy_reg_write(core, addr, data);
+ }
+ }
+ core->mac[MDIC] = val | E1000_MDIC_READY;
+
+ if (val & E1000_MDIC_INT_EN) {
+ igb_set_interrupt_cause(core, E1000_ICR_MDAC);
+ }
+}
+
+static void
+igb_set_rdt(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[index] = val & 0xffff;
+ trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0, index), val);
+ igb_start_recv(core);
+}
+
+static void
+igb_set_status(IGBCore *core, int index, uint32_t val)
+{
+ if ((val & E1000_STATUS_PHYRA) == 0) {
+ core->mac[index] &= ~E1000_STATUS_PHYRA;
+ }
+}
+
+static void
+igb_set_ctrlext(IGBCore *core, int index, uint32_t val)
+{
+ trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
+ !!(val & E1000_CTRL_EXT_SPD_BYPS));
+
+ /* TODO: PFRSTD */
+
+ /* Zero self-clearing bits */
+ val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
+ core->mac[CTRL_EXT] = val;
+}
+
+static void
+igb_set_pbaclr(IGBCore *core, int index, uint32_t val)
+{
+ int i;
+
+ core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;
+
+ if (!msix_enabled(core->owner)) {
+ return;
+ }
+
+ for (i = 0; i < IGB_INTR_NUM; i++) {
+ if (core->mac[PBACLR] & BIT(i)) {
+ msix_clr_pending(core->owner, i);
+ }
+ }
+}
+
+static void
+igb_set_fcrth(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[FCRTH] = val & 0xFFF8;
+}
+
+static void
+igb_set_fcrtl(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[FCRTL] = val & 0x8000FFF8;
+}
+
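+/*
+ * Generate igb_set_<num>bit() register writers that store only the low
+ * <num> bits of the written value.
+ */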
+#define IGB_LOW_BITS_SET_FUNC(num) \
+ static void \
+ igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
+ { \
+ core->mac[index] = val & (BIT(num) - 1); \
+ }
+
+IGB_LOW_BITS_SET_FUNC(4)
+IGB_LOW_BITS_SET_FUNC(13)
+IGB_LOW_BITS_SET_FUNC(16)
+
+static void
+igb_set_dlen(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[index] = val & 0xffff0;
+}
+
+static void
+igb_set_dbal(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[index] = val & E1000_XDBAL_MASK;
+}
+
+static void
+igb_set_tdt(IGBCore *core, int index, uint32_t val)
+{
+ IGB_TxRing txr;
+ int qn = igb_mq_queue_idx(TDT0, index);
+
+ core->mac[index] = val & 0xffff;
+
+ igb_tx_ring_init(core, &txr, qn);
+ igb_start_xmit(core, &txr);
+}
+
+static void
+igb_set_ics(IGBCore *core, int index, uint32_t val)
+{
+ trace_e1000e_irq_write_ics(val);
+ igb_set_interrupt_cause(core, val);
+}
+
+static void
+igb_set_imc(IGBCore *core, int index, uint32_t val)
+{
+ trace_e1000e_irq_ims_clear_set_imc(val);
+ igb_clear_ims_bits(core, val);
+ igb_update_interrupt_state(core);
+}
+
+static void
+igb_set_ims(IGBCore *core, int index, uint32_t val)
+{
+ uint32_t valid_val = val & 0x77D4FBFD;
+
+ trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val);
+ core->mac[IMS] |= valid_val;
+ igb_update_interrupt_state(core);
+}
+
+static void igb_commit_icr(IGBCore *core)
+{
+ /*
+ * If GPIE.NSICR = 0, then the copy of IAM to IMS will occur only if at
+ * least one bit is set in the IMS and there is a true interrupt as
+ * reflected in ICR.INTA.
+ */
+ if ((core->mac[GPIE] & E1000_GPIE_NSICR) ||
+ (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) {
+ igb_set_ims(core, IMS, core->mac[IAM]);
+ } else {
+ igb_update_interrupt_state(core);
+ }
+}
+
+static void igb_set_icr(IGBCore *core, int index, uint32_t val)
+{
+ uint32_t icr = core->mac[ICR] & ~val;
+
+ trace_igb_irq_icr_write(val, core->mac[ICR], icr);
+ core->mac[ICR] = icr;
+ igb_commit_icr(core);
+}
+
+static uint32_t
+igb_mac_readreg(IGBCore *core, int index)
+{
+ return core->mac[index];
+}
+
+static uint32_t
+igb_mac_ics_read(IGBCore *core, int index)
+{
+ trace_e1000e_irq_read_ics(core->mac[ICS]);
+ return core->mac[ICS];
+}
+
+static uint32_t
+igb_mac_ims_read(IGBCore *core, int index)
+{
+ trace_e1000e_irq_read_ims(core->mac[IMS]);
+ return core->mac[IMS];
+}
+
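+/*
+ * SWSM.SMBI acts as a semaphore: reading returns the current value and then
+ * sets SMBI, so only the first read after release sees the bit clear.
+ */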
+static uint32_t
+igb_mac_swsm_read(IGBCore *core, int index)
+{
+ uint32_t val = core->mac[SWSM];
+ core->mac[SWSM] = val | E1000_SWSM_SMBI;
+ return val;
+}
+
+static uint32_t
+igb_mac_eitr_read(IGBCore *core, int index)
+{
+ return core->eitr_guest_value[index - EITR0];
+}
+
+static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index)
+{
+ uint32_t val = core->mac[index];
+
+ core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK |
+ E1000_V2PMAILBOX_RSTD);
+
+ return val;
+}
+
+static uint32_t
+igb_mac_icr_read(IGBCore *core, int index)
+{
+ uint32_t ret = core->mac[ICR];
+ trace_e1000e_irq_icr_read_entry(ret);
+
+ if (core->mac[GPIE] & E1000_GPIE_NSICR) {
+ trace_igb_irq_icr_clear_gpie_nsicr();
+ core->mac[ICR] = 0;
+ } else if (core->mac[IMS] == 0) {
+ trace_e1000e_irq_icr_clear_zero_ims();
+ core->mac[ICR] = 0;
+ } else if (!msix_enabled(core->owner)) {
+ trace_e1000e_irq_icr_clear_nonmsix_icr_read();
+ core->mac[ICR] = 0;
+ }
+
+ trace_e1000e_irq_icr_read_exit(core->mac[ICR]);
+ igb_commit_icr(core);
+ return ret;
+}
+
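+/*
+ * Read-to-clear statistics registers; the 8-byte variant also clears the
+ * preceding register, i.e. the low dword of a 64-bit counter read via its
+ * high dword.
+ */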
+static uint32_t
+igb_mac_read_clr4(IGBCore *core, int index)
+{
+ uint32_t ret = core->mac[index];
+
+ core->mac[index] = 0;
+ return ret;
+}
+
+static uint32_t
+igb_mac_read_clr8(IGBCore *core, int index)
+{
+ uint32_t ret = core->mac[index];
+
+ core->mac[index] = 0;
+ core->mac[index - 1] = 0;
+ return ret;
+}
+
+static uint32_t
+igb_get_ctrl(IGBCore *core, int index)
+{
+ uint32_t val = core->mac[CTRL];
+
+ trace_e1000e_link_read_params(
+ !!(val & E1000_CTRL_ASDE),
+ (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
+ !!(val & E1000_CTRL_FRCSPD),
+ !!(val & E1000_CTRL_FRCDPX),
+ !!(val & E1000_CTRL_RFCE),
+ !!(val & E1000_CTRL_TFCE));
+
+ return val;
+}
+
+static uint32_t igb_get_status(IGBCore *core, int index)
+{
+ uint32_t res = core->mac[STATUS];
+ uint16_t num_vfs = pcie_sriov_num_vfs(core->owner);
+
+ if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
+ res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
+ } else {
+ res |= E1000_STATUS_FD;
+ }
+
+ if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
+ (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
+ switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
+ case E1000_CTRL_SPD_10:
+ res |= E1000_STATUS_SPEED_10;
+ break;
+ case E1000_CTRL_SPD_100:
+ res |= E1000_STATUS_SPEED_100;
+ break;
+ case E1000_CTRL_SPD_1000:
+ default:
+ res |= E1000_STATUS_SPEED_1000;
+ break;
+ }
+ } else {
+ res |= E1000_STATUS_SPEED_1000;
+ }
+
+ if (num_vfs) {
+ res |= num_vfs << E1000_STATUS_NUM_VFS_SHIFT;
+ res |= E1000_STATUS_IOV_MODE;
+ }
+
+ /*
+ * Windows driver 12.18.9.23 resets if E1000_STATUS_GIO_MASTER_ENABLE is
+ * left set after E1000_CTRL_LRST is set.
+ */
+ if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE) &&
+ !(core->mac[CTRL] & E1000_CTRL_LRST)) {
+ res |= E1000_STATUS_GIO_MASTER_ENABLE;
+ }
+
+ return res;
+}
+
+static void
+igb_mac_writereg(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[index] = val;
+}
+
+static void
+igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val)
+{
+ uint32_t macaddr[2];
+
+ core->mac[index] = val;
+
+ macaddr[0] = cpu_to_le32(core->mac[RA]);
+ macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
+ qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
+ (uint8_t *) macaddr);
+
+ trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
+}
+
+static void
+igb_set_eecd(IGBCore *core, int index, uint32_t val)
+{
+ static const uint32_t ro_bits = E1000_EECD_PRES |
+ E1000_EECD_AUTO_RD |
+ E1000_EECD_SIZE_EX_MASK;
+
+ core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
+}
+
+static void
+igb_set_eerd(IGBCore *core, int index, uint32_t val)
+{
+ uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
+ uint32_t flags = 0;
+ uint32_t data = 0;
+
+ if ((addr < IGB_EEPROM_SIZE) && (val & E1000_EERW_START)) {
+ data = core->eeprom[addr];
+ flags = E1000_EERW_DONE;
+ }
+
+ core->mac[EERD] = flags |
+ (addr << E1000_EERW_ADDR_SHIFT) |
+ (data << E1000_EERW_DATA_SHIFT);
+}
+
+static void
+igb_set_eitr(IGBCore *core, int index, uint32_t val)
+{
+ uint32_t eitr_num = index - EITR0;
+
+ trace_igb_irq_eitr_set(eitr_num, val);
+
+ core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR;
+ core->mac[index] = val & 0x7FFE;
+}
+
+static void
+igb_update_rx_offloads(IGBCore *core)
+{
+ int cso_state = igb_rx_l4_cso_enabled(core);
+
+ trace_e1000e_rx_set_cso(cso_state);
+
+ if (core->has_vnet) {
+ qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
+ cso_state, 0, 0, 0, 0);
+ }
+}
+
+static void
+igb_set_rxcsum(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[RXCSUM] = val;
+ igb_update_rx_offloads(core);
+}
+
+static void
+igb_set_gcr(IGBCore *core, int index, uint32_t val)
+{
+ uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
+ core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
+}
+
+static uint32_t igb_get_systiml(IGBCore *core, int index)
+{
+ e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
+ return core->mac[SYSTIML];
+}
+
+static uint32_t igb_get_rxsatrh(IGBCore *core, int index)
+{
+ core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
+ return core->mac[RXSATRH];
+}
+
+static uint32_t igb_get_txstmph(IGBCore *core, int index)
+{
+ core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
+ return core->mac[TXSTMPH];
+}
+
+static void igb_set_timinca(IGBCore *core, int index, uint32_t val)
+{
+ e1000x_set_timinca(core->mac, &core->timadj, val);
+}
+
+static void igb_set_timadjh(IGBCore *core, int index, uint32_t val)
+{
+ core->mac[TIMADJH] = val;
+ core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
+}
+
+#define igb_getreg(x) [x] = igb_mac_readreg
+typedef uint32_t (*readops)(IGBCore *, int);
+static const readops igb_macreg_readops[] = {
+ igb_getreg(WUFC),
+ igb_getreg(MANC),
+ igb_getreg(TOTL),
+ igb_getreg(RDT0),
+ igb_getreg(RDT1),
+ igb_getreg(RDT2),
+ igb_getreg(RDT3),
+ igb_getreg(RDT4),
+ igb_getreg(RDT5),
+ igb_getreg(RDT6),
+ igb_getreg(RDT7),
+ igb_getreg(RDT8),
+ igb_getreg(RDT9),
+ igb_getreg(RDT10),
+ igb_getreg(RDT11),
+ igb_getreg(RDT12),
+ igb_getreg(RDT13),
+ igb_getreg(RDT14),
+ igb_getreg(RDT15),
+ igb_getreg(RDBAH0),
+ igb_getreg(RDBAH1),
+ igb_getreg(RDBAH2),
+ igb_getreg(RDBAH3),
+ igb_getreg(RDBAH4),
+ igb_getreg(RDBAH5),
+ igb_getreg(RDBAH6),
+ igb_getreg(RDBAH7),
+ igb_getreg(RDBAH8),
+ igb_getreg(RDBAH9),
+ igb_getreg(RDBAH10),
+ igb_getreg(RDBAH11),
+ igb_getreg(RDBAH12),
+ igb_getreg(RDBAH13),
+ igb_getreg(RDBAH14),
+ igb_getreg(RDBAH15),
+ igb_getreg(TDBAL0),
+ igb_getreg(TDBAL1),
+ igb_getreg(TDBAL2),
+ igb_getreg(TDBAL3),
+ igb_getreg(TDBAL4),
+ igb_getreg(TDBAL5),
+ igb_getreg(TDBAL6),
+ igb_getreg(TDBAL7),
+ igb_getreg(TDBAL8),
+ igb_getreg(TDBAL9),
+ igb_getreg(TDBAL10),
+ igb_getreg(TDBAL11),
+ igb_getreg(TDBAL12),
+ igb_getreg(TDBAL13),
+ igb_getreg(TDBAL14),
+ igb_getreg(TDBAL15),
+ igb_getreg(RDLEN0),
+ igb_getreg(RDLEN1),
+ igb_getreg(RDLEN2),
+ igb_getreg(RDLEN3),
+ igb_getreg(RDLEN4),
+ igb_getreg(RDLEN5),
+ igb_getreg(RDLEN6),
+ igb_getreg(RDLEN7),
+ igb_getreg(RDLEN8),
+ igb_getreg(RDLEN9),
+ igb_getreg(RDLEN10),
+ igb_getreg(RDLEN11),
+ igb_getreg(RDLEN12),
+ igb_getreg(RDLEN13),
+ igb_getreg(RDLEN14),
+ igb_getreg(RDLEN15),
+ igb_getreg(SRRCTL0),
+ igb_getreg(SRRCTL1),
+ igb_getreg(SRRCTL2),
+ igb_getreg(SRRCTL3),
+ igb_getreg(SRRCTL4),
+ igb_getreg(SRRCTL5),
+ igb_getreg(SRRCTL6),
+ igb_getreg(SRRCTL7),
+ igb_getreg(SRRCTL8),
+ igb_getreg(SRRCTL9),
+ igb_getreg(SRRCTL10),
+ igb_getreg(SRRCTL11),
+ igb_getreg(SRRCTL12),
+ igb_getreg(SRRCTL13),
+ igb_getreg(SRRCTL14),
+ igb_getreg(SRRCTL15),
+ igb_getreg(LATECOL),
+ igb_getreg(XONTXC),
+ igb_getreg(TDFH),
+ igb_getreg(TDFT),
+ igb_getreg(TDFHS),
+ igb_getreg(TDFTS),
+ igb_getreg(TDFPC),
+ igb_getreg(WUS),
+ igb_getreg(RDFH),
+ igb_getreg(RDFT),
+ igb_getreg(RDFHS),
+ igb_getreg(RDFTS),
+ igb_getreg(RDFPC),
+ igb_getreg(GORCL),
+ igb_getreg(MGTPRC),
+ igb_getreg(EERD),
+ igb_getreg(EIAC),
+ igb_getreg(MANC2H),
+ igb_getreg(RXCSUM),
+ igb_getreg(GSCL_3),
+ igb_getreg(GSCN_2),
+ igb_getreg(FCAH),
+ igb_getreg(FCRTH),
+ igb_getreg(FLOP),
+ igb_getreg(RXSTMPH),
+ igb_getreg(TXSTMPL),
+ igb_getreg(TIMADJL),
+ igb_getreg(RDH0),
+ igb_getreg(RDH1),
+ igb_getreg(RDH2),
+ igb_getreg(RDH3),
+ igb_getreg(RDH4),
+ igb_getreg(RDH5),
+ igb_getreg(RDH6),
+ igb_getreg(RDH7),
+ igb_getreg(RDH8),
+ igb_getreg(RDH9),
+ igb_getreg(RDH10),
+ igb_getreg(RDH11),
+ igb_getreg(RDH12),
+ igb_getreg(RDH13),
+ igb_getreg(RDH14),
+ igb_getreg(RDH15),
+ igb_getreg(TDT0),
+ igb_getreg(TDT1),
+ igb_getreg(TDT2),
+ igb_getreg(TDT3),
+ igb_getreg(TDT4),
+ igb_getreg(TDT5),
+ igb_getreg(TDT6),
+ igb_getreg(TDT7),
+ igb_getreg(TDT8),
+ igb_getreg(TDT9),
+ igb_getreg(TDT10),
+ igb_getreg(TDT11),
+ igb_getreg(TDT12),
+ igb_getreg(TDT13),
+ igb_getreg(TDT14),
+ igb_getreg(TDT15),
+ igb_getreg(TNCRS),
+ igb_getreg(RJC),
+ igb_getreg(IAM),
+ igb_getreg(GSCL_2),
+ igb_getreg(TIPG),
+ igb_getreg(FLMNGCTL),
+ igb_getreg(FLMNGCNT),
+ igb_getreg(TSYNCTXCTL),
+ igb_getreg(EEMNGDATA),
+ igb_getreg(CTRL_EXT),
+ igb_getreg(SYSTIMH),
+ igb_getreg(EEMNGCTL),
+ igb_getreg(FLMNGDATA),
+ igb_getreg(TSYNCRXCTL),
+ igb_getreg(LEDCTL),
+ igb_getreg(TCTL),
+ igb_getreg(TCTL_EXT),
+ igb_getreg(DTXCTL),
+ igb_getreg(RXPBS),
+ igb_getreg(TDH0),
+ igb_getreg(TDH1),
+ igb_getreg(TDH2),
+ igb_getreg(TDH3),
+ igb_getreg(TDH4),
+ igb_getreg(TDH5),
+ igb_getreg(TDH6),
+ igb_getreg(TDH7),
+ igb_getreg(TDH8),
+ igb_getreg(TDH9),
+ igb_getreg(TDH10),
+ igb_getreg(TDH11),
+ igb_getreg(TDH12),
+ igb_getreg(TDH13),
+ igb_getreg(TDH14),
+ igb_getreg(TDH15),
+ igb_getreg(ECOL),
+ igb_getreg(DC),
+ igb_getreg(RLEC),
+ igb_getreg(XOFFTXC),
+ igb_getreg(RFC),
+ igb_getreg(RNBC),
+ igb_getreg(MGTPTC),
+ igb_getreg(TIMINCA),
+ igb_getreg(FACTPS),
+ igb_getreg(GSCL_1),
+ igb_getreg(GSCN_0),
+ igb_getreg(PBACLR),
+ igb_getreg(FCTTV),
+ igb_getreg(RXSATRL),
+ igb_getreg(TORL),
+ igb_getreg(TDLEN0),
+ igb_getreg(TDLEN1),
+ igb_getreg(TDLEN2),
+ igb_getreg(TDLEN3),
+ igb_getreg(TDLEN4),
+ igb_getreg(TDLEN5),
+ igb_getreg(TDLEN6),
+ igb_getreg(TDLEN7),
+ igb_getreg(TDLEN8),
+ igb_getreg(TDLEN9),
+ igb_getreg(TDLEN10),
+ igb_getreg(TDLEN11),
+ igb_getreg(TDLEN12),
+ igb_getreg(TDLEN13),
+ igb_getreg(TDLEN14),
+ igb_getreg(TDLEN15),
+ igb_getreg(MCC),
+ igb_getreg(WUC),
+ igb_getreg(EECD),
+ igb_getreg(FCRTV),
+ igb_getreg(TXDCTL0),
+ igb_getreg(TXDCTL1),
+ igb_getreg(TXDCTL2),
+ igb_getreg(TXDCTL3),
+ igb_getreg(TXDCTL4),
+ igb_getreg(TXDCTL5),
+ igb_getreg(TXDCTL6),
+ igb_getreg(TXDCTL7),
+ igb_getreg(TXDCTL8),
+ igb_getreg(TXDCTL9),
+ igb_getreg(TXDCTL10),
+ igb_getreg(TXDCTL11),
+ igb_getreg(TXDCTL12),
+ igb_getreg(TXDCTL13),
+ igb_getreg(TXDCTL14),
+ igb_getreg(TXDCTL15),
+ igb_getreg(TXCTL0),
+ igb_getreg(TXCTL1),
+ igb_getreg(TXCTL2),
+ igb_getreg(TXCTL3),
+ igb_getreg(TXCTL4),
+ igb_getreg(TXCTL5),
+ igb_getreg(TXCTL6),
+ igb_getreg(TXCTL7),
+ igb_getreg(TXCTL8),
+ igb_getreg(TXCTL9),
+ igb_getreg(TXCTL10),
+ igb_getreg(TXCTL11),
+ igb_getreg(TXCTL12),
+ igb_getreg(TXCTL13),
+ igb_getreg(TXCTL14),
+ igb_getreg(TXCTL15),
+ igb_getreg(TDWBAL0),
+ igb_getreg(TDWBAL1),
+ igb_getreg(TDWBAL2),
+ igb_getreg(TDWBAL3),
+ igb_getreg(TDWBAL4),
+ igb_getreg(TDWBAL5),
+ igb_getreg(TDWBAL6),
+ igb_getreg(TDWBAL7),
+ igb_getreg(TDWBAL8),
+ igb_getreg(TDWBAL9),
+ igb_getreg(TDWBAL10),
+ igb_getreg(TDWBAL11),
+ igb_getreg(TDWBAL12),
+ igb_getreg(TDWBAL13),
+ igb_getreg(TDWBAL14),
+ igb_getreg(TDWBAL15),
+ igb_getreg(TDWBAH0),
+ igb_getreg(TDWBAH1),
+ igb_getreg(TDWBAH2),
+ igb_getreg(TDWBAH3),
+ igb_getreg(TDWBAH4),
+ igb_getreg(TDWBAH5),
+ igb_getreg(TDWBAH6),
+ igb_getreg(TDWBAH7),
+ igb_getreg(TDWBAH8),
+ igb_getreg(TDWBAH9),
+ igb_getreg(TDWBAH10),
+ igb_getreg(TDWBAH11),
+ igb_getreg(TDWBAH12),
+ igb_getreg(TDWBAH13),
+ igb_getreg(TDWBAH14),
+ igb_getreg(TDWBAH15),
+ igb_getreg(PVTCTRL0),
+ igb_getreg(PVTCTRL1),
+ igb_getreg(PVTCTRL2),
+ igb_getreg(PVTCTRL3),
+ igb_getreg(PVTCTRL4),
+ igb_getreg(PVTCTRL5),
+ igb_getreg(PVTCTRL6),
+ igb_getreg(PVTCTRL7),
+ igb_getreg(PVTEIMS0),
+ igb_getreg(PVTEIMS1),
+ igb_getreg(PVTEIMS2),
+ igb_getreg(PVTEIMS3),
+ igb_getreg(PVTEIMS4),
+ igb_getreg(PVTEIMS5),
+ igb_getreg(PVTEIMS6),
+ igb_getreg(PVTEIMS7),
+ igb_getreg(PVTEIAC0),
+ igb_getreg(PVTEIAC1),
+ igb_getreg(PVTEIAC2),
+ igb_getreg(PVTEIAC3),
+ igb_getreg(PVTEIAC4),
+ igb_getreg(PVTEIAC5),
+ igb_getreg(PVTEIAC6),
+ igb_getreg(PVTEIAC7),
+ igb_getreg(PVTEIAM0),
+ igb_getreg(PVTEIAM1),
+ igb_getreg(PVTEIAM2),
+ igb_getreg(PVTEIAM3),
+ igb_getreg(PVTEIAM4),
+ igb_getreg(PVTEIAM5),
+ igb_getreg(PVTEIAM6),
+ igb_getreg(PVTEIAM7),
+ igb_getreg(PVFGPRC0),
+ igb_getreg(PVFGPRC1),
+ igb_getreg(PVFGPRC2),
+ igb_getreg(PVFGPRC3),
+ igb_getreg(PVFGPRC4),
+ igb_getreg(PVFGPRC5),
+ igb_getreg(PVFGPRC6),
+ igb_getreg(PVFGPRC7),
+ igb_getreg(PVFGPTC0),
+ igb_getreg(PVFGPTC1),
+ igb_getreg(PVFGPTC2),
+ igb_getreg(PVFGPTC3),
+ igb_getreg(PVFGPTC4),
+ igb_getreg(PVFGPTC5),
+ igb_getreg(PVFGPTC6),
+ igb_getreg(PVFGPTC7),
+ igb_getreg(PVFGORC0),
+ igb_getreg(PVFGORC1),
+ igb_getreg(PVFGORC2),
+ igb_getreg(PVFGORC3),
+ igb_getreg(PVFGORC4),
+ igb_getreg(PVFGORC5),
+ igb_getreg(PVFGORC6),
+ igb_getreg(PVFGORC7),
+ igb_getreg(PVFGOTC0),
+ igb_getreg(PVFGOTC1),
+ igb_getreg(PVFGOTC2),
+ igb_getreg(PVFGOTC3),
+ igb_getreg(PVFGOTC4),
+ igb_getreg(PVFGOTC5),
+ igb_getreg(PVFGOTC6),
+ igb_getreg(PVFGOTC7),
+ igb_getreg(PVFMPRC0),
+ igb_getreg(PVFMPRC1),
+ igb_getreg(PVFMPRC2),
+ igb_getreg(PVFMPRC3),
+ igb_getreg(PVFMPRC4),
+ igb_getreg(PVFMPRC5),
+ igb_getreg(PVFMPRC6),
+ igb_getreg(PVFMPRC7),
+ igb_getreg(PVFGPRLBC0),
+ igb_getreg(PVFGPRLBC1),
+ igb_getreg(PVFGPRLBC2),
+ igb_getreg(PVFGPRLBC3),
+ igb_getreg(PVFGPRLBC4),
+ igb_getreg(PVFGPRLBC5),
+ igb_getreg(PVFGPRLBC6),
+ igb_getreg(PVFGPRLBC7),
+ igb_getreg(PVFGPTLBC0),
+ igb_getreg(PVFGPTLBC1),
+ igb_getreg(PVFGPTLBC2),
+ igb_getreg(PVFGPTLBC3),
+ igb_getreg(PVFGPTLBC4),
+ igb_getreg(PVFGPTLBC5),
+ igb_getreg(PVFGPTLBC6),
+ igb_getreg(PVFGPTLBC7),
+ igb_getreg(PVFGORLBC0),
+ igb_getreg(PVFGORLBC1),
+ igb_getreg(PVFGORLBC2),
+ igb_getreg(PVFGORLBC3),
+ igb_getreg(PVFGORLBC4),
+ igb_getreg(PVFGORLBC5),
+ igb_getreg(PVFGORLBC6),
+ igb_getreg(PVFGORLBC7),
+ igb_getreg(PVFGOTLBC0),
+ igb_getreg(PVFGOTLBC1),
+ igb_getreg(PVFGOTLBC2),
+ igb_getreg(PVFGOTLBC3),
+ igb_getreg(PVFGOTLBC4),
+ igb_getreg(PVFGOTLBC5),
+ igb_getreg(PVFGOTLBC6),
+ igb_getreg(PVFGOTLBC7),
+ igb_getreg(RCTL),
+ igb_getreg(MDIC),
+ igb_getreg(FCRUC),
+ igb_getreg(VET),
+ igb_getreg(RDBAL0),
+ igb_getreg(RDBAL1),
+ igb_getreg(RDBAL2),
+ igb_getreg(RDBAL3),
+ igb_getreg(RDBAL4),
+ igb_getreg(RDBAL5),
+ igb_getreg(RDBAL6),
+ igb_getreg(RDBAL7),
+ igb_getreg(RDBAL8),
+ igb_getreg(RDBAL9),
+ igb_getreg(RDBAL10),
+ igb_getreg(RDBAL11),
+ igb_getreg(RDBAL12),
+ igb_getreg(RDBAL13),
+ igb_getreg(RDBAL14),
+ igb_getreg(RDBAL15),
+ igb_getreg(TDBAH0),
+ igb_getreg(TDBAH1),
+ igb_getreg(TDBAH2),
+ igb_getreg(TDBAH3),
+ igb_getreg(TDBAH4),
+ igb_getreg(TDBAH5),
+ igb_getreg(TDBAH6),
+ igb_getreg(TDBAH7),
+ igb_getreg(TDBAH8),
+ igb_getreg(TDBAH9),
+ igb_getreg(TDBAH10),
+ igb_getreg(TDBAH11),
+ igb_getreg(TDBAH12),
+ igb_getreg(TDBAH13),
+ igb_getreg(TDBAH14),
+ igb_getreg(TDBAH15),
+ igb_getreg(SCC),
+ igb_getreg(COLC),
+ igb_getreg(XOFFRXC),
+ igb_getreg(IPAV),
+ igb_getreg(GOTCL),
+ igb_getreg(MGTPDC),
+ igb_getreg(GCR),
+ igb_getreg(MFVAL),
+ igb_getreg(FUNCTAG),
+ igb_getreg(GSCL_4),
+ igb_getreg(GSCN_3),
+ igb_getreg(MRQC),
+ igb_getreg(FCT),
+ igb_getreg(FLA),
+ igb_getreg(RXDCTL0),
+ igb_getreg(RXDCTL1),
+ igb_getreg(RXDCTL2),
+ igb_getreg(RXDCTL3),
+ igb_getreg(RXDCTL4),
+ igb_getreg(RXDCTL5),
+ igb_getreg(RXDCTL6),
+ igb_getreg(RXDCTL7),
+ igb_getreg(RXDCTL8),
+ igb_getreg(RXDCTL9),
+ igb_getreg(RXDCTL10),
+ igb_getreg(RXDCTL11),
+ igb_getreg(RXDCTL12),
+ igb_getreg(RXDCTL13),
+ igb_getreg(RXDCTL14),
+ igb_getreg(RXDCTL15),
+ igb_getreg(RXSTMPL),
+ igb_getreg(TIMADJH),
+ igb_getreg(FCRTL),
+ igb_getreg(XONRXC),
+ igb_getreg(RFCTL),
+ igb_getreg(GSCN_1),
+ igb_getreg(FCAL),
+ igb_getreg(GPIE),
+ igb_getreg(TXPBS),
+ igb_getreg(RLPML),
+
+ [TOTH] = igb_mac_read_clr8,
+ [GOTCH] = igb_mac_read_clr8,
+ [PRC64] = igb_mac_read_clr4,
+ [PRC255] = igb_mac_read_clr4,
+ [PRC1023] = igb_mac_read_clr4,
+ [PTC64] = igb_mac_read_clr4,
+ [PTC255] = igb_mac_read_clr4,
+ [PTC1023] = igb_mac_read_clr4,
+ [GPRC] = igb_mac_read_clr4,
+ [TPT] = igb_mac_read_clr4,
+ [RUC] = igb_mac_read_clr4,
+ [BPRC] = igb_mac_read_clr4,
+ [MPTC] = igb_mac_read_clr4,
+ [IAC] = igb_mac_read_clr4,
+ [ICR] = igb_mac_icr_read,
+ [STATUS] = igb_get_status,
+ [ICS] = igb_mac_ics_read,
+ /*
+ * 8.8.10: Reading the IMC register returns the value of the IMS register.
+ */
+ [IMC] = igb_mac_ims_read,
+ [TORH] = igb_mac_read_clr8,
+ [GORCH] = igb_mac_read_clr8,
+ [PRC127] = igb_mac_read_clr4,
+ [PRC511] = igb_mac_read_clr4,
+ [PRC1522] = igb_mac_read_clr4,
+ [PTC127] = igb_mac_read_clr4,
+ [PTC511] = igb_mac_read_clr4,
+ [PTC1522] = igb_mac_read_clr4,
+ [GPTC] = igb_mac_read_clr4,
+ [TPR] = igb_mac_read_clr4,
+ [ROC] = igb_mac_read_clr4,
+ [MPRC] = igb_mac_read_clr4,
+ [BPTC] = igb_mac_read_clr4,
+ [TSCTC] = igb_mac_read_clr4,
+ [CTRL] = igb_get_ctrl,
+ [SWSM] = igb_mac_swsm_read,
+ [IMS] = igb_mac_ims_read,
+ [SYSTIML] = igb_get_systiml,
+ [RXSATRH] = igb_get_rxsatrh,
+ [TXSTMPH] = igb_get_txstmph,
+
+ [CRCERRS ... MPC] = igb_mac_readreg,
+ [IP6AT ... IP6AT + 3] = igb_mac_readreg,
+ [IP4AT ... IP4AT + 6] = igb_mac_readreg,
+ [RA ... RA + 31] = igb_mac_readreg,
+ [RA2 ... RA2 + 31] = igb_mac_readreg,
+ [WUPM ... WUPM + 31] = igb_mac_readreg,
+ [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_readreg,
+ [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_readreg,
+ [FFMT ... FFMT + 254] = igb_mac_readreg,
+ [MDEF ... MDEF + 7] = igb_mac_readreg,
+ [FTFT ... FTFT + 254] = igb_mac_readreg,
+ [RETA ... RETA + 31] = igb_mac_readreg,
+ [RSSRK ... RSSRK + 9] = igb_mac_readreg,
+ [MAVTV0 ... MAVTV3] = igb_mac_readreg,
+ [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_mac_eitr_read,
+ [PVTEICR0] = igb_mac_read_clr4,
+ [PVTEICR1] = igb_mac_read_clr4,
+ [PVTEICR2] = igb_mac_read_clr4,
+ [PVTEICR3] = igb_mac_read_clr4,
+ [PVTEICR4] = igb_mac_read_clr4,
+ [PVTEICR5] = igb_mac_read_clr4,
+ [PVTEICR6] = igb_mac_read_clr4,
+ [PVTEICR7] = igb_mac_read_clr4,
+
+ /* IGB specific: */
+ [FWSM] = igb_mac_readreg,
+ [SW_FW_SYNC] = igb_mac_readreg,
+ [HTCBDPC] = igb_mac_read_clr4,
+ [EICR] = igb_mac_read_clr4,
+ [EIMS] = igb_mac_readreg,
+ [EIAM] = igb_mac_readreg,
+ [IVAR0 ... IVAR0 + 7] = igb_mac_readreg,
+ igb_getreg(IVAR_MISC),
+ igb_getreg(VT_CTL),
+ [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg,
+ [V2PMAILBOX0 ... V2PMAILBOX7] = igb_mac_vfmailbox_read,
+ igb_getreg(MBVFICR),
+ [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_readreg,
+ igb_getreg(MBVFIMR),
+ igb_getreg(VFLRE),
+ igb_getreg(VFRE),
+ igb_getreg(VFTE),
+ igb_getreg(QDE),
+ igb_getreg(DTXSWC),
+ igb_getreg(RPLOLR),
+ [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_readreg,
+ [VMVIR0 ... VMVIR7] = igb_mac_readreg,
+ [VMOLR0 ... VMOLR7] = igb_mac_readreg,
+ [WVBR] = igb_mac_read_clr4,
+ [RQDPC0] = igb_mac_read_clr4,
+ [RQDPC1] = igb_mac_read_clr4,
+ [RQDPC2] = igb_mac_read_clr4,
+ [RQDPC3] = igb_mac_read_clr4,
+ [RQDPC4] = igb_mac_read_clr4,
+ [RQDPC5] = igb_mac_read_clr4,
+ [RQDPC6] = igb_mac_read_clr4,
+ [RQDPC7] = igb_mac_read_clr4,
+ [RQDPC8] = igb_mac_read_clr4,
+ [RQDPC9] = igb_mac_read_clr4,
+ [RQDPC10] = igb_mac_read_clr4,
+ [RQDPC11] = igb_mac_read_clr4,
+ [RQDPC12] = igb_mac_read_clr4,
+ [RQDPC13] = igb_mac_read_clr4,
+ [RQDPC14] = igb_mac_read_clr4,
+ [RQDPC15] = igb_mac_read_clr4,
+ [VTIVAR ... VTIVAR + 7] = igb_mac_readreg,
+ [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_readreg,
+};
+enum { IGB_NREADOPS = ARRAY_SIZE(igb_macreg_readops) };
+
+#define igb_putreg(x) [x] = igb_mac_writereg
+typedef void (*writeops)(IGBCore *, int, uint32_t);
+static const writeops igb_macreg_writeops[] = {
+ igb_putreg(SWSM),
+ igb_putreg(WUFC),
+ igb_putreg(RDBAH0),
+ igb_putreg(RDBAH1),
+ igb_putreg(RDBAH2),
+ igb_putreg(RDBAH3),
+ igb_putreg(RDBAH4),
+ igb_putreg(RDBAH5),
+ igb_putreg(RDBAH6),
+ igb_putreg(RDBAH7),
+ igb_putreg(RDBAH8),
+ igb_putreg(RDBAH9),
+ igb_putreg(RDBAH10),
+ igb_putreg(RDBAH11),
+ igb_putreg(RDBAH12),
+ igb_putreg(RDBAH13),
+ igb_putreg(RDBAH14),
+ igb_putreg(RDBAH15),
+ igb_putreg(SRRCTL0),
+ igb_putreg(SRRCTL1),
+ igb_putreg(SRRCTL2),
+ igb_putreg(SRRCTL3),
+ igb_putreg(SRRCTL4),
+ igb_putreg(SRRCTL5),
+ igb_putreg(SRRCTL6),
+ igb_putreg(SRRCTL7),
+ igb_putreg(SRRCTL8),
+ igb_putreg(SRRCTL9),
+ igb_putreg(SRRCTL10),
+ igb_putreg(SRRCTL11),
+ igb_putreg(SRRCTL12),
+ igb_putreg(SRRCTL13),
+ igb_putreg(SRRCTL14),
+ igb_putreg(SRRCTL15),
+ igb_putreg(RXDCTL0),
+ igb_putreg(RXDCTL1),
+ igb_putreg(RXDCTL2),
+ igb_putreg(RXDCTL3),
+ igb_putreg(RXDCTL4),
+ igb_putreg(RXDCTL5),
+ igb_putreg(RXDCTL6),
+ igb_putreg(RXDCTL7),
+ igb_putreg(RXDCTL8),
+ igb_putreg(RXDCTL9),
+ igb_putreg(RXDCTL10),
+ igb_putreg(RXDCTL11),
+ igb_putreg(RXDCTL12),
+ igb_putreg(RXDCTL13),
+ igb_putreg(RXDCTL14),
+ igb_putreg(RXDCTL15),
+ igb_putreg(LEDCTL),
+ igb_putreg(TCTL),
+ igb_putreg(TCTL_EXT),
+ igb_putreg(DTXCTL),
+ igb_putreg(RXPBS),
+ igb_putreg(RQDPC0),
+ igb_putreg(FCAL),
+ igb_putreg(FCRUC),
+ igb_putreg(WUC),
+ igb_putreg(WUS),
+ igb_putreg(IPAV),
+ igb_putreg(TDBAH0),
+ igb_putreg(TDBAH1),
+ igb_putreg(TDBAH2),
+ igb_putreg(TDBAH3),
+ igb_putreg(TDBAH4),
+ igb_putreg(TDBAH5),
+ igb_putreg(TDBAH6),
+ igb_putreg(TDBAH7),
+ igb_putreg(TDBAH8),
+ igb_putreg(TDBAH9),
+ igb_putreg(TDBAH10),
+ igb_putreg(TDBAH11),
+ igb_putreg(TDBAH12),
+ igb_putreg(TDBAH13),
+ igb_putreg(TDBAH14),
+ igb_putreg(TDBAH15),
+ igb_putreg(IAM),
+ igb_putreg(MANC),
+ igb_putreg(MANC2H),
+ igb_putreg(MFVAL),
+ igb_putreg(FACTPS),
+ igb_putreg(FUNCTAG),
+ igb_putreg(GSCL_1),
+ igb_putreg(GSCL_2),
+ igb_putreg(GSCL_3),
+ igb_putreg(GSCL_4),
+ igb_putreg(GSCN_0),
+ igb_putreg(GSCN_1),
+ igb_putreg(GSCN_2),
+ igb_putreg(GSCN_3),
+ igb_putreg(MRQC),
+ igb_putreg(FLOP),
+ igb_putreg(FLA),
+ igb_putreg(TXDCTL0),
+ igb_putreg(TXDCTL1),
+ igb_putreg(TXDCTL2),
+ igb_putreg(TXDCTL3),
+ igb_putreg(TXDCTL4),
+ igb_putreg(TXDCTL5),
+ igb_putreg(TXDCTL6),
+ igb_putreg(TXDCTL7),
+ igb_putreg(TXDCTL8),
+ igb_putreg(TXDCTL9),
+ igb_putreg(TXDCTL10),
+ igb_putreg(TXDCTL11),
+ igb_putreg(TXDCTL12),
+ igb_putreg(TXDCTL13),
+ igb_putreg(TXDCTL14),
+ igb_putreg(TXDCTL15),
+ igb_putreg(TXCTL0),
+ igb_putreg(TXCTL1),
+ igb_putreg(TXCTL2),
+ igb_putreg(TXCTL3),
+ igb_putreg(TXCTL4),
+ igb_putreg(TXCTL5),
+ igb_putreg(TXCTL6),
+ igb_putreg(TXCTL7),
+ igb_putreg(TXCTL8),
+ igb_putreg(TXCTL9),
+ igb_putreg(TXCTL10),
+ igb_putreg(TXCTL11),
+ igb_putreg(TXCTL12),
+ igb_putreg(TXCTL13),
+ igb_putreg(TXCTL14),
+ igb_putreg(TXCTL15),
+ igb_putreg(TDWBAL0),
+ igb_putreg(TDWBAL1),
+ igb_putreg(TDWBAL2),
+ igb_putreg(TDWBAL3),
+ igb_putreg(TDWBAL4),
+ igb_putreg(TDWBAL5),
+ igb_putreg(TDWBAL6),
+ igb_putreg(TDWBAL7),
+ igb_putreg(TDWBAL8),
+ igb_putreg(TDWBAL9),
+ igb_putreg(TDWBAL10),
+ igb_putreg(TDWBAL11),
+ igb_putreg(TDWBAL12),
+ igb_putreg(TDWBAL13),
+ igb_putreg(TDWBAL14),
+ igb_putreg(TDWBAL15),
+ igb_putreg(TDWBAH0),
+ igb_putreg(TDWBAH1),
+ igb_putreg(TDWBAH2),
+ igb_putreg(TDWBAH3),
+ igb_putreg(TDWBAH4),
+ igb_putreg(TDWBAH5),
+ igb_putreg(TDWBAH6),
+ igb_putreg(TDWBAH7),
+ igb_putreg(TDWBAH8),
+ igb_putreg(TDWBAH9),
+ igb_putreg(TDWBAH10),
+ igb_putreg(TDWBAH11),
+ igb_putreg(TDWBAH12),
+ igb_putreg(TDWBAH13),
+ igb_putreg(TDWBAH14),
+ igb_putreg(TDWBAH15),
+ igb_putreg(TIPG),
+ igb_putreg(RXSTMPH),
+ igb_putreg(RXSTMPL),
+ igb_putreg(RXSATRL),
+ igb_putreg(RXSATRH),
+ igb_putreg(TXSTMPL),
+ igb_putreg(TXSTMPH),
+ igb_putreg(SYSTIML),
+ igb_putreg(SYSTIMH),
+ igb_putreg(TIMADJL),
+ igb_putreg(TSYNCRXCTL),
+ igb_putreg(TSYNCTXCTL),
+ igb_putreg(EEMNGCTL),
+ igb_putreg(GPIE),
+ igb_putreg(TXPBS),
+ igb_putreg(RLPML),
+ igb_putreg(VET),
+
+ [TDH0] = igb_set_16bit,
+ [TDH1] = igb_set_16bit,
+ [TDH2] = igb_set_16bit,
+ [TDH3] = igb_set_16bit,
+ [TDH4] = igb_set_16bit,
+ [TDH5] = igb_set_16bit,
+ [TDH6] = igb_set_16bit,
+ [TDH7] = igb_set_16bit,
+ [TDH8] = igb_set_16bit,
+ [TDH9] = igb_set_16bit,
+ [TDH10] = igb_set_16bit,
+ [TDH11] = igb_set_16bit,
+ [TDH12] = igb_set_16bit,
+ [TDH13] = igb_set_16bit,
+ [TDH14] = igb_set_16bit,
+ [TDH15] = igb_set_16bit,
+ [TDT0] = igb_set_tdt,
+ [TDT1] = igb_set_tdt,
+ [TDT2] = igb_set_tdt,
+ [TDT3] = igb_set_tdt,
+ [TDT4] = igb_set_tdt,
+ [TDT5] = igb_set_tdt,
+ [TDT6] = igb_set_tdt,
+ [TDT7] = igb_set_tdt,
+ [TDT8] = igb_set_tdt,
+ [TDT9] = igb_set_tdt,
+ [TDT10] = igb_set_tdt,
+ [TDT11] = igb_set_tdt,
+ [TDT12] = igb_set_tdt,
+ [TDT13] = igb_set_tdt,
+ [TDT14] = igb_set_tdt,
+ [TDT15] = igb_set_tdt,
+ [MDIC] = igb_set_mdic,
+ [ICS] = igb_set_ics,
+ [RDH0] = igb_set_16bit,
+ [RDH1] = igb_set_16bit,
+ [RDH2] = igb_set_16bit,
+ [RDH3] = igb_set_16bit,
+ [RDH4] = igb_set_16bit,
+ [RDH5] = igb_set_16bit,
+ [RDH6] = igb_set_16bit,
+ [RDH7] = igb_set_16bit,
+ [RDH8] = igb_set_16bit,
+ [RDH9] = igb_set_16bit,
+ [RDH10] = igb_set_16bit,
+ [RDH11] = igb_set_16bit,
+ [RDH12] = igb_set_16bit,
+ [RDH13] = igb_set_16bit,
+ [RDH14] = igb_set_16bit,
+ [RDH15] = igb_set_16bit,
+ [RDT0] = igb_set_rdt,
+ [RDT1] = igb_set_rdt,
+ [RDT2] = igb_set_rdt,
+ [RDT3] = igb_set_rdt,
+ [RDT4] = igb_set_rdt,
+ [RDT5] = igb_set_rdt,
+ [RDT6] = igb_set_rdt,
+ [RDT7] = igb_set_rdt,
+ [RDT8] = igb_set_rdt,
+ [RDT9] = igb_set_rdt,
+ [RDT10] = igb_set_rdt,
+ [RDT11] = igb_set_rdt,
+ [RDT12] = igb_set_rdt,
+ [RDT13] = igb_set_rdt,
+ [RDT14] = igb_set_rdt,
+ [RDT15] = igb_set_rdt,
+ [IMC] = igb_set_imc,
+ [IMS] = igb_set_ims,
+ [ICR] = igb_set_icr,
+ [EECD] = igb_set_eecd,
+ [RCTL] = igb_set_rx_control,
+ [CTRL] = igb_set_ctrl,
+ [EERD] = igb_set_eerd,
+ [TDFH] = igb_set_13bit,
+ [TDFT] = igb_set_13bit,
+ [TDFHS] = igb_set_13bit,
+ [TDFTS] = igb_set_13bit,
+ [TDFPC] = igb_set_13bit,
+ [RDFH] = igb_set_13bit,
+ [RDFT] = igb_set_13bit,
+ [RDFHS] = igb_set_13bit,
+ [RDFTS] = igb_set_13bit,
+ [RDFPC] = igb_set_13bit,
+ [GCR] = igb_set_gcr,
+ [RXCSUM] = igb_set_rxcsum,
+ [TDLEN0] = igb_set_dlen,
+ [TDLEN1] = igb_set_dlen,
+ [TDLEN2] = igb_set_dlen,
+ [TDLEN3] = igb_set_dlen,
+ [TDLEN4] = igb_set_dlen,
+ [TDLEN5] = igb_set_dlen,
+ [TDLEN6] = igb_set_dlen,
+ [TDLEN7] = igb_set_dlen,
+ [TDLEN8] = igb_set_dlen,
+ [TDLEN9] = igb_set_dlen,
+ [TDLEN10] = igb_set_dlen,
+ [TDLEN11] = igb_set_dlen,
+ [TDLEN12] = igb_set_dlen,
+ [TDLEN13] = igb_set_dlen,
+ [TDLEN14] = igb_set_dlen,
+ [TDLEN15] = igb_set_dlen,
+ [RDLEN0] = igb_set_dlen,
+ [RDLEN1] = igb_set_dlen,
+ [RDLEN2] = igb_set_dlen,
+ [RDLEN3] = igb_set_dlen,
+ [RDLEN4] = igb_set_dlen,
+ [RDLEN5] = igb_set_dlen,
+ [RDLEN6] = igb_set_dlen,
+ [RDLEN7] = igb_set_dlen,
+ [RDLEN8] = igb_set_dlen,
+ [RDLEN9] = igb_set_dlen,
+ [RDLEN10] = igb_set_dlen,
+ [RDLEN11] = igb_set_dlen,
+ [RDLEN12] = igb_set_dlen,
+ [RDLEN13] = igb_set_dlen,
+ [RDLEN14] = igb_set_dlen,
+ [RDLEN15] = igb_set_dlen,
+ [TDBAL0] = igb_set_dbal,
+ [TDBAL1] = igb_set_dbal,
+ [TDBAL2] = igb_set_dbal,
+ [TDBAL3] = igb_set_dbal,
+ [TDBAL4] = igb_set_dbal,
+ [TDBAL5] = igb_set_dbal,
+ [TDBAL6] = igb_set_dbal,
+ [TDBAL7] = igb_set_dbal,
+ [TDBAL8] = igb_set_dbal,
+ [TDBAL9] = igb_set_dbal,
+ [TDBAL10] = igb_set_dbal,
+ [TDBAL11] = igb_set_dbal,
+ [TDBAL12] = igb_set_dbal,
+ [TDBAL13] = igb_set_dbal,
+ [TDBAL14] = igb_set_dbal,
+ [TDBAL15] = igb_set_dbal,
+ [RDBAL0] = igb_set_dbal,
+ [RDBAL1] = igb_set_dbal,
+ [RDBAL2] = igb_set_dbal,
+ [RDBAL3] = igb_set_dbal,
+ [RDBAL4] = igb_set_dbal,
+ [RDBAL5] = igb_set_dbal,
+ [RDBAL6] = igb_set_dbal,
+ [RDBAL7] = igb_set_dbal,
+ [RDBAL8] = igb_set_dbal,
+ [RDBAL9] = igb_set_dbal,
+ [RDBAL10] = igb_set_dbal,
+ [RDBAL11] = igb_set_dbal,
+ [RDBAL12] = igb_set_dbal,
+ [RDBAL13] = igb_set_dbal,
+ [RDBAL14] = igb_set_dbal,
+ [RDBAL15] = igb_set_dbal,
+ [STATUS] = igb_set_status,
+ [PBACLR] = igb_set_pbaclr,
+ [CTRL_EXT] = igb_set_ctrlext,
+ [FCAH] = igb_set_16bit,
+ [FCT] = igb_set_16bit,
+ [FCTTV] = igb_set_16bit,
+ [FCRTV] = igb_set_16bit,
+ [FCRTH] = igb_set_fcrth,
+ [FCRTL] = igb_set_fcrtl,
+ [CTRL_DUP] = igb_set_ctrl,
+ [RFCTL] = igb_set_rfctl,
+ [TIMINCA] = igb_set_timinca,
+ [TIMADJH] = igb_set_timadjh,
+
+ [IP6AT ... IP6AT + 3] = igb_mac_writereg,
+ [IP4AT ... IP4AT + 6] = igb_mac_writereg,
+ [RA] = igb_mac_writereg,
+ [RA + 1] = igb_mac_setmacaddr,
+ [RA + 2 ... RA + 31] = igb_mac_writereg,
+ [RA2 ... RA2 + 31] = igb_mac_writereg,
+ [WUPM ... WUPM + 31] = igb_mac_writereg,
+ [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
+ [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_writereg,
+ [FFMT ... FFMT + 254] = igb_set_4bit,
+ [MDEF ... MDEF + 7] = igb_mac_writereg,
+ [FTFT ... FTFT + 254] = igb_mac_writereg,
+ [RETA ... RETA + 31] = igb_mac_writereg,
+ [RSSRK ... RSSRK + 9] = igb_mac_writereg,
+ [MAVTV0 ... MAVTV3] = igb_mac_writereg,
+ [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_set_eitr,
+
+ /* IGB specific: */
+ [FWSM] = igb_mac_writereg,
+ [SW_FW_SYNC] = igb_mac_writereg,
+ [EICR] = igb_set_eicr,
+ [EICS] = igb_set_eics,
+ [EIAC] = igb_set_eiac,
+ [EIAM] = igb_set_eiam,
+ [EIMC] = igb_set_eimc,
+ [EIMS] = igb_set_eims,
+ [IVAR0 ... IVAR0 + 7] = igb_mac_writereg,
+ igb_putreg(IVAR_MISC),
+ igb_putreg(VT_CTL),
+ [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox,
+ [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox,
+ [MBVFICR] = igb_w1c,
+ [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_writereg,
+ igb_putreg(MBVFIMR),
+ [VFLRE] = igb_w1c,
+ igb_putreg(VFRE),
+ igb_putreg(VFTE),
+ igb_putreg(QDE),
+ igb_putreg(DTXSWC),
+ igb_putreg(RPLOLR),
+ [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_writereg,
+ [VMVIR0 ... VMVIR7] = igb_mac_writereg,
+ [VMOLR0 ... VMOLR7] = igb_mac_writereg,
+ [UTA ... UTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
+ [PVTCTRL0] = igb_set_vtctrl,
+ [PVTCTRL1] = igb_set_vtctrl,
+ [PVTCTRL2] = igb_set_vtctrl,
+ [PVTCTRL3] = igb_set_vtctrl,
+ [PVTCTRL4] = igb_set_vtctrl,
+ [PVTCTRL5] = igb_set_vtctrl,
+ [PVTCTRL6] = igb_set_vtctrl,
+ [PVTCTRL7] = igb_set_vtctrl,
+ [PVTEICS0] = igb_set_vteics,
+ [PVTEICS1] = igb_set_vteics,
+ [PVTEICS2] = igb_set_vteics,
+ [PVTEICS3] = igb_set_vteics,
+ [PVTEICS4] = igb_set_vteics,
+ [PVTEICS5] = igb_set_vteics,
+ [PVTEICS6] = igb_set_vteics,
+ [PVTEICS7] = igb_set_vteics,
+ [PVTEIMS0] = igb_set_vteims,
+ [PVTEIMS1] = igb_set_vteims,
+ [PVTEIMS2] = igb_set_vteims,
+ [PVTEIMS3] = igb_set_vteims,
+ [PVTEIMS4] = igb_set_vteims,
+ [PVTEIMS5] = igb_set_vteims,
+ [PVTEIMS6] = igb_set_vteims,
+ [PVTEIMS7] = igb_set_vteims,
+ [PVTEIMC0] = igb_set_vteimc,
+ [PVTEIMC1] = igb_set_vteimc,
+ [PVTEIMC2] = igb_set_vteimc,
+ [PVTEIMC3] = igb_set_vteimc,
+ [PVTEIMC4] = igb_set_vteimc,
+ [PVTEIMC5] = igb_set_vteimc,
+ [PVTEIMC6] = igb_set_vteimc,
+ [PVTEIMC7] = igb_set_vteimc,
+ [PVTEIAC0] = igb_set_vteiac,
+ [PVTEIAC1] = igb_set_vteiac,
+ [PVTEIAC2] = igb_set_vteiac,
+ [PVTEIAC3] = igb_set_vteiac,
+ [PVTEIAC4] = igb_set_vteiac,
+ [PVTEIAC5] = igb_set_vteiac,
+ [PVTEIAC6] = igb_set_vteiac,
+ [PVTEIAC7] = igb_set_vteiac,
+ [PVTEIAM0] = igb_set_vteiam,
+ [PVTEIAM1] = igb_set_vteiam,
+ [PVTEIAM2] = igb_set_vteiam,
+ [PVTEIAM3] = igb_set_vteiam,
+ [PVTEIAM4] = igb_set_vteiam,
+ [PVTEIAM5] = igb_set_vteiam,
+ [PVTEIAM6] = igb_set_vteiam,
+ [PVTEIAM7] = igb_set_vteiam,
+ [PVTEICR0] = igb_set_vteicr,
+ [PVTEICR1] = igb_set_vteicr,
+ [PVTEICR2] = igb_set_vteicr,
+ [PVTEICR3] = igb_set_vteicr,
+ [PVTEICR4] = igb_set_vteicr,
+ [PVTEICR5] = igb_set_vteicr,
+ [PVTEICR6] = igb_set_vteicr,
+ [PVTEICR7] = igb_set_vteicr,
+ [VTIVAR ... VTIVAR + 7] = igb_set_vtivar,
+ [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_writereg
+};
+enum { IGB_NWRITEOPS = ARRAY_SIZE(igb_macreg_writeops) };
+
+enum { MAC_ACCESS_PARTIAL = 1 };
+
+/*
+ * The array below combines, for each MAC register that has an alias, the
+ * offset from the alias index to the canonical index with an indication
+ * (the lowest bit) of registers that are not fully implemented. Packing
+ * both into one entry is possible because all of the offsets are even.
+ */
+static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
+ /* Alias index offsets */
+ [FCRTL_A] = 0x07fe,
+ [RDFH_A] = 0xe904, [RDFT_A] = 0xe904,
+ [TDFH_A] = 0xed00, [TDFT_A] = 0xed00,
+ [RA_A ... RA_A + 31] = 0x14f0,
+ [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,
+
+ [RDBAL0_A] = 0x2600,
+ [RDBAH0_A] = 0x2600,
+ [RDLEN0_A] = 0x2600,
+ [SRRCTL0_A] = 0x2600,
+ [RDH0_A] = 0x2600,
+ [RDT0_A] = 0x2600,
+ [RXDCTL0_A] = 0x2600,
+ [RXCTL0_A] = 0x2600,
+ [RQDPC0_A] = 0x2600,
+ [RDBAL1_A] = 0x25D0,
+ [RDBAL2_A] = 0x25A0,
+ [RDBAL3_A] = 0x2570,
+ [RDBAH1_A] = 0x25D0,
+ [RDBAH2_A] = 0x25A0,
+ [RDBAH3_A] = 0x2570,
+ [RDLEN1_A] = 0x25D0,
+ [RDLEN2_A] = 0x25A0,
+ [RDLEN3_A] = 0x2570,
+ [SRRCTL1_A] = 0x25D0,
+ [SRRCTL2_A] = 0x25A0,
+ [SRRCTL3_A] = 0x2570,
+ [RDH1_A] = 0x25D0,
+ [RDH2_A] = 0x25A0,
+ [RDH3_A] = 0x2570,
+ [RDT1_A] = 0x25D0,
+ [RDT2_A] = 0x25A0,
+ [RDT3_A] = 0x2570,
+ [RXDCTL1_A] = 0x25D0,
+ [RXDCTL2_A] = 0x25A0,
+ [RXDCTL3_A] = 0x2570,
+ [RXCTL1_A] = 0x25D0,
+ [RXCTL2_A] = 0x25A0,
+ [RXCTL3_A] = 0x2570,
+ [RQDPC1_A] = 0x25D0,
+ [RQDPC2_A] = 0x25A0,
+ [RQDPC3_A] = 0x2570,
+ [TDBAL0_A] = 0x2A00,
+ [TDBAH0_A] = 0x2A00,
+ [TDLEN0_A] = 0x2A00,
+ [TDH0_A] = 0x2A00,
+ [TDT0_A] = 0x2A00,
+ [TXCTL0_A] = 0x2A00,
+ [TDWBAL0_A] = 0x2A00,
+ [TDWBAH0_A] = 0x2A00,
+ [TDBAL1_A] = 0x29D0,
+ [TDBAL2_A] = 0x29A0,
+ [TDBAL3_A] = 0x2970,
+ [TDBAH1_A] = 0x29D0,
+ [TDBAH2_A] = 0x29A0,
+ [TDBAH3_A] = 0x2970,
+ [TDLEN1_A] = 0x29D0,
+ [TDLEN2_A] = 0x29A0,
+ [TDLEN3_A] = 0x2970,
+ [TDH1_A] = 0x29D0,
+ [TDH2_A] = 0x29A0,
+ [TDH3_A] = 0x2970,
+ [TDT1_A] = 0x29D0,
+ [TDT2_A] = 0x29A0,
+ [TDT3_A] = 0x2970,
+ [TXDCTL0_A] = 0x2A00,
+ [TXDCTL1_A] = 0x29D0,
+ [TXDCTL2_A] = 0x29A0,
+ [TXDCTL3_A] = 0x2970,
+ [TXCTL1_A] = 0x29D0,
+ [TXCTL2_A] = 0x29A0,
+ [TXCTL3_A] = 0x29D0,
+ [TDWBAL1_A] = 0x29D0,
+ [TDWBAL2_A] = 0x29A0,
+ [TDWBAL3_A] = 0x2970,
+ [TDWBAH1_A] = 0x29D0,
+ [TDWBAH2_A] = 0x29A0,
+ [TDWBAH3_A] = 0x2970,
+
+ /* Access options */
+ [RDFH] = MAC_ACCESS_PARTIAL, [RDFT] = MAC_ACCESS_PARTIAL,
+ [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL,
+ [RDFPC] = MAC_ACCESS_PARTIAL,
+ [TDFH] = MAC_ACCESS_PARTIAL, [TDFT] = MAC_ACCESS_PARTIAL,
+ [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL,
+ [TDFPC] = MAC_ACCESS_PARTIAL, [EECD] = MAC_ACCESS_PARTIAL,
+ [FLA] = MAC_ACCESS_PARTIAL,
+ [FCAL] = MAC_ACCESS_PARTIAL, [FCAH] = MAC_ACCESS_PARTIAL,
+ [FCT] = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL,
+ [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL,
+ [FCRTH] = MAC_ACCESS_PARTIAL,
+ [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
+};
+
+void
+igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size)
+{
+ uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);
+
+ if (index < IGB_NWRITEOPS && igb_macreg_writeops[index]) {
+ if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
+ trace_e1000e_wrn_regs_write_trivial(index << 2);
+ }
+ trace_e1000e_core_write(index << 2, size, val);
+ igb_macreg_writeops[index](core, index, val);
+ } else if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
+ trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
+ } else {
+ trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
+ }
+}
+
+uint64_t
+igb_core_read(IGBCore *core, hwaddr addr, unsigned size)
+{
+ uint64_t val;
+ uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);
+
+ if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
+ if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
+ trace_e1000e_wrn_regs_read_trivial(index << 2);
+ }
+ val = igb_macreg_readops[index](core, index);
+ trace_e1000e_core_read(index << 2, size, val);
+ return val;
+ } else {
+ trace_e1000e_wrn_regs_read_unknown(index << 2, size);
+ }
+ return 0;
+}
+
+static inline void
+igb_autoneg_pause(IGBCore *core)
+{
+ timer_del(core->autoneg_timer);
+}
+
+static void
+igb_autoneg_resume(IGBCore *core)
+{
+ if (igb_have_autoneg(core) &&
+ !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
+ qemu_get_queue(core->owner_nic)->link_down = false;
+ timer_mod(core->autoneg_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
+ }
+}
+
+static void
+igb_vm_state_change(void *opaque, bool running, RunState state)
+{
+ IGBCore *core = opaque;
+
+ if (running) {
+ trace_e1000e_vm_state_running();
+ igb_intrmgr_resume(core);
+ igb_autoneg_resume(core);
+ } else {
+ trace_e1000e_vm_state_stopped();
+ igb_autoneg_pause(core);
+ igb_intrmgr_pause(core);
+ }
+}
+
+void
+igb_core_pci_realize(IGBCore *core,
+ const uint16_t *eeprom_templ,
+ uint32_t eeprom_size,
+ const uint8_t *macaddr)
+{
+ int i;
+
+ core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+ igb_autoneg_timer, core);
+ igb_intrmgr_pci_realize(core);
+
+ core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
+
+ for (i = 0; i < IGB_NUM_QUEUES; i++) {
+ net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
+ }
+
+ net_rx_pkt_init(&core->rx_pkt);
+
+ e1000x_core_prepare_eeprom(core->eeprom,
+ eeprom_templ,
+ eeprom_size,
+ PCI_DEVICE_GET_CLASS(core->owner)->device_id,
+ macaddr);
+ igb_update_rx_offloads(core);
+}
+
+void
+igb_core_pci_uninit(IGBCore *core)
+{
+ int i;
+
+ timer_free(core->autoneg_timer);
+
+ igb_intrmgr_pci_unint(core);
+
+ qemu_del_vm_change_state_handler(core->vmstate);
+
+ for (i = 0; i < IGB_NUM_QUEUES; i++) {
+ net_tx_pkt_reset(core->tx[i].tx_pkt);
+ net_tx_pkt_uninit(core->tx[i].tx_pkt);
+ }
+
+ net_rx_pkt_uninit(core->rx_pkt);
+}
+
+static const uint16_t
+igb_phy_reg_init[] = {
+ [MII_BMCR] = MII_BMCR_SPEED1000 |
+ MII_BMCR_FD |
+ MII_BMCR_AUTOEN,
+
+ [MII_BMSR] = MII_BMSR_EXTCAP |
+ MII_BMSR_LINK_ST |
+ MII_BMSR_AUTONEG |
+ MII_BMSR_MFPS |
+ MII_BMSR_EXTSTAT |
+ MII_BMSR_10T_HD |
+ MII_BMSR_10T_FD |
+ MII_BMSR_100TX_HD |
+ MII_BMSR_100TX_FD,
+
+ [MII_PHYID1] = IGP03E1000_E_PHY_ID >> 16,
+ [MII_PHYID2] = (IGP03E1000_E_PHY_ID & 0xfff0) | 1,
+ [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 |
+ MII_ANAR_10FD | MII_ANAR_TX |
+ MII_ANAR_TXFD | MII_ANAR_PAUSE |
+ MII_ANAR_PAUSE_ASYM,
+ [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD |
+ MII_ANLPAR_TX | MII_ANLPAR_TXFD |
+ MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
+ [MII_ANER] = MII_ANER_NP | MII_ANER_NWAY,
+ [MII_ANNP] = 0x1 | MII_ANNP_MP,
+ [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
+ MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
+ [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
+ MII_STAT1000_ROK | MII_STAT1000_LOK,
+ [MII_EXTSTAT] = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,
+
+ [IGP01E1000_PHY_PORT_CONFIG] = BIT(5) | BIT(8),
+ [IGP01E1000_PHY_PORT_STATUS] = IGP01E1000_PSSR_SPEED_1000MBPS,
+ [IGP02E1000_PHY_POWER_MGMT] = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU |
+ IGP01E1000_PSCFR_SMART_SPEED
+};
+
+static const uint32_t igb_mac_reg_init[] = {
+ [LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
+ [EEMNGCTL] = BIT(31),
+ [RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
+ [RXDCTL1] = 1 << 16,
+ [RXDCTL2] = 1 << 16,
+ [RXDCTL3] = 1 << 16,
+ [RXDCTL4] = 1 << 16,
+ [RXDCTL5] = 1 << 16,
+ [RXDCTL6] = 1 << 16,
+ [RXDCTL7] = 1 << 16,
+ [RXDCTL8] = 1 << 16,
+ [RXDCTL9] = 1 << 16,
+ [RXDCTL10] = 1 << 16,
+ [RXDCTL11] = 1 << 16,
+ [RXDCTL12] = 1 << 16,
+ [RXDCTL13] = 1 << 16,
+ [RXDCTL14] = 1 << 16,
+ [RXDCTL15] = 1 << 16,
+ [TIPG] = 0x08 | (0x04 << 10) | (0x06 << 20),
+ [CTRL] = E1000_CTRL_FD | E1000_CTRL_LRST | E1000_CTRL_SPD_1000 |
+ E1000_CTRL_ADVD3WUC,
+ [STATUS] = E1000_STATUS_PHYRA | BIT(31),
+ [EECD] = E1000_EECD_FWE_DIS | E1000_EECD_PRES |
+ (2 << E1000_EECD_SIZE_EX_SHIFT),
+ [GCR] = E1000_L0S_ADJUST |
+ E1000_GCR_CMPL_TMOUT_RESEND |
+ E1000_GCR_CAP_VER2 |
+ E1000_L1_ENTRY_LATENCY_MSB |
+ E1000_L1_ENTRY_LATENCY_LSB,
+ [RXCSUM] = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
+ [TXPBS] = 0x28,
+ [RXPBS] = 0x40,
+ [TCTL] = E1000_TCTL_PSP | (0xF << E1000_CT_SHIFT) |
+ (0x40 << E1000_COLD_SHIFT) | (0x1 << 26) | (0xA << 28),
+ [TCTL_EXT] = 0x40 | (0x42 << 10),
+ [DTXCTL] = E1000_DTXCTL_8023LL | E1000_DTXCTL_SPOOF_INT,
+ [VET] = ETH_P_VLAN | (ETH_P_VLAN << 16),
+
+ [V2PMAILBOX0 ... V2PMAILBOX0 + IGB_MAX_VF_FUNCTIONS - 1] = E1000_V2PMAILBOX_RSTI,
+ [MBVFIMR] = 0xFF,
+ [VFRE] = 0xFF,
+ [VFTE] = 0xFF,
+ [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC,
+ [RPLOLR] = E1000_RPLOLR_STRCRC,
+ [RLPML] = 0x2600,
+ [TXCTL0] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL1] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL2] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL3] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL4] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL5] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL6] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL7] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL8] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL9] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL10] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL11] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL12] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL13] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL14] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+ [TXCTL15] = E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_TX_WB_RO_EN |
+ E1000_DCA_TXCTRL_DESC_RRO_EN,
+};
+
+static void igb_reset(IGBCore *core, bool sw)
+{
+ struct igb_tx *tx;
+ int i;
+
+ timer_del(core->autoneg_timer);
+
+ igb_intrmgr_reset(core);
+
+ memset(core->phy, 0, sizeof core->phy);
+ memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init);
+
+ for (i = 0; i < E1000E_MAC_SIZE; i++) {
+ if (sw &&
+ (i == RXPBS || i == TXPBS ||
+ (i >= EITR0 && i < EITR0 + IGB_INTR_NUM))) {
+ continue;
+ }
+
+ core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ?
+ igb_mac_reg_init[i] : 0;
+ }
+
+ if (qemu_get_queue(core->owner_nic)->link_down) {
+ igb_link_down(core);
+ }
+
+ e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
+
+ for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
+ tx = &core->tx[i];
+ net_tx_pkt_reset(tx->tx_pkt);
+ tx->vlan = 0;
+ tx->mss = 0;
+ tx->tse = false;
+ tx->ixsm = false;
+ tx->txsm = false;
+ tx->first = true;
+ tx->skip_cp = false;
+ }
+}
+
+void
+igb_core_reset(IGBCore *core)
+{
+ igb_reset(core, false);
+}
+
+void igb_core_pre_save(IGBCore *core)
+{
+ int i;
+ NetClientState *nc = qemu_get_queue(core->owner_nic);
+
+ /*
+ * If link is down and auto-negotiation is supported and ongoing,
+ * complete auto-negotiation immediately. This allows us to look
+ * at MII_BMSR_AN_COMP to infer link status on load.
+ */
+ if (nc->link_down && igb_have_autoneg(core)) {
+ core->phy[MII_BMSR] |= MII_BMSR_AN_COMP;
+ igb_update_flowctl_status(core);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
+ if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
+ core->tx[i].skip_cp = true;
+ }
+ }
+}
+
+int
+igb_core_post_load(IGBCore *core)
+{
+ NetClientState *nc = qemu_get_queue(core->owner_nic);
+
+ /*
+ * nc.link_down can't be migrated, so infer link_down according
+ * to link status bit in core.mac[STATUS].
+ */
+ nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;
+
+ return 0;
+}
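
The write and read dispatchers above rely on igb_get_reg_index_with_offset(), which is defined elsewhere in the series (igb_common.h) and is not part of this hunk. The standalone sketch below shows how that lookup plausibly works, assuming it turns the BAR0 byte offset into a dword index and then adds the even alias offset stored in mac_reg_access[]; the helper name and masking are assumptions, while the FCRTL offsets 0x0168/0x2160 are the classic e1000 values that make the 0x07fe table entry above add up.

    #include <stdint.h>
    #include <stdio.h>

    #define MAC_ACCESS_PARTIAL 1
    #define MAC_SIZE 0x8000

    /*
     * Hypothetical stand-in for igb_get_reg_index_with_offset(): convert a
     * BAR0 byte offset into a dword index into mac[], then redirect aliased
     * registers by the even offset recorded in the access table.  The low
     * bit never contributes to the redirect because all alias offsets are
     * even, so it is free to carry the MAC_ACCESS_PARTIAL flag instead.
     */
    static uint16_t reg_index_with_offset(const uint16_t *access, uint64_t addr)
    {
        uint16_t index = (addr & 0x1ffff) >> 2;
        return index + (access[index] & ~MAC_ACCESS_PARTIAL);
    }

    int main(void)
    {
        static uint16_t access[MAC_SIZE];

        access[0x0168 >> 2] = 0x07fe; /* FCRTL_A (0x0168) aliases FCRTL (0x2160) */

        /* 0x5a + 0x7fe == 0x858 == 0x2160 >> 2, the canonical FCRTL slot */
        printf("FCRTL_A resolves to index 0x%x\n",
               reg_index_with_offset(access, 0x0168));
        return 0;
    }
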
diff --git a/hw/net/igb_core.h b/hw/net/igb_core.h
new file mode 100644
index 0000000..814c1e2
--- /dev/null
+++ b/hw/net/igb_core.h
@@ -0,0 +1,146 @@
+/*
+ * Core code for QEMU igb emulation
+ *
+ * Datasheet:
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
+ *
+ * Copyright (c) 2020-2023 Red Hat, Inc.
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Akihiko Odaki <akihiko.odaki@daynix.com>
+ * Gal Hammmer <gal.hammer@sap.com>
+ * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2008 Qumranet
+ * Based on work done by:
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_NET_IGB_CORE_H
+#define HW_NET_IGB_CORE_H
+
+#define E1000E_MAC_SIZE (0x8000)
+#define IGB_EEPROM_SIZE (1024)
+
+#define IGB_INTR_NUM (25)
+#define IGB_MSIX_VEC_NUM (10)
+#define IGBVF_MSIX_VEC_NUM (3)
+#define IGB_NUM_QUEUES (16)
+
+typedef struct IGBCore IGBCore;
+
+enum { PHY_R = BIT(0),
+ PHY_W = BIT(1),
+ PHY_RW = PHY_R | PHY_W };
+
+typedef struct IGBIntrDelayTimer_st {
+ QEMUTimer *timer;
+ bool running;
+ uint32_t delay_reg;
+ uint32_t delay_resolution_ns;
+ IGBCore *core;
+} IGBIntrDelayTimer;
+
+struct IGBCore {
+ uint32_t mac[E1000E_MAC_SIZE];
+ uint16_t phy[MAX_PHY_REG_ADDRESS + 1];
+ uint16_t eeprom[IGB_EEPROM_SIZE];
+
+ uint8_t rx_desc_len;
+
+ QEMUTimer *autoneg_timer;
+
+ struct igb_tx {
+ uint16_t vlan; /* VLAN Tag */
+ uint16_t mss; /* Maximum Segment Size */
+ bool tse; /* TCP/UDP Segmentation Enable */
+ bool ixsm; /* Insert IP Checksum */
+ bool txsm; /* Insert TCP/UDP Checksum */
+
+ bool first;
+ bool skip_cp;
+
+ struct NetTxPkt *tx_pkt;
+ } tx[IGB_NUM_QUEUES];
+
+ struct NetRxPkt *rx_pkt;
+
+ bool has_vnet;
+ int max_queue_num;
+
+ IGBIntrDelayTimer eitr[IGB_INTR_NUM];
+
+ VMChangeStateEntry *vmstate;
+
+ uint32_t eitr_guest_value[IGB_INTR_NUM];
+
+ uint8_t permanent_mac[ETH_ALEN];
+
+ NICState *owner_nic;
+ PCIDevice *owner;
+ void (*owner_start_recv)(PCIDevice *d);
+
+ int64_t timadj;
+};
+
+void
+igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size);
+
+uint64_t
+igb_core_read(IGBCore *core, hwaddr addr, unsigned size);
+
+void
+igb_core_pci_realize(IGBCore *regs,
+ const uint16_t *eeprom_templ,
+ uint32_t eeprom_size,
+ const uint8_t *macaddr);
+
+void
+igb_core_reset(IGBCore *core);
+
+void
+igb_core_pre_save(IGBCore *core);
+
+int
+igb_core_post_load(IGBCore *core);
+
+void
+igb_core_set_link_status(IGBCore *core);
+
+void
+igb_core_pci_uninit(IGBCore *core);
+
+bool
+igb_can_receive(IGBCore *core);
+
+ssize_t
+igb_receive(IGBCore *core, const uint8_t *buf, size_t size);
+
+ssize_t
+igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt);
+
+void
+igb_start_recv(IGBCore *core);
+
+#endif
diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h
new file mode 100644
index 0000000..00934d4
--- /dev/null
+++ b/hw/net/igb_regs.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is copied + edited from kernel header files in
+ * drivers/net/ethernet/intel/igb
+ */
+
+#ifndef HW_IGB_REGS_H_
+#define HW_IGB_REGS_H_
+
+#include "e1000x_regs.h"
+
+/* from igb/e1000_hw.h */
+
+#define E1000_DEV_ID_82576 0x10C9
+#define E1000_DEV_ID_82576_FIBER 0x10E6
+#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
+#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+
+/* Context Descriptor */
+struct e1000_adv_tx_context_desc {
+ uint32_t vlan_macip_lens;
+ uint32_t seqnum_seed;
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx;
+};
+
+/* Advanced Transmit Descriptor */
+union e1000_adv_tx_desc {
+ struct {
+ uint64_t buffer_addr; /* Address of descriptor's data buffer */
+ uint32_t cmd_type_len;
+ uint32_t olinfo_status;
+ } read;
+ struct {
+ uint64_t rsvd; /* Reserved */
+ uint32_t nxtseq_seed;
+ uint32_t status;
+ } wb;
+};
+
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor Extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP/UDP Segmentation Enable */
+
+#define E1000_ADVTXD_POTS_IXSM 0x00000100 /* Insert TCP/UDP Checksum */
+#define E1000_ADVTXD_POTS_TXSM 0x00000200 /* Insert TCP/UDP Checksum */
+
+#define E1000_TXD_POPTS_IXSM 0x00000001 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x00000002 /* Insert TCP/UDP checksum */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ uint64_t pkt_addr; /* Packet Buffer Address */
+ uint64_t hdr_addr; /* Header Buffer Address */
+ } read;
+ struct {
+ struct {
+ struct {
+ uint16_t pkt_info; /* RSS Type, Packet Type */
+ uint16_t hdr_info; /* Split Head, Buffer Length */
+ } lo_dword;
+ union {
+ uint32_t rss; /* RSS Hash */
+ struct {
+ uint16_t ip_id; /* IP Id */
+ uint16_t csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ uint32_t status_error; /* Ext Status/Error */
+ uint16_t length; /* Packet Length */
+ uint16_t vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* from igb/e1000_phy.h */
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+/* Enable flexible speed on link-up */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+/* from igb/igb.h */
+
+#define E1000_PCS_CFG_IGN_SD 1
+
+/* Interrupt defines */
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
+#define IGB_MIN_TXD 80
+#define IGB_MAX_TXD 4096
+
+#define IGB_DEFAULT_RXD 256
+#define IGB_MIN_RXD 80
+#define IGB_MAX_RXD 4096
+
+#define IGB_DEFAULT_ITR 3 /* dynamic */
+#define IGB_MAX_ITR_USECS 10000
+#define IGB_MIN_ITR_USECS 10
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 8
+#define MAX_MSIX_ENTRIES 10
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES 8
+#define IGB_MAX_RX_QUEUES_82575 4
+#define IGB_MAX_RX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 8
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_MAX_VFTA_ENTRIES 128
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
+
+/* from igb/e1000_82575.h */
+
+#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002
+#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
+
+/* Additional DCA related definitions, note change in position of CPUID */
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC BIT(28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29)
+#define E1000_VT_CTL_VM_REPL_EN BIT(30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
+
+/* from igb/e1000_defines.h */
+
+#define E1000_IVAR_VALID 0x80
+#define E1000_GPIE_NSICR 0x00000001
+#define E1000_GPIE_MSIX_MODE 0x00000010
+#define E1000_GPIE_EIAME 0x40000000
+#define E1000_GPIE_PBA 0x80000000
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+#define E1000_RAH_POOL_MASK 0x03FC0000
+#define E1000_RAH_POOL_1 0x00040000
+
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+/* If this bit is asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+/* LAN connected device generates an interrupt */
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+
+/* PCI Express Control */
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+#define IGP03E1000_E_PHY_ID 0x02A80390
+
+/* from igb/e1000_mbox.h */
+
+#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+#define E1000_V2PMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/*
+ * If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS 0x20000000
+#define E1000_VT_MSGINFO_SHIFT 16
+ * bits 23:16 are used for extra info for certain messages
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+/* VF requests to clear all unicast MAC filters */
+#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT)
+/* VF requests to add unicast MAC filter */
+#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */
+#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+/* from igb/e1000_regs.h */
+
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable; RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation Register - RW */
+#define E1000_IVAR_MISC 0x01740 /* Interrupt Vector Allocation Register (last) - RW */
+#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+
+#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
+#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */
+
+#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */
+#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */
+#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
+#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
+
+/* VT Registers */
+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
+#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */
+/* These act per VF so an array friendly macro is used */
+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
+
+/* from igbvf/defines.h */
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+/* from igbvf/mbox.h */
+
+#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/*
+ * If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS 0x20000000
+
+/* We have a total wait time of 1s for vf mailbox posted messages */
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */
+#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
+
+#define E1000_VT_MSGINFO_SHIFT 16
+ * bits 23:16 are used for extra info for certain messages
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+/* VF requests PF to clear all unicast MAC filters */
+#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT)
+/* VF requests PF to add unicast MAC filter */
+#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+/* from igbvf/regs.h */
+
+/* Statistics registers */
+#define E1000_VFGPRC 0x00F10
+#define E1000_VFGORC 0x00F18
+#define E1000_VFMPRC 0x00F3C
+#define E1000_VFGPTC 0x00F14
+#define E1000_VFGOTC 0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
+
+/* These act per VF so an array friendly macro is used */
+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+
+/* from igbvf/vf.h */
+
+#define E1000_DEV_ID_82576_VF 0x10CA
+
+/* new */
+
+/* Receive Registers */
+
+/* RX Descriptor Base Low; RW */
+#define E1000_RDBAL(_n) (0x0C000 + (0x40 * (_n)))
+#define E1000_RDBAL_A(_n) (0x02800 + (0x100 * (_n)))
+
+/* RX Descriptor Base High; RW */
+#define E1000_RDBAH(_n) (0x0C004 + (0x40 * (_n)))
+#define E1000_RDBAH_A(_n) (0x02804 + (0x100 * (_n)))
+
+/* RX Descriptor Ring Length; RW */
+#define E1000_RDLEN(_n) (0x0C008 + (0x40 * (_n)))
+#define E1000_RDLEN_A(_n) (0x02808 + (0x100 * (_n)))
+
+/* Split and Replication Receive Control; RW */
+#define E1000_SRRCTL(_n) (0x0C00C + (0x40 * (_n)))
+#define E1000_SRRCTL_A(_n) (0x0280C + (0x100 * (_n)))
+
+/* RX Descriptor Head; RW */
+#define E1000_RDH(_n) (0x0C010 + (0x40 * (_n)))
+#define E1000_RDH_A(_n) (0x02810 + (0x100 * (_n)))
+
+/* RX DCA Control; RW */
+#define E1000_RXCTL(_n) (0x0C014 + (0x40 * (_n)))
+#define E1000_RXCTL_A(_n) (0x02814 + (0x100 * (_n)))
+
+/* RX Descriptor Tail; RW */
+#define E1000_RDT(_n) (0x0C018 + (0x40 * (_n)))
+#define E1000_RDT_A(_n) (0x02818 + (0x100 * (_n)))
+
+/* RX Descriptor Control; RW */
+#define E1000_RXDCTL(_n) (0x0C028 + (0x40 * (_n)))
+#define E1000_RXDCTL_A(_n) (0x02828 + (0x100 * (_n)))
+
+/* RX Queue Drop Packet Count; RC */
+#define E1000_RQDPC_A(_n) (0x02830 + (0x100 * (_n)))
+
+/* Transmit Registers */
+
+/* TX Descriptor Base Low; RW */
+#define E1000_TDBAL(_n) (0x0E000 + (0x40 * (_n)))
+#define E1000_TDBAL_A(_n) (0x03800 + (0x100 * (_n)))
+
+/* TX Descriptor Base High; RW */
+#define E1000_TDBAH(_n) (0x0E004 + (0x40 * (_n)))
+#define E1000_TDBAH_A(_n) (0x03804 + (0x100 * (_n)))
+
+/* TX Descriptor Ring Length; RW */
+#define E1000_TDLEN(_n) (0x0E008 + (0x40 * (_n)))
+#define E1000_TDLEN_A(_n) (0x03808 + (0x100 * (_n)))
+
+/* TX Descriptor Head; RW */
+#define E1000_TDH(_n) (0x0E010 + (0x40 * (_n)))
+#define E1000_TDH_A(_n) (0x03810 + (0x100 * (_n)))
+
+/* TX DCA Control; RW */
+#define E1000_TXCTL(_n) (0x0E014 + (0x40 * (_n)))
+#define E1000_TXCTL_A(_n) (0x03814 + (0x100 * (_n)))
+
+/* TX Descriptor Tail; RW */
+#define E1000_TDT(_n) (0x0E018 + (0x40 * (_n)))
+#define E1000_TDT_A(_n) (0x03818 + (0x100 * (_n)))
+
+/* TX Descriptor Control; RW */
+#define E1000_TXDCTL(_n) (0x0E028 + (0x40 * (_n)))
+#define E1000_TXDCTL_A(_n) (0x03828 + (0x100 * (_n)))
+
+/* TX Descriptor Completion Write-Back Address Low; RW */
+#define E1000_TDWBAL(_n) (0x0E038 + (0x40 * (_n)))
+#define E1000_TDWBAL_A(_n) (0x03838 + (0x100 * (_n)))
+
+/* TX Descriptor Completion Write-Back Address High; RW */
+#define E1000_TDWBAH(_n) (0x0E03C + (0x40 * (_n)))
+#define E1000_TDWBAH_A(_n) (0x0383C + (0x100 * (_n)))
+
+#define E1000_MTA_A 0x0200
+
+#define E1000_XDBAL_MASK (~(BIT(5) - 1)) /* TDBAL and RDBAL Registers Mask */
+
+#define E1000_ICR_MACSEC 0x00000020 /* MACSec */
+#define E1000_ICR_RX0 0x00000040 /* Receiver Overrun */
+#define E1000_ICR_GPI_SDP0 0x00000800 /* General Purpose, SDP0 pin */
+#define E1000_ICR_GPI_SDP1 0x00001000 /* General Purpose, SDP1 pin */
+#define E1000_ICR_GPI_SDP2 0x00002000 /* General Purpose, SDP2 pin */
+#define E1000_ICR_GPI_SDP3 0x00004000 /* General Purpose, SDP3 pin */
+#define E1000_ICR_PTRAP 0x00008000 /* Probe Trap */
+#define E1000_ICR_MNG 0x00040000 /* Management Event */
+#define E1000_ICR_OMED 0x00100000 /* Other Media Energy Detected */
+#define E1000_ICR_FER 0x00400000 /* Fatal Error */
+#define E1000_ICR_NFER 0x00800000 /* Non Fatal Error */
+#define E1000_ICR_CSRTO 0x01000000 /* CSR access Time Out Indication */
+#define E1000_ICR_SCE 0x02000000 /* Storm Control Event */
+#define E1000_ICR_SW_WD 0x04000000 /* Software Watchdog */
+
+/* Extended Interrupts */
+
+#define E1000_EICR_MSIX_MASK 0x01FFFFFF /* Bits used in MSI-X mode */
+#define E1000_EICR_LEGACY_MASK 0x4000FFFF /* Bits used in non MSI-X mode */
+
+/* Mirror VF Control (only RST bit); RW */
+#define E1000_PVTCTRL(_n) (0x10000 + (_n) * 0x100)
+
+/* Mirror Good Packets Received Count; RO */
+#define E1000_PVFGPRC(_n) (0x10010 + (_n) * 0x100)
+
+/* Mirror Good Packets Transmitted Count; RO */
+#define E1000_PVFGPTC(_n) (0x10014 + (_n) * 0x100)
+
+/* Mirror Good Octets Received Count; RO */
+#define E1000_PVFGORC(_n) (0x10018 + (_n) * 0x100)
+
+/* Mirror Extended Interrupt Cause Set; WO */
+#define E1000_PVTEICS(_n) (0x10020 + (_n) * 0x100)
+
+/* Mirror Extended Interrupt Mask Set/Read; RW */
+#define E1000_PVTEIMS(_n) (0x10024 + (_n) * 0x100)
+
+/* Mirror Extended Interrupt Mask Clear; WO */
+#define E1000_PVTEIMC(_n) (0x10028 + (_n) * 0x100)
+
+/* Mirror Extended Interrupt Auto Clear; RW */
+#define E1000_PVTEIAC(_n) (0x1002C + (_n) * 0x100)
+
+/* Mirror Extended Interrupt Auto Mask Enable; RW */
+#define E1000_PVTEIAM(_n) (0x10030 + (_n) * 0x100)
+
+/* Mirror Good Octets Transmitted Count; RO */
+#define E1000_PVFGOTC(_n) (0x10034 + (_n) * 0x100)
+
+/* Mirror Multicast Packets Received Count; RO */
+#define E1000_PVFMPRC(_n) (0x1003C + (_n) * 0x100)
+
+/* Mirror Good RX Packets loopback Count; RO */
+#define E1000_PVFGPRLBC(_n) (0x10040 + (_n) * 0x100)
+
+/* Mirror Good TX packets loopback Count; RO */
+#define E1000_PVFGPTLBC(_n) (0x10044 + (_n) * 0x100)
+
+/* Mirror Good RX Octets loopback Count; RO */
+#define E1000_PVFGORLBC(_n) (0x10048 + (_n) * 0x100)
+
+/* Mirror Good TX Octets loopback Count; RO */
+#define E1000_PVFGOTLBC(_n) (0x10050 + (_n) * 0x100)
+
+/* Mirror Extended Interrupt Cause Read; RC/W1C */
+#define E1000_PVTEICR(_n) (0x10080 + (_n) * 0x100)
+
+/*
+ * These are fake addresses that, according to the specification, the device
+ * is not using. They are used to distinguish between the PF and the VFs
+ * accessing their VTIVAR register (which is the same address, 0x1700)
+ */
+#define E1000_VTIVAR 0x11700
+#define E1000_VTIVAR_MISC 0x11720
+
+#define E1000_RSS_QUEUE(reta, hash) (E1000_RETA_VAL(reta, hash) & 0x0F)
+
+#define E1000_STATUS_IOV_MODE 0x00040000
+
+#define E1000_STATUS_NUM_VFS_SHIFT 14
+
+static inline uint8_t igb_ivar_entry_rx(uint8_t i)
+{
+ return i < 8 ? i * 4 : (i - 8) * 4 + 2;
+}
+
+static inline uint8_t igb_ivar_entry_tx(uint8_t i)
+{
+ return i < 8 ? i * 4 + 1 : (i - 8) * 4 + 3;
+}
+
+#endif
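
The two inline helpers that close this header encode the 82576 IVAR byte layout: each 32-bit IVAR register carries four vector-assignment bytes, queues 0..7 use bytes 0 (RX) and 1 (TX) of IVAR0..IVAR7, and queues 8..15 fold back onto the same registers at bytes 2 and 3. The small standalone check below only adds the printf framing; the arithmetic is copied from the helpers above.

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as igb_ivar_entry_rx()/igb_ivar_entry_tx() above. */
    static uint8_t ivar_entry_rx(uint8_t i) { return i < 8 ? i * 4 : (i - 8) * 4 + 2; }
    static uint8_t ivar_entry_tx(uint8_t i) { return i < 8 ? i * 4 + 1 : (i - 8) * 4 + 3; }

    int main(void)
    {
        for (int q = 0; q < 16; q++) {
            int rx = ivar_entry_rx(q), tx = ivar_entry_tx(q);
            /* byte n of the IVAR array sits in register IVAR(n / 4), lane n % 4 */
            printf("queue %2d: RX -> IVAR%d byte %d, TX -> IVAR%d byte %d\n",
                   q, rx / 4, rx % 4, tx / 4, tx % 4);
        }
        return 0;
    }
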
diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c
new file mode 100644
index 0000000..70beb7a
--- /dev/null
+++ b/hw/net/igbvf.c
@@ -0,0 +1,327 @@
+/*
+ * QEMU Intel 82576 SR/IOV Ethernet Controller Emulation
+ *
+ * Datasheet:
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
+ *
+ * Copyright (c) 2020-2023 Red Hat, Inc.
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Akihiko Odaki <akihiko.odaki@daynix.com>
+ * Gal Hammmer <gal.hammer@sap.com>
+ * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * Based on work done by:
+ * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
+ * Copyright (c) 2008 Qumranet
+ * Based on work done by:
+ * Copyright (c) 2007 Dan Aloni
+ * Copyright (c) 2004 Antony T Curtis
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/net/mii.h"
+#include "hw/pci/pci_device.h"
+#include "hw/pci/pcie.h"
+#include "hw/pci/msix.h"
+#include "net/eth.h"
+#include "net/net.h"
+#include "igb_common.h"
+#include "igb_core.h"
+#include "trace.h"
+#include "qapi/error.h"
+
+#define TYPE_IGBVF "igbvf"
+OBJECT_DECLARE_SIMPLE_TYPE(IgbVfState, IGBVF)
+
+#define IGBVF_MMIO_BAR_IDX (0)
+#define IGBVF_MSIX_BAR_IDX (3)
+
+#define IGBVF_MMIO_SIZE (16 * 1024)
+#define IGBVF_MSIX_SIZE (16 * 1024)
+
+struct IgbVfState {
+ PCIDevice parent_obj;
+
+ MemoryRegion mmio;
+ MemoryRegion msix;
+};
+
+static hwaddr vf_to_pf_addr(hwaddr addr, uint16_t vfn, bool write)
+{
+ switch (addr) {
+ case E1000_CTRL:
+ case E1000_CTRL_DUP:
+ return E1000_PVTCTRL(vfn);
+ case E1000_EICS:
+ return E1000_PVTEICS(vfn);
+ case E1000_EIMS:
+ return E1000_PVTEIMS(vfn);
+ case E1000_EIMC:
+ return E1000_PVTEIMC(vfn);
+ case E1000_EIAC:
+ return E1000_PVTEIAC(vfn);
+ case E1000_EIAM:
+ return E1000_PVTEIAM(vfn);
+ case E1000_EICR:
+ return E1000_PVTEICR(vfn);
+ case E1000_EITR(0):
+ case E1000_EITR(1):
+ case E1000_EITR(2):
+ return E1000_EITR(22) + (addr - E1000_EITR(0)) - vfn * 0xC;
+ case E1000_IVAR0:
+ return E1000_VTIVAR + vfn * 4;
+ case E1000_IVAR_MISC:
+ return E1000_VTIVAR_MISC + vfn * 4;
+ case 0x0F04: /* PBACL */
+ return E1000_PBACLR;
+ case 0x0F0C: /* PSRTYPE */
+ return E1000_PSRTYPE(vfn);
+ case E1000_V2PMAILBOX(0):
+ return E1000_V2PMAILBOX(vfn);
+ case E1000_VMBMEM(0) ... E1000_VMBMEM(0) + 0x3F:
+ return addr + vfn * 0x40;
+ case E1000_RDBAL_A(0):
+ return E1000_RDBAL(vfn);
+ case E1000_RDBAL_A(1):
+ return E1000_RDBAL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_RDBAH_A(0):
+ return E1000_RDBAH(vfn);
+ case E1000_RDBAH_A(1):
+ return E1000_RDBAH(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_RDLEN_A(0):
+ return E1000_RDLEN(vfn);
+ case E1000_RDLEN_A(1):
+ return E1000_RDLEN(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_SRRCTL_A(0):
+ return E1000_SRRCTL(vfn);
+ case E1000_SRRCTL_A(1):
+ return E1000_SRRCTL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_RDH_A(0):
+ return E1000_RDH(vfn);
+ case E1000_RDH_A(1):
+ return E1000_RDH(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_RXCTL_A(0):
+ return E1000_RXCTL(vfn);
+ case E1000_RXCTL_A(1):
+ return E1000_RXCTL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_RDT_A(0):
+ return E1000_RDT(vfn);
+ case E1000_RDT_A(1):
+ return E1000_RDT(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_RXDCTL_A(0):
+ return E1000_RXDCTL(vfn);
+ case E1000_RXDCTL_A(1):
+ return E1000_RXDCTL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_RQDPC_A(0):
+ return E1000_RQDPC(vfn);
+ case E1000_RQDPC_A(1):
+ return E1000_RQDPC(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TDBAL_A(0):
+ return E1000_TDBAL(vfn);
+ case E1000_TDBAL_A(1):
+ return E1000_TDBAL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TDBAH_A(0):
+ return E1000_TDBAH(vfn);
+ case E1000_TDBAH_A(1):
+ return E1000_TDBAH(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TDLEN_A(0):
+ return E1000_TDLEN(vfn);
+ case E1000_TDLEN_A(1):
+ return E1000_TDLEN(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TDH_A(0):
+ return E1000_TDH(vfn);
+ case E1000_TDH_A(1):
+ return E1000_TDH(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TXCTL_A(0):
+ return E1000_TXCTL(vfn);
+ case E1000_TXCTL_A(1):
+ return E1000_TXCTL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TDT_A(0):
+ return E1000_TDT(vfn);
+ case E1000_TDT_A(1):
+ return E1000_TDT(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TXDCTL_A(0):
+ return E1000_TXDCTL(vfn);
+ case E1000_TXDCTL_A(1):
+ return E1000_TXDCTL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TDWBAL_A(0):
+ return E1000_TDWBAL(vfn);
+ case E1000_TDWBAL_A(1):
+ return E1000_TDWBAL(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_TDWBAH_A(0):
+ return E1000_TDWBAH(vfn);
+ case E1000_TDWBAH_A(1):
+ return E1000_TDWBAH(vfn + IGB_MAX_VF_FUNCTIONS);
+ case E1000_VFGPRC:
+ return E1000_PVFGPRC(vfn);
+ case E1000_VFGPTC:
+ return E1000_PVFGPTC(vfn);
+ case E1000_VFGORC:
+ return E1000_PVFGORC(vfn);
+ case E1000_VFGOTC:
+ return E1000_PVFGOTC(vfn);
+ case E1000_VFMPRC:
+ return E1000_PVFMPRC(vfn);
+ case E1000_VFGPRLBC:
+ return E1000_PVFGPRLBC(vfn);
+ case E1000_VFGPTLBC:
+ return E1000_PVFGPTLBC(vfn);
+ case E1000_VFGORLBC:
+ return E1000_PVFGORLBC(vfn);
+ case E1000_VFGOTLBC:
+ return E1000_PVFGOTLBC(vfn);
+ case E1000_STATUS:
+ case E1000_FRTIMER:
+ if (write) {
+ return HWADDR_MAX;
+ }
+ /* fallthrough */
+ case 0x34E8: /* PBTWAC */
+ case 0x24E8: /* PBRWAC */
+ return addr;
+ }
+
+ trace_igbvf_wrn_io_addr_unknown(addr);
+
+ return HWADDR_MAX;
+}
+
+static void igbvf_write_config(PCIDevice *dev, uint32_t addr, uint32_t val,
+ int len)
+{
+ trace_igbvf_write_config(addr, val, len);
+ pci_default_write_config(dev, addr, val, len);
+}
+
+static uint64_t igbvf_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PCIDevice *vf = PCI_DEVICE(opaque);
+ PCIDevice *pf = pcie_sriov_get_pf(vf);
+
+ addr = vf_to_pf_addr(addr, pcie_sriov_vf_number(vf), false);
+ return addr == HWADDR_MAX ? 0 : igb_mmio_read(pf, addr, size);
+}
+
+static void igbvf_mmio_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ PCIDevice *vf = PCI_DEVICE(opaque);
+ PCIDevice *pf = pcie_sriov_get_pf(vf);
+
+ addr = vf_to_pf_addr(addr, pcie_sriov_vf_number(vf), true);
+ if (addr != HWADDR_MAX) {
+ igb_mmio_write(pf, addr, val, size);
+ }
+}
+
+static const MemoryRegionOps mmio_ops = {
+ .read = igbvf_mmio_read,
+ .write = igbvf_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
+{
+ IgbVfState *s = IGBVF(dev);
+ int ret;
+ int i;
+
+ dev->config_write = igbvf_write_config;
+
+ memory_region_init_io(&s->mmio, OBJECT(dev), &mmio_ops, s, "igbvf-mmio",
+ IGBVF_MMIO_SIZE);
+ pcie_sriov_vf_register_bar(dev, IGBVF_MMIO_BAR_IDX, &s->mmio);
+
+ memory_region_init(&s->msix, OBJECT(dev), "igbvf-msix", IGBVF_MSIX_SIZE);
+ pcie_sriov_vf_register_bar(dev, IGBVF_MSIX_BAR_IDX, &s->msix);
+
+ ret = msix_init(dev, IGBVF_MSIX_VEC_NUM, &s->msix, IGBVF_MSIX_BAR_IDX, 0,
+ &s->msix, IGBVF_MSIX_BAR_IDX, 0x2000, 0x70, errp);
+ if (ret) {
+ return;
+ }
+
+ for (i = 0; i < IGBVF_MSIX_VEC_NUM; i++) {
+ msix_vector_use(dev, i);
+ }
+
+ if (pcie_endpoint_cap_init(dev, 0xa0) < 0) {
+ hw_error("Failed to initialize PCIe capability");
+ }
+
+ if (pcie_aer_init(dev, 1, 0x100, 0x40, errp) < 0) {
+ hw_error("Failed to initialize AER capability");
+ }
+
+ pcie_ari_init(dev, 0x150, 1);
+}
+
+static void igbvf_pci_uninit(PCIDevice *dev)
+{
+ IgbVfState *s = IGBVF(dev);
+
+ pcie_aer_exit(dev);
+ pcie_cap_exit(dev);
+ msix_unuse_all_vectors(dev);
+ msix_uninit(dev, &s->msix, &s->msix);
+}
+
+static void igbvf_class_init(ObjectClass *class, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(class);
+ PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
+
+ c->realize = igbvf_pci_realize;
+ c->exit = igbvf_pci_uninit;
+ c->vendor_id = PCI_VENDOR_ID_INTEL;
+ c->device_id = E1000_DEV_ID_82576_VF;
+ c->revision = 1;
+ c->class_id = PCI_CLASS_NETWORK_ETHERNET;
+
+ dc->desc = "Intel 82576 Virtual Function";
+ dc->user_creatable = false;
+
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+}
+
+static const TypeInfo igbvf_info = {
+ .name = TYPE_IGBVF,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(IgbVfState),
+ .class_init = igbvf_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_PCIE_DEVICE },
+ { }
+ },
+};
+
+static void igb_register_types(void)
+{
+ type_register_static(&igbvf_info);
+}
+
+type_init(igb_register_types)
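
vf_to_pf_addr() is the heart of the VF model: every access to a VF's BAR0 is rewritten into the PF's register space and then serviced by igb_mmio_read()/igb_mmio_write() on the PF device, so the VF keeps no register state of its own. As one worked example under the macros from igb_regs.h above, VF 3 touching its first RX Descriptor Tail (VF BAR offset 0x2818, i.e. E1000_RDT_A(0)) lands on the PF's queue 3 tail, and its second RX queue lands past the eight primary per-VF queues. A minimal standalone check of that arithmetic, with the constants copied from the header:

    #include <assert.h>

    /* Copied from igb_regs.h; IGB_MAX_VF_FUNCTIONS is 8 there as well. */
    #define E1000_RDT(n)          (0x0C018 + (0x40 * (n)))
    #define E1000_RDT_A(n)        (0x02818 + (0x100 * (n)))
    #define IGB_MAX_VF_FUNCTIONS  8

    int main(void)
    {
        int vfn = 3;

        /* case E1000_RDT_A(0): return E1000_RDT(vfn); */
        assert(E1000_RDT(vfn) == 0x0C0D8);

        /* case E1000_RDT_A(1): return E1000_RDT(vfn + IGB_MAX_VF_FUNCTIONS); */
        assert(E1000_RDT(vfn + IGB_MAX_VF_FUNCTIONS) == 0x0C2D8);

        /* the alias the VF sees really does sit at offset 0x2818 in its BAR */
        assert(E1000_RDT_A(0) == 0x02818);
        return 0;
    }
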
diff --git a/hw/net/meson.build b/hw/net/meson.build
index 4285145..e2be065 100644
--- a/hw/net/meson.build
+++ b/hw/net/meson.build
@@ -10,6 +10,8 @@
softmmu_ss.add(when: 'CONFIG_E1000_PCI', if_true: files('e1000.c', 'e1000x_common.c'))
softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c'))
softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('e1000e.c', 'e1000e_core.c', 'e1000x_common.c'))
+softmmu_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c'))
+softmmu_ss.add(when: 'CONFIG_IGB_PCI_EXPRESS', if_true: files('igb.c', 'igbvf.c', 'igb_core.c'))
softmmu_ss.add(when: 'CONFIG_RTL8139_PCI', if_true: files('rtl8139.c'))
softmmu_ss.add(when: 'CONFIG_TULIP', if_true: files('tulip.c'))
softmmu_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c'))
diff --git a/hw/net/net_rx_pkt.c b/hw/net/net_rx_pkt.c
index 1e1c504..39cdea0 100644
--- a/hw/net/net_rx_pkt.c
+++ b/hw/net/net_rx_pkt.c
@@ -30,14 +30,11 @@
uint32_t tot_len;
uint16_t tci;
size_t ehdr_buf_len;
- bool has_virt_hdr;
eth_pkt_types_e packet_type;
/* Analysis results */
- bool isip4;
- bool isip6;
- bool isudp;
- bool istcp;
+ bool hasip4;
+ bool hasip6;
size_t l3hdr_off;
size_t l4hdr_off;
@@ -48,10 +45,9 @@
eth_l4_hdr_info l4hdr_info;
};
-void net_rx_pkt_init(struct NetRxPkt **pkt, bool has_virt_hdr)
+void net_rx_pkt_init(struct NetRxPkt **pkt)
{
struct NetRxPkt *p = g_malloc0(sizeof *p);
- p->has_virt_hdr = has_virt_hdr;
p->vec = NULL;
p->vec_len_total = 0;
*pkt = p;
@@ -107,12 +103,11 @@
iov, iovcnt, ploff, pkt->tot_len);
}
- eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->isip4, &pkt->isip6,
- &pkt->isudp, &pkt->istcp,
+ eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->hasip4, &pkt->hasip6,
&pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
&pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);
- trace_net_rx_pkt_parsed(pkt->isip4, pkt->isip6, pkt->isudp, pkt->istcp,
+ trace_net_rx_pkt_parsed(pkt->hasip4, pkt->hasip6, pkt->l4hdr_info.proto,
pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off);
}
@@ -201,22 +196,20 @@
assert(pkt);
- eth_get_protocols(&iov, 1, &pkt->isip4, &pkt->isip6,
- &pkt->isudp, &pkt->istcp,
+ eth_get_protocols(&iov, 1, &pkt->hasip4, &pkt->hasip6,
&pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off,
&pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info);
}
void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
- bool *isip4, bool *isip6,
- bool *isudp, bool *istcp)
+ bool *hasip4, bool *hasip6,
+ EthL4HdrProto *l4hdr_proto)
{
assert(pkt);
- *isip4 = pkt->isip4;
- *isip6 = pkt->isip6;
- *isudp = pkt->isudp;
- *istcp = pkt->istcp;
+ *hasip4 = pkt->hasip4;
+ *hasip6 = pkt->hasip6;
+ *l4hdr_proto = pkt->l4hdr_info.proto;
}
size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt)
@@ -333,58 +326,58 @@
switch (type) {
case NetPktRssIpV4:
- assert(pkt->isip4);
+ assert(pkt->hasip4);
trace_net_rx_pkt_rss_ip4();
_net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
break;
case NetPktRssIpV4Tcp:
- assert(pkt->isip4);
- assert(pkt->istcp);
+ assert(pkt->hasip4);
+ assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
trace_net_rx_pkt_rss_ip4_tcp();
_net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
_net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
break;
case NetPktRssIpV6Tcp:
- assert(pkt->isip6);
- assert(pkt->istcp);
+ assert(pkt->hasip6);
+ assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
trace_net_rx_pkt_rss_ip6_tcp();
_net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
_net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
break;
case NetPktRssIpV6:
- assert(pkt->isip6);
+ assert(pkt->hasip6);
trace_net_rx_pkt_rss_ip6();
_net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
break;
case NetPktRssIpV6Ex:
- assert(pkt->isip6);
+ assert(pkt->hasip6);
trace_net_rx_pkt_rss_ip6_ex();
_net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
break;
case NetPktRssIpV6TcpEx:
- assert(pkt->isip6);
- assert(pkt->istcp);
+ assert(pkt->hasip6);
+ assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP);
trace_net_rx_pkt_rss_ip6_ex_tcp();
_net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
_net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length);
break;
case NetPktRssIpV4Udp:
- assert(pkt->isip4);
- assert(pkt->isudp);
+ assert(pkt->hasip4);
+ assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
trace_net_rx_pkt_rss_ip4_udp();
_net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length);
_net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
break;
case NetPktRssIpV6Udp:
- assert(pkt->isip6);
- assert(pkt->isudp);
+ assert(pkt->hasip6);
+ assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
trace_net_rx_pkt_rss_ip6_udp();
_net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length);
_net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
break;
case NetPktRssIpV6UdpEx:
- assert(pkt->isip6);
- assert(pkt->isudp);
+ assert(pkt->hasip6);
+ assert(pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP);
trace_net_rx_pkt_rss_ip6_ex_udp();
_net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length);
_net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length);
@@ -406,7 +399,7 @@
{
assert(pkt);
- if (pkt->isip4) {
+ if (pkt->hasip4) {
return be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_id);
}
@@ -417,7 +410,7 @@
{
assert(pkt);
- if (pkt->istcp) {
+ if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
return TCP_HEADER_FLAGS(&pkt->l4hdr_info.hdr.tcp) & TCP_FLAG_ACK;
}
@@ -428,7 +421,7 @@
{
assert(pkt);
- if (pkt->istcp) {
+ if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_TCP) {
return pkt->l4hdr_info.has_tcp_data;
}
@@ -465,6 +458,13 @@
iov_to_buf(iov, iovcnt, 0, &pkt->virt_hdr, sizeof pkt->virt_hdr);
}
+void net_rx_pkt_unset_vhdr(struct NetRxPkt *pkt)
+{
+ assert(pkt);
+
+ memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));
+}
+
bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt)
{
assert(pkt);
@@ -472,13 +472,6 @@
return pkt->ehdr_buf_len ? true : false;
}
-bool net_rx_pkt_has_virt_hdr(struct NetRxPkt *pkt)
-{
- assert(pkt);
-
- return pkt->has_virt_hdr;
-}
-
uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt)
{
assert(pkt);
@@ -494,7 +487,7 @@
trace_net_rx_pkt_l3_csum_validate_entry();
- if (!pkt->isip4) {
+ if (!pkt->hasip4) {
trace_net_rx_pkt_l3_csum_validate_not_ip4();
return false;
}
@@ -525,8 +518,8 @@
trace_net_rx_pkt_l4_csum_calc_entry();
- if (pkt->isip4) {
- if (pkt->isudp) {
+ if (pkt->hasip4) {
+ if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
trace_net_rx_pkt_l4_csum_calc_ip4_udp();
} else {
@@ -539,7 +532,7 @@
csl, &cso);
trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl);
} else {
- if (pkt->isudp) {
+ if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP) {
csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen);
trace_net_rx_pkt_l4_csum_calc_ip6_udp();
} else {
@@ -573,17 +566,19 @@
trace_net_rx_pkt_l4_csum_validate_entry();
- if (!pkt->istcp && !pkt->isudp) {
+ if (pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_TCP &&
+ pkt->l4hdr_info.proto != ETH_L4_HDR_PROTO_UDP) {
trace_net_rx_pkt_l4_csum_validate_not_xxp();
return false;
}
- if (pkt->isudp && (pkt->l4hdr_info.hdr.udp.uh_sum == 0)) {
+ if (pkt->l4hdr_info.proto == ETH_L4_HDR_PROTO_UDP &&
+ pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum();
return false;
}
- if (pkt->isip4 && pkt->ip4hdr_info.fragment) {
+ if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
trace_net_rx_pkt_l4_csum_validate_ip4_fragment();
return false;
}
@@ -604,22 +599,27 @@
trace_net_rx_pkt_l4_csum_fix_entry();
- if (pkt->istcp) {
+ switch (pkt->l4hdr_info.proto) {
+ case ETH_L4_HDR_PROTO_TCP:
l4_cso = offsetof(struct tcp_header, th_sum);
trace_net_rx_pkt_l4_csum_fix_tcp(l4_cso);
- } else if (pkt->isudp) {
+ break;
+
+ case ETH_L4_HDR_PROTO_UDP:
if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) {
trace_net_rx_pkt_l4_csum_fix_udp_with_no_checksum();
return false;
}
l4_cso = offsetof(struct udp_header, uh_sum);
trace_net_rx_pkt_l4_csum_fix_udp(l4_cso);
- } else {
+ break;
+
+ default:
trace_net_rx_pkt_l4_csum_fix_not_xxp();
return false;
}
- if (pkt->isip4 && pkt->ip4hdr_info.fragment) {
+ if (pkt->hasip4 && pkt->ip4hdr_info.fragment) {
trace_net_rx_pkt_l4_csum_fix_ip4_fragment();
return false;
}
diff --git a/hw/net/net_rx_pkt.h b/hw/net/net_rx_pkt.h
index 048e346..d00b484 100644
--- a/hw/net/net_rx_pkt.h
+++ b/hw/net/net_rx_pkt.h
@@ -37,10 +37,9 @@
* Init function for rx packet functionality
*
* @pkt: packet pointer
- * @has_virt_hdr: device uses virtio header
*
*/
-void net_rx_pkt_init(struct NetRxPkt **pkt, bool has_virt_hdr);
+void net_rx_pkt_init(struct NetRxPkt **pkt);
/**
* returns total length of data attached to rx context
@@ -67,15 +66,14 @@
* fetches packet analysis results
*
* @pkt: packet
- * @isip4: whether the packet given is IPv4
- * @isip6: whether the packet given is IPv6
- * @isudp: whether the packet given is UDP
- * @istcp: whether the packet given is TCP
+ * @hasip4: whether the packet has an IPv4 header
+ * @hasip6: whether the packet has an IPv6 header
+ * @l4hdr_proto: protocol of L4 header
*
*/
void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
- bool *isip4, bool *isip6,
- bool *isudp, bool *istcp);
+ bool *hasip4, bool *hasip6,
+ EthL4HdrProto *l4hdr_proto);
/**
* fetches L3 header offset
@@ -215,15 +213,6 @@
bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt);
/**
- * notifies caller if the packet has virtio header
- *
- * @pkt: packet
- * @ret: true if packet has virtio header, false otherwize
- *
- */
-bool net_rx_pkt_has_virt_hdr(struct NetRxPkt *pkt);
-
-/**
* attach scatter-gather data to rx packet
*
* @pkt: packet
@@ -323,6 +312,14 @@
const struct iovec *iov, int iovcnt);
/**
+ * unset vhdr data from packet context
+ *
+ * @pkt: packet
+ *
+ */
+void net_rx_pkt_unset_vhdr(struct NetRxPkt *pkt);
+
+/**
* save packet type in packet context
*
* @pkt: packet
diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
index 2533ea2..986a3ad 100644
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -35,7 +35,6 @@
PCIDevice *pci_dev;
struct virtio_net_hdr virt_hdr;
- bool has_virt_hdr;
struct iovec *raw;
uint32_t raw_frags;
@@ -54,12 +53,10 @@
uint16_t hdr_len;
eth_pkt_types_e packet_type;
uint8_t l4proto;
-
- bool is_loopback;
};
void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
- uint32_t max_frags, bool has_virt_hdr)
+ uint32_t max_frags)
{
struct NetTxPkt *p = g_malloc0(sizeof *p);
@@ -71,10 +68,8 @@
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
- p->has_virt_hdr = has_virt_hdr;
p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
- p->vec[NET_TX_PKT_VHDR_FRAG].iov_len =
- p->has_virt_hdr ? sizeof p->virt_hdr : 0;
+ p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr;
p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;
@@ -304,10 +299,11 @@
return rc;
}
-void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
+bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
bool csum_enable, uint32_t gso_size)
{
struct tcp_hdr l4hdr;
+ size_t bytes_read;
assert(pkt);
/* csum has to be enabled if tso is. */
@@ -328,8 +324,13 @@
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
- iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
- 0, &l4hdr, sizeof(l4hdr));
+ bytes_read = iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
+ pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr));
+ if (bytes_read < sizeof(l4hdr) ||
+ l4hdr.th_off * sizeof(uint32_t) < sizeof(l4hdr)) {
+ return false;
+ }
+
pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
pkt->virt_hdr.gso_size = gso_size;
break;
@@ -341,11 +342,17 @@
if (csum_enable) {
switch (pkt->l4proto) {
case IP_PROTO_TCP:
+ if (pkt->payload_len < sizeof(struct tcp_hdr)) {
+ return false;
+ }
pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
pkt->virt_hdr.csum_start = pkt->hdr_len;
pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
break;
case IP_PROTO_UDP:
+ if (pkt->payload_len < sizeof(struct udp_hdr)) {
+ return false;
+ }
pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
pkt->virt_hdr.csum_start = pkt->hdr_len;
pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
@@ -354,6 +361,8 @@
break;
}
}
+
+ return true;
}
void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
@@ -464,15 +473,14 @@
pkt->l4proto = 0;
}
-static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt)
+static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt,
+ struct iovec *iov, uint32_t iov_len,
+ uint16_t csl)
{
- struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
uint32_t csum_cntr;
uint16_t csum = 0;
uint32_t cso;
/* num of iovec without vhdr */
- uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1;
- uint16_t csl;
size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
uint16_t l3_proto = eth_get_l3_proto(iov, 1, iov->iov_len);
@@ -480,8 +488,6 @@
iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
/* Calculate L4 TCP/UDP checksum */
- csl = pkt->payload_len;
-
csum_cntr = 0;
cso = 0;
/* add pseudo header to csum */
@@ -504,23 +510,16 @@
iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}
-enum {
- NET_TX_PKT_FRAGMENT_L2_HDR_POS = 0,
- NET_TX_PKT_FRAGMENT_L3_HDR_POS,
- NET_TX_PKT_FRAGMENT_HEADER_NUM
-};
-
#define NET_MAX_FRAG_SG_LIST (64)
static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
- int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
+ int *src_idx, size_t *src_offset, size_t src_len,
+ struct iovec *dst, int *dst_idx)
{
size_t fetched = 0;
struct iovec *src = pkt->vec;
- *dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM;
-
- while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) {
+ while (fetched < src_len) {
/* no more place in fragment iov */
if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
@@ -535,7 +534,7 @@
dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
- IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched);
+ src_len - fetched);
*src_offset += dst[*dst_idx].iov_len;
fetched += dst[*dst_idx].iov_len;
@@ -551,71 +550,250 @@
return fetched;
}
-static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt,
- NetClientState *nc, const struct iovec *iov, int iov_cnt)
+static void net_tx_pkt_sendv(
+ void *opaque, const struct iovec *iov, int iov_cnt,
+ const struct iovec *virt_iov, int virt_iov_cnt)
{
- if (pkt->is_loopback) {
- qemu_receive_packet_iov(nc, iov, iov_cnt);
+ NetClientState *nc = opaque;
+
+ if (qemu_get_using_vnet_hdr(nc->peer)) {
+ qemu_sendv_packet(nc, virt_iov, virt_iov_cnt);
} else {
qemu_sendv_packet(nc, iov, iov_cnt);
}
}
-static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
- NetClientState *nc)
+static bool net_tx_pkt_tcp_fragment_init(struct NetTxPkt *pkt,
+ struct iovec *fragment,
+ int *pl_idx,
+ size_t *l4hdr_len,
+ int *src_idx,
+ size_t *src_offset,
+ size_t *src_len)
{
+ struct iovec *l4 = fragment + NET_TX_PKT_PL_START_FRAG;
+ size_t bytes_read = 0;
+ struct tcp_hdr *th;
+
+ if (!pkt->payload_frags) {
+ return false;
+ }
+
+ l4->iov_len = pkt->virt_hdr.hdr_len - pkt->hdr_len;
+ l4->iov_base = g_malloc(l4->iov_len);
+
+ *src_idx = NET_TX_PKT_PL_START_FRAG;
+ while (pkt->vec[*src_idx].iov_len < l4->iov_len - bytes_read) {
+ memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
+ pkt->vec[*src_idx].iov_len);
+
+ bytes_read += pkt->vec[*src_idx].iov_len;
+
+ (*src_idx)++;
+ if (*src_idx >= pkt->payload_frags + NET_TX_PKT_PL_START_FRAG) {
+ g_free(l4->iov_base);
+ return false;
+ }
+ }
+
+ *src_offset = l4->iov_len - bytes_read;
+ memcpy((char *)l4->iov_base + bytes_read, pkt->vec[*src_idx].iov_base,
+ *src_offset);
+
+ th = l4->iov_base;
+ th->th_flags &= ~(TH_FIN | TH_PUSH);
+
+ *pl_idx = NET_TX_PKT_PL_START_FRAG + 1;
+ *l4hdr_len = l4->iov_len;
+ *src_len = pkt->virt_hdr.gso_size;
+
+ return true;
+}
+
+static void net_tx_pkt_tcp_fragment_deinit(struct iovec *fragment)
+{
+ g_free(fragment[NET_TX_PKT_PL_START_FRAG].iov_base);
+}
+
+static void net_tx_pkt_tcp_fragment_fix(struct NetTxPkt *pkt,
+ struct iovec *fragment,
+ size_t fragment_len,
+ uint8_t gso_type)
+{
+ struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
+ struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
+ struct ip_header *ip = l3hdr->iov_base;
+ struct ip6_header *ip6 = l3hdr->iov_base;
+ size_t len = l3hdr->iov_len + l4hdr->iov_len + fragment_len;
+
+ switch (gso_type) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ ip->ip_len = cpu_to_be16(len);
+ eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
+ break;
+
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ len -= sizeof(struct ip6_header);
+ ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = cpu_to_be16(len);
+ break;
+ }
+}
+
+static void net_tx_pkt_tcp_fragment_advance(struct NetTxPkt *pkt,
+ struct iovec *fragment,
+ size_t fragment_len,
+ uint8_t gso_type)
+{
+ struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
+ struct iovec *l4hdr = fragment + NET_TX_PKT_PL_START_FRAG;
+ struct ip_header *ip = l3hdr->iov_base;
+ struct tcp_hdr *th = l4hdr->iov_base;
+
+ if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4) {
+ ip->ip_id = cpu_to_be16(be16_to_cpu(ip->ip_id) + 1);
+ }
+
+ th->th_seq = cpu_to_be32(be32_to_cpu(th->th_seq) + fragment_len);
+ th->th_flags &= ~TH_CWR;
+}
+
+static void net_tx_pkt_udp_fragment_init(struct NetTxPkt *pkt,
+ int *pl_idx,
+ size_t *l4hdr_len,
+ int *src_idx, size_t *src_offset,
+ size_t *src_len)
+{
+ *pl_idx = NET_TX_PKT_PL_START_FRAG;
+ *l4hdr_len = 0;
+ *src_idx = NET_TX_PKT_PL_START_FRAG;
+ *src_offset = 0;
+ *src_len = IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size);
+}
+
+static void net_tx_pkt_udp_fragment_fix(struct NetTxPkt *pkt,
+ struct iovec *fragment,
+ size_t fragment_offset,
+ size_t fragment_len)
+{
+ bool more_frags = fragment_offset + fragment_len < pkt->payload_len;
+ uint16_t orig_flags;
+ struct iovec *l3hdr = fragment + NET_TX_PKT_L3HDR_FRAG;
+ struct ip_header *ip = l3hdr->iov_base;
+ uint16_t frag_off_units = fragment_offset / IP_FRAG_UNIT_SIZE;
+ uint16_t new_ip_off;
+
+ assert(fragment_offset % IP_FRAG_UNIT_SIZE == 0);
+ assert((frag_off_units & ~IP_OFFMASK) == 0);
+
+ orig_flags = be16_to_cpu(ip->ip_off) & ~(IP_OFFMASK | IP_MF);
+ new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
+ ip->ip_off = cpu_to_be16(new_ip_off);
+ ip->ip_len = cpu_to_be16(l3hdr->iov_len + fragment_len);
+
+ eth_fix_ip4_checksum(l3hdr->iov_base, l3hdr->iov_len);
+}
+
+static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
+ NetTxPktCallback callback,
+ void *context)
+{
+ uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
+
struct iovec fragment[NET_MAX_FRAG_SG_LIST];
- size_t fragment_len = 0;
- bool more_frags = false;
+ size_t fragment_len;
+ size_t l4hdr_len;
+ size_t src_len;
- /* some pointers for shorter code */
- void *l2_iov_base, *l3_iov_base;
- size_t l2_iov_len, l3_iov_len;
- int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx;
- size_t src_offset = 0;
+ int src_idx, dst_idx, pl_idx;
+ size_t src_offset;
size_t fragment_offset = 0;
-
- l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base;
- l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len;
- l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
- l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
+ struct virtio_net_hdr virt_hdr = {
+ .flags = pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM ?
+ VIRTIO_NET_HDR_F_DATA_VALID : 0
+ };
/* Copy headers */
- fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base;
- fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len;
- fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base;
- fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len;
+ fragment[NET_TX_PKT_VHDR_FRAG].iov_base = &virt_hdr;
+ fragment[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof(virt_hdr);
+ fragment[NET_TX_PKT_L2HDR_FRAG] = pkt->vec[NET_TX_PKT_L2HDR_FRAG];
+ fragment[NET_TX_PKT_L3HDR_FRAG] = pkt->vec[NET_TX_PKT_L3HDR_FRAG];
+ switch (gso_type) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ if (!net_tx_pkt_tcp_fragment_init(pkt, fragment, &pl_idx, &l4hdr_len,
+ &src_idx, &src_offset, &src_len)) {
+ return false;
+ }
+ break;
+
+ case VIRTIO_NET_HDR_GSO_UDP:
+ net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
+ pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
+ pkt->payload_len);
+ net_tx_pkt_udp_fragment_init(pkt, &pl_idx, &l4hdr_len,
+ &src_idx, &src_offset, &src_len);
+ break;
+
+ default:
+ abort();
+ }
/* Put as much data as possible and send */
- do {
- fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
- fragment, &dst_idx);
+ while (true) {
+ dst_idx = pl_idx;
+ fragment_len = net_tx_pkt_fetch_fragment(pkt,
+ &src_idx, &src_offset, src_len, fragment, &dst_idx);
+ if (!fragment_len) {
+ break;
+ }
- more_frags = (fragment_offset + fragment_len < pkt->payload_len);
+ switch (gso_type) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ net_tx_pkt_tcp_fragment_fix(pkt, fragment, fragment_len, gso_type);
+ net_tx_pkt_do_sw_csum(pkt, fragment + NET_TX_PKT_L2HDR_FRAG,
+ dst_idx - NET_TX_PKT_L2HDR_FRAG,
+ l4hdr_len + fragment_len);
+ break;
- eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
- l3_iov_len, fragment_len, fragment_offset, more_frags);
+ case VIRTIO_NET_HDR_GSO_UDP:
+ net_tx_pkt_udp_fragment_fix(pkt, fragment, fragment_offset,
+ fragment_len);
+ break;
+ }
- eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);
+ callback(context,
+ fragment + NET_TX_PKT_L2HDR_FRAG, dst_idx - NET_TX_PKT_L2HDR_FRAG,
+ fragment + NET_TX_PKT_VHDR_FRAG, dst_idx - NET_TX_PKT_VHDR_FRAG);
- net_tx_pkt_sendv(pkt, nc, fragment, dst_idx);
+ if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
+ gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
+ net_tx_pkt_tcp_fragment_advance(pkt, fragment, fragment_len,
+ gso_type);
+ }
fragment_offset += fragment_len;
+ }
- } while (fragment_len && more_frags);
+ if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
+ gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
+ net_tx_pkt_tcp_fragment_deinit(fragment);
+ }
return true;
}
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
- assert(pkt);
+ bool offload = qemu_get_using_vnet_hdr(nc->peer);
+ return net_tx_pkt_send_custom(pkt, offload, net_tx_pkt_sendv, nc);
+}
- if (!pkt->has_virt_hdr &&
- pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- net_tx_pkt_do_sw_csum(pkt);
- }
+bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
+ NetTxPktCallback callback, void *context)
+{
+ assert(pkt);
/*
* Since underlying infrastructure does not support IP datagrams longer
@@ -629,26 +807,22 @@
}
}
- if (pkt->has_virt_hdr ||
- pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
+ if (offload || pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
+ if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
+ pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
+ pkt->payload_len);
+ }
+
net_tx_pkt_fix_ip6_payload_len(pkt);
- net_tx_pkt_sendv(pkt, nc, pkt->vec,
- pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
+ callback(context, pkt->vec + NET_TX_PKT_L2HDR_FRAG,
+ pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_L2HDR_FRAG,
+ pkt->vec + NET_TX_PKT_VHDR_FRAG,
+ pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - NET_TX_PKT_VHDR_FRAG);
return true;
}
- return net_tx_pkt_do_sw_fragmentation(pkt, nc);
-}
-
-bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
-{
- bool res;
-
- pkt->is_loopback = true;
- res = net_tx_pkt_send(pkt, nc);
- pkt->is_loopback = false;
-
- return res;
+ return net_tx_pkt_do_sw_fragmentation(pkt, callback, context);
}
void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt)
diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h
index 4ec8bbe..f57b4e0 100644
--- a/hw/net/net_tx_pkt.h
+++ b/hw/net/net_tx_pkt.h
@@ -26,16 +26,17 @@
struct NetTxPkt;
+typedef void (* NetTxPktCallback)(void *, const struct iovec *, int, const struct iovec *, int);
+
/**
* Init function for tx packet functionality
*
* @pkt: packet pointer
* @pci_dev: PCI device processing this packet
* @max_frags: max tx ip fragments
- * @has_virt_hdr: device uses virtio header.
*/
void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
- uint32_t max_frags, bool has_virt_hdr);
+ uint32_t max_frags);
/**
* Clean all tx packet resources.
@@ -59,9 +60,10 @@
* @tso_enable: TSO enabled
* @csum_enable: CSO enabled
* @gso_size: MSS size for TSO
+ * @ret: operation result
*
*/
-void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
+bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
bool csum_enable, uint32_t gso_size);
/**
@@ -161,15 +163,16 @@
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc);
/**
-* Redirect packet directly to receive path (emulate loopback phy).
-* Handles sw offloads if vhdr is not supported.
-*
-* @pkt: packet
-* @nc: NetClientState
-* @ret: operation result
-*
-*/
-bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc);
+ * Send packet with a custom function.
+ *
+ * @pkt: packet
+ * @offload: whether the callback implements offloading
+ * @callback: a function to be called back for each transformed packet
+ * @context: a pointer to be passed to the callback.
+ * @ret: operation result
+ */
+bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
+ NetTxPktCallback callback, void *context);
/**
* parse raw packet data and analyze offload requirements.
diff --git a/hw/net/trace-events b/hw/net/trace-events
index 4c0ec3f..6575341 100644
--- a/hw/net/trace-events
+++ b/hw/net/trace-events
@@ -61,7 +61,7 @@
pcnet_ioport_write(void *opaque, uint64_t addr, uint64_t data, unsigned size) "opaque=%p addr=0x%"PRIx64" data=0x%"PRIx64" size=%d"
# net_rx_pkt.c
-net_rx_pkt_parsed(bool ip4, bool ip6, bool udp, bool tcp, size_t l3o, size_t l4o, size_t l5o) "RX packet parsed: ip4: %d, ip6: %d, udp: %d, tcp: %d, l3 offset: %zu, l4 offset: %zu, l5 offset: %zu"
+net_rx_pkt_parsed(bool ip4, bool ip6, int l4proto, size_t l3o, size_t l4o, size_t l5o) "RX packet parsed: ip4: %d, ip6: %d, l4 protocol: %d, l3 offset: %zu, l4 offset: %zu, l5 offset: %zu"
net_rx_pkt_l4_csum_validate_entry(void) "Starting L4 checksum validation"
net_rx_pkt_l4_csum_validate_not_xxp(void) "Not a TCP/UDP packet"
net_rx_pkt_l4_csum_validate_udp_with_no_checksum(void) "UDP packet without checksum"
@@ -165,8 +165,8 @@
e1000e_rx_set_rctl(uint32_t rctl) "RCTL = 0x%x"
e1000e_rx_receive_iov(int iovcnt) "Received vector of %d fragments"
e1000e_rx_flt_dropped(void) "Received packet dropped by RX filter"
-e1000e_rx_written_to_guest(uint32_t causes) "Received packet written to guest (ICR causes %u)"
-e1000e_rx_not_written_to_guest(uint32_t causes) "Received packet NOT written to guest (ICR causes %u)"
+e1000e_rx_written_to_guest(int queue_idx) "Received packet written to guest (queue %d)"
+e1000e_rx_not_written_to_guest(int queue_idx) "Received packet NOT written to guest (queue %d)"
e1000e_rx_interrupt_set(uint32_t causes) "Receive interrupt set (ICR causes %u)"
e1000e_rx_interrupt_delayed(uint32_t causes) "Receive interrupt delayed (ICR causes %u)"
e1000e_rx_set_cso(int cso_state) "RX CSO state set to %d"
@@ -177,18 +177,16 @@
e1000e_rx_rss_started(void) "Starting RSS processing"
e1000e_rx_rss_disabled(void) "RSS is disabled"
e1000e_rx_rss_type(uint32_t type) "RSS type is %u"
-e1000e_rx_rss_ip4(bool isfragment, bool istcp, uint32_t mrqc, bool tcpipv4_enabled, bool ipv4_enabled) "RSS IPv4: fragment %d, tcp %d, mrqc 0x%X, tcpipv4 enabled %d, ipv4 enabled %d"
+e1000e_rx_rss_ip4(int l4hdr_proto, uint32_t mrqc, bool tcpipv4_enabled, bool ipv4_enabled) "RSS IPv4: L4 header protocol %d, mrqc 0x%X, tcpipv4 enabled %d, ipv4 enabled %d"
e1000e_rx_rss_ip6_rfctl(uint32_t rfctl) "RSS IPv6: rfctl 0x%X"
-e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, bool istcp, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, tcp %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6 enabled %d, ipv6ex enabled %d, ipv6 enabled %d"
-e1000e_rx_rss_dispatched_to_queue(int queue_idx) "Packet being dispatched to queue %d"
+e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, int l4hdr_proto, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, L4 header protocol %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6 enabled %d, ipv6ex enabled %d, ipv6 enabled %d"
-e1000e_rx_metadata_protocols(bool isip4, bool isip6, bool isudp, bool istcp) "protocols: ip4: %d, ip6: %d, udp: %d, tcp: %d"
+e1000e_rx_metadata_protocols(bool hasip4, bool hasip6, int l4hdr_protocol) "protocols: ip4: %d, ip6: %d, l4hdr: %d"
e1000e_rx_metadata_vlan(uint16_t vlan_tag) "VLAN tag is 0x%X"
e1000e_rx_metadata_rss(uint32_t rss, uint32_t mrq) "RSS data: rss: 0x%X, mrq: 0x%X"
e1000e_rx_metadata_ip_id(uint16_t ip_id) "the IPv4 ID is 0x%X"
e1000e_rx_metadata_ack(void) "the packet is TCP ACK"
e1000e_rx_metadata_pkt_type(uint32_t pkt_type) "the packet type is %u"
-e1000e_rx_metadata_no_virthdr(void) "the packet has no virt-header"
e1000e_rx_metadata_virthdr_no_csum_info(void) "virt-header does not contain checksum info"
e1000e_rx_metadata_l3_cso_disabled(void) "IP4 CSO is disabled"
e1000e_rx_metadata_l4_cso_disabled(void) "TCP/UDP CSO is disabled"
@@ -201,10 +199,8 @@
e1000e_vlan_vet(uint16_t vet) "Setting VLAN ethernet type 0x%X"
e1000e_irq_msi_notify(uint32_t cause) "MSI notify 0x%x"
-e1000e_irq_throttling_no_pending_interrupts(void) "No pending interrupts to notify"
e1000e_irq_msi_notify_postponed(void) "Sending MSI postponed by ITR"
e1000e_irq_legacy_notify_postponed(void) "Raising legacy IRQ postponed by ITR"
-e1000e_irq_throttling_no_pending_vec(int idx) "No pending interrupts for vector %d"
e1000e_irq_msix_notify_postponed_vec(int idx) "Sending MSI-X postponed by EITR[%d]"
e1000e_irq_legacy_notify(bool level) "IRQ line state: %d"
e1000e_irq_msix_notify_vec(uint32_t vector) "MSI-X notify vector 0x%x"
@@ -253,7 +249,7 @@
# e1000e.c
e1000e_cb_pci_realize(void) "E1000E PCI realize entry"
e1000e_cb_pci_uninit(void) "E1000E PCI unit entry"
-e1000e_cb_qdev_reset(void) "E1000E qdev reset entry"
+e1000e_cb_qdev_reset_hold(void) "E1000E qdev reset hold"
e1000e_cb_pre_save(void) "E1000E pre save entry"
e1000e_cb_post_load(void) "E1000E post load entry"
@@ -274,6 +270,38 @@
e1000e_mac_set_permanent(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Set permanent MAC: %02x:%02x:%02x:%02x:%02x:%02x"
e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d"
+# igb.c
+igb_write_config(uint32_t address, uint32_t val, int len) "CONFIG write 0x%"PRIx32", value: 0x%"PRIx32", len: %"PRId32
+igbvf_write_config(uint32_t address, uint32_t val, int len) "CONFIG write 0x%"PRIx32", value: 0x%"PRIx32", len: %"PRId32
+
+# igb_core.c
+igb_core_mdic_read(uint32_t addr, uint32_t data) "MDIC READ: PHY[%u] = 0x%x"
+igb_core_mdic_read_unhandled(uint32_t addr) "MDIC READ: PHY[%u] UNHANDLED"
+igb_core_mdic_write(uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u] = 0x%x"
+igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED"
+
+igb_rx_desc_buff_size(uint32_t b) "buffer size: %u"
+igb_rx_desc_buff_write(uint64_t addr, uint16_t offset, const void* source, uint32_t len) "addr: 0x%"PRIx64", offset: %u, from: %p, length: %u"
+
+igb_rx_metadata_rss(uint32_t rss) "RSS data: 0x%X"
+
+igb_irq_icr_clear_gpie_nsicr(void) "Clearing ICR on read due to GPIE.NSICR enabled"
+igb_irq_icr_write(uint32_t bits, uint32_t old_icr, uint32_t new_icr) "Clearing ICR bits 0x%x: 0x%x --> 0x%x"
+igb_irq_set_iam(uint32_t icr) "Update IAM: 0x%x"
+igb_irq_read_iam(uint32_t icr) "Current IAM: 0x%x"
+igb_irq_write_eics(uint32_t val, bool msix) "Update EICS: 0x%x MSI-X: %d"
+igb_irq_write_eims(uint32_t val, bool msix) "Update EIMS: 0x%x MSI-X: %d"
+igb_irq_write_eimc(uint32_t val, uint32_t eims, bool msix) "Update EIMC: 0x%x EIMS: 0x%x MSI-X: %d"
+igb_irq_write_eiac(uint32_t val) "Update EIAC: 0x%x"
+igb_irq_write_eiam(uint32_t val, bool msix) "Update EIAM: 0x%x MSI-X: %d"
+igb_irq_write_eicr(uint32_t val, bool msix) "Update EICR: 0x%x MSI-X: %d"
+igb_irq_eitr_set(uint32_t eitr_num, uint32_t val) "EITR[%u] = 0x%x"
+igb_set_pfmailbox(uint32_t vf_num, uint32_t val) "PFMailbox[%d]: 0x%x"
+igb_set_vfmailbox(uint32_t vf_num, uint32_t val) "VFMailbox[%d]: 0x%x"
+
+# igbvf.c
+igbvf_wrn_io_addr_unknown(uint64_t addr) "IO unknown register 0x%"PRIx64
+
# spapr_llan.c
spapr_vlan_get_rx_bd_from_pool_found(int pool, int32_t count, uint32_t rx_bufs) "pool=%d count=%"PRId32" rxbufs=%"PRIu32
spapr_vlan_get_rx_bd_from_page(int buf_ptr, uint64_t bd) "use_buf_ptr=%d bd=0x%016"PRIx64
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 09d5c7a..53e1c32 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1746,39 +1746,61 @@
return 0;
}
-static uint8_t virtio_net_get_hash_type(bool isip4,
- bool isip6,
- bool isudp,
- bool istcp,
+static uint8_t virtio_net_get_hash_type(bool hasip4,
+ bool hasip6,
+ EthL4HdrProto l4hdr_proto,
uint32_t types)
{
- if (isip4) {
- if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
- return NetPktRssIpV4Tcp;
+ if (hasip4) {
+ switch (l4hdr_proto) {
+ case ETH_L4_HDR_PROTO_TCP:
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
+ return NetPktRssIpV4Tcp;
+ }
+ break;
+
+ case ETH_L4_HDR_PROTO_UDP:
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
+ return NetPktRssIpV4Udp;
+ }
+ break;
+
+ default:
+ break;
}
- if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
- return NetPktRssIpV4Udp;
- }
+
if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
return NetPktRssIpV4;
}
- } else if (isip6) {
- uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
- VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
+ } else if (hasip6) {
+ switch (l4hdr_proto) {
+ case ETH_L4_HDR_PROTO_TCP:
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) {
+ return NetPktRssIpV6TcpEx;
+ }
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
+ return NetPktRssIpV6Tcp;
+ }
+ break;
- if (istcp && (types & mask)) {
- return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
- NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
+ case ETH_L4_HDR_PROTO_UDP:
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) {
+ return NetPktRssIpV6UdpEx;
+ }
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
+ return NetPktRssIpV6Udp;
+ }
+ break;
+
+ default:
+ break;
}
- mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
- if (isudp && (types & mask)) {
- return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
- NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
+
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) {
+ return NetPktRssIpV6Ex;
}
- mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
- if (types & mask) {
- return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
- NetPktRssIpV6Ex : NetPktRssIpV6;
+ if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+ return NetPktRssIpV6;
}
}
return 0xff;
@@ -1800,7 +1822,8 @@
struct NetRxPkt *pkt = n->rx_pkt;
uint8_t net_hash_type;
uint32_t hash;
- bool isip4, isip6, isudp, istcp;
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
VIRTIO_NET_HASH_REPORT_IPv4,
VIRTIO_NET_HASH_REPORT_TCPv4,
@@ -1815,14 +1838,8 @@
net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
size - n->host_hdr_len);
- net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
- if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
- istcp = isudp = false;
- }
- if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
- istcp = isudp = false;
- }
- net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
+ net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto,
n->rss_data.hash_types);
if (net_hash_type > NetPktRssIpV6UdpEx) {
if (n->rss_data.populate_hash) {
@@ -3718,7 +3735,7 @@
QTAILQ_INIT(&n->rsc_chains);
n->qdev = dev;
- net_rx_pkt_init(&n->rx_pkt, false);
+ net_rx_pkt_init(&n->rx_pkt);
if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
virtio_net_load_ebpf(n);
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 56559cd..1068b80 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -440,19 +440,19 @@
{
switch (s->offload_mode) {
case VMXNET3_OM_NONE:
- net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
- break;
+ return net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0);
case VMXNET3_OM_CSUM:
- net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
VMW_PKPRN("L4 CSO requested\n");
- break;
+ return net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0);
case VMXNET3_OM_TSO:
- net_tx_pkt_build_vheader(s->tx_pkt, true, true,
- s->cso_or_gso_size);
- net_tx_pkt_update_ip_checksums(s->tx_pkt);
VMW_PKPRN("GSO offload requested.");
+ if (!net_tx_pkt_build_vheader(s->tx_pkt, true, true,
+ s->cso_or_gso_size)) {
+ return false;
+ }
+ net_tx_pkt_update_ip_checksums(s->tx_pkt);
break;
default:
@@ -847,21 +847,20 @@
size_t pkt_len)
{
struct virtio_net_hdr *vhdr;
- bool isip4, isip6, istcp, isudp;
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
uint8_t *data;
int len;
- if (!net_rx_pkt_has_virt_hdr(pkt)) {
- return;
- }
-
vhdr = net_rx_pkt_get_vhdr(pkt);
if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
return;
}
- net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
- if (!(isip4 || isip6) || !(istcp || isudp)) {
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
+ if (!(hasip4 || hasip6) ||
+ (l4hdr_proto != ETH_L4_HDR_PROTO_TCP &&
+ l4hdr_proto != ETH_L4_HDR_PROTO_UDP)) {
return;
}
@@ -889,7 +888,8 @@
struct Vmxnet3_RxCompDesc *rxcd)
{
int csum_ok, is_gso;
- bool isip4, isip6, istcp, isudp;
+ bool hasip4, hasip6;
+ EthL4HdrProto l4hdr_proto;
struct virtio_net_hdr *vhdr;
uint8_t offload_type;
@@ -898,10 +898,6 @@
rxcd->tci = net_rx_pkt_get_vlan_tag(pkt);
}
- if (!net_rx_pkt_has_virt_hdr(pkt)) {
- goto nocsum;
- }
-
vhdr = net_rx_pkt_get_vhdr(pkt);
/*
* Checksum is valid when lower level tell so or when lower level
@@ -919,16 +915,18 @@
goto nocsum;
}
- net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
- if ((!istcp && !isudp) || (!isip4 && !isip6)) {
+ net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
+ if ((l4hdr_proto != ETH_L4_HDR_PROTO_TCP &&
+ l4hdr_proto != ETH_L4_HDR_PROTO_UDP) ||
+ (!hasip4 && !hasip6)) {
goto nocsum;
}
rxcd->cnc = 0;
- rxcd->v4 = isip4 ? 1 : 0;
- rxcd->v6 = isip6 ? 1 : 0;
- rxcd->tcp = istcp ? 1 : 0;
- rxcd->udp = isudp ? 1 : 0;
+ rxcd->v4 = hasip4 ? 1 : 0;
+ rxcd->v6 = hasip6 ? 1 : 0;
+ rxcd->tcp = l4hdr_proto == ETH_L4_HDR_PROTO_TCP;
+ rxcd->udp = l4hdr_proto == ETH_L4_HDR_PROTO_UDP;
rxcd->fcs = rxcd->tuc = rxcd->ipc = 1;
return;
@@ -1521,9 +1519,8 @@
/* Preallocate TX packet wrapper */
VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
- net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
- s->max_tx_frags, s->peer_has_vhdr);
- net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
+ net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
+ net_rx_pkt_init(&s->rx_pkt);
/* Read rings memory locations for RX queues */
for (i = 0; i < s->rxq_num; i++) {
@@ -2402,9 +2399,8 @@
{
VMXNET3State *s = opaque;
- net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
- s->max_tx_frags, s->peer_has_vhdr);
- net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
+ net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
+ net_rx_pkt_init(&s->rx_pkt);
if (s->msix_used) {
vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS);
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 7d92c2d..9bbf659 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -145,7 +145,7 @@
continue;
}
- if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
+ if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) {
xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
net_tx_error(netdev, &txreq, rc);
continue;
@@ -171,7 +171,7 @@
if (txreq.flags & NETTXF_csum_blank) {
/* have read-only mapping -> can't fill checksum in-place */
if (!tmpbuf) {
- tmpbuf = g_malloc(XC_PAGE_SIZE);
+ tmpbuf = g_malloc(XEN_PAGE_SIZE);
}
memcpy(tmpbuf, page + txreq.offset, txreq.size);
net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL);
@@ -181,7 +181,7 @@
qemu_send_packet(qemu_get_queue(netdev->nic),
page + txreq.offset, txreq.size);
}
- xen_be_unmap_grant_ref(&netdev->xendev, page);
+ xen_be_unmap_grant_ref(&netdev->xendev, page, txreq.gref);
net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
}
if (!netdev->tx_work) {
@@ -243,9 +243,9 @@
if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
return 0;
}
- if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
+ if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) {
xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
- (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
+ (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN);
return -1;
}
@@ -261,7 +261,7 @@
return -1;
}
memcpy(page + NET_IP_ALIGN, buf, size);
- xen_be_unmap_grant_ref(&netdev->xendev, page);
+ xen_be_unmap_grant_ref(&netdev->xendev, page, rxreq.gref);
net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);
return size;
@@ -343,12 +343,13 @@
netdev->rx_ring_ref,
PROT_READ | PROT_WRITE);
if (!netdev->rxs) {
- xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
+ xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs,
+ netdev->tx_ring_ref);
netdev->txs = NULL;
return -1;
}
- BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
- BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);
+ BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE);
+ BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE);
xen_be_bind_evtchn(&netdev->xendev);
@@ -368,11 +369,13 @@
xen_pv_unbind_evtchn(&netdev->xendev);
if (netdev->txs) {
- xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
+ xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs,
+ netdev->tx_ring_ref);
netdev->txs = NULL;
}
if (netdev->rxs) {
- xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
+ xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs,
+ netdev->rx_ring_ref);
netdev->rxs = NULL;
}
}
diff --git a/hw/openrisc/boot.c b/hw/openrisc/boot.c
index 007e80c..55475aa 100644
--- a/hw/openrisc/boot.c
+++ b/hw/openrisc/boot.c
@@ -15,6 +15,7 @@
#include "sysemu/device_tree.h"
#include "sysemu/qtest.h"
#include "sysemu/reset.h"
+#include "qemu/error-report.h"
#include <libfdt.h>
diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c
index 6664783..7dfd20a 100644
--- a/hw/pci-bridge/cxl_root_port.c
+++ b/hw/pci-bridge/cxl_root_port.c
@@ -22,6 +22,7 @@
#include "qemu/range.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pcie_port.h"
+#include "hw/pci/msi.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
@@ -29,6 +30,10 @@
#define CXL_ROOT_PORT_DID 0x7075
+#define CXL_RP_MSI_OFFSET 0x60
+#define CXL_RP_MSI_SUPPORTED_FLAGS PCI_MSI_FLAGS_MASKBIT
+#define CXL_RP_MSI_NR_VECTOR 2
+
/* Copied from the gen root port which we derive */
#define GEN_PCIE_ROOT_PORT_AER_OFFSET 0x100
#define GEN_PCIE_ROOT_PORT_ACS_OFFSET \
@@ -47,6 +52,49 @@
#define TYPE_CXL_ROOT_PORT "cxl-rp"
DECLARE_INSTANCE_CHECKER(CXLRootPort, CXL_ROOT_PORT, TYPE_CXL_ROOT_PORT)
+/*
+ * If two MSI vectors are allocated, the Advanced Error Interrupt Message Number
+ * is 1; otherwise it is 0.
+ * See 17.12.5.10 RPERRSTS, bits 32:27: Advanced Error Interrupt Message Number.
+ */
+static uint8_t cxl_rp_aer_vector(const PCIDevice *d)
+{
+ switch (msi_nr_vectors_allocated(d)) {
+ case 1:
+ return 0;
+ case 2:
+ return 1;
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ default:
+ break;
+ }
+ abort();
+ return 0;
+}
+
+static int cxl_rp_interrupts_init(PCIDevice *d, Error **errp)
+{
+ int rc;
+
+ rc = msi_init(d, CXL_RP_MSI_OFFSET, CXL_RP_MSI_NR_VECTOR,
+ CXL_RP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT,
+ CXL_RP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT,
+ errp);
+ if (rc < 0) {
+ assert(rc == -ENOTSUP);
+ }
+
+ return rc;
+}
+
+static void cxl_rp_interrupts_uninit(PCIDevice *d)
+{
+ msi_uninit(d);
+}
+
static void latch_registers(CXLRootPort *crp)
{
uint32_t *reg_state = crp->cxl_cstate.crb.cache_mem_registers;
@@ -183,16 +231,29 @@
}
}
+static void cxl_rp_aer_vector_update(PCIDevice *d)
+{
+ PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d);
+
+ if (rpc->aer_vector) {
+ pcie_aer_root_set_vector(d, rpc->aer_vector(d));
+ }
+}
+
static void cxl_rp_write_config(PCIDevice *d, uint32_t address, uint32_t val,
int len)
{
uint16_t slt_ctl, slt_sta;
+ uint32_t root_cmd =
+ pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND);
pcie_cap_slot_get(d, &slt_ctl, &slt_sta);
pci_bridge_write_config(d, address, val, len);
+ cxl_rp_aer_vector_update(d);
pcie_cap_flr_write_config(d, address, val, len);
pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len);
pcie_aer_write_config(d, address, val, len);
+ pcie_aer_root_write_config(d, address, val, len, root_cmd);
cxl_rp_dvsec_write_config(d, address, val, len);
}
@@ -217,6 +278,9 @@
rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET;
rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET;
+ rpc->aer_vector = cxl_rp_aer_vector;
+ rpc->interrupts_init = cxl_rp_interrupts_init;
+ rpc->interrupts_uninit = cxl_rp_interrupts_uninit;
dc->hotpluggable = false;
}
diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c
index e752a21..ead33f0 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -15,6 +15,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
+#include "hw/pci/pcie_port.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci-bridge/pci_expander_bridge.h"
@@ -79,6 +80,13 @@
return &host->cxl_cstate;
}
+bool cxl_get_hb_passthrough(PCIHostState *hb)
+{
+ CXLHost *host = PXB_CXL_HOST(hb);
+
+ return host->passthrough;
+}
+
static int pxb_bus_num(PCIBus *bus)
{
PXBDev *pxb = convert_to_pxb(bus->parent_dev);
@@ -289,15 +297,32 @@
return pin - PCI_SLOT(pxb->devfn);
}
-static void pxb_dev_reset(DeviceState *dev)
+static void pxb_cxl_dev_reset(DeviceState *dev)
{
CXLHost *cxl = PXB_CXL_DEV(dev)->cxl.cxl_host_bridge;
CXLComponentState *cxl_cstate = &cxl->cxl_cstate;
+ PCIHostState *hb = PCI_HOST_BRIDGE(cxl);
uint32_t *reg_state = cxl_cstate->crb.cache_mem_registers;
uint32_t *write_msk = cxl_cstate->crb.cache_mem_regs_write_mask;
+ int dsp_count = 0;
cxl_component_register_init_common(reg_state, write_msk, CXL2_ROOT_PORT);
- ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8);
+ /*
+ * The CXL specification allows for host bridges with no HDM decoders
+ * if they only have a single root port.
+ */
+ if (!PXB_DEV(dev)->hdm_for_passthrough) {
+ dsp_count = pcie_count_ds_ports(hb->bus);
+ }
+ /* Initial reset will have 0 DSPs, so wait until > 0 */
+ if (dsp_count == 1) {
+ cxl->passthrough = true;
+ /* Set Capability ID in header to NONE */
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_CAPABILITY_HEADER, ID, 0);
+ } else {
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT,
+ 8);
+ }
}
static gint pxb_compare(gconstpointer a, gconstpointer b)
@@ -481,9 +506,18 @@
}
pxb_dev_realize_common(dev, CXL, errp);
- pxb_dev_reset(DEVICE(dev));
+ pxb_cxl_dev_reset(DEVICE(dev));
}
+static Property pxb_cxl_dev_properties[] = {
+ /* Note: 0 is not a legal PXB bus number. */
+ DEFINE_PROP_UINT8("bus_nr", PXBDev, bus_nr, 0),
+ DEFINE_PROP_UINT16("numa_node", PXBDev, numa_node, NUMA_NODE_UNASSIGNED),
+ DEFINE_PROP_BOOL("bypass_iommu", PXBDev, bypass_iommu, false),
+ DEFINE_PROP_BOOL("hdm_for_passthrough", PXBDev, hdm_for_passthrough, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -497,12 +531,12 @@
*/
dc->desc = "CXL Host Bridge";
- device_class_set_props(dc, pxb_dev_properties);
+ device_class_set_props(dc, pxb_cxl_dev_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
/* Host bridges aren't hotpluggable. FIXME: spec reference */
dc->hotpluggable = false;
- dc->reset = pxb_dev_reset;
+ dc->reset = pxb_cxl_dev_reset;
}
static const TypeInfo pxb_cxl_dev_info = {
diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c
index 298564f..19e8031 100644
--- a/hw/pci-host/mv64361.c
+++ b/hw/pci-host/mv64361.c
@@ -873,10 +873,6 @@
}
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cpu_irq);
qdev_init_gpio_in_named(dev, mv64361_gpp_irq, "gpp", 32);
- /* FIXME: PCI IRQ connections may be board specific */
- for (i = 0; i < PCI_NUM_PINS; i++) {
- s->pci[1].irq[i] = qdev_get_gpio_in_named(dev, "gpp", 12 + i);
- }
}
static void mv64361_reset(DeviceState *dev)
diff --git a/hw/pci/pci-internal.h b/hw/pci/pci-internal.h
index 2ea356b..a7d6d8a 100644
--- a/hw/pci/pci-internal.h
+++ b/hw/pci/pci-internal.h
@@ -20,6 +20,5 @@
int pcie_aer_parse_error_string(const char *error_name,
uint32_t *status, bool *correctable);
-int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err);
#endif
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 034fe49..def5000 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -95,6 +95,21 @@
}
};
+static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
+{
+ return a - b;
+}
+
+static GSequence *pci_acpi_index_list(void)
+{
+ static GSequence *used_acpi_index_list;
+
+ if (!used_acpi_index_list) {
+ used_acpi_index_list = g_sequence_new(NULL);
+ }
+ return used_acpi_index_list;
+}
+
static void pci_init_bus_master(PCIDevice *pci_dev)
{
AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);
@@ -1246,6 +1261,17 @@
do_pci_unregister_device(pci_dev);
pci_dev->msi_trigger = NULL;
+
+ /*
+ * clean up acpi-index so it can be reused by another device
+ */
+ if (pci_dev->acpi_index) {
+ GSequence *used_indexes = pci_acpi_index_list();
+
+ g_sequence_remove(g_sequence_lookup(used_indexes,
+ GINT_TO_POINTER(pci_dev->acpi_index),
+ g_cmp_uint32, NULL));
+ }
}
void pci_register_bar(PCIDevice *pci_dev, int region_num,
@@ -2005,6 +2031,8 @@
return bus->devices[devfn];
}
+#define ONBOARD_INDEX_MAX (16 * 1024 - 1)
+
static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
PCIDevice *pci_dev = (PCIDevice *)qdev;
@@ -2014,6 +2042,35 @@
bool is_default_rom;
uint16_t class_id;
+ /*
+ * capped by systemd (see: udev-builtin-net_id.c);
+ * as it's the only known user, honor it to avoid users
+ * misconfiguring QEMU and then wondering why acpi-index doesn't work
+ */
+ if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
+ error_setg(errp, "acpi-index should be less or equal to %u",
+ ONBOARD_INDEX_MAX);
+ return;
+ }
+
+ /*
+ * make sure that acpi-index is unique across all present PCI devices
+ */
+ if (pci_dev->acpi_index) {
+ GSequence *used_indexes = pci_acpi_index_list();
+
+ if (g_sequence_lookup(used_indexes,
+ GINT_TO_POINTER(pci_dev->acpi_index),
+ g_cmp_uint32, NULL)) {
+ error_setg(errp, "a PCI device with acpi-index = %" PRIu32
+ " already exist", pci_dev->acpi_index);
+ return;
+ }
+ g_sequence_insert_sorted(used_indexes,
+ GINT_TO_POINTER(pci_dev->acpi_index),
+ g_cmp_uint32, NULL);
+ }
+
if (pci_dev->romsize != -1 && !is_power_of_2(pci_dev->romsize)) {
error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
return;
diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c
index 9a19be4..103667c 100644
--- a/hw/pci/pcie_aer.c
+++ b/hw/pci/pcie_aer.c
@@ -112,6 +112,10 @@
pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
PCI_ERR_UNC_SUPPORTED);
+ pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK,
+ PCI_ERR_UNC_MASK_DEFAULT);
+ pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK,
+ PCI_ERR_UNC_SUPPORTED);
pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
PCI_ERR_UNC_SEVERITY_DEFAULT);
@@ -188,8 +192,16 @@
static bool
pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg)
{
+ uint16_t devctl = pci_get_word(dev->config + dev->exp.exp_cap +
+ PCI_EXP_DEVCTL);
if (!(pcie_aer_msg_is_uncor(msg) &&
- (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR))) {
+ (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR)) &&
+ !((msg->severity == PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
+ (devctl & PCI_EXP_DEVCTL_NFERE)) &&
+ !((msg->severity == PCI_ERR_ROOT_CMD_COR_EN) &&
+ (devctl & PCI_EXP_DEVCTL_CERE)) &&
+ !((msg->severity == PCI_ERR_ROOT_CMD_FATAL_EN) &&
+ (devctl & PCI_EXP_DEVCTL_FERE))) {
return false;
}
diff --git a/hw/pci/pcie_port.c b/hw/pci/pcie_port.c
index 65a397a..20ff2b3 100644
--- a/hw/pci/pcie_port.c
+++ b/hw/pci/pcie_port.c
@@ -161,6 +161,51 @@
return NULL;
}
+/* Find first port in devfn number order */
+PCIDevice *pcie_find_port_first(PCIBus *bus)
+{
+ int devfn;
+
+ for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
+ PCIDevice *d = bus->devices[devfn];
+
+ if (!d || !pci_is_express(d) || !d->exp.exp_cap) {
+ continue;
+ }
+
+ if (object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) {
+ return d;
+ }
+ }
+
+ return NULL;
+}
+
+int pcie_count_ds_ports(PCIBus *bus)
+{
+ int dsp_count = 0;
+ int devfn;
+
+ for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
+ PCIDevice *d = bus->devices[devfn];
+
+ if (!d || !pci_is_express(d) || !d->exp.exp_cap) {
+ continue;
+ }
+ if (object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) {
+ dsp_count++;
+ }
+ }
+ return dsp_count;
+}
+
+static bool pcie_slot_is_hotpluggable_bus(HotplugHandler *plug_handler,
+ BusState *bus)
+{
+ PCIESlot *s = PCIE_SLOT(bus->parent);
+ return s->hotplug;
+}
+
static const TypeInfo pcie_port_type_info = {
.name = TYPE_PCIE_PORT,
.parent = TYPE_PCI_BRIDGE,
@@ -188,6 +233,7 @@
hc->plug = pcie_cap_slot_plug_cb;
hc->unplug = pcie_cap_slot_unplug_cb;
hc->unplug_request = pcie_cap_slot_unplug_request_cb;
+ hc->is_hotpluggable_bus = pcie_slot_is_hotpluggable_bus;
}
static const TypeInfo pcie_slot_type_info = {
diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c
index f0bd72e..aa5a757 100644
--- a/hw/pci/pcie_sriov.c
+++ b/hw/pci/pcie_sriov.c
@@ -300,3 +300,8 @@
}
return NULL;
}
+
+uint16_t pcie_sriov_num_vfs(PCIDevice *dev)
+{
+ return dev->exp.sriov_pf.num_vfs;
+}
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
index 7cc375d..f1650be 100644
--- a/hw/ppc/pegasos2.c
+++ b/hw/ppc/pegasos2.c
@@ -73,6 +73,8 @@
MachineState parent_obj;
PowerPCCPU *cpu;
DeviceState *mv;
+ qemu_irq mv_pirq[PCI_NUM_PINS];
+ qemu_irq via_pirq[PCI_NUM_PINS];
Vof *vof;
void *fdt_blob;
uint64_t kernel_addr;
@@ -95,6 +97,15 @@
}
}
+static void pegasos2_pci_irq(void *opaque, int n, int level)
+{
+ Pegasos2MachineState *pm = opaque;
+
+ /* PCI interrupt lines are connected to both MV64361 and VT8231 */
+ qemu_set_irq(pm->mv_pirq[n], level);
+ qemu_set_irq(pm->via_pirq[n], level);
+}
+
static void pegasos2_init(MachineState *machine)
{
Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine);
@@ -106,7 +117,7 @@
I2CBus *i2c_bus;
const char *fwname = machine->firmware ?: PROM_FILENAME;
char *filename;
- int sz;
+ int i, sz;
uint8_t *spd_data;
/* init CPU */
@@ -156,11 +167,18 @@
/* Marvell Discovery II system controller */
pm->mv = DEVICE(sysbus_create_simple(TYPE_MV64361, -1,
qdev_get_gpio_in(DEVICE(pm->cpu), PPC6xx_INPUT_INT)));
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ pm->mv_pirq[i] = qdev_get_gpio_in_named(pm->mv, "gpp", 12 + i);
+ }
pci_bus = mv64361_get_pci_bus(pm->mv, 1);
+ pci_bus_irqs(pci_bus, pegasos2_pci_irq, pm, PCI_NUM_PINS);
/* VIA VT8231 South Bridge (multifunction PCI device) */
via = OBJECT(pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0),
true, TYPE_VT8231_ISA));
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ pm->via_pirq[i] = qdev_get_gpio_in_named(DEVICE(via), "pirq", i);
+ }
object_property_add_alias(OBJECT(machine), "rtc-time",
object_resolve_path_component(via, "rtc"),
"date");
@@ -267,6 +285,12 @@
PCI_INTERRUPT_LINE, 2, 0x9);
pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
0x50, 1, 0x2);
+ pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
+ 0x55, 1, 0x90);
+ pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
+ 0x56, 1, 0x99);
+ pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
+ 0x57, 1, 0x90);
pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) |
PCI_INTERRUPT_LINE, 2, 0x109);
diff --git a/hw/ppc/ppc4xx_sdram.c b/hw/ppc/ppc4xx_sdram.c
index 4501fb2..c0c87ff 100644
--- a/hw/ppc/ppc4xx_sdram.c
+++ b/hw/ppc/ppc4xx_sdram.c
@@ -33,6 +33,7 @@
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/log.h"
+#include "qemu/error-report.h"
#include "exec/address-spaces.h" /* get_system_memory() */
#include "hw/irq.h"
#include "hw/qdev-properties.h"
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 925ff52..ec4def6 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -8,6 +8,7 @@
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/spapr.h"
diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c
index 5170a33..2786663 100644
--- a/hw/ppc/spapr_softmmu.c
+++ b/hw/ppc/spapr_softmmu.c
@@ -1,12 +1,14 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
+#include "qemu/error-report.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
+
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
/*
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index b06944d..bc67876 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -22,6 +22,7 @@
#include "qemu/cutils.h"
#include "hw/riscv/opentitan.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "hw/boards.h"
#include "hw/misc/unimp.h"
#include "hw/riscv/boot.h"
diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c
index e43cc94..12ea74b 100644
--- a/hw/riscv/shakti_c.c
+++ b/hw/riscv/shakti_c.c
@@ -20,6 +20,7 @@
#include "hw/boards.h"
#include "hw/riscv/shakti_c.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "hw/intc/sifive_plic.h"
#include "hw/intc/riscv_aclint.h"
#include "sysemu/sysemu.h"
diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c
index 82da0a2..7331248 100644
--- a/hw/riscv/virt-acpi-build.c
+++ b/hw/riscv/virt-acpi-build.c
@@ -29,6 +29,7 @@
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "sysemu/reset.h"
#include "migration/vmstate.h"
#include "hw/riscv/virt.h"
diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c
index 74345d9..ec3e56e 100644
--- a/hw/rtc/m48t59.c
+++ b/hw/rtc/m48t59.c
@@ -93,9 +93,9 @@
qemu_set_irq(NVRAM->IRQ, 1);
if ((NVRAM->buffer[0x1FF5] & 0x80) == 0 &&
- (NVRAM->buffer[0x1FF4] & 0x80) == 0 &&
- (NVRAM->buffer[0x1FF3] & 0x80) == 0 &&
- (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
+ (NVRAM->buffer[0x1FF4] & 0x80) == 0 &&
+ (NVRAM->buffer[0x1FF3] & 0x80) == 0 &&
+ (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
/* Repeat once a month */
qemu_get_timedate(&tm, NVRAM->time_offset);
tm.tm_mon++;
@@ -105,21 +105,21 @@
}
next_time = qemu_timedate_diff(&tm) - NVRAM->time_offset;
} else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 &&
- (NVRAM->buffer[0x1FF4] & 0x80) == 0 &&
- (NVRAM->buffer[0x1FF3] & 0x80) == 0 &&
- (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
+ (NVRAM->buffer[0x1FF4] & 0x80) == 0 &&
+ (NVRAM->buffer[0x1FF3] & 0x80) == 0 &&
+ (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
/* Repeat once a day */
next_time = 24 * 60 * 60;
} else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 &&
- (NVRAM->buffer[0x1FF4] & 0x80) != 0 &&
- (NVRAM->buffer[0x1FF3] & 0x80) == 0 &&
- (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
+ (NVRAM->buffer[0x1FF4] & 0x80) != 0 &&
+ (NVRAM->buffer[0x1FF3] & 0x80) == 0 &&
+ (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
/* Repeat once an hour */
next_time = 60 * 60;
} else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 &&
- (NVRAM->buffer[0x1FF4] & 0x80) != 0 &&
- (NVRAM->buffer[0x1FF3] & 0x80) != 0 &&
- (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
+ (NVRAM->buffer[0x1FF4] & 0x80) != 0 &&
+ (NVRAM->buffer[0x1FF3] & 0x80) != 0 &&
+ (NVRAM->buffer[0x1FF2] & 0x80) == 0) {
/* Repeat once a minute */
next_time = 60;
} else {
@@ -161,13 +161,13 @@
NVRAM->buffer[0x1FF0] |= 0x80;
if (NVRAM->buffer[0x1FF7] & 0x80) {
- NVRAM->buffer[0x1FF7] = 0x00;
- NVRAM->buffer[0x1FFC] &= ~0x40;
+ NVRAM->buffer[0x1FF7] = 0x00;
+ NVRAM->buffer[0x1FFC] &= ~0x40;
/* May it be a hw CPU Reset instead ? */
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
} else {
- qemu_set_irq(NVRAM->IRQ, 1);
- qemu_set_irq(NVRAM->IRQ, 0);
+ qemu_set_irq(NVRAM->IRQ, 1);
+ qemu_set_irq(NVRAM->IRQ, 0);
}
}
@@ -262,80 +262,80 @@
case 0x1FF9:
case 0x07F9:
/* seconds (BCD) */
- tmp = from_bcd(val & 0x7F);
- if (tmp >= 0 && tmp <= 59) {
- get_time(NVRAM, &tm);
- tm.tm_sec = tmp;
- set_time(NVRAM, &tm);
- }
+ tmp = from_bcd(val & 0x7F);
+ if (tmp >= 0 && tmp <= 59) {
+ get_time(NVRAM, &tm);
+ tm.tm_sec = tmp;
+ set_time(NVRAM, &tm);
+ }
if ((val & 0x80) ^ (NVRAM->buffer[addr] & 0x80)) {
- if (val & 0x80) {
- NVRAM->stop_time = time(NULL);
- } else {
- NVRAM->time_offset += NVRAM->stop_time - time(NULL);
- NVRAM->stop_time = 0;
- }
- }
+ if (val & 0x80) {
+ NVRAM->stop_time = time(NULL);
+ } else {
+ NVRAM->time_offset += NVRAM->stop_time - time(NULL);
+ NVRAM->stop_time = 0;
+ }
+ }
NVRAM->buffer[addr] = val & 0x80;
break;
case 0x1FFA:
case 0x07FA:
/* minutes (BCD) */
- tmp = from_bcd(val & 0x7F);
- if (tmp >= 0 && tmp <= 59) {
- get_time(NVRAM, &tm);
- tm.tm_min = tmp;
- set_time(NVRAM, &tm);
- }
+ tmp = from_bcd(val & 0x7F);
+ if (tmp >= 0 && tmp <= 59) {
+ get_time(NVRAM, &tm);
+ tm.tm_min = tmp;
+ set_time(NVRAM, &tm);
+ }
break;
case 0x1FFB:
case 0x07FB:
/* hours (BCD) */
- tmp = from_bcd(val & 0x3F);
- if (tmp >= 0 && tmp <= 23) {
- get_time(NVRAM, &tm);
- tm.tm_hour = tmp;
- set_time(NVRAM, &tm);
- }
+ tmp = from_bcd(val & 0x3F);
+ if (tmp >= 0 && tmp <= 23) {
+ get_time(NVRAM, &tm);
+ tm.tm_hour = tmp;
+ set_time(NVRAM, &tm);
+ }
break;
case 0x1FFC:
case 0x07FC:
/* day of the week / century */
- tmp = from_bcd(val & 0x07);
- get_time(NVRAM, &tm);
- tm.tm_wday = tmp;
- set_time(NVRAM, &tm);
+ tmp = from_bcd(val & 0x07);
+ get_time(NVRAM, &tm);
+ tm.tm_wday = tmp;
+ set_time(NVRAM, &tm);
NVRAM->buffer[addr] = val & 0x40;
break;
case 0x1FFD:
case 0x07FD:
/* date (BCD) */
- tmp = from_bcd(val & 0x3F);
- if (tmp != 0) {
- get_time(NVRAM, &tm);
- tm.tm_mday = tmp;
- set_time(NVRAM, &tm);
- }
+ tmp = from_bcd(val & 0x3F);
+ if (tmp != 0) {
+ get_time(NVRAM, &tm);
+ tm.tm_mday = tmp;
+ set_time(NVRAM, &tm);
+ }
break;
case 0x1FFE:
case 0x07FE:
/* month */
- tmp = from_bcd(val & 0x1F);
- if (tmp >= 1 && tmp <= 12) {
- get_time(NVRAM, &tm);
- tm.tm_mon = tmp - 1;
- set_time(NVRAM, &tm);
- }
+ tmp = from_bcd(val & 0x1F);
+ if (tmp >= 1 && tmp <= 12) {
+ get_time(NVRAM, &tm);
+ tm.tm_mon = tmp - 1;
+ set_time(NVRAM, &tm);
+ }
break;
case 0x1FFF:
case 0x07FF:
/* year */
- tmp = from_bcd(val);
- if (tmp >= 0 && tmp <= 99) {
- get_time(NVRAM, &tm);
+ tmp = from_bcd(val);
+ if (tmp >= 0 && tmp <= 99) {
+ get_time(NVRAM, &tm);
tm.tm_year = from_bcd(val) + NVRAM->base_year - 1900;
- set_time(NVRAM, &tm);
- }
+ set_time(NVRAM, &tm);
+ }
break;
default:
/* Check lock registers state */
@@ -346,7 +346,7 @@
do_write:
if (addr < NVRAM->size) {
NVRAM->buffer[addr] = val & 0xFF;
- }
+ }
break;
}
}
@@ -367,34 +367,34 @@
switch (addr) {
case 0x1FF0:
/* flags register */
- goto do_read;
+ goto do_read;
case 0x1FF1:
/* unused */
- retval = 0;
+ retval = 0;
break;
case 0x1FF2:
/* alarm seconds */
- goto do_read;
+ goto do_read;
case 0x1FF3:
/* alarm minutes */
- goto do_read;
+ goto do_read;
case 0x1FF4:
/* alarm hours */
- goto do_read;
+ goto do_read;
case 0x1FF5:
/* alarm date */
- goto do_read;
+ goto do_read;
case 0x1FF6:
/* interrupts */
- goto do_read;
+ goto do_read;
case 0x1FF7:
- /* A read resets the watchdog */
- set_up_watchdog(NVRAM, NVRAM->buffer[0x1FF7]);
- goto do_read;
+ /* A read resets the watchdog */
+ set_up_watchdog(NVRAM, NVRAM->buffer[0x1FF7]);
+ goto do_read;
case 0x1FF8:
case 0x07F8:
/* control */
- goto do_read;
+ goto do_read;
case 0x1FF9:
case 0x07F9:
/* seconds (BCD) */
@@ -446,7 +446,7 @@
do_read:
if (addr < NVRAM->size) {
retval = NVRAM->buffer[addr];
- }
+ }
break;
}
trace_m48txx_nvram_mem_read(addr, retval);
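
Beyond the re-indentation, the alarm block near the top of the m48t59.c hunk decodes the "repeat" bits (bit 7 of the alarm date/hours/minutes/seconds registers at 0x1FF5..0x1FF2) into the next alarm period: all four clear means once a month, and each further don't-care bit shortens it to a day, an hour, or a minute. A standalone sketch of that decode, with the final fall-through assumed to mean a one-second repeat (that branch is not visible in the hunk):

    #include <stdio.h>
    #include <stdbool.h>

    /*
     * Decode the M48T59 "repeat" bits into a repeat period in seconds,
     * mirroring the if/else ladder above.  Bit 7 of the alarm
     * date/hours/minutes/seconds registers marks that field as "don't care".
     * 0 stands for "once a month", which has no fixed period.
     */
    static long repeat_period(bool date_dc, bool hour_dc, bool min_dc, bool sec_dc)
    {
        if (!date_dc && !hour_dc && !min_dc && !sec_dc) {
            return 0;              /* repeat once a month: recomputed from the calendar */
        } else if (date_dc && !hour_dc && !min_dc && !sec_dc) {
            return 24 * 60 * 60;   /* repeat once a day */
        } else if (date_dc && hour_dc && !min_dc && !sec_dc) {
            return 60 * 60;        /* repeat once an hour */
        } else if (date_dc && hour_dc && min_dc && !sec_dc) {
            return 60;             /* repeat once a minute */
        }
        return 1;                  /* assumed: remaining combinations repeat each second */
    }

    int main(void)
    {
        printf("date+hours don't care -> %ld s\n", repeat_period(true, true, false, false));
        printf("all bits clear        -> %ld (monthly)\n", repeat_period(false, false, false, false));
        return 0;
    }

The monthly case returns 0 here because the real code recomputes the next date from the calendar rather than using a fixed period.
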
diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c
index e8d5eda..d8534da 100644
--- a/hw/rtc/twl92230.c
+++ b/hw/rtc/twl92230.c
@@ -112,19 +112,19 @@
s->rtc.alm_sec --;
s->rtc.next += 1000;
timer_mod(s->rtc.hz_tm, s->rtc.next);
- if ((s->rtc.ctrl >> 3) & 3) { /* EVERY */
+ if ((s->rtc.ctrl >> 3) & 3) { /* EVERY */
menelaus_rtc_update(s);
if (((s->rtc.ctrl >> 3) & 3) == 1 && !s->rtc.tm.tm_sec)
- s->status |= 1 << 8; /* RTCTMR */
+ s->status |= 1 << 8; /* RTCTMR */
else if (((s->rtc.ctrl >> 3) & 3) == 2 && !s->rtc.tm.tm_min)
- s->status |= 1 << 8; /* RTCTMR */
+ s->status |= 1 << 8; /* RTCTMR */
else if (!s->rtc.tm.tm_hour)
- s->status |= 1 << 8; /* RTCTMR */
+ s->status |= 1 << 8; /* RTCTMR */
} else
- s->status |= 1 << 8; /* RTCTMR */
- if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */
+ s->status |= 1 << 8; /* RTCTMR */
+ if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */
if (s->rtc.alm_sec == 0)
- s->status |= 1 << 9; /* RTCALM */
+ s->status |= 1 << 9; /* RTCALM */
/* TODO: wake-up */
}
if (s->rtc.next_comp <= 0) {
@@ -140,19 +140,19 @@
s->reg = 0x00;
- s->vcore[0] = 0x0c; /* XXX: X-loader needs 0x8c? check! */
+ s->vcore[0] = 0x0c; /* XXX: X-loader needs 0x8c? check! */
s->vcore[1] = 0x05;
s->vcore[2] = 0x02;
s->vcore[3] = 0x0c;
s->vcore[4] = 0x03;
- s->dcdc[0] = 0x33; /* Depends on wiring */
+ s->dcdc[0] = 0x33; /* Depends on wiring */
s->dcdc[1] = 0x03;
s->dcdc[2] = 0x00;
s->ldo[0] = 0x95;
s->ldo[1] = 0x7e;
s->ldo[2] = 0x00;
- s->ldo[3] = 0x00; /* Depends on wiring */
- s->ldo[4] = 0x03; /* Depends on wiring */
+ s->ldo[3] = 0x00; /* Depends on wiring */
+ s->ldo[4] = 0x03; /* Depends on wiring */
s->ldo[5] = 0x00;
s->ldo[6] = 0x00;
s->ldo[7] = 0x00;
@@ -203,70 +203,70 @@
}
if (!s->pwrbtn_state && level) {
- s->status |= 1 << 11; /* PSHBTN */
+ s->status |= 1 << 11; /* PSHBTN */
menelaus_update(s);
}
s->pwrbtn_state = level;
}
-#define MENELAUS_REV 0x01
-#define MENELAUS_VCORE_CTRL1 0x02
-#define MENELAUS_VCORE_CTRL2 0x03
-#define MENELAUS_VCORE_CTRL3 0x04
-#define MENELAUS_VCORE_CTRL4 0x05
-#define MENELAUS_VCORE_CTRL5 0x06
-#define MENELAUS_DCDC_CTRL1 0x07
-#define MENELAUS_DCDC_CTRL2 0x08
-#define MENELAUS_DCDC_CTRL3 0x09
-#define MENELAUS_LDO_CTRL1 0x0a
-#define MENELAUS_LDO_CTRL2 0x0b
-#define MENELAUS_LDO_CTRL3 0x0c
-#define MENELAUS_LDO_CTRL4 0x0d
-#define MENELAUS_LDO_CTRL5 0x0e
-#define MENELAUS_LDO_CTRL6 0x0f
-#define MENELAUS_LDO_CTRL7 0x10
-#define MENELAUS_LDO_CTRL8 0x11
-#define MENELAUS_SLEEP_CTRL1 0x12
-#define MENELAUS_SLEEP_CTRL2 0x13
-#define MENELAUS_DEVICE_OFF 0x14
-#define MENELAUS_OSC_CTRL 0x15
-#define MENELAUS_DETECT_CTRL 0x16
-#define MENELAUS_INT_MASK1 0x17
-#define MENELAUS_INT_MASK2 0x18
-#define MENELAUS_INT_STATUS1 0x19
-#define MENELAUS_INT_STATUS2 0x1a
-#define MENELAUS_INT_ACK1 0x1b
-#define MENELAUS_INT_ACK2 0x1c
-#define MENELAUS_GPIO_CTRL 0x1d
-#define MENELAUS_GPIO_IN 0x1e
-#define MENELAUS_GPIO_OUT 0x1f
-#define MENELAUS_BBSMS 0x20
-#define MENELAUS_RTC_CTRL 0x21
-#define MENELAUS_RTC_UPDATE 0x22
-#define MENELAUS_RTC_SEC 0x23
-#define MENELAUS_RTC_MIN 0x24
-#define MENELAUS_RTC_HR 0x25
-#define MENELAUS_RTC_DAY 0x26
-#define MENELAUS_RTC_MON 0x27
-#define MENELAUS_RTC_YR 0x28
-#define MENELAUS_RTC_WKDAY 0x29
-#define MENELAUS_RTC_AL_SEC 0x2a
-#define MENELAUS_RTC_AL_MIN 0x2b
-#define MENELAUS_RTC_AL_HR 0x2c
-#define MENELAUS_RTC_AL_DAY 0x2d
-#define MENELAUS_RTC_AL_MON 0x2e
-#define MENELAUS_RTC_AL_YR 0x2f
-#define MENELAUS_RTC_COMP_MSB 0x30
-#define MENELAUS_RTC_COMP_LSB 0x31
-#define MENELAUS_S1_PULL_EN 0x32
-#define MENELAUS_S1_PULL_DIR 0x33
-#define MENELAUS_S2_PULL_EN 0x34
-#define MENELAUS_S2_PULL_DIR 0x35
-#define MENELAUS_MCT_CTRL1 0x36
-#define MENELAUS_MCT_CTRL2 0x37
-#define MENELAUS_MCT_CTRL3 0x38
-#define MENELAUS_MCT_PIN_ST 0x39
-#define MENELAUS_DEBOUNCE1 0x3a
+#define MENELAUS_REV 0x01
+#define MENELAUS_VCORE_CTRL1 0x02
+#define MENELAUS_VCORE_CTRL2 0x03
+#define MENELAUS_VCORE_CTRL3 0x04
+#define MENELAUS_VCORE_CTRL4 0x05
+#define MENELAUS_VCORE_CTRL5 0x06
+#define MENELAUS_DCDC_CTRL1 0x07
+#define MENELAUS_DCDC_CTRL2 0x08
+#define MENELAUS_DCDC_CTRL3 0x09
+#define MENELAUS_LDO_CTRL1 0x0a
+#define MENELAUS_LDO_CTRL2 0x0b
+#define MENELAUS_LDO_CTRL3 0x0c
+#define MENELAUS_LDO_CTRL4 0x0d
+#define MENELAUS_LDO_CTRL5 0x0e
+#define MENELAUS_LDO_CTRL6 0x0f
+#define MENELAUS_LDO_CTRL7 0x10
+#define MENELAUS_LDO_CTRL8 0x11
+#define MENELAUS_SLEEP_CTRL1 0x12
+#define MENELAUS_SLEEP_CTRL2 0x13
+#define MENELAUS_DEVICE_OFF 0x14
+#define MENELAUS_OSC_CTRL 0x15
+#define MENELAUS_DETECT_CTRL 0x16
+#define MENELAUS_INT_MASK1 0x17
+#define MENELAUS_INT_MASK2 0x18
+#define MENELAUS_INT_STATUS1 0x19
+#define MENELAUS_INT_STATUS2 0x1a
+#define MENELAUS_INT_ACK1 0x1b
+#define MENELAUS_INT_ACK2 0x1c
+#define MENELAUS_GPIO_CTRL 0x1d
+#define MENELAUS_GPIO_IN 0x1e
+#define MENELAUS_GPIO_OUT 0x1f
+#define MENELAUS_BBSMS 0x20
+#define MENELAUS_RTC_CTRL 0x21
+#define MENELAUS_RTC_UPDATE 0x22
+#define MENELAUS_RTC_SEC 0x23
+#define MENELAUS_RTC_MIN 0x24
+#define MENELAUS_RTC_HR 0x25
+#define MENELAUS_RTC_DAY 0x26
+#define MENELAUS_RTC_MON 0x27
+#define MENELAUS_RTC_YR 0x28
+#define MENELAUS_RTC_WKDAY 0x29
+#define MENELAUS_RTC_AL_SEC 0x2a
+#define MENELAUS_RTC_AL_MIN 0x2b
+#define MENELAUS_RTC_AL_HR 0x2c
+#define MENELAUS_RTC_AL_DAY 0x2d
+#define MENELAUS_RTC_AL_MON 0x2e
+#define MENELAUS_RTC_AL_YR 0x2f
+#define MENELAUS_RTC_COMP_MSB 0x30
+#define MENELAUS_RTC_COMP_LSB 0x31
+#define MENELAUS_S1_PULL_EN 0x32
+#define MENELAUS_S1_PULL_DIR 0x33
+#define MENELAUS_S2_PULL_EN 0x34
+#define MENELAUS_S2_PULL_DIR 0x35
+#define MENELAUS_MCT_CTRL1 0x36
+#define MENELAUS_MCT_CTRL2 0x37
+#define MENELAUS_MCT_CTRL3 0x38
+#define MENELAUS_MCT_PIN_ST 0x39
+#define MENELAUS_DEBOUNCE1 0x3a
static uint8_t menelaus_read(void *opaque, uint8_t addr)
{
@@ -293,7 +293,7 @@
return 0;
case MENELAUS_OSC_CTRL:
- return s->osc | (1 << 7); /* CLK32K_GOOD */
+ return s->osc | (1 << 7); /* CLK32K_GOOD */
case MENELAUS_DETECT_CTRL:
return s->detect;
@@ -334,9 +334,9 @@
return to_bcd(s->rtc.tm.tm_min);
case MENELAUS_RTC_HR:
menelaus_rtc_update(s);
- if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */
+ if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */
return to_bcd((s->rtc.tm.tm_hour % 12) + 1) |
- (!!(s->rtc.tm.tm_hour >= 12) << 7); /* PM_nAM */
+ (!!(s->rtc.tm.tm_hour >= 12) << 7); /* PM_nAM */
else
return to_bcd(s->rtc.tm.tm_hour);
case MENELAUS_RTC_DAY:
@@ -356,7 +356,7 @@
case MENELAUS_RTC_AL_MIN:
return to_bcd(s->rtc.alm.tm_min);
case MENELAUS_RTC_AL_HR:
- if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */
+ if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */
return to_bcd((s->rtc.alm.tm_hour % 12) + 1) |
(!!(s->rtc.alm.tm_hour >= 12) << 7);/* AL_PM_nAM */
else
@@ -541,7 +541,7 @@
break;
case MENELAUS_RTC_CTRL:
- if ((s->rtc.ctrl ^ value) & 1) { /* RTC_EN */
+ if ((s->rtc.ctrl ^ value) & 1) { /* RTC_EN */
if (value & 1)
menelaus_rtc_start(s);
else
@@ -603,7 +603,7 @@
default:
fprintf(stderr, "%s: bad RTC_UPDATE value %02x\n",
__func__, value);
- s->status |= 1 << 10; /* RTCERR */
+ s->status |= 1 << 10; /* RTCERR */
menelaus_update(s);
}
s->rtc.sec_offset = qemu_timedate_diff(&tm);
@@ -615,7 +615,7 @@
s->rtc.tm.tm_min = from_bcd(value & 0x7f);
break;
case MENELAUS_RTC_HR:
- s->rtc.tm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */
+ s->rtc.tm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */
MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) :
from_bcd(value & 0x3f);
break;
@@ -640,7 +640,7 @@
menelaus_alm_update(s);
break;
case MENELAUS_RTC_AL_HR:
- s->rtc.alm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */
+ s->rtc.alm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */
MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) :
from_bcd(value & 0x3f);
menelaus_alm_update(s);
@@ -792,14 +792,14 @@
{
MenelausState *s = opaque;
- if (s->rtc.ctrl & 1) /* RTC_EN */
+ if (s->rtc.ctrl & 1) /* RTC_EN */
menelaus_rtc_stop(s);
s->rtc.next = s->rtc_next_vmstate;
menelaus_alm_update(s);
menelaus_update(s);
- if (s->rtc.ctrl & 1) /* RTC_EN */
+ if (s->rtc.ctrl & 1) /* RTC_EN */
menelaus_rtc_start(s);
return 0;
}
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index ceceafb..c971761 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -199,8 +199,8 @@
}
static bool scsi_bus_is_address_free(SCSIBus *bus,
- int channel, int target, int lun,
- SCSIDevice **p_dev)
+ int channel, int target, int lun,
+ SCSIDevice **p_dev)
{
SCSIDevice *d;
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index da5bdd1..77a717d 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -342,39 +342,39 @@
sd->scr[7] = 0x00;
}
-#define MID 0xaa
-#define OID "XY"
-#define PNM "QEMU!"
-#define PRV 0x01
-#define MDT_YR 2006
-#define MDT_MON 2
+#define MID 0xaa
+#define OID "XY"
+#define PNM "QEMU!"
+#define PRV 0x01
+#define MDT_YR 2006
+#define MDT_MON 2
static void sd_set_cid(SDState *sd)
{
- sd->cid[0] = MID; /* Fake card manufacturer ID (MID) */
- sd->cid[1] = OID[0]; /* OEM/Application ID (OID) */
+ sd->cid[0] = MID; /* Fake card manufacturer ID (MID) */
+ sd->cid[1] = OID[0]; /* OEM/Application ID (OID) */
sd->cid[2] = OID[1];
- sd->cid[3] = PNM[0]; /* Fake product name (PNM) */
+ sd->cid[3] = PNM[0]; /* Fake product name (PNM) */
sd->cid[4] = PNM[1];
sd->cid[5] = PNM[2];
sd->cid[6] = PNM[3];
sd->cid[7] = PNM[4];
- sd->cid[8] = PRV; /* Fake product revision (PRV) */
- sd->cid[9] = 0xde; /* Fake serial number (PSN) */
+ sd->cid[8] = PRV; /* Fake product revision (PRV) */
+ sd->cid[9] = 0xde; /* Fake serial number (PSN) */
sd->cid[10] = 0xad;
sd->cid[11] = 0xbe;
sd->cid[12] = 0xef;
- sd->cid[13] = 0x00 | /* Manufacture date (MDT) */
+ sd->cid[13] = 0x00 | /* Manufacture date (MDT) */
((MDT_YR - 2000) / 10);
sd->cid[14] = ((MDT_YR % 10) << 4) | MDT_MON;
sd->cid[15] = (sd_crc7(sd->cid, 15) << 1) | 1;
}
-#define HWBLOCK_SHIFT 9 /* 512 bytes */
-#define SECTOR_SHIFT 5 /* 16 kilobytes */
-#define WPGROUP_SHIFT 7 /* 2 megs */
-#define CMULT_SHIFT 9 /* 512 times HWBLOCK_SIZE */
-#define WPGROUP_SIZE (1 << (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT))
+#define HWBLOCK_SHIFT 9 /* 512 bytes */
+#define SECTOR_SHIFT 5 /* 16 kilobytes */
+#define WPGROUP_SHIFT 7 /* 2 megs */
+#define CMULT_SHIFT 9 /* 512 times HWBLOCK_SIZE */
+#define WPGROUP_SIZE (1 << (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT))
static const uint8_t sd_csd_rw_mask[16] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -395,31 +395,31 @@
csize = (size >> (CMULT_SHIFT + hwblock_shift)) - 1;
if (size <= SDSC_MAX_CAPACITY) { /* Standard Capacity SD */
- sd->csd[0] = 0x00; /* CSD structure */
- sd->csd[1] = 0x26; /* Data read access-time-1 */
- sd->csd[2] = 0x00; /* Data read access-time-2 */
+ sd->csd[0] = 0x00; /* CSD structure */
+ sd->csd[1] = 0x26; /* Data read access-time-1 */
+ sd->csd[2] = 0x00; /* Data read access-time-2 */
sd->csd[3] = 0x32; /* Max. data transfer rate: 25 MHz */
- sd->csd[4] = 0x5f; /* Card Command Classes */
- sd->csd[5] = 0x50 | /* Max. read data block length */
+ sd->csd[4] = 0x5f; /* Card Command Classes */
+ sd->csd[5] = 0x50 | /* Max. read data block length */
hwblock_shift;
- sd->csd[6] = 0xe0 | /* Partial block for read allowed */
+ sd->csd[6] = 0xe0 | /* Partial block for read allowed */
((csize >> 10) & 0x03);
- sd->csd[7] = 0x00 | /* Device size */
+ sd->csd[7] = 0x00 | /* Device size */
((csize >> 2) & 0xff);
- sd->csd[8] = 0x3f | /* Max. read current */
+ sd->csd[8] = 0x3f | /* Max. read current */
((csize << 6) & 0xc0);
- sd->csd[9] = 0xfc | /* Max. write current */
+ sd->csd[9] = 0xfc | /* Max. write current */
((CMULT_SHIFT - 2) >> 1);
- sd->csd[10] = 0x40 | /* Erase sector size */
+ sd->csd[10] = 0x40 | /* Erase sector size */
(((CMULT_SHIFT - 2) << 7) & 0x80) | (sectsize >> 1);
- sd->csd[11] = 0x00 | /* Write protect group size */
+ sd->csd[11] = 0x00 | /* Write protect group size */
((sectsize << 7) & 0x80) | wpsize;
- sd->csd[12] = 0x90 | /* Write speed factor */
+ sd->csd[12] = 0x90 | /* Write speed factor */
(hwblock_shift >> 2);
- sd->csd[13] = 0x20 | /* Max. write data block length */
+ sd->csd[13] = 0x20 | /* Max. write data block length */
((hwblock_shift << 6) & 0xc0);
- sd->csd[14] = 0x00; /* File format group */
- } else { /* SDHC */
+ sd->csd[14] = 0x00; /* File format group */
+ } else { /* SDHC */
size /= 512 * KiB;
size -= 1;
sd->csd[0] = 0x40;
@@ -513,7 +513,7 @@
buffer[0] = 0x40 | req->cmd;
stl_be_p(&buffer[1], req->arg);
return 0;
- return sd_crc7(buffer, 5) != req->crc; /* TODO */
+ return sd_crc7(buffer, 5) != req->crc; /* TODO */
}
static void sd_response_r1_make(SDState *sd, uint8_t *response)
@@ -851,19 +851,19 @@
int i, mode, new_func;
mode = !!(arg & 0x80000000);
- sd->data[0] = 0x00; /* Maximum current consumption */
+ sd->data[0] = 0x00; /* Maximum current consumption */
sd->data[1] = 0x01;
- sd->data[2] = 0x80; /* Supported group 6 functions */
+ sd->data[2] = 0x80; /* Supported group 6 functions */
sd->data[3] = 0x01;
- sd->data[4] = 0x80; /* Supported group 5 functions */
+ sd->data[4] = 0x80; /* Supported group 5 functions */
sd->data[5] = 0x01;
- sd->data[6] = 0x80; /* Supported group 4 functions */
+ sd->data[6] = 0x80; /* Supported group 4 functions */
sd->data[7] = 0x01;
- sd->data[8] = 0x80; /* Supported group 3 functions */
+ sd->data[8] = 0x80; /* Supported group 3 functions */
sd->data[9] = 0x01;
- sd->data[10] = 0x80; /* Supported group 2 functions */
+ sd->data[10] = 0x80; /* Supported group 2 functions */
sd->data[11] = 0x43;
- sd->data[12] = 0x80; /* Supported group 1 functions */
+ sd->data[12] = 0x80; /* Supported group 1 functions */
sd->data[13] = 0x03;
memset(&sd->data[14], 0, 3);
@@ -1001,7 +1001,7 @@
switch (req.cmd) {
/* Basic commands (Class 0 and Class 1) */
- case 0: /* CMD0: GO_IDLE_STATE */
+ case 0: /* CMD0: GO_IDLE_STATE */
switch (sd->state) {
case sd_inactive_state:
return sd->spi ? sd_r1 : sd_r0;
@@ -1013,14 +1013,14 @@
}
break;
- case 1: /* CMD1: SEND_OP_CMD */
+ case 1: /* CMD1: SEND_OP_CMD */
if (!sd->spi)
goto bad_cmd;
sd->state = sd_transfer_state;
return sd_r1;
- case 2: /* CMD2: ALL_SEND_CID */
+ case 2: /* CMD2: ALL_SEND_CID */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
@@ -1033,7 +1033,7 @@
}
break;
- case 3: /* CMD3: SEND_RELATIVE_ADDR */
+ case 3: /* CMD3: SEND_RELATIVE_ADDR */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
@@ -1048,7 +1048,7 @@
}
break;
- case 4: /* CMD4: SEND_DSR */
+ case 4: /* CMD4: SEND_DSR */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
@@ -1063,7 +1063,7 @@
case 5: /* CMD5: reserved for SDIO cards */
return sd_illegal;
- case 6: /* CMD6: SWITCH_FUNCTION */
+ case 6: /* CMD6: SWITCH_FUNCTION */
switch (sd->mode) {
case sd_data_transfer_mode:
sd_function_switch(sd, req.arg);
@@ -1077,7 +1077,7 @@
}
break;
- case 7: /* CMD7: SELECT/DESELECT_CARD */
+ case 7: /* CMD7: SELECT/DESELECT_CARD */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
@@ -1115,7 +1115,7 @@
}
break;
- case 8: /* CMD8: SEND_IF_COND */
+ case 8: /* CMD8: SEND_IF_COND */
if (sd->spec_version < SD_PHY_SPECv2_00_VERS) {
break;
}
@@ -1133,7 +1133,7 @@
sd->vhs = req.arg;
return sd_r7;
- case 9: /* CMD9: SEND_CSD */
+ case 9: /* CMD9: SEND_CSD */
switch (sd->state) {
case sd_standby_state:
if (sd->rca != rca)
@@ -1155,7 +1155,7 @@
}
break;
- case 10: /* CMD10: SEND_CID */
+ case 10: /* CMD10: SEND_CID */
switch (sd->state) {
case sd_standby_state:
if (sd->rca != rca)
@@ -1177,7 +1177,7 @@
}
break;
- case 12: /* CMD12: STOP_TRANSMISSION */
+ case 12: /* CMD12: STOP_TRANSMISSION */
switch (sd->state) {
case sd_sendingdata_state:
sd->state = sd_transfer_state;
@@ -1194,7 +1194,7 @@
}
break;
- case 13: /* CMD13: SEND_STATUS */
+ case 13: /* CMD13: SEND_STATUS */
switch (sd->mode) {
case sd_data_transfer_mode:
if (!sd->spi && sd->rca != rca) {
@@ -1208,7 +1208,7 @@
}
break;
- case 15: /* CMD15: GO_INACTIVE_STATE */
+ case 15: /* CMD15: GO_INACTIVE_STATE */
if (sd->spi)
goto bad_cmd;
switch (sd->mode) {
@@ -1225,7 +1225,7 @@
break;
/* Block read commands (Classs 2) */
- case 16: /* CMD16: SET_BLOCKLEN */
+ case 16: /* CMD16: SET_BLOCKLEN */
switch (sd->state) {
case sd_transfer_state:
if (req.arg > (1 << HWBLOCK_SHIFT)) {
@@ -1242,8 +1242,8 @@
}
break;
- case 17: /* CMD17: READ_SINGLE_BLOCK */
- case 18: /* CMD18: READ_MULTIPLE_BLOCK */
+ case 17: /* CMD17: READ_SINGLE_BLOCK */
+ case 18: /* CMD18: READ_MULTIPLE_BLOCK */
switch (sd->state) {
case sd_transfer_state:
@@ -1287,8 +1287,8 @@
break;
/* Block write commands (Class 4) */
- case 24: /* CMD24: WRITE_SINGLE_BLOCK */
- case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */
+ case 24: /* CMD24: WRITE_SINGLE_BLOCK */
+ case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */
switch (sd->state) {
case sd_transfer_state:
@@ -1316,7 +1316,7 @@
}
break;
- case 26: /* CMD26: PROGRAM_CID */
+ case 26: /* CMD26: PROGRAM_CID */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
@@ -1331,7 +1331,7 @@
}
break;
- case 27: /* CMD27: PROGRAM_CSD */
+ case 27: /* CMD27: PROGRAM_CSD */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_receivingdata_state;
@@ -1345,7 +1345,7 @@
break;
/* Write protection (Class 6) */
- case 28: /* CMD28: SET_WRITE_PROT */
+ case 28: /* CMD28: SET_WRITE_PROT */
if (sd->size > SDSC_MAX_CAPACITY) {
return sd_illegal;
}
@@ -1367,7 +1367,7 @@
}
break;
- case 29: /* CMD29: CLR_WRITE_PROT */
+ case 29: /* CMD29: CLR_WRITE_PROT */
if (sd->size > SDSC_MAX_CAPACITY) {
return sd_illegal;
}
@@ -1389,7 +1389,7 @@
}
break;
- case 30: /* CMD30: SEND_WRITE_PROT */
+ case 30: /* CMD30: SEND_WRITE_PROT */
if (sd->size > SDSC_MAX_CAPACITY) {
return sd_illegal;
}
@@ -1413,7 +1413,7 @@
break;
/* Erase commands (Class 5) */
- case 32: /* CMD32: ERASE_WR_BLK_START */
+ case 32: /* CMD32: ERASE_WR_BLK_START */
switch (sd->state) {
case sd_transfer_state:
sd->erase_start = req.arg;
@@ -1424,7 +1424,7 @@
}
break;
- case 33: /* CMD33: ERASE_WR_BLK_END */
+ case 33: /* CMD33: ERASE_WR_BLK_END */
switch (sd->state) {
case sd_transfer_state:
sd->erase_end = req.arg;
@@ -1435,7 +1435,7 @@
}
break;
- case 38: /* CMD38: ERASE */
+ case 38: /* CMD38: ERASE */
switch (sd->state) {
case sd_transfer_state:
if (sd->csd[14] & 0x30) {
@@ -1455,7 +1455,7 @@
break;
/* Lock card commands (Class 7) */
- case 42: /* CMD42: LOCK_UNLOCK */
+ case 42: /* CMD42: LOCK_UNLOCK */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_receivingdata_state;
@@ -1478,7 +1478,7 @@
return sd_illegal;
/* Application specific commands (Class 8) */
- case 55: /* CMD55: APP_CMD */
+ case 55: /* CMD55: APP_CMD */
switch (sd->state) {
case sd_ready_state:
case sd_identification_state:
@@ -1501,7 +1501,7 @@
sd->card_status |= APP_CMD;
return sd_r1;
- case 56: /* CMD56: GEN_CMD */
+ case 56: /* CMD56: GEN_CMD */
switch (sd->state) {
case sd_transfer_state:
sd->data_offset = 0;
@@ -1546,7 +1546,7 @@
req.cmd, req.arg, sd_state_name(sd->state));
sd->card_status |= APP_CMD;
switch (req.cmd) {
- case 6: /* ACMD6: SET_BUS_WIDTH */
+ case 6: /* ACMD6: SET_BUS_WIDTH */
if (sd->spi) {
goto unimplemented_spi_cmd;
}
@@ -1561,7 +1561,7 @@
}
break;
- case 13: /* ACMD13: SD_STATUS */
+ case 13: /* ACMD13: SD_STATUS */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
@@ -1574,7 +1574,7 @@
}
break;
- case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */
+ case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */
switch (sd->state) {
case sd_transfer_state:
*(uint32_t *) sd->data = sd->blk_written;
@@ -1589,7 +1589,7 @@
}
break;
- case 23: /* ACMD23: SET_WR_BLK_ERASE_COUNT */
+ case 23: /* ACMD23: SET_WR_BLK_ERASE_COUNT */
switch (sd->state) {
case sd_transfer_state:
return sd_r1;
@@ -1599,7 +1599,7 @@
}
break;
- case 41: /* ACMD41: SD_APP_OP_COND */
+ case 41: /* ACMD41: SD_APP_OP_COND */
if (sd->spi) {
/* SEND_OP_CMD */
sd->state = sd_transfer_state;
@@ -1641,7 +1641,7 @@
return sd_r3;
- case 42: /* ACMD42: SET_CLR_CARD_DETECT */
+ case 42: /* ACMD42: SET_CLR_CARD_DETECT */
switch (sd->state) {
case sd_transfer_state:
/* Bringing in the 50KOhm pull-up resistor... Done. */
@@ -1652,7 +1652,7 @@
}
break;
- case 51: /* ACMD51: SEND_SCR */
+ case 51: /* ACMD51: SEND_SCR */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
@@ -1840,7 +1840,7 @@
sd_acmd_name(sd->current_cmd),
sd->current_cmd, value);
switch (sd->current_cmd) {
- case 24: /* CMD24: WRITE_SINGLE_BLOCK */
+ case 24: /* CMD24: WRITE_SINGLE_BLOCK */
sd->data[sd->data_offset ++] = value;
if (sd->data_offset >= sd->blk_len) {
/* TODO: Check CRC before committing */
@@ -1853,7 +1853,7 @@
}
break;
- case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */
+ case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */
if (sd->data_offset == 0) {
/* Start of the block - let's check the address is valid */
if (!address_in_range(sd, "WRITE_MULTIPLE_BLOCK",
@@ -1890,7 +1890,7 @@
}
break;
- case 26: /* CMD26: PROGRAM_CID */
+ case 26: /* CMD26: PROGRAM_CID */
sd->data[sd->data_offset ++] = value;
if (sd->data_offset >= sizeof(sd->cid)) {
/* TODO: Check CRC before committing */
@@ -1909,7 +1909,7 @@
}
break;
- case 27: /* CMD27: PROGRAM_CSD */
+ case 27: /* CMD27: PROGRAM_CSD */
sd->data[sd->data_offset ++] = value;
if (sd->data_offset >= sizeof(sd->csd)) {
/* TODO: Check CRC before committing */
@@ -1933,7 +1933,7 @@
}
break;
- case 42: /* CMD42: LOCK_UNLOCK */
+ case 42: /* CMD42: LOCK_UNLOCK */
sd->data[sd->data_offset ++] = value;
if (sd->data_offset >= sd->blk_len) {
/* TODO: Check CRC before committing */
@@ -1944,7 +1944,7 @@
}
break;
- case 56: /* CMD56: GEN_CMD */
+ case 56: /* CMD56: GEN_CMD */
sd->data[sd->data_offset ++] = value;
if (sd->data_offset >= sd->blk_len) {
APP_WRITE_BLOCK(sd->data_start, sd->data_offset);
@@ -1996,29 +1996,29 @@
sd_acmd_name(sd->current_cmd),
sd->current_cmd, io_len);
switch (sd->current_cmd) {
- case 6: /* CMD6: SWITCH_FUNCTION */
+ case 6: /* CMD6: SWITCH_FUNCTION */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 64)
sd->state = sd_transfer_state;
break;
- case 9: /* CMD9: SEND_CSD */
- case 10: /* CMD10: SEND_CID */
+ case 9: /* CMD9: SEND_CSD */
+ case 10: /* CMD10: SEND_CID */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 16)
sd->state = sd_transfer_state;
break;
- case 13: /* ACMD13: SD_STATUS */
+ case 13: /* ACMD13: SD_STATUS */
ret = sd->sd_status[sd->data_offset ++];
if (sd->data_offset >= sizeof(sd->sd_status))
sd->state = sd_transfer_state;
break;
- case 17: /* CMD17: READ_SINGLE_BLOCK */
+ case 17: /* CMD17: READ_SINGLE_BLOCK */
if (sd->data_offset == 0)
BLK_READ_BLOCK(sd->data_start, io_len);
ret = sd->data[sd->data_offset ++];
@@ -2027,7 +2027,7 @@
sd->state = sd_transfer_state;
break;
- case 18: /* CMD18: READ_MULTIPLE_BLOCK */
+ case 18: /* CMD18: READ_MULTIPLE_BLOCK */
if (sd->data_offset == 0) {
if (!address_in_range(sd, "READ_MULTIPLE_BLOCK",
sd->data_start, io_len)) {
@@ -2058,28 +2058,28 @@
ret = sd_tuning_block_pattern[sd->data_offset++];
break;
- case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */
+ case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 4)
sd->state = sd_transfer_state;
break;
- case 30: /* CMD30: SEND_WRITE_PROT */
+ case 30: /* CMD30: SEND_WRITE_PROT */
ret = sd->data[sd->data_offset ++];
if (sd->data_offset >= 4)
sd->state = sd_transfer_state;
break;
- case 51: /* ACMD51: SEND_SCR */
+ case 51: /* ACMD51: SEND_SCR */
ret = sd->scr[sd->data_offset ++];
if (sd->data_offset >= sizeof(sd->scr))
sd->state = sd_transfer_state;
break;
- case 56: /* CMD56: GEN_CMD */
+ case 56: /* CMD56: GEN_CMD */
if (sd->data_offset == 0)
APP_READ_BLOCK(sd->data_start, sd->blk_len);
ret = sd->data[sd->data_offset ++];
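
Aside from the comment realignment, the SDSC branch of sd_set_csd() shown above encodes capacity as a 12-bit C_SIZE computed as csize = (size >> (CMULT_SHIFT + hwblock_shift)) - 1 and split across CSD bytes 6..8. A quick standalone check of that arithmetic, assuming the default 512-byte block (HWBLOCK_SHIFT = 9) and the fixed CMULT_SHIFT = 9 from the defines in the hunk (a sketch, not the QEMU code; the SDHC branch uses a different layout):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the SDSC branch above: 512-byte blocks, C_SIZE_MULT = CMULT_SHIFT - 2. */
    #define HWBLOCK_SHIFT 9
    #define CMULT_SHIFT   9

    int main(void)
    {
        uint64_t size = 64ULL * 1024 * 1024;                  /* a 64 MiB card */
        uint32_t csize = (size >> (CMULT_SHIFT + HWBLOCK_SHIFT)) - 1;

        /* The 12-bit C_SIZE is split across CSD bytes 6..8 exactly as in the hunk. */
        uint8_t csd6 = 0xe0 | ((csize >> 10) & 0x03);
        uint8_t csd7 = (csize >> 2) & 0xff;
        uint8_t csd8 = 0x3f | ((csize << 6) & 0xc0);

        /* Reverse check: capacity = (C_SIZE + 1) << (CMULT_SHIFT + HWBLOCK_SHIFT). */
        uint64_t capacity = (uint64_t)(csize + 1) << (CMULT_SHIFT + HWBLOCK_SHIFT);

        printf("C_SIZE=%u csd[6..8]=%02x %02x %02x capacity=%llu bytes\n",
               csize, csd6, csd7, csd8, (unsigned long long)capacity);
        return 0;
    }

For a 64 MiB card this prints C_SIZE=255 and a round-trip capacity of 67108864 bytes, i.e. (C_SIZE + 1) * 256 KiB.
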
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index 6f8b543..88d2b4b 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -1410,6 +1410,18 @@
}
}
+/* This is the one state transition the controller can do by itself */
+static bool ohci_resume(OHCIState *s)
+{
+ if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) {
+ trace_usb_ohci_remote_wakeup(s->name);
+ s->ctl &= ~OHCI_CTL_HCFS;
+ s->ctl |= OHCI_USB_RESUME;
+ return true;
+ }
+ return false;
+}
+
/*
* Sets a flag in a port status reg but only set it if the port is connected.
* If not set ConnectStatusChange flag. If flag is enabled return 1.
@@ -1426,7 +1438,10 @@
if (!(ohci->rhport[i].ctrl & OHCI_PORT_CCS)) {
ohci->rhport[i].ctrl |= OHCI_PORT_CSC;
if (ohci->rhstatus & OHCI_RHS_DRWE) {
- /* TODO: CSC is a wakeup event */
+ /* CSC is a wakeup event */
+ if (ohci_resume(ohci)) {
+ ohci_set_interrupt(ohci, OHCI_INTR_RD);
+ }
}
return 0;
}
@@ -1828,11 +1843,7 @@
intr = OHCI_INTR_RHSC;
}
/* Note that the controller can be suspended even if this port is not */
- if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) {
- trace_usb_ohci_remote_wakeup(s->name);
- /* This is the one state transition the controller can do by itself */
- s->ctl &= ~OHCI_CTL_HCFS;
- s->ctl |= OHCI_USB_RESUME;
+ if (ohci_resume(s)) {
/*
* In suspend mode only ResumeDetected is possible, not RHSC:
* see the OHCI spec 5.1.2.3.
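
The hcd-ohci.c change factors the USBSUSPEND to USBRESUME transition into ohci_resume(), which reports whether a transition actually happened so each caller can decide whether to raise ResumeDetected (the new remote-wakeup path in ohci_set_hub_status() and the existing port-wakeup path both do). A tiny standalone sketch of that check-then-switch pattern, using the HcControl HCFS encoding (suspend = 3, resume = 1 in bits 7:6):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* HcControl functional state, encoded as in OHCI (bits 7:6). */
    #define CTL_HCFS_MASK  (3u << 6)
    #define USB_RESUME     (1u << 6)
    #define USB_SUSPEND    (3u << 6)

    /*
     * Like ohci_resume() above: flip the functional state only if the
     * controller is currently suspended, and report whether a transition
     * happened so the caller can raise the right interrupt.
     */
    static bool try_resume(uint32_t *ctl)
    {
        if ((*ctl & CTL_HCFS_MASK) == USB_SUSPEND) {
            *ctl = (*ctl & ~CTL_HCFS_MASK) | USB_RESUME;
            return true;
        }
        return false;
    }

    int main(void)
    {
        uint32_t ctl = USB_SUSPEND;

        if (try_resume(&ctl)) {
            printf("resumed, raise ResumeDetected\n");   /* OHCI_INTR_RD in the patch */
        }
        if (!try_resume(&ctl)) {
            printf("not suspended, nothing to do\n");
        }
        return 0;
    }

Returning a bool instead of raising the interrupt inside the helper keeps the interrupt choice at the call sites, as the two hunks above show.
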
diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c
index 5d7a549..1a97b36 100644
--- a/hw/usb/imx-usb-phy.c
+++ b/hw/usb/imx-usb-phy.c
@@ -13,6 +13,7 @@
#include "qemu/osdep.h"
#include "hw/usb/imx-usb-phy.h"
#include "migration/vmstate.h"
+#include "qemu/log.h"
#include "qemu/module.h"
static const VMStateDescription vmstate_imx_usbphy = {
@@ -90,7 +91,15 @@
value = s->usbphy[index - 3];
break;
default:
- value = s->usbphy[index];
+ if (index < USBPHY_MAX) {
+ value = s->usbphy[index];
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Read from non-existing USB PHY register 0x%"
+ HWADDR_PRIx "\n",
+ __func__, offset);
+ value = 0;
+ }
break;
}
return (uint64_t)value;
@@ -168,7 +177,13 @@
s->usbphy[index - 3] ^= value;
break;
default:
- /* Other registers are read-only */
+ /* Other registers are read-only or do not exist */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Write to %s USB PHY register 0x%"
+ HWADDR_PRIx "\n",
+ __func__,
+ index >= USBPHY_MAX ? "non-existing" : "read-only",
+ offset);
break;
}
}
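
The imx-usb-phy.c hunks replace silent out-of-range accesses to the register array with a LOG_GUEST_ERROR message; reads of non-existent registers now return 0 and writes are dropped with a log line. A standalone sketch of the same bounds-checked register-file read (hypothetical names, fprintf standing in for qemu_log_mask; the real model also folds the SET/CLR/TOG aliases back onto the base register, which the sketch omits):

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdint.h>

    #define PHY_NUM_REGS 0x20              /* stand-in for USBPHY_MAX */

    static uint32_t regs[PHY_NUM_REGS];

    /* Read a register by byte offset; unknown offsets log and read as zero. */
    static uint32_t phy_read(uint64_t offset)
    {
        uint64_t index = offset / 4;

        if (index >= PHY_NUM_REGS) {
            /* qemu_log_mask(LOG_GUEST_ERROR, ...) in the real device model */
            fprintf(stderr, "phy_read: non-existing register 0x%" PRIx64 "\n", offset);
            return 0;
        }
        return regs[index];
    }

    int main(void)
    {
        regs[1] = 0xdeadbeef;
        printf("0x%" PRIx32 "\n", phy_read(4));       /* valid register */
        printf("0x%" PRIx32 "\n", phy_read(0x1000));  /* logs, reads as 0 */
        return 0;
    }
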
diff --git a/hw/usb/meson.build b/hw/usb/meson.build
index bdf34cb..599dc24 100644
--- a/hw/usb/meson.build
+++ b/hw/usb/meson.build
@@ -84,6 +84,6 @@
hw_usb_modules += {'host': usbhost_ss}
endif
-softmmu_ss.add(when: ['CONFIG_USB', 'CONFIG_XEN', libusb], if_true: files('xen-usb.c'))
+softmmu_ss.add(when: ['CONFIG_USB', 'CONFIG_XEN_BUS', libusb], if_true: files('xen-usb.c'))
modules += { 'hw-usb': hw_usb_modules }
diff --git a/hw/usb/vt82c686-uhci-pci.c b/hw/usb/vt82c686-uhci-pci.c
index 46a901f..b4884c9 100644
--- a/hw/usb/vt82c686-uhci-pci.c
+++ b/hw/usb/vt82c686-uhci-pci.c
@@ -1,17 +1,7 @@
#include "qemu/osdep.h"
-#include "hw/irq.h"
#include "hw/isa/vt82c686.h"
#include "hcd-uhci.h"
-static void uhci_isa_set_irq(void *opaque, int irq_num, int level)
-{
- UHCIState *s = opaque;
- uint8_t irq = pci_get_byte(s->dev.config + PCI_INTERRUPT_LINE);
- if (irq > 0 && irq < 15) {
- via_isa_set_irq(pci_get_function_0(&s->dev), irq, level);
- }
-}
-
static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp)
{
UHCIState *s = UHCI(dev);
@@ -25,8 +15,6 @@
pci_set_long(pci_conf + 0xc0, 0x00002000);
usb_uhci_common_realize(dev, errp);
- object_unref(s->irq);
- s->irq = qemu_allocate_irq(uhci_isa_set_irq, s, 0);
}
static UHCIInfo uhci_info[] = {
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index 0f7369e..66cb3f7 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -101,6 +101,8 @@
struct usbback_info {
struct XenLegacyDevice xendev; /* must be first */
USBBus bus;
+ uint32_t urb_ring_ref;
+ uint32_t conn_ring_ref;
void *urb_sring;
void *conn_sring;
struct usbif_urb_back_ring urb_ring;
@@ -159,7 +161,7 @@
for (i = 0; i < nr_segs; i++) {
if ((unsigned)usbback_req->req.seg[i].offset +
- (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) {
+ (unsigned)usbback_req->req.seg[i].length > XEN_PAGE_SIZE) {
xen_pv_printf(xendev, 0, "segment crosses page boundary\n");
return -EINVAL;
}
@@ -183,7 +185,7 @@
for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
seg = usbback_req->req.seg + i;
- addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset;
+ addr = usbback_req->buffer + i * XEN_PAGE_SIZE + seg->offset;
qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length);
}
}
@@ -277,10 +279,11 @@
static void usbback_do_response(struct usbback_req *usbback_req, int32_t status,
int32_t actual_length, int32_t error_count)
{
+ uint32_t ref[USBIF_MAX_SEGMENTS_PER_REQUEST];
struct usbback_info *usbif;
struct usbif_urb_response *res;
struct XenLegacyDevice *xendev;
- unsigned int notify;
+ unsigned int notify, i;
usbif = usbback_req->usbif;
xendev = &usbif->xendev;
@@ -293,13 +296,19 @@
}
if (usbback_req->buffer) {
- xen_be_unmap_grant_refs(xendev, usbback_req->buffer,
+ for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
+ ref[i] = usbback_req->req.seg[i].gref;
+ }
+ xen_be_unmap_grant_refs(xendev, usbback_req->buffer, ref,
usbback_req->nr_buffer_segs);
usbback_req->buffer = NULL;
}
if (usbback_req->isoc_buffer) {
- xen_be_unmap_grant_refs(xendev, usbback_req->isoc_buffer,
+ for (i = 0; i < usbback_req->nr_extra_segs; i++) {
+ ref[i] = usbback_req->req.seg[i + usbback_req->req.nr_buffer_segs].gref;
+ }
+ xen_be_unmap_grant_refs(xendev, usbback_req->isoc_buffer, ref,
usbback_req->nr_extra_segs);
usbback_req->isoc_buffer = NULL;
}
@@ -832,11 +841,11 @@
xen_pv_unbind_evtchn(xendev);
if (usbif->urb_sring) {
- xen_be_unmap_grant_ref(xendev, usbif->urb_sring);
+ xen_be_unmap_grant_ref(xendev, usbif->urb_sring, usbif->urb_ring_ref);
usbif->urb_sring = NULL;
}
if (usbif->conn_sring) {
- xen_be_unmap_grant_ref(xendev, usbif->conn_sring);
+ xen_be_unmap_grant_ref(xendev, usbif->conn_sring, usbif->conn_ring_ref);
usbif->conn_sring = NULL;
}
@@ -889,10 +898,12 @@
return -1;
}
+ usbif->urb_ring_ref = urb_ring_ref;
+ usbif->conn_ring_ref = conn_ring_ref;
urb_sring = usbif->urb_sring;
conn_sring = usbif->conn_sring;
- BACK_RING_INIT(&usbif->urb_ring, urb_sring, XC_PAGE_SIZE);
- BACK_RING_INIT(&usbif->conn_ring, conn_sring, XC_PAGE_SIZE);
+ BACK_RING_INIT(&usbif->urb_ring, urb_sring, XEN_PAGE_SIZE);
+ BACK_RING_INIT(&usbif->conn_ring, conn_sring, XEN_PAGE_SIZE);
xen_be_bind_evtchn(xendev);
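
The xen-usb.c changes keep the grant references around after mapping (the new urb_ring_ref/conn_ring_ref fields, plus the per-segment grefs gathered into a local array) so they can be handed back at unmap time, matching the updated xen_be_unmap_grant_ref(s) signatures, and switch the ring sizing from XC_PAGE_SIZE to XEN_PAGE_SIZE. A minimal sketch of that bookkeeping pattern with hypothetical map/unmap helpers (not the Xen API):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the grant map/unmap helpers. */
    static void *map_grant(uint32_t ref)           { printf("map ref %u\n", ref); return malloc(4096); }
    static void unmap_grant(void *p, uint32_t ref) { printf("unmap ref %u\n", ref); free(p); }

    /* Keep the ref next to the mapping so teardown can pass it back, as the patch does. */
    struct ring {
        uint32_t ring_ref;
        void    *sring;
    };

    static void ring_connect(struct ring *r, uint32_t ref)
    {
        r->ring_ref = ref;          /* remember the ref at map time */
        r->sring = map_grant(ref);
    }

    static void ring_disconnect(struct ring *r)
    {
        if (r->sring) {
            unmap_grant(r->sring, r->ring_ref);   /* unmap now needs the ref */
            r->sring = NULL;
        }
    }

    int main(void)
    {
        struct ring urb = {0};
        ring_connect(&urb, 42);
        ring_disconnect(&urb);
        return 0;
    }
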
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index bab83c0..4d01ea3 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -42,6 +42,7 @@
#include "migration/migration.h"
#include "migration/misc.h"
#include "migration/blocker.h"
+#include "migration/qemu-file.h"
#include "sysemu/tpm.h"
VFIOGroupList vfio_group_list =
@@ -319,6 +320,28 @@
* Device state interfaces
*/
+typedef struct {
+ unsigned long *bitmap;
+ hwaddr size;
+ hwaddr pages;
+} VFIOBitmap;
+
+static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
+{
+ vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
+ vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
+ BITS_PER_BYTE;
+ vbmap->bitmap = g_try_malloc0(vbmap->size);
+ if (!vbmap->bitmap) {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr);
+
bool vfio_mig_active(void)
{
VFIOGroup *group;
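
vfio_bitmap_alloc() in the hunk above sizes the dirty bitmap at one bit per host page, rounded up so the byte count is a whole number of 64-bit words: pages = REAL_HOST_PAGE_ALIGN(size) / page_size, then size = ROUND_UP(pages, 64) / 8. A standalone check of that arithmetic, assuming a 4 KiB host page (the real code asks qemu_real_host_page_size()):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL                  /* assumed host page size */

    static uint64_t round_up(uint64_t x, uint64_t m) { return (x + m - 1) / m * m; }

    /* One dirty bit per host page, padded to a whole number of 64-bit words. */
    static void bitmap_geometry(uint64_t region_size)
    {
        uint64_t pages = round_up(region_size, PAGE_SIZE) / PAGE_SIZE;
        uint64_t bytes = round_up(pages, 64) / 8;

        printf("%12llu bytes of IOVA -> %8llu pages -> %6llu byte bitmap\n",
               (unsigned long long)region_size,
               (unsigned long long)pages,
               (unsigned long long)bytes);
    }

    int main(void)
    {
        bitmap_geometry(4096);              /* 1 page -> 8-byte bitmap */
        bitmap_geometry(1ULL << 30);        /* 1 GiB  -> 262144 pages -> 32 KiB */
        bitmap_geometry((1ULL << 30) + 1);  /* a partial page still counts */
        return 0;
    }
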
@@ -339,6 +362,7 @@
}
static Error *multiple_devices_migration_blocker;
+static Error *giommu_migration_blocker;
static unsigned int vfio_migratable_device_num(void)
{
@@ -390,6 +414,64 @@
multiple_devices_migration_blocker = NULL;
}
+static bool vfio_viommu_preset(void)
+{
+ VFIOAddressSpace *space;
+
+ QLIST_FOREACH(space, &vfio_address_spaces, list) {
+ if (space->as != &address_space_memory) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int vfio_block_giommu_migration(Error **errp)
+{
+ int ret;
+
+ if (giommu_migration_blocker ||
+ !vfio_viommu_preset()) {
+ return 0;
+ }
+
+ error_setg(&giommu_migration_blocker,
+ "Migration is currently not supported with vIOMMU enabled");
+ ret = migrate_add_blocker(giommu_migration_blocker, errp);
+ if (ret < 0) {
+ error_free(giommu_migration_blocker);
+ giommu_migration_blocker = NULL;
+ }
+
+ return ret;
+}
+
+void vfio_migration_finalize(void)
+{
+ if (!giommu_migration_blocker ||
+ vfio_viommu_preset()) {
+ return;
+ }
+
+ migrate_del_blocker(giommu_migration_blocker);
+ error_free(giommu_migration_blocker);
+ giommu_migration_blocker = NULL;
+}
+
+static void vfio_set_migration_error(int err)
+{
+ MigrationState *ms = migrate_get_current();
+
+ if (migration_is_setup_or_active(ms->state)) {
+ WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
+ if (ms->to_dst_file) {
+ qemu_file_set_error(ms->to_dst_file, err);
+ }
+ }
+ }
+}
+
static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
VFIOGroup *group;
@@ -417,6 +499,22 @@
return true;
}
+static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
+{
+ VFIOGroup *group;
+ VFIODevice *vbasedev;
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (!vbasedev->dirty_pages_supported) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
/*
* Check if all VFIO devices are running and migration is active, which is
* essentially equivalent to the migration being in pre-copy phase.
@@ -454,9 +552,14 @@
{
struct vfio_iommu_type1_dma_unmap *unmap;
struct vfio_bitmap *bitmap;
- uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
+ VFIOBitmap vbmap;
int ret;
+ ret = vfio_bitmap_alloc(&vbmap, size);
+ if (ret) {
+ return ret;
+ }
+
unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
@@ -470,35 +573,28 @@
* qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
* to qemu_real_host_page_size.
*/
-
bitmap->pgsize = qemu_real_host_page_size();
- bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
- BITS_PER_BYTE;
+ bitmap->size = vbmap.size;
+ bitmap->data = (__u64 *)vbmap.bitmap;
- if (bitmap->size > container->max_dirty_bitmap_size) {
- error_report("UNMAP: Size of bitmap too big 0x%"PRIx64,
- (uint64_t)bitmap->size);
+ if (vbmap.size > container->max_dirty_bitmap_size) {
+ error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
ret = -E2BIG;
goto unmap_exit;
}
- bitmap->data = g_try_malloc0(bitmap->size);
- if (!bitmap->data) {
- ret = -ENOMEM;
- goto unmap_exit;
- }
-
ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
if (!ret) {
- cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data,
- iotlb->translated_addr, pages);
+ cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
+ iotlb->translated_addr, vbmap.pages);
} else {
error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
}
- g_free(bitmap->data);
unmap_exit:
g_free(unmap);
+ g_free(vbmap.bitmap);
+
return ret;
}
@@ -515,10 +611,16 @@
.iova = iova,
.size = size,
};
+ bool need_dirty_sync = false;
+ int ret;
- if (iotlb && container->dirty_pages_supported &&
- vfio_devices_all_running_and_mig_active(container)) {
- return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
+ if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
+ if (!vfio_devices_all_device_dirty_tracking(container) &&
+ container->dirty_pages_supported) {
+ return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
+ }
+
+ need_dirty_sync = true;
}
while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
@@ -544,10 +646,12 @@
return -errno;
}
- if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
- cpu_physical_memory_set_dirty_range(iotlb->translated_addr, size,
- tcg_enabled() ? DIRTY_CLIENTS_ALL :
- DIRTY_CLIENTS_NOCODE);
+ if (need_dirty_sync) {
+ ret = vfio_get_dirty_bitmap(container, iova, size,
+ iotlb->translated_addr);
+ if (ret) {
+ return ret;
+ }
}
return 0;
@@ -680,6 +784,7 @@
if (iotlb->target_as != &address_space_memory) {
error_report("Wrong target AS \"%s\", only system memory is allowed",
iotlb->target_as->name ? iotlb->target_as->name : "none");
+ vfio_set_migration_error(-EINVAL);
return;
}
@@ -703,17 +808,18 @@
read_only);
if (ret) {
error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx", %p) = %d (%m)",
+ "0x%"HWADDR_PRIx", %p) = %d (%s)",
container, iova,
- iotlb->addr_mask + 1, vaddr, ret);
+ iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
}
} else {
ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
if (ret) {
error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%m)",
+ "0x%"HWADDR_PRIx") = %d (%s)",
container, iova,
- iotlb->addr_mask + 1, ret);
+ iotlb->addr_mask + 1, ret, strerror(-ret));
+ vfio_set_migration_error(ret);
}
}
out:
@@ -868,6 +974,22 @@
g_free(vrdl);
}
+static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container,
+ hwaddr iova, hwaddr end)
+{
+ VFIOHostDMAWindow *hostwin;
+ bool hostwin_found = false;
+
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
+ hostwin_found = true;
+ break;
+ }
+ }
+
+ return hostwin_found ? hostwin : NULL;
+}
+
static bool vfio_known_safe_misalignment(MemoryRegionSection *section)
{
MemoryRegion *mr = section->mr;
@@ -884,24 +1006,15 @@
return true;
}
-static void vfio_listener_region_add(MemoryListener *listener,
- MemoryRegionSection *section)
+static bool vfio_listener_valid_section(MemoryRegionSection *section,
+ const char *name)
{
- VFIOContainer *container = container_of(listener, VFIOContainer, listener);
- hwaddr iova, end;
- Int128 llend, llsize;
- void *vaddr;
- int ret;
- VFIOHostDMAWindow *hostwin;
- bool hostwin_found;
- Error *err = NULL;
-
if (vfio_listener_skipped_section(section)) {
- trace_vfio_listener_region_add_skip(
+ trace_vfio_listener_region_skip(name,
section->offset_within_address_space,
section->offset_within_address_space +
int128_get64(int128_sub(section->size, int128_one())));
- return;
+ return false;
}
if (unlikely((section->offset_within_address_space &
@@ -916,15 +1029,53 @@
section->offset_within_region,
qemu_real_host_page_size());
}
- return;
+ return false;
}
+ return true;
+}
+
+static bool vfio_get_section_iova_range(VFIOContainer *container,
+ MemoryRegionSection *section,
+ hwaddr *out_iova, hwaddr *out_end,
+ Int128 *out_llend)
+{
+ Int128 llend;
+ hwaddr iova;
+
iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size);
llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
if (int128_ge(int128_make64(iova), llend)) {
+ return false;
+ }
+
+ *out_iova = iova;
+ *out_end = int128_get64(int128_sub(llend, int128_one()));
+ if (out_llend) {
+ *out_llend = llend;
+ }
+ return true;
+}
+
+static void vfio_listener_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ hwaddr iova, end;
+ Int128 llend, llsize;
+ void *vaddr;
+ int ret;
+ VFIOHostDMAWindow *hostwin;
+ Error *err = NULL;
+
+ if (!vfio_listener_valid_section(section, "region_add")) {
+ return;
+ }
+
+ if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) {
if (memory_region_is_ram_device(section->mr)) {
trace_vfio_listener_region_add_no_dma_map(
memory_region_name(section->mr),
@@ -934,7 +1085,6 @@
}
return;
}
- end = int128_get64(int128_sub(llend, int128_one()));
if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
hwaddr pgsize = 0;
@@ -994,15 +1144,8 @@
#endif
}
- hostwin_found = false;
- QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
- if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
- hostwin_found = true;
- break;
- }
- }
-
- if (!hostwin_found) {
+ hostwin = vfio_find_hostwin(container, iova, end);
+ if (!hostwin) {
error_setg(&err, "Container %p can't map guest IOVA region"
" 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end);
goto fail;
@@ -1095,8 +1238,9 @@
vaddr, section->readonly);
if (ret) {
error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx", %p) = %d (%m)",
- container, iova, int128_get64(llsize), vaddr, ret);
+ "0x%"HWADDR_PRIx", %p) = %d (%s)",
+ container, iova, int128_get64(llsize), vaddr, ret,
+ strerror(-ret));
if (memory_region_is_ram_device(section->mr)) {
/* Allow unexpected mappings not to be fatal for RAM devices */
error_report_err(err);
@@ -1140,26 +1284,7 @@
int ret;
bool try_unmap = true;
- if (vfio_listener_skipped_section(section)) {
- trace_vfio_listener_region_del_skip(
- section->offset_within_address_space,
- section->offset_within_address_space +
- int128_get64(int128_sub(section->size, int128_one())));
- return;
- }
-
- if (unlikely((section->offset_within_address_space &
- ~qemu_real_host_page_mask()) !=
- (section->offset_within_region & ~qemu_real_host_page_mask()))) {
- if (!vfio_known_safe_misalignment(section)) {
- error_report("%s received unaligned region %s iova=0x%"PRIx64
- " offset_within_region=0x%"PRIx64
- " qemu_real_host_page_size=0x%"PRIxPTR,
- __func__, memory_region_name(section->mr),
- section->offset_within_address_space,
- section->offset_within_region,
- qemu_real_host_page_size());
- }
+ if (!vfio_listener_valid_section(section, "region_del")) {
return;
}
@@ -1186,15 +1311,9 @@
*/
}
- iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
- llend = int128_make64(section->offset_within_address_space);
- llend = int128_add(llend, section->size);
- llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
-
- if (int128_ge(int128_make64(iova), llend)) {
+ if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) {
return;
}
- end = int128_get64(int128_sub(llend, int128_one()));
llsize = int128_sub(llend, int128_make64(iova));
@@ -1203,15 +1322,9 @@
if (memory_region_is_ram_device(section->mr)) {
hwaddr pgmask;
VFIOHostDMAWindow *hostwin;
- bool hostwin_found = false;
- QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
- if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
- hostwin_found = true;
- break;
- }
- }
- assert(hostwin_found); /* or region_add() would have failed */
+ hostwin = vfio_find_hostwin(container, iova, end);
+ assert(hostwin); /* or region_add() would have failed */
pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
@@ -1228,16 +1341,18 @@
ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
if (ret) {
error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%m)",
- container, iova, int128_get64(llsize), ret);
+ "0x%"HWADDR_PRIx") = %d (%s)",
+ container, iova, int128_get64(llsize), ret,
+ strerror(-ret));
}
iova += int128_get64(llsize);
}
ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
if (ret) {
error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%m)",
- container, iova, int128_get64(llsize), ret);
+ "0x%"HWADDR_PRIx") = %d (%s)",
+ container, iova, int128_get64(llsize), ret,
+ strerror(-ret));
}
}
@@ -1256,7 +1371,7 @@
}
}
-static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
+static int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
{
int ret;
struct vfio_iommu_type1_dirty_bitmap dirty = {
@@ -1264,7 +1379,7 @@
};
if (!container->dirty_pages_supported) {
- return;
+ return 0;
}
if (start) {
@@ -1275,40 +1390,327 @@
ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
if (ret) {
+ ret = -errno;
error_report("Failed to set dirty tracking flag 0x%x errno: %d",
dirty.flags, errno);
}
+
+ return ret;
+}
+
+typedef struct VFIODirtyRanges {
+ hwaddr min32;
+ hwaddr max32;
+ hwaddr min64;
+ hwaddr max64;
+} VFIODirtyRanges;
+
+typedef struct VFIODirtyRangesListener {
+ VFIOContainer *container;
+ VFIODirtyRanges ranges;
+ MemoryListener listener;
+} VFIODirtyRangesListener;
+
+static void vfio_dirty_tracking_update(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIODirtyRangesListener *dirty = container_of(listener,
+ VFIODirtyRangesListener,
+ listener);
+ VFIODirtyRanges *range = &dirty->ranges;
+ hwaddr iova, end, *min, *max;
+
+ if (!vfio_listener_valid_section(section, "tracking_update") ||
+ !vfio_get_section_iova_range(dirty->container, section,
+ &iova, &end, NULL)) {
+ return;
+ }
+
+ /*
+ * The address space passed to the dirty tracker is reduced to two ranges:
+ * one for 32-bit DMA ranges, and another one for 64-bit DMA ranges.
+ * The underlying reports of dirty will query a sub-interval of each of
+ * these ranges.
+ *
+ * The purpose of the dual range handling is to handle known cases of big
+ * holes in the address space, like the x86 AMD 1T hole. The alternative
+ * would be an IOVATree but that has a much bigger runtime overhead and
+ * unnecessary complexity.
+ */
+ min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
+ max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
+
+ if (*min > iova) {
+ *min = iova;
+ }
+ if (*max < end) {
+ *max = end;
+ }
+
+ trace_vfio_device_dirty_tracking_update(iova, end, *min, *max);
+ return;
+}
+
+static const MemoryListener vfio_dirty_tracking_listener = {
+ .name = "vfio-tracking",
+ .region_add = vfio_dirty_tracking_update,
+};
+
+static void vfio_dirty_tracking_init(VFIOContainer *container,
+ VFIODirtyRanges *ranges)
+{
+ VFIODirtyRangesListener dirty;
+
+ memset(&dirty, 0, sizeof(dirty));
+ dirty.ranges.min32 = UINT32_MAX;
+ dirty.ranges.min64 = UINT64_MAX;
+ dirty.listener = vfio_dirty_tracking_listener;
+ dirty.container = container;
+
+ memory_listener_register(&dirty.listener,
+ container->space->as);
+
+ *ranges = dirty.ranges;
+
+ /*
+ * The memory listener is synchronous, and used to calculate the range
+ * to dirty tracking. Unregister it after we are done as we are not
+ * interested in any follow-up updates.
+ */
+ memory_listener_unregister(&dirty.listener);
+}
+
+static void vfio_devices_dma_logging_stop(VFIOContainer *container)
+{
+ uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
+ sizeof(uint64_t))] = {};
+ struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
+ VFIODevice *vbasedev;
+ VFIOGroup *group;
+
+ feature->argsz = sizeof(buf);
+ feature->flags = VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (!vbasedev->dirty_tracking) {
+ continue;
+ }
+
+ if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
+ warn_report("%s: Failed to stop DMA logging, err %d (%s)",
+ vbasedev->name, -errno, strerror(errno));
+ }
+ vbasedev->dirty_tracking = false;
+ }
+ }
+}
+
+static struct vfio_device_feature *
+vfio_device_feature_dma_logging_start_create(VFIOContainer *container,
+ VFIODirtyRanges *tracking)
+{
+ struct vfio_device_feature *feature;
+ size_t feature_size;
+ struct vfio_device_feature_dma_logging_control *control;
+ struct vfio_device_feature_dma_logging_range *ranges;
+
+ feature_size = sizeof(struct vfio_device_feature) +
+ sizeof(struct vfio_device_feature_dma_logging_control);
+ feature = g_try_malloc0(feature_size);
+ if (!feature) {
+ errno = ENOMEM;
+ return NULL;
+ }
+ feature->argsz = feature_size;
+ feature->flags = VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
+
+ control = (struct vfio_device_feature_dma_logging_control *)feature->data;
+ control->page_size = qemu_real_host_page_size();
+
+ /*
+ * DMA logging uAPI guarantees to support at least a number of ranges that
+ * fits into a single host kernel base page.
+ */
+ control->num_ranges = !!tracking->max32 + !!tracking->max64;
+ ranges = g_try_new0(struct vfio_device_feature_dma_logging_range,
+ control->num_ranges);
+ if (!ranges) {
+ g_free(feature);
+ errno = ENOMEM;
+
+ return NULL;
+ }
+
+ control->ranges = (__u64)(uintptr_t)ranges;
+ if (tracking->max32) {
+ ranges->iova = tracking->min32;
+ ranges->length = (tracking->max32 - tracking->min32) + 1;
+ ranges++;
+ }
+ if (tracking->max64) {
+ ranges->iova = tracking->min64;
+ ranges->length = (tracking->max64 - tracking->min64) + 1;
+ }
+
+ trace_vfio_device_dirty_tracking_start(control->num_ranges,
+ tracking->min32, tracking->max32,
+ tracking->min64, tracking->max64);
+
+ return feature;
+}
+
+static void vfio_device_feature_dma_logging_start_destroy(
+ struct vfio_device_feature *feature)
+{
+ struct vfio_device_feature_dma_logging_control *control =
+ (struct vfio_device_feature_dma_logging_control *)feature->data;
+ struct vfio_device_feature_dma_logging_range *ranges =
+ (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges;
+
+ g_free(ranges);
+ g_free(feature);
+}
+
+static int vfio_devices_dma_logging_start(VFIOContainer *container)
+{
+ struct vfio_device_feature *feature;
+ VFIODirtyRanges ranges;
+ VFIODevice *vbasedev;
+ VFIOGroup *group;
+ int ret = 0;
+
+ vfio_dirty_tracking_init(container, &ranges);
+ feature = vfio_device_feature_dma_logging_start_create(container,
+ &ranges);
+ if (!feature) {
+ return -errno;
+ }
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (vbasedev->dirty_tracking) {
+ continue;
+ }
+
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
+ if (ret) {
+ ret = -errno;
+ error_report("%s: Failed to start DMA logging, err %d (%s)",
+ vbasedev->name, ret, strerror(errno));
+ goto out;
+ }
+ vbasedev->dirty_tracking = true;
+ }
+ }
+
+out:
+ if (ret) {
+ vfio_devices_dma_logging_stop(container);
+ }
+
+ vfio_device_feature_dma_logging_start_destroy(feature);
+
+ return ret;
}
static void vfio_listener_log_global_start(MemoryListener *listener)
{
VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ int ret;
- vfio_set_dirty_page_tracking(container, true);
+ if (vfio_devices_all_device_dirty_tracking(container)) {
+ ret = vfio_devices_dma_logging_start(container);
+ } else {
+ ret = vfio_set_dirty_page_tracking(container, true);
+ }
+
+ if (ret) {
+ error_report("vfio: Could not start dirty page tracking, err: %d (%s)",
+ ret, strerror(-ret));
+ vfio_set_migration_error(ret);
+ }
}
static void vfio_listener_log_global_stop(MemoryListener *listener)
{
VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ int ret = 0;
- vfio_set_dirty_page_tracking(container, false);
+ if (vfio_devices_all_device_dirty_tracking(container)) {
+ vfio_devices_dma_logging_stop(container);
+ } else {
+ ret = vfio_set_dirty_page_tracking(container, false);
+ }
+
+ if (ret) {
+ error_report("vfio: Could not stop dirty page tracking, err: %d (%s)",
+ ret, strerror(-ret));
+ vfio_set_migration_error(ret);
+ }
}
-static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
- uint64_t size, ram_addr_t ram_addr)
+static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
+ hwaddr size, void *bitmap)
+{
+ uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) +
+ sizeof(struct vfio_device_feature_dma_logging_report),
+ sizeof(__u64))] = {};
+ struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
+ struct vfio_device_feature_dma_logging_report *report =
+ (struct vfio_device_feature_dma_logging_report *)feature->data;
+
+ report->iova = iova;
+ report->length = size;
+ report->page_size = qemu_real_host_page_size();
+ report->bitmap = (__u64)(uintptr_t)bitmap;
+
+ feature->argsz = sizeof(buf);
+ feature->flags = VFIO_DEVICE_FEATURE_GET |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
+
+ if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
+ return -errno;
+ }
+
+ return 0;
+}
+
+static int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
+ VFIOBitmap *vbmap, hwaddr iova,
+ hwaddr size)
+{
+ VFIODevice *vbasedev;
+ VFIOGroup *group;
+ int ret;
+
+ QLIST_FOREACH(group, &container->group_list, container_next) {
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ ret = vfio_device_dma_logging_report(vbasedev, iova, size,
+ vbmap->bitmap);
+ if (ret) {
+ error_report("%s: Failed to get DMA logging report, iova: "
+ "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
+ ", err: %d (%s)",
+ vbasedev->name, iova, size, ret, strerror(-ret));
+
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size)
{
struct vfio_iommu_type1_dirty_bitmap *dbitmap;
struct vfio_iommu_type1_dirty_bitmap_get *range;
- uint64_t pages;
int ret;
- if (!container->dirty_pages_supported) {
- cpu_physical_memory_set_dirty_range(ram_addr, size,
- tcg_enabled() ? DIRTY_CLIENTS_ALL :
- DIRTY_CLIENTS_NOCODE);
- return 0;
- }
-
dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
@@ -1323,36 +1725,63 @@
* to qemu_real_host_page_size.
*/
range->bitmap.pgsize = qemu_real_host_page_size();
-
- pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size();
- range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
- BITS_PER_BYTE;
- range->bitmap.data = g_try_malloc0(range->bitmap.size);
- if (!range->bitmap.data) {
- ret = -ENOMEM;
- goto err_out;
- }
+ range->bitmap.size = vbmap->size;
+ range->bitmap.data = (__u64 *)vbmap->bitmap;
ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
if (ret) {
+ ret = -errno;
error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
" size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
(uint64_t)range->size, errno);
- goto err_out;
}
- cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data,
- ram_addr, pages);
-
- trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size,
- range->bitmap.size, ram_addr);
-err_out:
- g_free(range->bitmap.data);
g_free(dbitmap);
return ret;
}
+static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr)
+{
+ bool all_device_dirty_tracking =
+ vfio_devices_all_device_dirty_tracking(container);
+ VFIOBitmap vbmap;
+ int ret;
+
+ if (!container->dirty_pages_supported && !all_device_dirty_tracking) {
+ cpu_physical_memory_set_dirty_range(ram_addr, size,
+ tcg_enabled() ? DIRTY_CLIENTS_ALL :
+ DIRTY_CLIENTS_NOCODE);
+ return 0;
+ }
+
+ ret = vfio_bitmap_alloc(&vbmap, size);
+ if (ret) {
+ return ret;
+ }
+
+ if (all_device_dirty_tracking) {
+ ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size);
+ } else {
+ ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
+ }
+
+ if (ret) {
+ goto out;
+ }
+
+ cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
+ vbmap.pages);
+
+ trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size,
+ ram_addr);
+out:
+ g_free(vbmap.bitmap);
+
+ return ret;
+}
+
typedef struct {
IOMMUNotifier n;
VFIOGuestIOMMU *giommu;
@@ -1366,29 +1795,33 @@
VFIOContainer *container = giommu->container;
hwaddr iova = iotlb->iova + giommu->iommu_offset;
ram_addr_t translated_addr;
+ int ret = -EINVAL;
trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
if (iotlb->target_as != &address_space_memory) {
error_report("Wrong target AS \"%s\", only system memory is allowed",
iotlb->target_as->name ? iotlb->target_as->name : "none");
- return;
+ goto out;
}
rcu_read_lock();
if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
- int ret;
-
ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
translated_addr);
if (ret) {
error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
- "0x%"HWADDR_PRIx") = %d (%m)",
- container, iova,
- iotlb->addr_mask + 1, ret);
+ "0x%"HWADDR_PRIx") = %d (%s)",
+ container, iova, iotlb->addr_mask + 1, ret,
+ strerror(-ret));
}
}
rcu_read_unlock();
+
+out:
+ if (ret) {
+ vfio_set_migration_error(ret);
+ }
}
static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
@@ -1481,13 +1914,19 @@
MemoryRegionSection *section)
{
VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ int ret;
if (vfio_listener_skipped_section(section)) {
return;
}
if (vfio_devices_all_dirty_tracking(container)) {
- vfio_sync_dirty_bitmap(container, section);
+ ret = vfio_sync_dirty_bitmap(container, section);
+ if (ret) {
+ error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret,
+ strerror(-ret));
+ vfio_set_migration_error(ret);
+ }
}
}
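
The hunk above switches dirty-page tracking from the container-wide VFIO_IOMMU_DIRTY_PAGES path to per-device VFIO_DEVICE_FEATURE calls whenever every device in the container supports DMA logging. A minimal sketch of starting logging for a single IOVA range, assuming a kernel that provides the DMA logging uapi structures referenced in the hunk (dma_logging_start_one_range is an illustrative helper, not QEMU code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Start DMA dirty logging on one VFIO device fd for one IOVA range. */
    static int dma_logging_start_one_range(int device_fd, uint64_t iova,
                                           uint64_t length, uint64_t page_size)
    {
        struct vfio_device_feature_dma_logging_range range = {
            .iova = iova,
            .length = length,
        };
        struct vfio_device_feature_dma_logging_control control = {
            .page_size = page_size,
            .num_ranges = 1,
            .ranges = (uintptr_t)&range,
        };
        size_t argsz = sizeof(struct vfio_device_feature) + sizeof(control);
        struct vfio_device_feature *feature = calloc(1, argsz);
        int ret;

        if (!feature) {
            return -ENOMEM;
        }
        feature->argsz = argsz;
        feature->flags = VFIO_DEVICE_FEATURE_SET |
                         VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
        memcpy(feature->data, &control, sizeof(control));

        ret = ioctl(device_fd, VFIO_DEVICE_FEATURE, feature) ? -errno : 0;
        free(feature);
        return ret;
    }

Stopping uses the same ioctl with VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP and needs no range payload.
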
diff --git a/hw/vfio/display.c b/hw/vfio/display.c
index 78f4d82..bec864f 100644
--- a/hw/vfio/display.c
+++ b/hw/vfio/display.c
@@ -14,6 +14,7 @@
#include <linux/vfio.h>
#include <sys/ioctl.h>
+#include "qemu/error-report.h"
#include "hw/display/edid.h"
#include "ui/console.h"
#include "qapi/error.h"
diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c
index afe3fe7..b31ee79 100644
--- a/hw/vfio/igd.c
+++ b/hw/vfio/igd.c
@@ -12,6 +12,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "hw/nvram/fw_cfg.h"
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index a2c3d9b..6b58ddd 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -11,6 +11,7 @@
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/units.h"
+#include "qemu/error-report.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>
@@ -521,7 +522,7 @@
}
}
-static void vfio_migration_exit(VFIODevice *vbasedev)
+static void vfio_migration_free(VFIODevice *vbasedev)
{
g_free(vbasedev->migration);
vbasedev->migration = NULL;
@@ -555,6 +556,19 @@
return 0;
}
+static bool vfio_dma_logging_supported(VFIODevice *vbasedev)
+{
+ uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
+ sizeof(uint64_t))] = {};
+ struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
+
+ feature->argsz = sizeof(buf);
+ feature->flags = VFIO_DEVICE_FEATURE_PROBE |
+ VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
+
+ return !ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
+}
+
static int vfio_migration_init(VFIODevice *vbasedev)
{
int ret;
@@ -589,6 +603,8 @@
migration->device_state = VFIO_DEVICE_STATE_RUNNING;
migration->data_fd = -1;
+ vbasedev->dirty_pages_supported = vfio_dma_logging_supported(vbasedev);
+
oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj)));
if (oid) {
path = g_strdup_printf("%s/vfio", oid);
@@ -616,7 +632,7 @@
return bytes_transferred;
}
-int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
+int vfio_migration_realize(VFIODevice *vbasedev, Error **errp)
{
int ret = -ENOTSUP;
@@ -634,6 +650,11 @@
return ret;
}
+ ret = vfio_block_giommu_migration(errp);
+ if (ret) {
+ return ret;
+ }
+
trace_vfio_migration_probe(vbasedev->name);
return 0;
@@ -649,7 +670,7 @@
return ret;
}
-void vfio_migration_finalize(VFIODevice *vbasedev)
+void vfio_migration_exit(VFIODevice *vbasedev)
{
if (vbasedev->migration) {
VFIOMigration *migration = vbasedev->migration;
@@ -657,7 +678,7 @@
remove_migration_state_change_notifier(&migration->migration_state);
qemu_del_vm_change_state_handler(migration->vm_state);
unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
- vfio_migration_exit(vbasedev);
+ vfio_migration_free(vbasedev);
vfio_unblock_multiple_devices_migration();
}
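
vfio_dma_logging_supported() above relies on the PROBE flavour of the same ioctl: the return value of a payload-free VFIO_DEVICE_FEATURE call is enough to tell whether the device implements DMA logging, and the result is cached in vbasedev->dirty_pages_supported. Restated as a stand-alone sketch under the same <linux/vfio.h> definitions (device_supports_dma_logging is an illustrative name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Probe whether a VFIO device fd supports DMA dirty logging. */
    static bool device_supports_dma_logging(int device_fd)
    {
        /* Header-only buffer: probing needs no feature payload. */
        uint64_t buf[(sizeof(struct vfio_device_feature) + 7) / 8] = { 0 };
        struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;

        feature->argsz = sizeof(buf);
        feature->flags = VFIO_DEVICE_FEATURE_PROBE |
                         VFIO_DEVICE_FEATURE_DMA_LOGGING_START;

        /* 0 means the feature exists; any error means it does not. */
        return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature) == 0;
    }
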
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 939dcc3..ec9a854 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3145,7 +3145,7 @@
}
if (!pdev->failover_pair_id) {
- ret = vfio_migration_probe(vbasedev, errp);
+ ret = vfio_migration_realize(vbasedev, errp);
if (ret) {
error_report("%s: Migration disabled", vbasedev->name);
}
@@ -3185,6 +3185,7 @@
*/
vfio_put_device(vdev);
vfio_put_group(group);
+ vfio_migration_finalize();
}
static void vfio_exitfn(PCIDevice *pdev)
@@ -3203,7 +3204,7 @@
}
vfio_teardown_msi(vdev);
vfio_bars_exit(vdev);
- vfio_migration_finalize(&vdev->vbasedev);
+ vfio_migration_exit(&vdev->vbasedev);
}
static void vfio_pci_reset(DeviceState *dev)
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 669d9fe..646e42f 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -96,14 +96,15 @@
vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)"
vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ 0x%"PRIx64" - 0x%"PRIx64
-vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add 0x%"PRIx64" - 0x%"PRIx64
+vfio_listener_region_skip(const char *name, uint64_t start, uint64_t end) "SKIPPING %s 0x%"PRIx64" - 0x%"PRIx64
vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d"
vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64
vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]"
vfio_known_safe_misalignment(const char *name, uint64_t iova, uint64_t offset_within_region, uintptr_t page_size) "Region \"%s\" iova=0x%"PRIx64" offset_within_region=0x%"PRIx64" qemu_real_host_page_size=0x%"PRIxPTR
vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA"
-vfio_listener_region_del_skip(uint64_t start, uint64_t end) "SKIPPING region_del 0x%"PRIx64" - 0x%"PRIx64
vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64
+vfio_device_dirty_tracking_update(uint64_t start, uint64_t end, uint64_t min, uint64_t max) "section 0x%"PRIx64" - 0x%"PRIx64" -> update [0x%"PRIx64" - 0x%"PRIx64"]"
+vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32, uint64_t min64, uint64_t max64) "nr_ranges %d 32:[0x%"PRIx64" - 0x%"PRIx64"], 64:[0x%"PRIx64" - 0x%"PRIx64"]"
vfio_disconnect_container(int fd) "close container->fd=%d"
vfio_put_group(int fd) "close group->fd=%d"
vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u"
@@ -117,7 +118,7 @@
vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]"
vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries"
vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]"
-vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%0x8"
+vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x"
vfio_dma_unmap_overflow_workaround(void) ""
vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index a87c5f3..8f8d05c 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -50,6 +50,7 @@
vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32
vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32
+vhost_vdpa_suspend(void *dev) "dev: %p"
vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d"
vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p"
vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 515ccf8..8361e70 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -694,13 +694,17 @@
g_autofree VirtQueueElement *elem = NULL;
elem = g_steal_pointer(&svq->desc_state[i].elem);
if (elem) {
- virtqueue_detach_element(svq->vq, elem, 0);
+ /*
+ * TODO: This is ok for networking, but other kinds of devices
+ * might have problems with just unpopping these.
+ */
+ virtqueue_unpop(svq->vq, elem, 0);
}
}
next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
if (next_avail_elem) {
- virtqueue_detach_element(svq->vq, next_avail_elem, 0);
+ virtqueue_unpop(svq->vq, next_avail_elem, 0);
}
svq->vq = NULL;
g_free(svq->desc_next);
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 8968541..e5285df 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -2031,8 +2031,8 @@
} else {
if (virtio_has_feature(protocol_features,
VHOST_USER_PROTOCOL_F_CONFIG)) {
- warn_reportf_err(*errp, "vhost-user backend supports "
- "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
+ warn_report("vhost-user backend supports "
+ "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
}
}
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index df3a1e9..bc6bad2 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -431,6 +431,33 @@
trace_vhost_vdpa_init(dev, opaque);
int ret;
+ v = opaque;
+ v->dev = dev;
+ dev->opaque = opaque ;
+ v->listener = vhost_vdpa_memory_listener;
+ v->msg_type = VHOST_IOTLB_MSG_V2;
+ vhost_vdpa_init_svq(dev, v);
+
+ error_propagate(&dev->migration_blocker, v->migration_blocker);
+ if (!vhost_vdpa_first_dev(dev)) {
+ return 0;
+ }
+
+ /*
+ * If dev->shadow_vqs_enabled is set at initialization, the device has been
+ * started with x-svq=on, so don't block migration.
+ */
+ if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
+ /* We don't have dev->features yet */
+ uint64_t features;
+ ret = vhost_vdpa_get_dev_features(dev, &features);
+ if (unlikely(ret)) {
+ error_setg_errno(errp, -ret, "Could not get device features");
+ return ret;
+ }
+ vhost_svq_valid_features(features, &dev->migration_blocker);
+ }
+
/*
* Similar to VFIO, we end up pinning all guest memory and have to
* disable discarding of RAM.
@@ -441,17 +468,6 @@
return ret;
}
- v = opaque;
- v->dev = dev;
- dev->opaque = opaque ;
- v->listener = vhost_vdpa_memory_listener;
- v->msg_type = VHOST_IOTLB_MSG_V2;
- vhost_vdpa_init_svq(dev, v);
-
- if (!vhost_vdpa_first_dev(dev)) {
- return 0;
- }
-
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER);
@@ -577,12 +593,15 @@
assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
v = dev->opaque;
trace_vhost_vdpa_cleanup(dev, v);
+ if (vhost_vdpa_first_dev(dev)) {
+ ram_block_discard_disable(false);
+ }
+
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
memory_listener_unregister(&v->listener);
vhost_vdpa_svq_cleanup(dev);
dev->opaque = NULL;
- ram_block_discard_disable(false);
return 0;
}
@@ -659,7 +678,8 @@
uint64_t features;
uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
- 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
+ 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
+ 0x1ULL << VHOST_BACKEND_F_SUSPEND;
int r;
if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
@@ -691,11 +711,13 @@
static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
+ struct vhost_vdpa *v = dev->opaque;
int ret;
uint8_t status = 0;
ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
trace_vhost_vdpa_reset_device(dev, status);
+ v->suspended = false;
return ret;
}
@@ -1094,6 +1116,29 @@
}
}
+static void vhost_vdpa_suspend(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+ int r;
+
+ if (!vhost_vdpa_first_dev(dev)) {
+ return;
+ }
+
+ if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
+ trace_vhost_vdpa_suspend(dev);
+ r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
+ if (unlikely(r)) {
+ error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
+ } else {
+ v->suspended = true;
+ return;
+ }
+ }
+
+ vhost_vdpa_reset_device(dev);
+}
+
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
struct vhost_vdpa *v = dev->opaque;
@@ -1108,6 +1153,7 @@
}
vhost_vdpa_set_vring_ready(dev);
} else {
+ vhost_vdpa_suspend(dev);
vhost_vdpa_svqs_stop(dev);
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
}
@@ -1119,14 +1165,23 @@
if (started) {
memory_listener_register(&v->listener, &address_space_memory);
return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
- } else {
- vhost_vdpa_reset_device(dev);
- vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
- VIRTIO_CONFIG_S_DRIVER);
- memory_listener_unregister(&v->listener);
-
- return 0;
}
+
+ return 0;
+}
+
+static void vhost_vdpa_reset_status(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
+ return;
+ }
+
+ vhost_vdpa_reset_device(dev);
+ vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+ VIRTIO_CONFIG_S_DRIVER);
+ memory_listener_unregister(&v->listener);
}
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
@@ -1169,18 +1224,7 @@
struct vhost_vring_state *ring)
{
struct vhost_vdpa *v = dev->opaque;
- VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index);
- /*
- * vhost-vdpa devices does not support in-flight requests. Set all of them
- * as available.
- *
- * TODO: This is ok for networking, but other kinds of devices might
- * have problems with these retransmissions.
- */
- while (virtqueue_rewind(vq, 1)) {
- continue;
- }
if (v->shadow_vqs_enabled) {
/*
* Device vring base was set at device start. SVQ base is handled by
@@ -1203,6 +1247,14 @@
return 0;
}
+ if (!v->suspended) {
+ /*
+ * Cannot trust the value returned by the device; let vhost recover the
+ * used idx from the guest.
+ */
+ return -1;
+ }
+
ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
return ret;
@@ -1227,25 +1279,24 @@
struct vhost_vring_file *file)
{
struct vhost_vdpa *v = dev->opaque;
+ int vdpa_idx = file->index - dev->vq_index;
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+ /* Remember last call fd because we can switch to SVQ anytime. */
+ vhost_svq_set_svq_call_fd(svq, file->fd);
if (v->shadow_vqs_enabled) {
- int vdpa_idx = file->index - dev->vq_index;
- VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
-
- vhost_svq_set_svq_call_fd(svq, file->fd);
return 0;
- } else {
- return vhost_vdpa_set_vring_dev_call(dev, file);
}
+
+ return vhost_vdpa_set_vring_dev_call(dev, file);
}
static int vhost_vdpa_get_features(struct vhost_dev *dev,
uint64_t *features)
{
- struct vhost_vdpa *v = dev->opaque;
int ret = vhost_vdpa_get_dev_features(dev, features);
- if (ret == 0 && v->shadow_vqs_enabled) {
+ if (ret == 0) {
/* Add SVQ logging capabilities */
*features |= BIT_ULL(VHOST_F_LOG_ALL);
}
@@ -1313,4 +1364,5 @@
.vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
.vhost_force_iommu = vhost_vdpa_force_iommu,
.vhost_set_config_call = vhost_vdpa_set_config_call,
+ .vhost_reset_status = vhost_vdpa_reset_status,
};
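
The vhost-vdpa stop path above now prefers VHOST_VDPA_SUSPEND over a full reset when the backend advertises VHOST_BACKEND_F_SUSPEND, because only a suspended (not reset) device can report a meaningful used index through VHOST_GET_VRING_BASE; if the device could not be suspended, vhost_vdpa_get_vring_base() returns -1 so vhost recovers the index from the guest. A condensed sketch of that decision, using the uapi names from the hunk (vdpa_stop_datapath is an illustrative helper):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Suspend the device if possible; otherwise reset it and report that the
     * device-side vring state is gone. */
    static bool vdpa_stop_datapath(int device_fd, uint64_t backend_features)
    {
        if (backend_features & (1ULL << VHOST_BACKEND_F_SUSPEND)) {
            if (ioctl(device_fd, VHOST_VDPA_SUSPEND) == 0) {
                return true;   /* suspended: GET_VRING_BASE is trustworthy */
            }
            fprintf(stderr, "suspend failed: %d\n", errno);
        }

        uint8_t status = 0;    /* writing status 0 resets the device */
        ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
        return false;          /* recover the used index from the guest */
    }
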
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index eb8c4c3..a266396 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -2049,6 +2049,9 @@
hdev->vqs + i,
hdev->vq_index + i);
}
+ if (hdev->vhost_ops->vhost_reset_status) {
+ hdev->vhost_ops->vhost_reset_status(hdev);
+ }
if (vhost_dev_has_iommu(hdev)) {
if (hdev->vhost_ops->vhost_set_iotlb_callback) {
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 516425e..802e1b9 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -462,7 +462,7 @@
req->in_iov = NULL;
req->in_num = 0;
req->in_len = 0;
- req->flags = CRYPTODEV_BACKEND_ALG__MAX;
+ req->flags = QCRYPTODEV_BACKEND_ALG__MAX;
memset(&req->op_info, 0x00, sizeof(req->op_info));
}
@@ -472,7 +472,7 @@
return;
}
- if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) {
size_t max_len;
CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info;
@@ -485,7 +485,7 @@
/* Zeroize and free request data structure */
memset(op_info, 0, sizeof(*op_info) + max_len);
g_free(op_info);
- } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
+ } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) {
CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info;
if (op_info) {
g_free(op_info->src);
@@ -570,10 +570,10 @@
VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
uint8_t status = -ret;
- if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) {
virtio_crypto_sym_input_data_helper(vdev, req, status,
req->op_info.u.sym_op_info);
- } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
+ } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) {
virtio_crypto_akcipher_input_data_helper(vdev, req, status,
req->op_info.u.asym_op_info);
}
@@ -871,11 +871,14 @@
opcode = ldl_le_p(&req.header.opcode);
op_info->session_id = ldq_le_p(&req.header.session_id);
op_info->op_code = opcode;
+ op_info->queue_index = queue_index;
+ op_info->cb = virtio_crypto_req_complete;
+ op_info->opaque = request;
switch (opcode) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
case VIRTIO_CRYPTO_CIPHER_DECRYPT:
- op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_SYM;
+ op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_SYM;
ret = virtio_crypto_handle_sym_req(vcrypto,
&req.u.sym_req, op_info,
out_iov, out_num);
@@ -885,7 +888,7 @@
case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
case VIRTIO_CRYPTO_AKCIPHER_SIGN:
case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
- op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_ASYM;
+ op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_ASYM;
ret = virtio_crypto_handle_asym_req(vcrypto,
&req.u.akcipher_req, op_info,
out_iov, out_num);
@@ -898,9 +901,7 @@
virtio_crypto_req_complete(request, -VIRTIO_CRYPTO_NOTSUPP);
} else {
ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
- request, queue_index,
- virtio_crypto_req_complete,
- request);
+ op_info);
if (ret < 0) {
virtio_crypto_req_complete(request, ret);
}
@@ -997,12 +998,35 @@
}
}
+static uint32_t virtio_crypto_init_services(uint32_t qservices)
+{
+ uint32_t vservices = 0;
+
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) {
+ vservices |= (1 << VIRTIO_CRYPTO_SERVICE_CIPHER);
+ }
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_HASH)) {
+ vservices |= (1 << VIRTIO_CRYPTO_SERVICE_HASH);
+ }
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_MAC)) {
+ vservices |= (1 << VIRTIO_CRYPTO_SERVICE_MAC);
+ }
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AEAD)) {
+ vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AEAD);
+ }
+ if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) {
+ vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER);
+ }
+
+ return vservices;
+}
+
static void virtio_crypto_init_config(VirtIODevice *vdev)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
- vcrypto->conf.crypto_services =
- vcrypto->conf.cryptodev->conf.crypto_services;
+ vcrypto->conf.crypto_services = virtio_crypto_init_services(
+ vcrypto->conf.cryptodev->conf.crypto_services);
vcrypto->conf.cipher_algo_l =
vcrypto->conf.cryptodev->conf.cipher_algo_l;
vcrypto->conf.cipher_algo_h =
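
With the cryptodev rework, the backend-side service constants (QCRYPTODEV_BACKEND_SERVICE_*) are no longer assumed to share bit positions with the virtio-crypto wire values, so virtio_crypto_init_services() above rebuilds the guest-visible mask bit by bit. An illustrative use, assuming a backend that advertises cipher and akcipher support:

    /* Hypothetical backend service mask. */
    uint32_t qmask = (1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER) |
                     (1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER);

    /* Translate to the bits defined by the virtio-crypto spec. */
    uint32_t vmask = virtio_crypto_init_services(qmask);
    /* vmask == (1u << VIRTIO_CRYPTO_SERVICE_CIPHER) |
     *          (1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER) */
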
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index f35178f..98c4819 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -1069,7 +1069,7 @@
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
- unsigned int max, idx;
+ unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
int64_t len = 0;
@@ -1078,13 +1078,12 @@
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
- max = vq->vring.num;
-
while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
MemoryRegionCache *desc_cache = &caches->desc;
unsigned int num_bufs;
VRingDesc desc;
unsigned int i;
+ unsigned int max = vq->vring.num;
num_bufs = total_bufs;
@@ -1206,7 +1205,7 @@
VRingMemoryRegionCaches *caches)
{
VirtIODevice *vdev = vq->vdev;
- unsigned int max, idx;
+ unsigned int idx;
unsigned int total_bufs, in_total, out_total;
MemoryRegionCache *desc_cache;
MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
@@ -1218,14 +1217,14 @@
wrap_counter = vq->last_avail_wrap_counter;
total_bufs = in_total = out_total = 0;
- max = vq->vring.num;
-
for (;;) {
unsigned int num_bufs = total_bufs;
unsigned int i = idx;
int rc;
+ unsigned int max = vq->vring.num;
desc_cache = &caches->desc;
+
vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
if (!is_desc_avail(desc.flags, wrap_counter)) {
break;
diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c
index b116c3a..a1750a4 100644
--- a/hw/watchdog/wdt_ib700.c
+++ b/hw/watchdog/wdt_ib700.c
@@ -30,7 +30,7 @@
/*#define IB700_DEBUG 1*/
#ifdef IB700_DEBUG
-#define ib700_debug(fs,...) \
+#define ib700_debug(fs,...) \
fprintf(stderr,"ib700: %s: "fs,__func__,##__VA_ARGS__)
#else
#define ib700_debug(fs,...)
diff --git a/hw/xen/meson.build b/hw/xen/meson.build
index ae0ace3..19c6aab 100644
--- a/hw/xen/meson.build
+++ b/hw/xen/meson.build
@@ -1,4 +1,4 @@
-softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files(
+softmmu_ss.add(when: ['CONFIG_XEN_BUS'], if_true: files(
'xen-backend.c',
'xen-bus-helper.c',
'xen-bus.c',
@@ -7,6 +7,10 @@
'xen_pvdev.c',
))
+softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files(
+ 'xen-operations.c',
+))
+
xen_specific_ss = ss.source_set()
if have_xen_pci_passthrough
xen_specific_ss.add(files(
diff --git a/hw/xen/trace-events b/hw/xen/trace-events
index 3da3fd8..55c9e1d 100644
--- a/hw/xen/trace-events
+++ b/hw/xen/trace-events
@@ -1,6 +1,6 @@
# See docs/devel/tracing.rst for syntax documentation.
-# ../../include/hw/xen/xen_common.h
+# ../../include/hw/xen/xen_native.h
xen_default_ioreq_server(void) ""
xen_ioreq_server_create(uint32_t id) "id: %u"
xen_ioreq_server_destroy(uint32_t id) "id: %u"
diff --git a/hw/xen/xen-bus-helper.c b/hw/xen/xen-bus-helper.c
index 5a1e12b..b2b2cc9 100644
--- a/hw/xen/xen-bus-helper.c
+++ b/hw/xen/xen-bus-helper.c
@@ -10,6 +10,7 @@
#include "hw/xen/xen-bus.h"
#include "hw/xen/xen-bus-helper.h"
#include "qapi/error.h"
+#include "trace.h"
#include <glib/gprintf.h>
@@ -46,34 +47,28 @@
return "INVALID";
}
-void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid,
- const char *node, struct xs_permissions perms[],
- unsigned int nr_perms, Error **errp)
+void xs_node_create(struct qemu_xs_handle *h, xs_transaction_t tid,
+ const char *node, unsigned int owner, unsigned int domid,
+ unsigned int perms, Error **errp)
{
trace_xs_node_create(node);
- if (!xs_write(xsh, tid, node, "", 0)) {
+ if (!qemu_xen_xs_create(h, tid, owner, domid, perms, node)) {
error_setg_errno(errp, errno, "failed to create node '%s'", node);
- return;
- }
-
- if (!xs_set_permissions(xsh, tid, node, perms, nr_perms)) {
- error_setg_errno(errp, errno, "failed to set node '%s' permissions",
- node);
}
}
-void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_destroy(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, Error **errp)
{
trace_xs_node_destroy(node);
- if (!xs_rm(xsh, tid, node)) {
+ if (!qemu_xen_xs_destroy(h, tid, node)) {
error_setg_errno(errp, errno, "failed to destroy node '%s'", node);
}
}
-void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_vprintf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
{
@@ -86,7 +81,7 @@
trace_xs_node_vprintf(path, value);
- if (!xs_write(xsh, tid, path, value, len)) {
+ if (!qemu_xen_xs_write(h, tid, path, value, len)) {
error_setg_errno(errp, errno, "failed to write '%s' to '%s'",
value, path);
}
@@ -95,18 +90,18 @@
g_free(path);
}
-void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_printf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
- xs_node_vprintf(xsh, tid, node, key, errp, fmt, ap);
+ xs_node_vprintf(h, tid, node, key, errp, fmt, ap);
va_end(ap);
}
-int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid,
+int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
{
@@ -115,7 +110,7 @@
path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) :
g_strdup(key);
- value = xs_read(xsh, tid, path, NULL);
+ value = qemu_xen_xs_read(h, tid, path, NULL);
trace_xs_node_vscanf(path, value);
@@ -133,7 +128,7 @@
return rc;
}
-int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid,
+int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
{
@@ -141,42 +136,35 @@
int rc;
va_start(ap, fmt);
- rc = xs_node_vscanf(xsh, tid, node, key, errp, fmt, ap);
+ rc = xs_node_vscanf(h, tid, node, key, errp, fmt, ap);
va_end(ap);
return rc;
}
-void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key,
- char *token, Error **errp)
+struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node,
+ const char *key, xs_watch_fn fn,
+ void *opaque, Error **errp)
{
char *path;
+ struct qemu_xs_watch *w;
path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) :
g_strdup(key);
trace_xs_node_watch(path);
- if (!xs_watch(xsh, path, token)) {
+ w = qemu_xen_xs_watch(h, path, fn, opaque);
+ if (!w) {
error_setg_errno(errp, errno, "failed to watch node '%s'", path);
}
g_free(path);
+
+ return w;
}
-void xs_node_unwatch(struct xs_handle *xsh, const char *node,
- const char *key, const char *token, Error **errp)
+void xs_node_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
{
- char *path;
-
- path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) :
- g_strdup(key);
-
- trace_xs_node_unwatch(path);
-
- if (!xs_unwatch(xsh, path, token)) {
- error_setg_errno(errp, errno, "failed to unwatch node '%s'", path);
- }
-
- g_free(path);
+ qemu_xen_xs_unwatch(h, w);
}
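
The xenstore helpers above drop the raw struct xs_handle plus token scheme in favour of an opaque struct qemu_xs_handle and a callback-based xs_node_watch()/xs_node_unwatch() pair. A minimal usage sketch against the new signatures, assuming QEMU's qapi/error.h and hw/xen/xen-bus-helper.h (the node path and the fired/watch_backend_state names are illustrative):

    static void fired(void *opaque, const char *path)
    {
        /* Invoked whenever the watched node or anything below it changes. */
    }

    static void watch_backend_state(struct qemu_xs_handle *h, void *dev)
    {
        Error *err = NULL;
        struct qemu_xs_watch *w;

        w = xs_node_watch(h, "backend/qdisk/1/51712", "state",
                          fired, dev, &err);
        if (!w) {
            /* err describes why the watch could not be set up. */
            error_free(err);
            return;
        }

        /* ...later: no token bookkeeping, just hand the watch back. */
        xs_node_unwatch(h, w);
    }
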
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
index df3f6b9..c59850b 100644
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -62,7 +62,7 @@
/* Mimic the way the Xen toolstack does an unplug */
again:
- tid = xs_transaction_start(xenbus->xsh);
+ tid = qemu_xen_xs_transaction_start(xenbus->xsh);
if (tid == XBT_NULL) {
error_setg_errno(errp, errno, "failed xs_transaction_start");
return;
@@ -80,7 +80,7 @@
goto abort;
}
- if (!xs_transaction_end(xenbus->xsh, tid, false)) {
+ if (!qemu_xen_xs_transaction_end(xenbus->xsh, tid, false)) {
if (errno == EAGAIN) {
goto again;
}
@@ -95,7 +95,7 @@
* We only abort if there is already a failure so ignore any error
* from ending the transaction.
*/
- xs_transaction_end(xenbus->xsh, tid, true);
+ qemu_xen_xs_transaction_end(xenbus->xsh, tid, true);
}
static void xen_bus_print_dev(Monitor *mon, DeviceState *dev, int indent)
@@ -111,143 +111,6 @@
return xen_device_get_backend_path(XEN_DEVICE(dev));
}
-struct XenWatch {
- char *node, *key;
- char *token;
- XenWatchHandler handler;
- void *opaque;
- Notifier notifier;
-};
-
-static void watch_notify(Notifier *n, void *data)
-{
- XenWatch *watch = container_of(n, XenWatch, notifier);
- const char *token = data;
-
- if (!strcmp(watch->token, token)) {
- watch->handler(watch->opaque);
- }
-}
-
-static XenWatch *new_watch(const char *node, const char *key,
- XenWatchHandler handler, void *opaque)
-{
- XenWatch *watch = g_new0(XenWatch, 1);
- QemuUUID uuid;
-
- qemu_uuid_generate(&uuid);
-
- watch->token = qemu_uuid_unparse_strdup(&uuid);
- watch->node = g_strdup(node);
- watch->key = g_strdup(key);
- watch->handler = handler;
- watch->opaque = opaque;
- watch->notifier.notify = watch_notify;
-
- return watch;
-}
-
-static void free_watch(XenWatch *watch)
-{
- g_free(watch->token);
- g_free(watch->key);
- g_free(watch->node);
-
- g_free(watch);
-}
-
-struct XenWatchList {
- struct xs_handle *xsh;
- NotifierList notifiers;
-};
-
-static void watch_list_event(void *opaque)
-{
- XenWatchList *watch_list = opaque;
- char **v;
- const char *token;
-
- v = xs_check_watch(watch_list->xsh);
- if (!v) {
- return;
- }
-
- token = v[XS_WATCH_TOKEN];
-
- notifier_list_notify(&watch_list->notifiers, (void *)token);
-
- free(v);
-}
-
-static XenWatchList *watch_list_create(struct xs_handle *xsh)
-{
- XenWatchList *watch_list = g_new0(XenWatchList, 1);
-
- g_assert(xsh);
-
- watch_list->xsh = xsh;
- notifier_list_init(&watch_list->notifiers);
- qemu_set_fd_handler(xs_fileno(watch_list->xsh), watch_list_event, NULL,
- watch_list);
-
- return watch_list;
-}
-
-static void watch_list_destroy(XenWatchList *watch_list)
-{
- g_assert(notifier_list_empty(&watch_list->notifiers));
- qemu_set_fd_handler(xs_fileno(watch_list->xsh), NULL, NULL, NULL);
- g_free(watch_list);
-}
-
-static XenWatch *watch_list_add(XenWatchList *watch_list, const char *node,
- const char *key, XenWatchHandler handler,
- void *opaque, Error **errp)
-{
- ERRP_GUARD();
- XenWatch *watch = new_watch(node, key, handler, opaque);
-
- notifier_list_add(&watch_list->notifiers, &watch->notifier);
-
- xs_node_watch(watch_list->xsh, node, key, watch->token, errp);
- if (*errp) {
- notifier_remove(&watch->notifier);
- free_watch(watch);
-
- return NULL;
- }
-
- return watch;
-}
-
-static void watch_list_remove(XenWatchList *watch_list, XenWatch *watch,
- Error **errp)
-{
- xs_node_unwatch(watch_list->xsh, watch->node, watch->key, watch->token,
- errp);
-
- notifier_remove(&watch->notifier);
- free_watch(watch);
-}
-
-static XenWatch *xen_bus_add_watch(XenBus *xenbus, const char *node,
- const char *key, XenWatchHandler handler,
- Error **errp)
-{
- trace_xen_bus_add_watch(node, key);
-
- return watch_list_add(xenbus->watch_list, node, key, handler, xenbus,
- errp);
-}
-
-static void xen_bus_remove_watch(XenBus *xenbus, XenWatch *watch,
- Error **errp)
-{
- trace_xen_bus_remove_watch(watch->node, watch->key);
-
- watch_list_remove(xenbus->watch_list, watch, errp);
-}
-
static void xen_bus_backend_create(XenBus *xenbus, const char *type,
const char *name, char *path,
Error **errp)
@@ -261,15 +124,15 @@
trace_xen_bus_backend_create(type, path);
again:
- tid = xs_transaction_start(xenbus->xsh);
+ tid = qemu_xen_xs_transaction_start(xenbus->xsh);
if (tid == XBT_NULL) {
error_setg(errp, "failed xs_transaction_start");
return;
}
- key = xs_directory(xenbus->xsh, tid, path, &n);
+ key = qemu_xen_xs_directory(xenbus->xsh, tid, path, &n);
if (!key) {
- if (!xs_transaction_end(xenbus->xsh, tid, true)) {
+ if (!qemu_xen_xs_transaction_end(xenbus->xsh, tid, true)) {
error_setg_errno(errp, errno, "failed xs_transaction_end");
}
return;
@@ -300,7 +163,7 @@
free(key);
- if (!xs_transaction_end(xenbus->xsh, tid, false)) {
+ if (!qemu_xen_xs_transaction_end(xenbus->xsh, tid, false)) {
qobject_unref(opts);
if (errno == EAGAIN) {
@@ -327,7 +190,7 @@
trace_xen_bus_type_enumerate(type);
- backend = xs_directory(xenbus->xsh, XBT_NULL, domain_path, &n);
+ backend = qemu_xen_xs_directory(xenbus->xsh, XBT_NULL, domain_path, &n);
if (!backend) {
goto out;
}
@@ -372,7 +235,7 @@
trace_xen_bus_enumerate();
- type = xs_directory(xenbus->xsh, XBT_NULL, "backend", &n);
+ type = qemu_xen_xs_directory(xenbus->xsh, XBT_NULL, "backend", &n);
if (!type) {
return;
}
@@ -415,7 +278,7 @@
}
}
-static void xen_bus_backend_changed(void *opaque)
+static void xen_bus_backend_changed(void *opaque, const char *path)
{
XenBus *xenbus = opaque;
@@ -434,7 +297,7 @@
for (i = 0; i < xenbus->backend_types; i++) {
if (xenbus->backend_watch[i]) {
- xen_bus_remove_watch(xenbus, xenbus->backend_watch[i], NULL);
+ xs_node_unwatch(xenbus->xsh, xenbus->backend_watch[i]);
}
}
@@ -442,13 +305,8 @@
xenbus->backend_watch = NULL;
}
- if (xenbus->watch_list) {
- watch_list_destroy(xenbus->watch_list);
- xenbus->watch_list = NULL;
- }
-
if (xenbus->xsh) {
- xs_close(xenbus->xsh);
+ qemu_xen_xs_close(xenbus->xsh);
}
}
@@ -463,7 +321,7 @@
trace_xen_bus_realize();
- xenbus->xsh = xs_open(0);
+ xenbus->xsh = qemu_xen_xs_open();
if (!xenbus->xsh) {
error_setg_errno(errp, errno, "failed xs_open");
goto fail;
@@ -476,19 +334,18 @@
xenbus->backend_id = 0; /* Assume lack of node means dom0 */
}
- xenbus->watch_list = watch_list_create(xenbus->xsh);
-
module_call_init(MODULE_INIT_XEN_BACKEND);
type = xen_backend_get_types(&xenbus->backend_types);
- xenbus->backend_watch = g_new(XenWatch *, xenbus->backend_types);
+ xenbus->backend_watch = g_new(struct qemu_xs_watch *,
+ xenbus->backend_types);
for (i = 0; i < xenbus->backend_types; i++) {
char *node = g_strdup_printf("backend/%s", type[i]);
xenbus->backend_watch[i] =
- xen_bus_add_watch(xenbus, node, key, xen_bus_backend_changed,
- &local_err);
+ xs_node_watch(xenbus->xsh, node, key, xen_bus_backend_changed,
+ xenbus, &local_err);
if (local_err) {
/* This need not be treated as a hard error so don't propagate */
error_reportf_err(local_err,
@@ -631,7 +488,7 @@
}
}
-static void xen_device_backend_changed(void *opaque)
+static void xen_device_backend_changed(void *opaque, const char *path)
{
XenDevice *xendev = opaque;
const char *type = object_get_typename(OBJECT(xendev));
@@ -685,66 +542,35 @@
}
}
-static XenWatch *xen_device_add_watch(XenDevice *xendev, const char *node,
- const char *key,
- XenWatchHandler handler,
- Error **errp)
-{
- const char *type = object_get_typename(OBJECT(xendev));
-
- trace_xen_device_add_watch(type, xendev->name, node, key);
-
- return watch_list_add(xendev->watch_list, node, key, handler, xendev,
- errp);
-}
-
-static void xen_device_remove_watch(XenDevice *xendev, XenWatch *watch,
- Error **errp)
-{
- const char *type = object_get_typename(OBJECT(xendev));
-
- trace_xen_device_remove_watch(type, xendev->name, watch->node,
- watch->key);
-
- watch_list_remove(xendev->watch_list, watch, errp);
-}
-
-
static void xen_device_backend_create(XenDevice *xendev, Error **errp)
{
ERRP_GUARD();
XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
- struct xs_permissions perms[2];
xendev->backend_path = xen_device_get_backend_path(xendev);
- perms[0].id = xenbus->backend_id;
- perms[0].perms = XS_PERM_NONE;
- perms[1].id = xendev->frontend_id;
- perms[1].perms = XS_PERM_READ;
-
g_assert(xenbus->xsh);
- xs_node_create(xenbus->xsh, XBT_NULL, xendev->backend_path, perms,
- ARRAY_SIZE(perms), errp);
+ xs_node_create(xenbus->xsh, XBT_NULL, xendev->backend_path,
+ xenbus->backend_id, xendev->frontend_id, XS_PERM_READ, errp);
if (*errp) {
error_prepend(errp, "failed to create backend: ");
return;
}
xendev->backend_state_watch =
- xen_device_add_watch(xendev, xendev->backend_path,
- "state", xen_device_backend_changed,
- errp);
+ xs_node_watch(xendev->xsh, xendev->backend_path,
+ "state", xen_device_backend_changed, xendev,
+ errp);
if (*errp) {
error_prepend(errp, "failed to watch backend state: ");
return;
}
xendev->backend_online_watch =
- xen_device_add_watch(xendev, xendev->backend_path,
- "online", xen_device_backend_changed,
- errp);
+ xs_node_watch(xendev->xsh, xendev->backend_path,
+ "online", xen_device_backend_changed, xendev,
+ errp);
if (*errp) {
error_prepend(errp, "failed to watch backend online: ");
return;
@@ -757,12 +583,12 @@
Error *local_err = NULL;
if (xendev->backend_online_watch) {
- xen_device_remove_watch(xendev, xendev->backend_online_watch, NULL);
+ xs_node_unwatch(xendev->xsh, xendev->backend_online_watch);
xendev->backend_online_watch = NULL;
}
if (xendev->backend_state_watch) {
- xen_device_remove_watch(xendev, xendev->backend_state_watch, NULL);
+ xs_node_unwatch(xendev->xsh, xendev->backend_state_watch);
xendev->backend_state_watch = NULL;
}
@@ -837,7 +663,7 @@
}
}
-static void xen_device_frontend_changed(void *opaque)
+static void xen_device_frontend_changed(void *opaque, const char *path)
{
XenDevice *xendev = opaque;
XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev);
@@ -885,7 +711,6 @@
{
ERRP_GUARD();
XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
- struct xs_permissions perms[2];
xendev->frontend_path = xen_device_get_frontend_path(xendev);
@@ -894,15 +719,11 @@
* toolstack.
*/
if (!xen_device_frontend_exists(xendev)) {
- perms[0].id = xendev->frontend_id;
- perms[0].perms = XS_PERM_NONE;
- perms[1].id = xenbus->backend_id;
- perms[1].perms = XS_PERM_READ | XS_PERM_WRITE;
-
g_assert(xenbus->xsh);
- xs_node_create(xenbus->xsh, XBT_NULL, xendev->frontend_path, perms,
- ARRAY_SIZE(perms), errp);
+ xs_node_create(xenbus->xsh, XBT_NULL, xendev->frontend_path,
+ xendev->frontend_id, xenbus->backend_id,
+ XS_PERM_READ | XS_PERM_WRITE, errp);
if (*errp) {
error_prepend(errp, "failed to create frontend: ");
return;
@@ -910,8 +731,8 @@
}
xendev->frontend_state_watch =
- xen_device_add_watch(xendev, xendev->frontend_path, "state",
- xen_device_frontend_changed, errp);
+ xs_node_watch(xendev->xsh, xendev->frontend_path, "state",
+ xen_device_frontend_changed, xendev, errp);
if (*errp) {
error_prepend(errp, "failed to watch frontend state: ");
}
@@ -923,8 +744,7 @@
Error *local_err = NULL;
if (xendev->frontend_state_watch) {
- xen_device_remove_watch(xendev, xendev->frontend_state_watch,
- NULL);
+ xs_node_unwatch(xendev->xsh, xendev->frontend_state_watch);
xendev->frontend_state_watch = NULL;
}
@@ -947,7 +767,7 @@
void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
Error **errp)
{
- if (xengnttab_set_max_grants(xendev->xgth, nr_refs)) {
+ if (qemu_xen_gnttab_set_max_grants(xendev->xgth, nr_refs)) {
error_setg_errno(errp, errno, "xengnttab_set_max_grants failed");
}
}
@@ -956,9 +776,8 @@
unsigned int nr_refs, int prot,
Error **errp)
{
- void *map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_refs,
- xendev->frontend_id, refs,
- prot);
+ void *map = qemu_xen_gnttab_map_refs(xendev->xgth, nr_refs,
+ xendev->frontend_id, refs, prot);
if (!map) {
error_setg_errno(errp, errno,
@@ -968,112 +787,20 @@
return map;
}
-void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, uint32_t *refs,
unsigned int nr_refs, Error **errp)
{
- if (xengnttab_unmap(xendev->xgth, map, nr_refs)) {
+ if (qemu_xen_gnttab_unmap(xendev->xgth, map, refs, nr_refs)) {
error_setg_errno(errp, errno, "xengnttab_unmap failed");
}
}
-static void compat_copy_grant_refs(XenDevice *xendev, bool to_domain,
- XenDeviceGrantCopySegment segs[],
- unsigned int nr_segs, Error **errp)
-{
- uint32_t *refs = g_new(uint32_t, nr_segs);
- int prot = to_domain ? PROT_WRITE : PROT_READ;
- void *map;
- unsigned int i;
-
- for (i = 0; i < nr_segs; i++) {
- XenDeviceGrantCopySegment *seg = &segs[i];
-
- refs[i] = to_domain ? seg->dest.foreign.ref :
- seg->source.foreign.ref;
- }
-
- map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_segs,
- xendev->frontend_id, refs,
- prot);
- if (!map) {
- error_setg_errno(errp, errno,
- "xengnttab_map_domain_grant_refs failed");
- goto done;
- }
-
- for (i = 0; i < nr_segs; i++) {
- XenDeviceGrantCopySegment *seg = &segs[i];
- void *page = map + (i * XC_PAGE_SIZE);
-
- if (to_domain) {
- memcpy(page + seg->dest.foreign.offset, seg->source.virt,
- seg->len);
- } else {
- memcpy(seg->dest.virt, page + seg->source.foreign.offset,
- seg->len);
- }
- }
-
- if (xengnttab_unmap(xendev->xgth, map, nr_segs)) {
- error_setg_errno(errp, errno, "xengnttab_unmap failed");
- }
-
-done:
- g_free(refs);
-}
-
void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
XenDeviceGrantCopySegment segs[],
unsigned int nr_segs, Error **errp)
{
- xengnttab_grant_copy_segment_t *xengnttab_segs;
- unsigned int i;
-
- if (!xendev->feature_grant_copy) {
- compat_copy_grant_refs(xendev, to_domain, segs, nr_segs, errp);
- return;
- }
-
- xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs);
-
- for (i = 0; i < nr_segs; i++) {
- XenDeviceGrantCopySegment *seg = &segs[i];
- xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];
-
- if (to_domain) {
- xengnttab_seg->flags = GNTCOPY_dest_gref;
- xengnttab_seg->dest.foreign.domid = xendev->frontend_id;
- xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref;
- xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset;
- xengnttab_seg->source.virt = seg->source.virt;
- } else {
- xengnttab_seg->flags = GNTCOPY_source_gref;
- xengnttab_seg->source.foreign.domid = xendev->frontend_id;
- xengnttab_seg->source.foreign.ref = seg->source.foreign.ref;
- xengnttab_seg->source.foreign.offset =
- seg->source.foreign.offset;
- xengnttab_seg->dest.virt = seg->dest.virt;
- }
-
- xengnttab_seg->len = seg->len;
- }
-
- if (xengnttab_grant_copy(xendev->xgth, nr_segs, xengnttab_segs)) {
- error_setg_errno(errp, errno, "xengnttab_grant_copy failed");
- goto done;
- }
-
- for (i = 0; i < nr_segs; i++) {
- xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];
-
- if (xengnttab_seg->status != GNTST_okay) {
- error_setg(errp, "xengnttab_grant_copy seg[%u] failed", i);
- break;
- }
- }
-
-done:
- g_free(xengnttab_segs);
+ qemu_xen_gnttab_grant_copy(xendev->xgth, to_domain, xendev->frontend_id,
+ (XenGrantCopySegment *)segs, nr_segs, errp);
}
struct XenEventChannel {
@@ -1095,12 +822,12 @@
static void xen_device_event(void *opaque)
{
XenEventChannel *channel = opaque;
- unsigned long port = xenevtchn_pending(channel->xeh);
+ unsigned long port = qemu_xen_evtchn_pending(channel->xeh);
if (port == channel->local_port) {
xen_device_poll(channel);
- xenevtchn_unmask(channel->xeh, port);
+ qemu_xen_evtchn_unmask(channel->xeh, port);
}
}
@@ -1115,11 +842,11 @@
}
if (channel->ctx)
- aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
+ aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
NULL, NULL, NULL, NULL, NULL);
channel->ctx = ctx;
- aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
+ aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
xen_device_event, NULL, xen_device_poll, NULL, channel);
}
@@ -1131,13 +858,13 @@
XenEventChannel *channel = g_new0(XenEventChannel, 1);
xenevtchn_port_or_error_t local_port;
- channel->xeh = xenevtchn_open(NULL, 0);
+ channel->xeh = qemu_xen_evtchn_open();
if (!channel->xeh) {
error_setg_errno(errp, errno, "failed xenevtchn_open");
goto fail;
}
- local_port = xenevtchn_bind_interdomain(channel->xeh,
+ local_port = qemu_xen_evtchn_bind_interdomain(channel->xeh,
xendev->frontend_id,
port);
if (local_port < 0) {
@@ -1160,7 +887,7 @@
fail:
if (channel->xeh) {
- xenevtchn_close(channel->xeh);
+ qemu_xen_evtchn_close(channel->xeh);
}
g_free(channel);
@@ -1177,7 +904,7 @@
return;
}
- if (xenevtchn_notify(channel->xeh, channel->local_port) < 0) {
+ if (qemu_xen_evtchn_notify(channel->xeh, channel->local_port) < 0) {
error_setg_errno(errp, errno, "xenevtchn_notify failed");
}
}
@@ -1193,14 +920,14 @@
QLIST_REMOVE(channel, list);
- aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
+ aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
NULL, NULL, NULL, NULL, NULL);
- if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) {
+ if (qemu_xen_evtchn_unbind(channel->xeh, channel->local_port) < 0) {
error_setg_errno(errp, errno, "xenevtchn_unbind failed");
}
- xenevtchn_close(channel->xeh);
+ qemu_xen_evtchn_close(channel->xeh);
g_free(channel);
}
@@ -1235,17 +962,12 @@
xen_device_backend_destroy(xendev);
if (xendev->xgth) {
- xengnttab_close(xendev->xgth);
+ qemu_xen_gnttab_close(xendev->xgth);
xendev->xgth = NULL;
}
- if (xendev->watch_list) {
- watch_list_destroy(xendev->watch_list);
- xendev->watch_list = NULL;
- }
-
if (xendev->xsh) {
- xs_close(xendev->xsh);
+ qemu_xen_xs_close(xendev->xsh);
xendev->xsh = NULL;
}
@@ -1290,23 +1012,18 @@
trace_xen_device_realize(type, xendev->name);
- xendev->xsh = xs_open(0);
+ xendev->xsh = qemu_xen_xs_open();
if (!xendev->xsh) {
error_setg_errno(errp, errno, "failed xs_open");
goto unrealize;
}
- xendev->watch_list = watch_list_create(xendev->xsh);
-
- xendev->xgth = xengnttab_open(NULL, 0);
+ xendev->xgth = qemu_xen_gnttab_open();
if (!xendev->xgth) {
error_setg_errno(errp, errno, "failed xengnttab_open");
goto unrealize;
}
- xendev->feature_grant_copy =
- (xengnttab_grant_copy(xendev->xgth, 0, NULL) == 0);
-
xen_device_backend_create(xendev, errp);
if (*errp) {
goto unrealize;
@@ -1317,13 +1034,6 @@
goto unrealize;
}
- if (xendev_class->realize) {
- xendev_class->realize(xendev, errp);
- if (*errp) {
- goto unrealize;
- }
- }
-
xen_device_backend_printf(xendev, "frontend", "%s",
xendev->frontend_path);
xen_device_backend_printf(xendev, "frontend-id", "%u",
@@ -1342,6 +1052,13 @@
xen_device_frontend_set_state(xendev, XenbusStateInitialising, true);
}
+ if (xendev_class->realize) {
+ xendev_class->realize(xendev, errp);
+ if (*errp) {
+ goto unrealize;
+ }
+ }
+
xendev->exit.notify = xen_device_exit;
qemu_add_exit_notifier(&xendev->exit);
return;
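
xen-bus.c now reaches the grant table only through the qemu_xen_gnttab_* wrappers, and xen_device_unmap_grant_refs() gained a refs argument so the (possibly emulated) grant-table backend knows exactly which references are being released. A short sketch of the map/unmap pairing under the new signatures (the ref numbers and the map_and_release name are illustrative):

    /* Map two grant references from the frontend, use them, unmap them. */
    static void map_and_release(XenDevice *xendev)
    {
        uint32_t refs[2] = { 8, 9 };   /* illustrative grant references */
        Error *err = NULL;
        void *map;

        map = xen_device_map_grant_refs(xendev, refs, 2,
                                        PROT_READ | PROT_WRITE, &err);
        if (!map) {
            error_free(err);           /* err holds the reason */
            return;
        }

        /* The refs are mapped as consecutive pages starting at map. */
        memset(map, 0, 2 * XC_PAGE_SIZE);

        xen_device_unmap_grant_refs(xendev, map, refs, 2, &err);
        error_free(err);
    }
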
diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c
index afba71f..4ded3ce 100644
--- a/hw/xen/xen-legacy-backend.c
+++ b/hw/xen/xen-legacy-backend.c
@@ -39,11 +39,10 @@
/* ------------------------------------------------------------- */
/* public */
-struct xs_handle *xenstore;
+struct qemu_xs_handle *xenstore;
const char *xen_protocol;
/* private */
-static bool xen_feature_grant_copy;
static int debug;
int xenstore_write_be_str(struct XenLegacyDevice *xendev, const char *node,
@@ -113,7 +112,7 @@
{
assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
- if (xengnttab_set_max_grants(xendev->gnttabdev, nr_refs)) {
+ if (qemu_xen_gnttab_set_max_grants(xendev->gnttabdev, nr_refs)) {
xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
strerror(errno));
}
@@ -126,8 +125,8 @@
assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
- ptr = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_refs,
- xen_domid, refs, prot);
+ ptr = qemu_xen_gnttab_map_refs(xendev->gnttabdev, nr_refs, xen_domid, refs,
+ prot);
if (!ptr) {
xen_pv_printf(xendev, 0,
"xengnttab_map_domain_grant_refs failed: %s\n",
@@ -138,123 +137,31 @@
}
void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
- unsigned int nr_refs)
+ uint32_t *refs, unsigned int nr_refs)
{
assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
- if (xengnttab_unmap(xendev->gnttabdev, ptr, nr_refs)) {
+ if (qemu_xen_gnttab_unmap(xendev->gnttabdev, ptr, refs, nr_refs)) {
xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n",
strerror(errno));
}
}
-static int compat_copy_grant_refs(struct XenLegacyDevice *xendev,
- bool to_domain,
- XenGrantCopySegment segs[],
- unsigned int nr_segs)
-{
- uint32_t *refs = g_new(uint32_t, nr_segs);
- int prot = to_domain ? PROT_WRITE : PROT_READ;
- void *pages;
- unsigned int i;
-
- for (i = 0; i < nr_segs; i++) {
- XenGrantCopySegment *seg = &segs[i];
-
- refs[i] = to_domain ?
- seg->dest.foreign.ref : seg->source.foreign.ref;
- }
-
- pages = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_segs,
- xen_domid, refs, prot);
- if (!pages) {
- xen_pv_printf(xendev, 0,
- "xengnttab_map_domain_grant_refs failed: %s\n",
- strerror(errno));
- g_free(refs);
- return -1;
- }
-
- for (i = 0; i < nr_segs; i++) {
- XenGrantCopySegment *seg = &segs[i];
- void *page = pages + (i * XC_PAGE_SIZE);
-
- if (to_domain) {
- memcpy(page + seg->dest.foreign.offset, seg->source.virt,
- seg->len);
- } else {
- memcpy(seg->dest.virt, page + seg->source.foreign.offset,
- seg->len);
- }
- }
-
- if (xengnttab_unmap(xendev->gnttabdev, pages, nr_segs)) {
- xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n",
- strerror(errno));
- }
-
- g_free(refs);
- return 0;
-}
-
int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
bool to_domain,
XenGrantCopySegment segs[],
unsigned int nr_segs)
{
- xengnttab_grant_copy_segment_t *xengnttab_segs;
- unsigned int i;
int rc;
assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV);
- if (!xen_feature_grant_copy) {
- return compat_copy_grant_refs(xendev, to_domain, segs, nr_segs);
- }
-
- xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs);
-
- for (i = 0; i < nr_segs; i++) {
- XenGrantCopySegment *seg = &segs[i];
- xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];
-
- if (to_domain) {
- xengnttab_seg->flags = GNTCOPY_dest_gref;
- xengnttab_seg->dest.foreign.domid = xen_domid;
- xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref;
- xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset;
- xengnttab_seg->source.virt = seg->source.virt;
- } else {
- xengnttab_seg->flags = GNTCOPY_source_gref;
- xengnttab_seg->source.foreign.domid = xen_domid;
- xengnttab_seg->source.foreign.ref = seg->source.foreign.ref;
- xengnttab_seg->source.foreign.offset =
- seg->source.foreign.offset;
- xengnttab_seg->dest.virt = seg->dest.virt;
- }
-
- xengnttab_seg->len = seg->len;
- }
-
- rc = xengnttab_grant_copy(xendev->gnttabdev, nr_segs, xengnttab_segs);
-
+ rc = qemu_xen_gnttab_grant_copy(xendev->gnttabdev, to_domain, xen_domid,
+ segs, nr_segs, NULL);
if (rc) {
- xen_pv_printf(xendev, 0, "xengnttab_copy failed: %s\n",
- strerror(errno));
+ xen_pv_printf(xendev, 0, "xengnttab_grant_copy failed: %s\n",
+ strerror(-rc));
}
-
- for (i = 0; i < nr_segs; i++) {
- xengnttab_grant_copy_segment_t *xengnttab_seg =
- &xengnttab_segs[i];
-
- if (xengnttab_seg->status != GNTST_okay) {
- xen_pv_printf(xendev, 0, "segment[%u] status: %d\n", i,
- xengnttab_seg->status);
- rc = -1;
- }
- }
-
- g_free(xengnttab_segs);
return rc;
}
@@ -294,13 +201,13 @@
xendev->debug = debug;
xendev->local_port = -1;
- xendev->evtchndev = xenevtchn_open(NULL, 0);
+ xendev->evtchndev = qemu_xen_evtchn_open();
if (xendev->evtchndev == NULL) {
xen_pv_printf(NULL, 0, "can't open evtchn device\n");
qdev_unplug(DEVICE(xendev), NULL);
return NULL;
}
- qemu_set_cloexec(xenevtchn_fd(xendev->evtchndev));
+ qemu_set_cloexec(qemu_xen_evtchn_fd(xendev->evtchndev));
xen_pv_insert_xendev(xendev);
@@ -367,6 +274,25 @@
}
}
+static void xenstore_update_fe(void *opaque, const char *watch)
+{
+ struct XenLegacyDevice *xendev = opaque;
+ const char *node;
+ unsigned int len;
+
+ len = strlen(xendev->fe);
+ if (strncmp(xendev->fe, watch, len) != 0) {
+ return;
+ }
+ if (watch[len] != '/') {
+ return;
+ }
+ node = watch + len + 1;
+
+ xen_be_frontend_changed(xendev, node);
+ xen_be_check_state(xendev);
+}
+
/* ------------------------------------------------------------- */
/* Check for possible state transitions and perform them. */
@@ -380,7 +306,6 @@
*/
static int xen_be_try_setup(struct XenLegacyDevice *xendev)
{
- char token[XEN_BUFSIZE];
int be_state;
if (xenstore_read_be_int(xendev, "state", &be_state) == -1) {
@@ -401,8 +326,9 @@
}
/* setup frontend watch */
- snprintf(token, sizeof(token), "fe:%p", xendev);
- if (!xs_watch(xenstore, xendev->fe, token)) {
+ xendev->watch = qemu_xen_xs_watch(xenstore, xendev->fe, xenstore_update_fe,
+ xendev);
+ if (!xendev->watch) {
xen_pv_printf(xendev, 0, "watching frontend path (%s) failed\n",
xendev->fe);
return -1;
@@ -466,7 +392,7 @@
}
if (xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV) {
- xendev->gnttabdev = xengnttab_open(NULL, 0);
+ xendev->gnttabdev = qemu_xen_gnttab_open();
if (xendev->gnttabdev == NULL) {
xen_pv_printf(NULL, 0, "can't open gnttab device\n");
return -1;
@@ -524,7 +450,7 @@
xendev->ops->disconnect(xendev);
}
if (xendev->gnttabdev) {
- xengnttab_close(xendev->gnttabdev);
+ qemu_xen_gnttab_close(xendev->gnttabdev);
xendev->gnttabdev = NULL;
}
if (xendev->be_state != state) {
@@ -591,24 +517,67 @@
/* ------------------------------------------------------------- */
+struct xenstore_be {
+ const char *type;
+ int dom;
+ struct XenDevOps *ops;
+};
+
+static void xenstore_update_be(void *opaque, const char *watch)
+{
+ struct xenstore_be *be = opaque;
+ struct XenLegacyDevice *xendev;
+ char path[XEN_BUFSIZE], *bepath;
+ unsigned int len, dev;
+
+ len = snprintf(path, sizeof(path), "backend/%s/%d", be->type, be->dom);
+ if (strncmp(path, watch, len) != 0) {
+ return;
+ }
+ if (sscanf(watch + len, "/%u/%255s", &dev, path) != 2) {
+ strcpy(path, "");
+ if (sscanf(watch + len, "/%u", &dev) != 1) {
+ dev = -1;
+ }
+ }
+ if (dev == -1) {
+ return;
+ }
+
+ xendev = xen_be_get_xendev(be->type, be->dom, dev, be->ops);
+ if (xendev != NULL) {
+ bepath = qemu_xen_xs_read(xenstore, 0, xendev->be, &len);
+ if (bepath == NULL) {
+ xen_pv_del_xendev(xendev);
+ } else {
+ free(bepath);
+ xen_be_backend_changed(xendev, path);
+ xen_be_check_state(xendev);
+ }
+ }
+}
+
static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops)
{
struct XenLegacyDevice *xendev;
- char path[XEN_BUFSIZE], token[XEN_BUFSIZE];
+ char path[XEN_BUFSIZE];
+ struct xenstore_be *be = g_new0(struct xenstore_be, 1);
char **dev = NULL;
unsigned int cdev, j;
/* setup watch */
- snprintf(token, sizeof(token), "be:%p:%d:%p", type, dom, ops);
+ be->type = type;
+ be->dom = dom;
+ be->ops = ops;
snprintf(path, sizeof(path), "backend/%s/%d", type, dom);
- if (!xs_watch(xenstore, path, token)) {
+ if (!qemu_xen_xs_watch(xenstore, path, xenstore_update_be, be)) {
xen_pv_printf(NULL, 0, "xen be: watching backend path (%s) failed\n",
path);
return -1;
}
/* look for backends */
- dev = xs_directory(xenstore, 0, path, &cdev);
+ dev = qemu_xen_xs_directory(xenstore, 0, path, &cdev);
if (!dev) {
return 0;
}
@@ -623,57 +592,6 @@
return 0;
}
-void xenstore_update_be(char *watch, char *type, int dom,
- struct XenDevOps *ops)
-{
- struct XenLegacyDevice *xendev;
- char path[XEN_BUFSIZE], *bepath;
- unsigned int len, dev;
-
- len = snprintf(path, sizeof(path), "backend/%s/%d", type, dom);
- if (strncmp(path, watch, len) != 0) {
- return;
- }
- if (sscanf(watch + len, "/%u/%255s", &dev, path) != 2) {
- strcpy(path, "");
- if (sscanf(watch + len, "/%u", &dev) != 1) {
- dev = -1;
- }
- }
- if (dev == -1) {
- return;
- }
-
- xendev = xen_be_get_xendev(type, dom, dev, ops);
- if (xendev != NULL) {
- bepath = xs_read(xenstore, 0, xendev->be, &len);
- if (bepath == NULL) {
- xen_pv_del_xendev(xendev);
- } else {
- free(bepath);
- xen_be_backend_changed(xendev, path);
- xen_be_check_state(xendev);
- }
- }
-}
-
-void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev)
-{
- char *node;
- unsigned int len;
-
- len = strlen(xendev->fe);
- if (strncmp(xendev->fe, watch, len) != 0) {
- return;
- }
- if (watch[len] != '/') {
- return;
- }
- node = watch + len + 1;
-
- xen_be_frontend_changed(xendev, node);
- xen_be_check_state(xendev);
-}
/* -------------------------------------------------------------------- */
static void xen_set_dynamic_sysbus(void)
@@ -687,29 +605,17 @@
void xen_be_init(void)
{
- xengnttab_handle *gnttabdev;
-
- xenstore = xs_daemon_open();
+ xenstore = qemu_xen_xs_open();
if (!xenstore) {
xen_pv_printf(NULL, 0, "can't connect to xenstored\n");
exit(1);
}
- qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL);
-
- if (xen_xc == NULL || xen_fmem == NULL) {
+ if (xen_evtchn_ops == NULL || xen_gnttab_ops == NULL) {
xen_pv_printf(NULL, 0, "Xen operations not set up\n");
exit(1);
}
- gnttabdev = xengnttab_open(NULL, 0);
- if (gnttabdev != NULL) {
- if (xengnttab_grant_copy(gnttabdev, 0, NULL) == 0) {
- xen_feature_grant_copy = true;
- }
- xengnttab_close(gnttabdev);
- }
-
xen_sysdev = qdev_new(TYPE_XENSYSDEV);
sysbus_realize_and_unref(SYS_BUS_DEVICE(xen_sysdev), &error_fatal);
xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus");
@@ -751,14 +657,14 @@
if (xendev->local_port != -1) {
return 0;
}
- xendev->local_port = xenevtchn_bind_interdomain
+ xendev->local_port = qemu_xen_evtchn_bind_interdomain
(xendev->evtchndev, xendev->dom, xendev->remote_port);
if (xendev->local_port == -1) {
xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed\n");
return -1;
}
xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
- qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev),
+ qemu_set_fd_handler(qemu_xen_evtchn_fd(xendev->evtchndev),
xen_pv_evtchn_event, NULL, xendev);
return 0;
}
diff --git a/hw/xen/xen-operations.c b/hw/xen/xen-operations.c
new file mode 100644
index 0000000..4b78fbf
--- /dev/null
+++ b/hw/xen/xen-operations.c
@@ -0,0 +1,478 @@
+/*
+ * QEMU Xen backend support: Operations for true Xen
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/uuid.h"
+#include "qapi/error.h"
+
+#include "hw/xen/xen_native.h"
+#include "hw/xen/xen_backend_ops.h"
+
+/*
+ * If we have new enough libxenctrl then we do not want/need these compat
+ * interfaces, despite what the user supplied cflags might say. They
+ * must be undefined before including xenctrl.h
+ */
+#undef XC_WANT_COMPAT_EVTCHN_API
+#undef XC_WANT_COMPAT_GNTTAB_API
+#undef XC_WANT_COMPAT_MAP_FOREIGN_API
+
+#include <xenctrl.h>
+
+/*
+ * We don't support Xen prior to 4.2.0.
+ */
+
+/* Xen 4.2 through 4.6 */
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
+
+typedef xc_evtchn xenevtchn_handle;
+typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
+
+#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
+#define xenevtchn_close(h) xc_evtchn_close(h)
+#define xenevtchn_fd(h) xc_evtchn_fd(h)
+#define xenevtchn_pending(h) xc_evtchn_pending(h)
+#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
+#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
+#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
+#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
+
+typedef xc_gnttab xengnttab_handle;
+
+#define xengnttab_open(l, f) xc_gnttab_open(l, f)
+#define xengnttab_close(h) xc_gnttab_close(h)
+#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
+#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
+#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
+#define xengnttab_map_grant_refs(h, c, d, r, p) \
+ xc_gnttab_map_grant_refs(h, c, d, r, p)
+#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
+ xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
+
+typedef xc_interface xenforeignmemory_handle;
+
+#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
+
+#include <xenevtchn.h>
+#include <xengnttab.h>
+#include <xenforeignmemory.h>
+
+#endif
+
+/* Xen before 4.8 */
+
+static int libxengnttab_fallback_grant_copy(xengnttab_handle *xgt,
+ bool to_domain, uint32_t domid,
+ XenGrantCopySegment segs[],
+ unsigned int nr_segs, Error **errp)
+{
+ uint32_t *refs = g_new(uint32_t, nr_segs);
+ int prot = to_domain ? PROT_WRITE : PROT_READ;
+ void *map;
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < nr_segs; i++) {
+ XenGrantCopySegment *seg = &segs[i];
+
+ refs[i] = to_domain ? seg->dest.foreign.ref :
+ seg->source.foreign.ref;
+ }
+ map = xengnttab_map_domain_grant_refs(xgt, nr_segs, domid, refs, prot);
+ if (!map) {
+ if (errp) {
+ error_setg_errno(errp, errno,
+ "xengnttab_map_domain_grant_refs failed");
+ }
+ rc = -errno;
+ goto done;
+ }
+
+ for (i = 0; i < nr_segs; i++) {
+ XenGrantCopySegment *seg = &segs[i];
+ void *page = map + (i * XEN_PAGE_SIZE);
+
+ if (to_domain) {
+ memcpy(page + seg->dest.foreign.offset, seg->source.virt,
+ seg->len);
+ } else {
+ memcpy(seg->dest.virt, page + seg->source.foreign.offset,
+ seg->len);
+ }
+ }
+
+ if (xengnttab_unmap(xgt, map, nr_segs)) {
+ if (errp) {
+ error_setg_errno(errp, errno, "xengnttab_unmap failed");
+ }
+ rc = -errno;
+ }
+
+done:
+ g_free(refs);
+ return rc;
+}
+
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
+
+static int libxengnttab_backend_grant_copy(xengnttab_handle *xgt,
+ bool to_domain, uint32_t domid,
+ XenGrantCopySegment *segs,
+ uint32_t nr_segs, Error **errp)
+{
+ xengnttab_grant_copy_segment_t *xengnttab_segs;
+ unsigned int i;
+ int rc;
+
+ xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs);
+
+ for (i = 0; i < nr_segs; i++) {
+ XenGrantCopySegment *seg = &segs[i];
+ xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];
+
+ if (to_domain) {
+ xengnttab_seg->flags = GNTCOPY_dest_gref;
+ xengnttab_seg->dest.foreign.domid = domid;
+ xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref;
+ xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset;
+ xengnttab_seg->source.virt = seg->source.virt;
+ } else {
+ xengnttab_seg->flags = GNTCOPY_source_gref;
+ xengnttab_seg->source.foreign.domid = domid;
+ xengnttab_seg->source.foreign.ref = seg->source.foreign.ref;
+ xengnttab_seg->source.foreign.offset =
+ seg->source.foreign.offset;
+ xengnttab_seg->dest.virt = seg->dest.virt;
+ }
+
+ xengnttab_seg->len = seg->len;
+ }
+
+ if (xengnttab_grant_copy(xgt, nr_segs, xengnttab_segs)) {
+ if (errp) {
+ error_setg_errno(errp, errno, "xengnttab_grant_copy failed");
+ }
+ rc = -errno;
+ goto done;
+ }
+
+ rc = 0;
+ for (i = 0; i < nr_segs; i++) {
+ xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];
+
+ if (xengnttab_seg->status != GNTST_okay) {
+ if (errp) {
+ error_setg(errp, "xengnttab_grant_copy seg[%u] failed", i);
+ }
+ rc = -EIO;
+ break;
+ }
+ }
+
+done:
+ g_free(xengnttab_segs);
+ return rc;
+}
+#endif
+
+static xenevtchn_handle *libxenevtchn_backend_open(void)
+{
+ return xenevtchn_open(NULL, 0);
+}
+
+struct evtchn_backend_ops libxenevtchn_backend_ops = {
+ .open = libxenevtchn_backend_open,
+ .close = xenevtchn_close,
+ .bind_interdomain = xenevtchn_bind_interdomain,
+ .unbind = xenevtchn_unbind,
+ .get_fd = xenevtchn_fd,
+ .notify = xenevtchn_notify,
+ .unmask = xenevtchn_unmask,
+ .pending = xenevtchn_pending,
+};
+
+static xengnttab_handle *libxengnttab_backend_open(void)
+{
+ return xengnttab_open(NULL, 0);
+}
+
+static int libxengnttab_backend_unmap(xengnttab_handle *xgt,
+ void *start_address, uint32_t *refs,
+ uint32_t count)
+{
+ return xengnttab_unmap(xgt, start_address, count);
+}
+
+
+static struct gnttab_backend_ops libxengnttab_backend_ops = {
+ .features = XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE,
+ .open = libxengnttab_backend_open,
+ .close = xengnttab_close,
+ .grant_copy = libxengnttab_fallback_grant_copy,
+ .set_max_grants = xengnttab_set_max_grants,
+ .map_refs = xengnttab_map_domain_grant_refs,
+ .unmap = libxengnttab_backend_unmap,
+};
+
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
+
+static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot,
+ size_t pages, xen_pfn_t *pfns,
+ int *errs)
+{
+ if (errs) {
+ return xc_map_foreign_bulk(xen_xc, dom, prot, pfns, errs, pages);
+ } else {
+ return xc_map_foreign_pages(xen_xc, dom, prot, pfns, pages);
+ }
+}
+
+static int libxenforeignmem_backend_unmap(void *addr, size_t pages)
+{
+ return munmap(addr, pages * XC_PAGE_SIZE);
+}
+
+#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
+
+static void *libxenforeignmem_backend_map(uint32_t dom, void *addr, int prot,
+ size_t pages, xen_pfn_t *pfns,
+ int *errs)
+{
+ return xenforeignmemory_map2(xen_fmem, dom, addr, prot, 0, pages, pfns,
+ errs);
+}
+
+static int libxenforeignmem_backend_unmap(void *addr, size_t pages)
+{
+ return xenforeignmemory_unmap(xen_fmem, addr, pages);
+}
+
+#endif
+
+struct foreignmem_backend_ops libxenforeignmem_backend_ops = {
+ .map = libxenforeignmem_backend_map,
+ .unmap = libxenforeignmem_backend_unmap,
+};
+
+struct qemu_xs_handle {
+ struct xs_handle *xsh;
+ NotifierList notifiers;
+};
+
+static void watch_event(void *opaque)
+{
+ struct qemu_xs_handle *h = opaque;
+
+ for (;;) {
+ char **v = xs_check_watch(h->xsh);
+
+ if (!v) {
+ break;
+ }
+
+ notifier_list_notify(&h->notifiers, v);
+ free(v);
+ }
+}
+
+static struct qemu_xs_handle *libxenstore_open(void)
+{
+ struct xs_handle *xsh = xs_open(0);
+ struct qemu_xs_handle *h;
+
+ if (!xsh) {
+ return NULL;
+ }
+
+ h = g_new0(struct qemu_xs_handle, 1);
+ h->xsh = xsh;
+
+ notifier_list_init(&h->notifiers);
+ qemu_set_fd_handler(xs_fileno(h->xsh), watch_event, NULL, h);
+
+ return h;
+}
+
+static void libxenstore_close(struct qemu_xs_handle *h)
+{
+ g_assert(notifier_list_empty(&h->notifiers));
+ qemu_set_fd_handler(xs_fileno(h->xsh), NULL, NULL, NULL);
+ xs_close(h->xsh);
+ g_free(h);
+}
+
+static char *libxenstore_get_domain_path(struct qemu_xs_handle *h,
+ unsigned int domid)
+{
+ return xs_get_domain_path(h->xsh, domid);
+}
+
+static char **libxenstore_directory(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ unsigned int *num)
+{
+ return xs_directory(h->xsh, t, path, num);
+}
+
+static void *libxenstore_read(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *len)
+{
+ return xs_read(h->xsh, t, path, len);
+}
+
+static bool libxenstore_write(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, const void *data,
+ unsigned int len)
+{
+ return xs_write(h->xsh, t, path, data, len);
+}
+
+static bool libxenstore_create(struct qemu_xs_handle *h, xs_transaction_t t,
+ unsigned int owner, unsigned int domid,
+ unsigned int perms, const char *path)
+{
+ struct xs_permissions perms_list[] = {
+ {
+ .id = owner,
+ .perms = XS_PERM_NONE,
+ },
+ {
+ .id = domid,
+ .perms = perms,
+ },
+ };
+
+ if (!xs_mkdir(h->xsh, t, path)) {
+ return false;
+ }
+
+ return xs_set_permissions(h->xsh, t, path, perms_list,
+ ARRAY_SIZE(perms_list));
+}
+
+static bool libxenstore_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path)
+{
+ return xs_rm(h->xsh, t, path);
+}
+
+struct qemu_xs_watch {
+ char *path;
+ char *token;
+ xs_watch_fn fn;
+ void *opaque;
+ Notifier notifier;
+};
+
+static void watch_notify(Notifier *n, void *data)
+{
+ struct qemu_xs_watch *w = container_of(n, struct qemu_xs_watch, notifier);
+ const char **v = data;
+
+ if (!strcmp(w->token, v[XS_WATCH_TOKEN])) {
+ w->fn(w->opaque, v[XS_WATCH_PATH]);
+ }
+}
+
+static struct qemu_xs_watch *new_watch(const char *path, xs_watch_fn fn,
+ void *opaque)
+{
+ struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
+ QemuUUID uuid;
+
+ qemu_uuid_generate(&uuid);
+
+ w->token = qemu_uuid_unparse_strdup(&uuid);
+ w->path = g_strdup(path);
+ w->fn = fn;
+ w->opaque = opaque;
+ w->notifier.notify = watch_notify;
+
+ return w;
+}
+
+static void free_watch(struct qemu_xs_watch *w)
+{
+ g_free(w->token);
+ g_free(w->path);
+
+ g_free(w);
+}
+
+static struct qemu_xs_watch *libxenstore_watch(struct qemu_xs_handle *h,
+ const char *path, xs_watch_fn fn,
+ void *opaque)
+{
+ struct qemu_xs_watch *w = new_watch(path, fn, opaque);
+
+ notifier_list_add(&h->notifiers, &w->notifier);
+
+ if (!xs_watch(h->xsh, path, w->token)) {
+ notifier_remove(&w->notifier);
+ free_watch(w);
+ return NULL;
+ }
+
+ return w;
+}
+
+static void libxenstore_unwatch(struct qemu_xs_handle *h,
+ struct qemu_xs_watch *w)
+{
+ xs_unwatch(h->xsh, w->path, w->token);
+ notifier_remove(&w->notifier);
+ free_watch(w);
+}
+
+static xs_transaction_t libxenstore_transaction_start(struct qemu_xs_handle *h)
+{
+ return xs_transaction_start(h->xsh);
+}
+
+static bool libxenstore_transaction_end(struct qemu_xs_handle *h,
+ xs_transaction_t t, bool abort)
+{
+ return xs_transaction_end(h->xsh, t, abort);
+}
+
+struct xenstore_backend_ops libxenstore_backend_ops = {
+ .open = libxenstore_open,
+ .close = libxenstore_close,
+ .get_domain_path = libxenstore_get_domain_path,
+ .directory = libxenstore_directory,
+ .read = libxenstore_read,
+ .write = libxenstore_write,
+ .create = libxenstore_create,
+ .destroy = libxenstore_destroy,
+ .watch = libxenstore_watch,
+ .unwatch = libxenstore_unwatch,
+ .transaction_start = libxenstore_transaction_start,
+ .transaction_end = libxenstore_transaction_end,
+};
+
+void setup_xen_backend_ops(void)
+{
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
+ xengnttab_handle *xgt = xengnttab_open(NULL, 0);
+
+ if (xgt) {
+ if (xengnttab_grant_copy(xgt, 0, NULL) == 0) {
+ libxengnttab_backend_ops.grant_copy = libxengnttab_backend_grant_copy;
+ }
+ xengnttab_close(xgt);
+ }
+#endif
+ xen_evtchn_ops = &libxenevtchn_backend_ops;
+ xen_gnttab_ops = &libxengnttab_backend_ops;
+ xen_foreignmem_ops = &libxenforeignmem_backend_ops;
+ xen_xenstore_ops = &libxenstore_backend_ops;
+}
diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c
index 46ee4a7..9b7304e 100644
--- a/hw/xen/xen_devconfig.c
+++ b/hw/xen/xen_devconfig.c
@@ -11,11 +11,11 @@
{
char *dom;
- dom = xs_get_domain_path(xenstore, xen_domid);
+ dom = qemu_xen_xs_get_domain_path(xenstore, xen_domid);
snprintf(fe, len, "%s/device/%s/%d", dom, ftype, vdev);
free(dom);
- dom = xs_get_domain_path(xenstore, 0);
+ dom = qemu_xen_xs_get_domain_path(xenstore, 0);
snprintf(be, len, "%s/backend/%s/%d/%d", dom, btype, xen_domid, vdev);
free(dom);
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index 85c93cf..2d33d17 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -60,9 +60,9 @@
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
+#include "xen_pt.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen-legacy-backend.h"
-#include "xen_pt.h"
#include "qemu/range.h"
static bool has_igd_gfx_passthru;
diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h
index e184699..b20744f 100644
--- a/hw/xen/xen_pt.h
+++ b/hw/xen/xen_pt.h
@@ -1,7 +1,7 @@
#ifndef XEN_PT_H
#define XEN_PT_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_native.h"
#include "xen-host-pci-device.h"
#include "qom/object.h"
diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c
index 8b9b554..2b8680b 100644
--- a/hw/xen/xen_pt_config_init.c
+++ b/hw/xen/xen_pt_config_init.c
@@ -15,8 +15,8 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
-#include "hw/xen/xen-legacy-backend.h"
#include "xen_pt.h"
+#include "hw/xen/xen-legacy-backend.h"
#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
(((value) & (val_mask)) | ((data) & ~(val_mask)))
diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c
index f303f67..0aed3bb 100644
--- a/hw/xen/xen_pt_graphics.c
+++ b/hw/xen/xen_pt_graphics.c
@@ -5,7 +5,6 @@
#include "qapi/error.h"
#include "xen_pt.h"
#include "xen-host-pci-device.h"
-#include "hw/xen/xen-legacy-backend.h"
static unsigned long igd_guest_opregion;
static unsigned long igd_host_opregion;
diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c
index b71563f..09cca4e 100644
--- a/hw/xen/xen_pt_msi.c
+++ b/hw/xen/xen_pt_msi.c
@@ -11,9 +11,9 @@
#include "qemu/osdep.h"
-#include "hw/xen/xen-legacy-backend.h"
-#include "xen_pt.h"
#include "hw/i386/apic-msidef.h"
+#include "xen_pt.h"
+#include "hw/xen/xen-legacy-backend.h"
#define XEN_PT_AUTO_ASSIGN -1
diff --git a/hw/xen/xen_pvdev.c b/hw/xen/xen_pvdev.c
index 1a5177b..be1504b 100644
--- a/hw/xen/xen_pvdev.c
+++ b/hw/xen/xen_pvdev.c
@@ -54,31 +54,17 @@
struct xs_dirs *d;
QTAILQ_FOREACH(d, &xs_cleanup, list) {
- xs_rm(xenstore, 0, d->xs_dir);
+ qemu_xen_xs_destroy(xenstore, 0, d->xs_dir);
}
}
int xenstore_mkdir(char *path, int p)
{
- struct xs_permissions perms[2] = {
- {
- .id = 0, /* set owner: dom0 */
- }, {
- .id = xen_domid,
- .perms = p,
- }
- };
-
- if (!xs_mkdir(xenstore, 0, path)) {
+ if (!qemu_xen_xs_create(xenstore, 0, 0, xen_domid, p, path)) {
xen_pv_printf(NULL, 0, "xs_mkdir %s: failed\n", path);
return -1;
}
xenstore_cleanup_dir(g_strdup(path));
-
- if (!xs_set_permissions(xenstore, 0, path, perms, 2)) {
- xen_pv_printf(NULL, 0, "xs_set_permissions %s: failed\n", path);
- return -1;
- }
return 0;
}
@@ -87,7 +73,7 @@
char abspath[XEN_BUFSIZE];
snprintf(abspath, sizeof(abspath), "%s/%s", base, node);
- if (!xs_write(xenstore, 0, abspath, val, strlen(val))) {
+ if (!qemu_xen_xs_write(xenstore, 0, abspath, val, strlen(val))) {
return -1;
}
return 0;
@@ -100,7 +86,7 @@
char *str, *ret = NULL;
snprintf(abspath, sizeof(abspath), "%s/%s", base, node);
- str = xs_read(xenstore, 0, abspath, &len);
+ str = qemu_xen_xs_read(xenstore, 0, abspath, &len);
if (str != NULL) {
/* move to qemu-allocated memory to make sure
* callers can safely g_free() stuff. */
@@ -152,29 +138,6 @@
return rc;
}
-void xenstore_update(void *unused)
-{
- char **vec = NULL;
- intptr_t type, ops, ptr;
- unsigned int dom, count;
-
- vec = xs_read_watch(xenstore, &count);
- if (vec == NULL) {
- goto cleanup;
- }
-
- if (sscanf(vec[XS_WATCH_TOKEN], "be:%" PRIxPTR ":%d:%" PRIxPTR,
- &type, &dom, &ops) == 3) {
- xenstore_update_be(vec[XS_WATCH_PATH], (void *)type, dom, (void*)ops);
- }
- if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) {
- xenstore_update_fe(vec[XS_WATCH_PATH], (void *)ptr);
- }
-
-cleanup:
- free(vec);
-}
-
const char *xenbus_strstate(enum xenbus_state state)
{
static const char *const name[] = {
@@ -238,14 +201,14 @@
struct XenLegacyDevice *xendev = opaque;
evtchn_port_t port;
- port = xenevtchn_pending(xendev->evtchndev);
+ port = qemu_xen_evtchn_pending(xendev->evtchndev);
if (port != xendev->local_port) {
xen_pv_printf(xendev, 0,
"xenevtchn_pending returned %d (expected %d)\n",
port, xendev->local_port);
return;
}
- xenevtchn_unmask(xendev->evtchndev, port);
+ qemu_xen_evtchn_unmask(xendev->evtchndev, port);
if (xendev->ops->event) {
xendev->ops->event(xendev);
@@ -257,15 +220,15 @@
if (xendev->local_port == -1) {
return;
}
- qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
- xenevtchn_unbind(xendev->evtchndev, xendev->local_port);
+ qemu_set_fd_handler(qemu_xen_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
+ qemu_xen_evtchn_unbind(xendev->evtchndev, xendev->local_port);
xen_pv_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port);
xendev->local_port = -1;
}
int xen_pv_send_notify(struct XenLegacyDevice *xendev)
{
- return xenevtchn_notify(xendev->evtchndev, xendev->local_port);
+ return qemu_xen_evtchn_notify(xendev->evtchndev, xendev->local_port);
}
/* ------------------------------------------------------------- */
@@ -299,17 +262,15 @@
}
if (xendev->fe) {
- char token[XEN_BUFSIZE];
- snprintf(token, sizeof(token), "fe:%p", xendev);
- xs_unwatch(xenstore, xendev->fe, token);
+ qemu_xen_xs_unwatch(xenstore, xendev->watch);
g_free(xendev->fe);
}
if (xendev->evtchndev != NULL) {
- xenevtchn_close(xendev->evtchndev);
+ qemu_xen_evtchn_close(xendev->evtchndev);
}
if (xendev->gnttabdev != NULL) {
- xengnttab_close(xendev->gnttabdev);
+ qemu_xen_gnttab_close(xendev->gnttabdev);
}
QTAILQ_REMOVE(&xendevs, xendev, next);
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index dd9a7f6..da13357 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -85,7 +85,7 @@
/* Increment wait_->num_waiters before evaluating cond. */ \
qatomic_inc(&wait_->num_waiters); \
/* Paired with smp_mb in aio_wait_kick(). */ \
- smp_mb(); \
+ smp_mb__after_rmw(); \
if (ctx_ && in_aio_context_home_thread(ctx_)) { \
while ((cond)) { \
aio_poll(ctx_, true); \
diff --git a/include/block/aio.h b/include/block/aio.h
index 8fba6a3..543717f 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -482,14 +482,6 @@
IOHandler *io_poll_ready,
void *opaque);
-/* Set polling begin/end callbacks for a file descriptor that has already been
- * registered with aio_set_fd_handler. Do nothing if the file descriptor is
- * not registered.
- */
-void aio_set_fd_poll(AioContext *ctx, int fd,
- IOHandler *io_poll_begin,
- IOHandler *io_poll_end);
-
/* Register an event notifier and associated callbacks. Behaves very similarly
* to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
* will be invoked when using aio_poll().
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index ba297d6..822d645 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -18,14 +18,14 @@
#define AES_decrypt QEMU_AES_decrypt
int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY *key);
+ AES_KEY *key);
int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY *key);
+ AES_KEY *key);
void AES_encrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY *key);
+ const AES_KEY *key);
void AES_decrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY *key);
+ const AES_KEY *key);
extern const uint8_t AES_sbox[256];
extern const uint8_t AES_isbox[256];
diff --git a/include/crypto/desrfb.h b/include/crypto/desrfb.h
index 7ca596c..af8d122 100644
--- a/include/crypto/desrfb.h
+++ b/include/crypto/desrfb.h
@@ -15,30 +15,30 @@
/* d3des.h -
*
- * Headers and defines for d3des.c
- * Graven Imagery, 1992.
+ * Headers and defines for d3des.c
+ * Graven Imagery, 1992.
*
* Copyright (c) 1988,1989,1990,1991,1992 by Richard Outerbridge
- * (GEnie : OUTER; CIS : [71755,204])
+ * (GEnie : OUTER; CIS : [71755,204])
*/
-#define EN0 0 /* MODE == encrypt */
-#define DE1 1 /* MODE == decrypt */
+#define EN0 0 /* MODE == encrypt */
+#define DE1 1 /* MODE == decrypt */
void deskey(unsigned char *, int);
-/* hexkey[8] MODE
+/* hexkey[8] MODE
* Sets the internal key register according to the hexadecimal
* key contained in the 8 bytes of hexkey, according to the DES,
* for encryption or decryption according to MODE.
*/
void usekey(unsigned long *);
-/* cookedkey[32]
+/* cookedkey[32]
* Loads the internal key register with the data in cookedkey.
*/
void des(unsigned char *, unsigned char *);
-/* from[8] to[8]
+/* from[8] to[8]
* Encrypts/Decrypts (according to the key currently loaded in the
* internal key register) one block of eight bytes at address 'from'
* into the block at address 'to'. They can be the same.
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h
index 32cda9e..2f6f91c 100644
--- a/include/disas/dis-asm.h
+++ b/include/disas/dis-asm.h
@@ -188,20 +188,20 @@
#define bfd_mach_alpha_ev5 0x20
#define bfd_mach_alpha_ev6 0x30
bfd_arch_arm, /* Advanced Risc Machines ARM */
-#define bfd_mach_arm_unknown 0
-#define bfd_mach_arm_2 1
-#define bfd_mach_arm_2a 2
-#define bfd_mach_arm_3 3
-#define bfd_mach_arm_3M 4
-#define bfd_mach_arm_4 5
-#define bfd_mach_arm_4T 6
-#define bfd_mach_arm_5 7
-#define bfd_mach_arm_5T 8
-#define bfd_mach_arm_5TE 9
-#define bfd_mach_arm_XScale 10
-#define bfd_mach_arm_ep9312 11
-#define bfd_mach_arm_iWMMXt 12
-#define bfd_mach_arm_iWMMXt2 13
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+#define bfd_mach_arm_iWMMXt2 13
bfd_arch_ns32k, /* National Semiconductors ns32000 */
bfd_arch_w65, /* WDC 65816 */
bfd_arch_tic30, /* Texas Instruments TMS320C30 */
@@ -241,7 +241,7 @@
bfd_arch_ia64, /* HP/Intel ia64 */
#define bfd_mach_ia64_elf64 64
#define bfd_mach_ia64_elf32 32
- bfd_arch_nios2, /* Nios II */
+ bfd_arch_nios2, /* Nios II */
#define bfd_mach_nios2 0
#define bfd_mach_nios2r1 1
#define bfd_mach_nios2r2 2
@@ -269,14 +269,14 @@
G_GNUC_PRINTF(2, 3);
enum dis_insn_type {
- dis_noninsn, /* Not a valid instruction */
- dis_nonbranch, /* Not a branch instruction */
- dis_branch, /* Unconditional branch */
- dis_condbranch, /* Conditional branch */
- dis_jsr, /* Jump to subroutine */
- dis_condjsr, /* Conditional jump to subroutine */
- dis_dref, /* Data reference instruction */
- dis_dref2 /* Two data references in instruction */
+ dis_noninsn, /* Not a valid instruction */
+ dis_nonbranch, /* Not a branch instruction */
+ dis_branch, /* Unconditional branch */
+ dis_condbranch, /* Conditional branch */
+ dis_jsr, /* Jump to subroutine */
+ dis_condjsr, /* Conditional jump to subroutine */
+ dis_dref, /* Data reference instruction */
+ dis_dref2 /* Two data references in instruction */
};
/* This struct is passed into the instruction decoding routine,
@@ -319,8 +319,8 @@
The top 16 bits are reserved for public use (and are documented here).
The bottom 16 bits are for the internal use of the disassembler. */
unsigned long flags;
-#define INSN_HAS_RELOC 0x80000000
-#define INSN_ARM_BE32 0x00010000
+#define INSN_HAS_RELOC 0x80000000
+#define INSN_ARM_BE32 0x00010000
PTR private_data;
/* Function used to get bytes to disassemble. MEMADDR is the
@@ -330,7 +330,7 @@
Returns an errno value or 0 for success. */
int (*read_memory_func)
(bfd_vma memaddr, bfd_byte *myaddr, int length,
- struct disassemble_info *info);
+ struct disassemble_info *info);
/* Function which should be called if we get an error that we can't
recover from. STATUS is the errno value from read_memory_func and
@@ -384,14 +384,14 @@
To determine whether this decoder supports this information, set
insn_info_valid to 0, decode an instruction, then check it. */
- char insn_info_valid; /* Branch info has been set. */
- char branch_delay_insns; /* How many sequential insn's will run before
- a branch takes effect. (0 = normal) */
- char data_size; /* Size of data reference in insn, in bytes */
- enum dis_insn_type insn_type; /* Type of instruction */
- bfd_vma target; /* Target address of branch or dref, if known;
- zero if unknown. */
- bfd_vma target2; /* Second target address for dref2 */
+ char insn_info_valid; /* Branch info has been set. */
+ char branch_delay_insns; /* How many sequential insn's will run before
+ a branch takes effect. (0 = normal) */
+ char data_size; /* Size of data reference in insn, in bytes */
+ enum dis_insn_type insn_type; /* Type of instruction */
+ bfd_vma target; /* Target address of branch or dref, if known;
+ zero if unknown. */
+ bfd_vma target2; /* Second target address for dref2 */
/* Command line options specific to the target disassembler. */
char * disassembler_options;
diff --git a/include/elf.h b/include/elf.h
index 8bf1e72..2f4d0e5 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -11,25 +11,25 @@
/* 64-bit ELF base types. */
typedef uint64_t Elf64_Addr;
typedef uint16_t Elf64_Half;
-typedef int16_t Elf64_SHalf;
+typedef int16_t Elf64_SHalf;
typedef uint64_t Elf64_Off;
-typedef int32_t Elf64_Sword;
+typedef int32_t Elf64_Sword;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Xword;
typedef int64_t Elf64_Sxword;
/* These constants are for the segment types stored in the image headers */
-#define PT_NULL 0
-#define PT_LOAD 1
-#define PT_DYNAMIC 2
-#define PT_INTERP 3
-#define PT_NOTE 4
-#define PT_SHLIB 5
-#define PT_PHDR 6
-#define PT_LOOS 0x60000000
-#define PT_HIOS 0x6fffffff
-#define PT_LOPROC 0x70000000
-#define PT_HIPROC 0x7fffffff
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+#define PT_LOOS 0x60000000
+#define PT_HIOS 0x6fffffff
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
#define PT_GNU_STACK (PT_LOOS + 0x474e551)
#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
@@ -41,34 +41,34 @@
/* Flags in the e_flags field of the header */
/* MIPS architecture level. */
-#define EF_MIPS_ARCH 0xf0000000
+#define EF_MIPS_ARCH 0xf0000000
/* Legal values for MIPS architecture level. */
-#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
-#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
-#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
-#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
-#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
-#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
-#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
-#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */
-#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */
-#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */
-#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
+#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */
+#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */
+#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */
+#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */
/* The ABI of a file. */
-#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
-#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
+#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
+#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
-#define EF_MIPS_NOREORDER 0x00000001
-#define EF_MIPS_PIC 0x00000002
-#define EF_MIPS_CPIC 0x00000004
-#define EF_MIPS_ABI2 0x00000020
-#define EF_MIPS_OPTIONS_FIRST 0x00000080
-#define EF_MIPS_32BITMODE 0x00000100
-#define EF_MIPS_ABI 0x0000f000
-#define EF_MIPS_FP64 0x00000200
-#define EF_MIPS_NAN2008 0x00000400
+#define EF_MIPS_NOREORDER 0x00000001
+#define EF_MIPS_PIC 0x00000002
+#define EF_MIPS_CPIC 0x00000004
+#define EF_MIPS_ABI2 0x00000020
+#define EF_MIPS_OPTIONS_FIRST 0x00000080
+#define EF_MIPS_32BITMODE 0x00000100
+#define EF_MIPS_ABI 0x0000f000
+#define EF_MIPS_FP64 0x00000200
+#define EF_MIPS_NAN2008 0x00000400
/* MIPS machine variant */
#define EF_MIPS_MACH_NONE 0x00000000 /* A standard MIPS implementation */
@@ -129,162 +129,162 @@
#define ET_HIPROC 0xffff
/* These constants define the various ELF target machines */
-#define EM_NONE 0
-#define EM_M32 1
-#define EM_SPARC 2
-#define EM_386 3
-#define EM_68K 4
-#define EM_88K 5
-#define EM_486 6 /* Perhaps disused */
-#define EM_860 7
+#define EM_NONE 0
+#define EM_M32 1
+#define EM_SPARC 2
+#define EM_386 3
+#define EM_68K 4
+#define EM_88K 5
+#define EM_486 6 /* Perhaps disused */
+#define EM_860 7
-#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
+#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
-#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
-#define EM_PARISC 15 /* HPPA */
+#define EM_PARISC 15 /* HPPA */
-#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
+#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
-#define EM_PPC 20 /* PowerPC */
-#define EM_PPC64 21 /* PowerPC64 */
+#define EM_PPC 20 /* PowerPC */
+#define EM_PPC64 21 /* PowerPC64 */
-#define EM_ARM 40 /* ARM */
+#define EM_ARM 40 /* ARM */
-#define EM_SH 42 /* SuperH */
+#define EM_SH 42 /* SuperH */
-#define EM_SPARCV9 43 /* SPARC v9 64-bit */
+#define EM_SPARCV9 43 /* SPARC v9 64-bit */
-#define EM_TRICORE 44 /* Infineon TriCore */
+#define EM_TRICORE 44 /* Infineon TriCore */
-#define EM_IA_64 50 /* HP/Intel IA-64 */
+#define EM_IA_64 50 /* HP/Intel IA-64 */
-#define EM_X86_64 62 /* AMD x86-64 */
+#define EM_X86_64 62 /* AMD x86-64 */
-#define EM_S390 22 /* IBM S/390 */
+#define EM_S390 22 /* IBM S/390 */
-#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
-#define EM_AVR 83 /* AVR 8-bit microcontroller */
+#define EM_AVR 83 /* AVR 8-bit microcontroller */
-#define EM_V850 87 /* NEC v850 */
+#define EM_V850 87 /* NEC v850 */
-#define EM_H8_300H 47 /* Hitachi H8/300H */
-#define EM_H8S 48 /* Hitachi H8S */
-#define EM_LATTICEMICO32 138 /* LatticeMico32 */
+#define EM_H8_300H 47 /* Hitachi H8/300H */
+#define EM_H8S 48 /* Hitachi H8S */
+#define EM_LATTICEMICO32 138 /* LatticeMico32 */
-#define EM_OPENRISC 92 /* OpenCores OpenRISC */
+#define EM_OPENRISC 92 /* OpenCores OpenRISC */
-#define EM_HEXAGON 164 /* Qualcomm Hexagon */
+#define EM_HEXAGON 164 /* Qualcomm Hexagon */
-#define EM_RX 173 /* Renesas RX family */
+#define EM_RX 173 /* Renesas RX family */
-#define EM_RISCV 243 /* RISC-V */
+#define EM_RISCV 243 /* RISC-V */
-#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */
+#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */
-#define EM_LOONGARCH 258 /* LoongArch */
+#define EM_LOONGARCH 258 /* LoongArch */
/*
* This is an interim value that we will use until the committee comes
* up with a final number.
*/
-#define EM_ALPHA 0x9026
+#define EM_ALPHA 0x9026
/* Bogus old v850 magic number, used by old tools. */
-#define EM_CYGNUS_V850 0x9080
+#define EM_CYGNUS_V850 0x9080
/*
* This is the old interim value for S/390 architecture
*/
-#define EM_S390_OLD 0xA390
+#define EM_S390_OLD 0xA390
-#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
+#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
-#define EM_MICROBLAZE 189
-#define EM_MICROBLAZE_OLD 0xBAAB
+#define EM_MICROBLAZE 189
+#define EM_MICROBLAZE_OLD 0xBAAB
-#define EM_XTENSA 94 /* Tensilica Xtensa */
+#define EM_XTENSA 94 /* Tensilica Xtensa */
-#define EM_AARCH64 183
+#define EM_AARCH64 183
-#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */
+#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */
/* This is the info that is needed to parse the dynamic section of the file */
-#define DT_NULL 0
-#define DT_NEEDED 1
-#define DT_PLTRELSZ 2
-#define DT_PLTGOT 3
-#define DT_HASH 4
-#define DT_STRTAB 5
-#define DT_SYMTAB 6
-#define DT_RELA 7
-#define DT_RELASZ 8
-#define DT_RELAENT 9
-#define DT_STRSZ 10
-#define DT_SYMENT 11
-#define DT_INIT 12
-#define DT_FINI 13
-#define DT_SONAME 14
-#define DT_RPATH 15
-#define DT_SYMBOLIC 16
-#define DT_REL 17
-#define DT_RELSZ 18
-#define DT_RELENT 19
-#define DT_PLTREL 20
-#define DT_DEBUG 21
-#define DT_TEXTREL 22
-#define DT_JMPREL 23
-#define DT_BINDNOW 24
-#define DT_INIT_ARRAY 25
-#define DT_FINI_ARRAY 26
-#define DT_INIT_ARRAYSZ 27
-#define DT_FINI_ARRAYSZ 28
-#define DT_RUNPATH 29
-#define DT_FLAGS 30
-#define DT_LOOS 0x6000000d
-#define DT_HIOS 0x6ffff000
-#define DT_LOPROC 0x70000000
-#define DT_HIPROC 0x7fffffff
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+#define DT_BINDNOW 24
+#define DT_INIT_ARRAY 25
+#define DT_FINI_ARRAY 26
+#define DT_INIT_ARRAYSZ 27
+#define DT_FINI_ARRAYSZ 28
+#define DT_RUNPATH 29
+#define DT_FLAGS 30
+#define DT_LOOS 0x6000000d
+#define DT_HIOS 0x6ffff000
+#define DT_LOPROC 0x70000000
+#define DT_HIPROC 0x7fffffff
/* DT_ entries which fall between DT_VALRNGLO and DT_VALRNDHI use
the d_val field of the Elf*_Dyn structure. I.e. they contain scalars. */
-#define DT_VALRNGLO 0x6ffffd00
-#define DT_VALRNGHI 0x6ffffdff
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_VALRNGHI 0x6ffffdff
/* DT_ entries which fall between DT_ADDRRNGLO and DT_ADDRRNGHI use
the d_ptr field of the Elf*_Dyn structure. I.e. they contain pointers. */
-#define DT_ADDRRNGLO 0x6ffffe00
-#define DT_ADDRRNGHI 0x6ffffeff
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_ADDRRNGHI 0x6ffffeff
-#define DT_VERSYM 0x6ffffff0
-#define DT_RELACOUNT 0x6ffffff9
-#define DT_RELCOUNT 0x6ffffffa
-#define DT_FLAGS_1 0x6ffffffb
-#define DT_VERDEF 0x6ffffffc
-#define DT_VERDEFNUM 0x6ffffffd
-#define DT_VERNEED 0x6ffffffe
-#define DT_VERNEEDNUM 0x6fffffff
+#define DT_VERSYM 0x6ffffff0
+#define DT_RELACOUNT 0x6ffffff9
+#define DT_RELCOUNT 0x6ffffffa
+#define DT_FLAGS_1 0x6ffffffb
+#define DT_VERDEF 0x6ffffffc
+#define DT_VERDEFNUM 0x6ffffffd
+#define DT_VERNEED 0x6ffffffe
+#define DT_VERNEEDNUM 0x6fffffff
-#define DT_MIPS_RLD_VERSION 0x70000001
-#define DT_MIPS_TIME_STAMP 0x70000002
-#define DT_MIPS_ICHECKSUM 0x70000003
-#define DT_MIPS_IVERSION 0x70000004
-#define DT_MIPS_FLAGS 0x70000005
- #define RHF_NONE 0
- #define RHF_HARDWAY 1
- #define RHF_NOTPOT 2
-#define DT_MIPS_BASE_ADDRESS 0x70000006
-#define DT_MIPS_CONFLICT 0x70000008
-#define DT_MIPS_LIBLIST 0x70000009
-#define DT_MIPS_LOCAL_GOTNO 0x7000000a
-#define DT_MIPS_CONFLICTNO 0x7000000b
-#define DT_MIPS_LIBLISTNO 0x70000010
-#define DT_MIPS_SYMTABNO 0x70000011
-#define DT_MIPS_UNREFEXTNO 0x70000012
-#define DT_MIPS_GOTSYM 0x70000013
-#define DT_MIPS_HIPAGENO 0x70000014
-#define DT_MIPS_RLD_MAP 0x70000016
+#define DT_MIPS_RLD_VERSION 0x70000001
+#define DT_MIPS_TIME_STAMP 0x70000002
+#define DT_MIPS_ICHECKSUM 0x70000003
+#define DT_MIPS_IVERSION 0x70000004
+#define DT_MIPS_FLAGS 0x70000005
+ #define RHF_NONE 0
+ #define RHF_HARDWAY 1
+ #define RHF_NOTPOT 2
+#define DT_MIPS_BASE_ADDRESS 0x70000006
+#define DT_MIPS_CONFLICT 0x70000008
+#define DT_MIPS_LIBLIST 0x70000009
+#define DT_MIPS_LOCAL_GOTNO 0x7000000a
+#define DT_MIPS_CONFLICTNO 0x7000000b
+#define DT_MIPS_LIBLISTNO 0x70000010
+#define DT_MIPS_SYMTABNO 0x70000011
+#define DT_MIPS_UNREFEXTNO 0x70000012
+#define DT_MIPS_GOTSYM 0x70000013
+#define DT_MIPS_HIPAGENO 0x70000014
+#define DT_MIPS_RLD_MAP 0x70000016
/* This info is needed when parsing the symbol table */
#define STB_LOCAL 0
@@ -297,61 +297,61 @@
#define STT_SECTION 3
#define STT_FILE 4
-#define ELF_ST_BIND(x) ((x) >> 4)
-#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF_ST_BIND(x) ((x) >> 4)
+#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
#define ELF_ST_INFO(bind, type) (((bind) << 4) | ((type) & 0xf))
-#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
-#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
-#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
-#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
/* Symbolic values for the entries in the auxiliary table
put on the initial stack */
-#define AT_NULL 0 /* end of vector */
-#define AT_IGNORE 1 /* entry should be ignored */
-#define AT_EXECFD 2 /* file descriptor of program */
-#define AT_PHDR 3 /* program headers for program */
-#define AT_PHENT 4 /* size of program header entry */
-#define AT_PHNUM 5 /* number of program headers */
-#define AT_PAGESZ 6 /* system page size */
-#define AT_BASE 7 /* base address of interpreter */
-#define AT_FLAGS 8 /* flags */
-#define AT_ENTRY 9 /* entry point of program */
-#define AT_NOTELF 10 /* program is not ELF */
-#define AT_UID 11 /* real uid */
-#define AT_EUID 12 /* effective uid */
-#define AT_GID 13 /* real gid */
-#define AT_EGID 14 /* effective gid */
-#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
-#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
-#define AT_CLKTCK 17 /* frequency at which times() increments */
-#define AT_FPUCW 18 /* info about fpu initialization by kernel */
-#define AT_DCACHEBSIZE 19 /* data cache block size */
-#define AT_ICACHEBSIZE 20 /* instruction cache block size */
-#define AT_UCACHEBSIZE 21 /* unified cache block size */
-#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */
-#define AT_SECURE 23 /* boolean, was exec suid-like? */
-#define AT_BASE_PLATFORM 24 /* string identifying real platforms */
-#define AT_RANDOM 25 /* address of 16 random bytes */
-#define AT_HWCAP2 26 /* extension of AT_HWCAP */
-#define AT_EXECFN 31 /* filename of the executable */
-#define AT_SYSINFO 32 /* address of kernel entry point */
-#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */
-#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */
-#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */
-#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */
-#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */
+#define AT_NULL 0 /* end of vector */
+#define AT_IGNORE 1 /* entry should be ignored */
+#define AT_EXECFD 2 /* file descriptor of program */
+#define AT_PHDR 3 /* program headers for program */
+#define AT_PHENT 4 /* size of program header entry */
+#define AT_PHNUM 5 /* number of program headers */
+#define AT_PAGESZ 6 /* system page size */
+#define AT_BASE 7 /* base address of interpreter */
+#define AT_FLAGS 8 /* flags */
+#define AT_ENTRY 9 /* entry point of program */
+#define AT_NOTELF 10 /* program is not ELF */
+#define AT_UID 11 /* real uid */
+#define AT_EUID 12 /* effective uid */
+#define AT_GID 13 /* real gid */
+#define AT_EGID 14 /* effective gid */
+#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
+#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
+#define AT_CLKTCK 17 /* frequency at which times() increments */
+#define AT_FPUCW 18 /* info about fpu initialization by kernel */
+#define AT_DCACHEBSIZE 19 /* data cache block size */
+#define AT_ICACHEBSIZE 20 /* instruction cache block size */
+#define AT_UCACHEBSIZE 21 /* unified cache block size */
+#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */
+#define AT_SECURE 23 /* boolean, was exec suid-like? */
+#define AT_BASE_PLATFORM 24 /* string identifying real platforms */
+#define AT_RANDOM 25 /* address of 16 random bytes */
+#define AT_HWCAP2 26 /* extension of AT_HWCAP */
+#define AT_EXECFN 31 /* filename of the executable */
+#define AT_SYSINFO 32 /* address of kernel entry point */
+#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */
+#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */
+#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */
+#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */
+#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */
typedef struct dynamic{
Elf32_Sword d_tag;
union{
- Elf32_Sword d_val;
- Elf32_Addr d_ptr;
+ Elf32_Sword d_val;
+ Elf32_Addr d_ptr;
} d_un;
} Elf32_Dyn;
typedef struct {
- Elf64_Sxword d_tag; /* entry tag value */
+ Elf64_Sxword d_tag; /* entry tag value */
union {
Elf64_Xword d_val;
Elf64_Addr d_ptr;
@@ -362,72 +362,72 @@
#define ELF32_R_SYM(x) ((x) >> 8)
#define ELF32_R_TYPE(x) ((x) & 0xff)
-#define ELF64_R_SYM(i) ((i) >> 32)
-#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
+#define ELF64_R_SYM(i) ((i) >> 32)
+#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
#define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000)
-#define R_386_NONE 0
-#define R_386_32 1
-#define R_386_PC32 2
-#define R_386_GOT32 3
-#define R_386_PLT32 4
-#define R_386_COPY 5
-#define R_386_GLOB_DAT 6
-#define R_386_JMP_SLOT 7
-#define R_386_RELATIVE 8
-#define R_386_GOTOFF 9
-#define R_386_GOTPC 10
-#define R_386_NUM 11
+#define R_386_NONE 0
+#define R_386_32 1
+#define R_386_PC32 2
+#define R_386_GOT32 3
+#define R_386_PLT32 4
+#define R_386_COPY 5
+#define R_386_GLOB_DAT 6
+#define R_386_JMP_SLOT 7
+#define R_386_RELATIVE 8
+#define R_386_GOTOFF 9
+#define R_386_GOTPC 10
+#define R_386_NUM 11
/* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */
-#define R_386_PC8 23
+#define R_386_PC8 23
-#define R_MIPS_NONE 0
-#define R_MIPS_16 1
-#define R_MIPS_32 2
-#define R_MIPS_REL32 3
-#define R_MIPS_26 4
-#define R_MIPS_HI16 5
-#define R_MIPS_LO16 6
-#define R_MIPS_GPREL16 7
-#define R_MIPS_LITERAL 8
-#define R_MIPS_GOT16 9
-#define R_MIPS_PC16 10
-#define R_MIPS_CALL16 11
-#define R_MIPS_GPREL32 12
+#define R_MIPS_NONE 0
+#define R_MIPS_16 1
+#define R_MIPS_32 2
+#define R_MIPS_REL32 3
+#define R_MIPS_26 4
+#define R_MIPS_HI16 5
+#define R_MIPS_LO16 6
+#define R_MIPS_GPREL16 7
+#define R_MIPS_LITERAL 8
+#define R_MIPS_GOT16 9
+#define R_MIPS_PC16 10
+#define R_MIPS_CALL16 11
+#define R_MIPS_GPREL32 12
/* The remaining relocs are defined on Irix, although they are not
in the MIPS ELF ABI. */
-#define R_MIPS_UNUSED1 13
-#define R_MIPS_UNUSED2 14
-#define R_MIPS_UNUSED3 15
-#define R_MIPS_SHIFT5 16
-#define R_MIPS_SHIFT6 17
-#define R_MIPS_64 18
-#define R_MIPS_GOT_DISP 19
-#define R_MIPS_GOT_PAGE 20
-#define R_MIPS_GOT_OFST 21
+#define R_MIPS_UNUSED1 13
+#define R_MIPS_UNUSED2 14
+#define R_MIPS_UNUSED3 15
+#define R_MIPS_SHIFT5 16
+#define R_MIPS_SHIFT6 17
+#define R_MIPS_64 18
+#define R_MIPS_GOT_DISP 19
+#define R_MIPS_GOT_PAGE 20
+#define R_MIPS_GOT_OFST 21
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
-#define R_MIPS_GOTHI16 22
-#define R_MIPS_GOTLO16 23
-#define R_MIPS_SUB 24
-#define R_MIPS_INSERT_A 25
-#define R_MIPS_INSERT_B 26
-#define R_MIPS_DELETE 27
-#define R_MIPS_HIGHER 28
-#define R_MIPS_HIGHEST 29
+#define R_MIPS_GOTHI16 22
+#define R_MIPS_GOTLO16 23
+#define R_MIPS_SUB 24
+#define R_MIPS_INSERT_A 25
+#define R_MIPS_INSERT_B 26
+#define R_MIPS_DELETE 27
+#define R_MIPS_HIGHER 28
+#define R_MIPS_HIGHEST 29
/*
* The following two relocation types are specified in the MIPS ABI
* conformance guide version 1.2 but not yet in the psABI.
*/
-#define R_MIPS_CALLHI16 30
-#define R_MIPS_CALLLO16 31
+#define R_MIPS_CALLHI16 30
+#define R_MIPS_CALLLO16 31
/*
* This range is reserved for vendor specific relocations.
*/
-#define R_MIPS_LOVENDOR 100
-#define R_MIPS_HIVENDOR 127
+#define R_MIPS_LOVENDOR 100
+#define R_MIPS_HIVENDOR 127
/* SUN SPARC specific definitions. */
@@ -448,48 +448,48 @@
/*
* Sparc ELF relocation types
*/
-#define R_SPARC_NONE 0
-#define R_SPARC_8 1
-#define R_SPARC_16 2
-#define R_SPARC_32 3
-#define R_SPARC_DISP8 4
-#define R_SPARC_DISP16 5
-#define R_SPARC_DISP32 6
-#define R_SPARC_WDISP30 7
-#define R_SPARC_WDISP22 8
-#define R_SPARC_HI22 9
-#define R_SPARC_22 10
-#define R_SPARC_13 11
-#define R_SPARC_LO10 12
-#define R_SPARC_GOT10 13
-#define R_SPARC_GOT13 14
-#define R_SPARC_GOT22 15
-#define R_SPARC_PC10 16
-#define R_SPARC_PC22 17
-#define R_SPARC_WPLT30 18
-#define R_SPARC_COPY 19
-#define R_SPARC_GLOB_DAT 20
-#define R_SPARC_JMP_SLOT 21
-#define R_SPARC_RELATIVE 22
-#define R_SPARC_UA32 23
-#define R_SPARC_PLT32 24
-#define R_SPARC_HIPLT22 25
-#define R_SPARC_LOPLT10 26
-#define R_SPARC_PCPLT32 27
-#define R_SPARC_PCPLT22 28
-#define R_SPARC_PCPLT10 29
-#define R_SPARC_10 30
-#define R_SPARC_11 31
-#define R_SPARC_64 32
-#define R_SPARC_OLO10 33
-#define R_SPARC_HH22 34
-#define R_SPARC_HM10 35
-#define R_SPARC_LM22 36
-#define R_SPARC_WDISP16 40
-#define R_SPARC_WDISP19 41
-#define R_SPARC_7 43
-#define R_SPARC_5 44
-#define R_SPARC_6 45
+#define R_SPARC_NONE 0
+#define R_SPARC_8 1
+#define R_SPARC_16 2
+#define R_SPARC_32 3
+#define R_SPARC_DISP8 4
+#define R_SPARC_DISP16 5
+#define R_SPARC_DISP32 6
+#define R_SPARC_WDISP30 7
+#define R_SPARC_WDISP22 8
+#define R_SPARC_HI22 9
+#define R_SPARC_22 10
+#define R_SPARC_13 11
+#define R_SPARC_LO10 12
+#define R_SPARC_GOT10 13
+#define R_SPARC_GOT13 14
+#define R_SPARC_GOT22 15
+#define R_SPARC_PC10 16
+#define R_SPARC_PC22 17
+#define R_SPARC_WPLT30 18
+#define R_SPARC_COPY 19
+#define R_SPARC_GLOB_DAT 20
+#define R_SPARC_JMP_SLOT 21
+#define R_SPARC_RELATIVE 22
+#define R_SPARC_UA32 23
+#define R_SPARC_PLT32 24
+#define R_SPARC_HIPLT22 25
+#define R_SPARC_LOPLT10 26
+#define R_SPARC_PCPLT32 27
+#define R_SPARC_PCPLT22 28
+#define R_SPARC_PCPLT10 29
+#define R_SPARC_10 30
+#define R_SPARC_11 31
+#define R_SPARC_64 32
+#define R_SPARC_OLO10 33
+#define R_SPARC_HH22 34
+#define R_SPARC_HM10 35
+#define R_SPARC_LM22 36
+#define R_SPARC_WDISP16 40
+#define R_SPARC_WDISP19 41
+#define R_SPARC_7 43
+#define R_SPARC_5 44
+#define R_SPARC_6 45
/* Bits present in AT_HWCAP for ARM. */
@@ -647,29 +647,29 @@
/*
* 68k ELF relocation types
*/
-#define R_68K_NONE 0
-#define R_68K_32 1
-#define R_68K_16 2
-#define R_68K_8 3
-#define R_68K_PC32 4
-#define R_68K_PC16 5
-#define R_68K_PC8 6
-#define R_68K_GOT32 7
-#define R_68K_GOT16 8
-#define R_68K_GOT8 9
-#define R_68K_GOT32O 10
-#define R_68K_GOT16O 11
-#define R_68K_GOT8O 12
-#define R_68K_PLT32 13
-#define R_68K_PLT16 14
-#define R_68K_PLT8 15
-#define R_68K_PLT32O 16
-#define R_68K_PLT16O 17
-#define R_68K_PLT8O 18
-#define R_68K_COPY 19
-#define R_68K_GLOB_DAT 20
-#define R_68K_JMP_SLOT 21
-#define R_68K_RELATIVE 22
+#define R_68K_NONE 0
+#define R_68K_32 1
+#define R_68K_16 2
+#define R_68K_8 3
+#define R_68K_PC32 4
+#define R_68K_PC16 5
+#define R_68K_PC8 6
+#define R_68K_GOT32 7
+#define R_68K_GOT16 8
+#define R_68K_GOT8 9
+#define R_68K_GOT32O 10
+#define R_68K_GOT16O 11
+#define R_68K_GOT8O 12
+#define R_68K_PLT32 13
+#define R_68K_PLT16 14
+#define R_68K_PLT8 15
+#define R_68K_PLT32O 16
+#define R_68K_PLT16O 17
+#define R_68K_PLT8O 18
+#define R_68K_COPY 19
+#define R_68K_GLOB_DAT 20
+#define R_68K_JMP_SLOT 21
+#define R_68K_RELATIVE 22
/*
* Alpha ELF relocation types
@@ -693,7 +693,7 @@
#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
-#define R_ALPHA_BRSGP 28
+#define R_ALPHA_BRSGP 28
#define R_ALPHA_TLSGD 29
#define R_ALPHA_TLS_LDM 30
#define R_ALPHA_DTPMOD64 31
@@ -708,78 +708,78 @@
#define R_ALPHA_TPRELLO 40
#define R_ALPHA_TPREL16 41
-#define SHF_ALPHA_GPREL 0x10000000
+#define SHF_ALPHA_GPREL 0x10000000
/* PowerPC specific definitions. */
/* Processor specific flags for the ELF header e_flags field. */
-#define EF_PPC64_ABI 0x3
+#define EF_PPC64_ABI 0x3
/* PowerPC relocations defined by the ABIs */
-#define R_PPC_NONE 0
-#define R_PPC_ADDR32 1 /* 32bit absolute address */
-#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
-#define R_PPC_ADDR16 3 /* 16bit absolute address */
-#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
-#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
-#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
-#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
-#define R_PPC_ADDR14_BRTAKEN 8
-#define R_PPC_ADDR14_BRNTAKEN 9
-#define R_PPC_REL24 10 /* PC relative 26 bit */
-#define R_PPC_REL14 11 /* PC relative 16 bit */
-#define R_PPC_REL14_BRTAKEN 12
-#define R_PPC_REL14_BRNTAKEN 13
-#define R_PPC_GOT16 14
-#define R_PPC_GOT16_LO 15
-#define R_PPC_GOT16_HI 16
-#define R_PPC_GOT16_HA 17
-#define R_PPC_PLTREL24 18
-#define R_PPC_COPY 19
-#define R_PPC_GLOB_DAT 20
-#define R_PPC_JMP_SLOT 21
-#define R_PPC_RELATIVE 22
-#define R_PPC_LOCAL24PC 23
-#define R_PPC_UADDR32 24
-#define R_PPC_UADDR16 25
-#define R_PPC_REL32 26
-#define R_PPC_PLT32 27
-#define R_PPC_PLTREL32 28
-#define R_PPC_PLT16_LO 29
-#define R_PPC_PLT16_HI 30
-#define R_PPC_PLT16_HA 31
-#define R_PPC_SDAREL16 32
-#define R_PPC_SECTOFF 33
-#define R_PPC_SECTOFF_LO 34
-#define R_PPC_SECTOFF_HI 35
-#define R_PPC_SECTOFF_HA 36
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
/* Keep this the last entry. */
#ifndef R_PPC_NUM
-#define R_PPC_NUM 37
+#define R_PPC_NUM 37
#endif
/* ARM specific declarations */
/* Processor specific flags for the ELF header e_flags field. */
-#define EF_ARM_RELEXEC 0x01
-#define EF_ARM_HASENTRY 0x02
-#define EF_ARM_INTERWORK 0x04
-#define EF_ARM_APCS_26 0x08
-#define EF_ARM_APCS_FLOAT 0x10
-#define EF_ARM_PIC 0x20
-#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */
-#define EF_NEW_ABI 0x80
-#define EF_OLD_ABI 0x100
-#define EF_ARM_SOFT_FLOAT 0x200
-#define EF_ARM_VFP_FLOAT 0x400
+#define EF_ARM_RELEXEC 0x01
+#define EF_ARM_HASENTRY 0x02
+#define EF_ARM_INTERWORK 0x04
+#define EF_ARM_APCS_26 0x08
+#define EF_ARM_APCS_FLOAT 0x10
+#define EF_ARM_PIC 0x20
+#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */
+#define EF_NEW_ABI 0x80
+#define EF_OLD_ABI 0x100
+#define EF_ARM_SOFT_FLOAT 0x200
+#define EF_ARM_VFP_FLOAT 0x400
#define EF_ARM_MAVERICK_FLOAT 0x800
/* Other constants defined in the ARM ELF spec. version B-01. */
-#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */
-#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */
-#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */
-#define EF_ARM_EABIMASK 0xFF000000
+#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */
+#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */
+#define EF_ARM_EABIMASK 0xFF000000
/* Constants defined in AAELF. */
#define EF_ARM_BE8 0x00800000
@@ -806,46 +806,46 @@
addressed by the static base */
/* ARM relocs. */
-#define R_ARM_NONE 0 /* No reloc */
-#define R_ARM_PC24 1 /* PC relative 26 bit branch */
-#define R_ARM_ABS32 2 /* Direct 32 bit */
-#define R_ARM_REL32 3 /* PC relative 32 bit */
-#define R_ARM_PC13 4
-#define R_ARM_ABS16 5 /* Direct 16 bit */
-#define R_ARM_ABS12 6 /* Direct 12 bit */
-#define R_ARM_THM_ABS5 7
-#define R_ARM_ABS8 8 /* Direct 8 bit */
-#define R_ARM_SBREL32 9
-#define R_ARM_THM_PC22 10
-#define R_ARM_THM_PC8 11
-#define R_ARM_AMP_VCALL9 12
-#define R_ARM_SWI24 13
-#define R_ARM_THM_SWI8 14
-#define R_ARM_XPC25 15
-#define R_ARM_THM_XPC22 16
-#define R_ARM_COPY 20 /* Copy symbol at runtime */
-#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
-#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
-#define R_ARM_RELATIVE 23 /* Adjust by program base */
-#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
-#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
-#define R_ARM_GOT32 26 /* 32 bit GOT entry */
-#define R_ARM_PLT32 27 /* 32 bit PLT address */
+#define R_ARM_NONE 0 /* No reloc */
+#define R_ARM_PC24 1 /* PC relative 26 bit branch */
+#define R_ARM_ABS32 2 /* Direct 32 bit */
+#define R_ARM_REL32 3 /* PC relative 32 bit */
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5 /* Direct 16 bit */
+#define R_ARM_ABS12 6 /* Direct 12 bit */
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8 /* Direct 8 bit */
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+#define R_ARM_COPY 20 /* Copy symbol at runtime */
+#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
+#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
+#define R_ARM_RELATIVE 23 /* Adjust by program base */
+#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
+#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
+#define R_ARM_GOT32 26 /* 32 bit GOT entry */
+#define R_ARM_PLT32 27 /* 32 bit PLT address */
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
-#define R_ARM_GNU_VTENTRY 100
-#define R_ARM_GNU_VTINHERIT 101
-#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
-#define R_ARM_THM_PC9 103 /* thumb conditional branch */
-#define R_ARM_RXPC25 249
-#define R_ARM_RSBREL32 250
-#define R_ARM_THM_RPC22 251
-#define R_ARM_RREL32 252
-#define R_ARM_RABS22 253
-#define R_ARM_RPC24 254
-#define R_ARM_RBASE 255
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
+#define R_ARM_THM_PC9 103 /* thumb conditional branch */
+#define R_ARM_RXPC25 249
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS22 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
/* Keep this the last entry. */
-#define R_ARM_NUM 256
+#define R_ARM_NUM 256
/* ARM Aarch64 relocation types */
#define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */
@@ -975,385 +975,385 @@
#define R_AARCH64_TLS_TPREL32 1033
/* s390 relocations defined by the ABIs */
-#define R_390_NONE 0 /* No reloc. */
-#define R_390_8 1 /* Direct 8 bit. */
-#define R_390_12 2 /* Direct 12 bit. */
-#define R_390_16 3 /* Direct 16 bit. */
-#define R_390_32 4 /* Direct 32 bit. */
-#define R_390_PC32 5 /* PC relative 32 bit. */
-#define R_390_GOT12 6 /* 12 bit GOT offset. */
-#define R_390_GOT32 7 /* 32 bit GOT offset. */
-#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
-#define R_390_COPY 9 /* Copy symbol at runtime. */
-#define R_390_GLOB_DAT 10 /* Create GOT entry. */
-#define R_390_JMP_SLOT 11 /* Create PLT entry. */
-#define R_390_RELATIVE 12 /* Adjust by program base. */
-#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
-#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
-#define R_390_GOT16 15 /* 16 bit GOT offset. */
-#define R_390_PC16 16 /* PC relative 16 bit. */
-#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
-#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
-#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
-#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
-#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
-#define R_390_64 22 /* Direct 64 bit. */
-#define R_390_PC64 23 /* PC relative 64 bit. */
-#define R_390_GOT64 24 /* 64 bit GOT offset. */
-#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
-#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
-#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
-#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
-#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
-#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
-#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
-#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
-#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
-#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
-#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
-#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */
-#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
-#define R_390_TLS_GDCALL 38 /* Tag for function call in general
- dynamic TLS code. */
-#define R_390_TLS_LDCALL 39 /* Tag for function call in local
- dynamic TLS code. */
-#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
- thread local data. */
-#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
- thread local data. */
-#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
- block offset. */
-#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
- thread local data in LD code. */
-#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
- thread local data in LD code. */
-#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
- negated static TLS block offset. */
-#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
- static TLS block. */
-#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
- static TLS block. */
-#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
- block. */
-#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
- block. */
-#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
-#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
-#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS
+#define R_390_NONE 0 /* No reloc. */
+#define R_390_8 1 /* Direct 8 bit. */
+#define R_390_12 2 /* Direct 12 bit. */
+#define R_390_16 3 /* Direct 16 bit. */
+#define R_390_32 4 /* Direct 32 bit. */
+#define R_390_PC32 5 /* PC relative 32 bit. */
+#define R_390_GOT12 6 /* 12 bit GOT offset. */
+#define R_390_GOT32 7 /* 32 bit GOT offset. */
+#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
+#define R_390_COPY 9 /* Copy symbol at runtime. */
+#define R_390_GLOB_DAT 10 /* Create GOT entry. */
+#define R_390_JMP_SLOT 11 /* Create PLT entry. */
+#define R_390_RELATIVE 12 /* Adjust by program base. */
+#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
+#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
+#define R_390_GOT16 15 /* 16 bit GOT offset. */
+#define R_390_PC16 16 /* PC relative 16 bit. */
+#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
+#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
+#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
+#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
+#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
+#define R_390_64 22 /* Direct 64 bit. */
+#define R_390_PC64 23 /* PC relative 64 bit. */
+#define R_390_GOT64 24 /* 64 bit GOT offset. */
+#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
+#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
+#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
+#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
+#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
+#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
+#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
+#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
+#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
+#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
+#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
+#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */
+#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
+#define R_390_TLS_GDCALL 38 /* Tag for function call in general
+ dynamic TLS code. */
+#define R_390_TLS_LDCALL 39 /* Tag for function call in local
+ dynamic TLS code. */
+#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
+ block. */
+#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
+ block. */
+#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
+#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
+#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS
block. */
#define R_390_20 57
/* Keep this the last entry. */
#define R_390_NUM 58
/* x86-64 relocation types */
-#define R_X86_64_NONE 0 /* No reloc */
-#define R_X86_64_64 1 /* Direct 64 bit */
-#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
-#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
-#define R_X86_64_PLT32 4 /* 32 bit PLT address */
-#define R_X86_64_COPY 5 /* Copy symbol at runtime */
-#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
-#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
-#define R_X86_64_RELATIVE 8 /* Adjust by program base */
-#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
offset to GOT */
-#define R_X86_64_32 10 /* Direct 32 bit zero extended */
-#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
-#define R_X86_64_16 12 /* Direct 16 bit zero extended */
-#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
-#define R_X86_64_8 14 /* Direct 8 bit sign extended */
-#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-#define R_X86_64_NUM 16
+#define R_X86_64_NUM 16
/* Legal values for e_flags field of Elf64_Ehdr. */
-#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
+#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
/* HPPA specific definitions. */
/* Legal values for e_flags field of Elf32_Ehdr. */
-#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
-#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
-#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
-#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
-#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
+#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
+#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
+#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
+#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
+#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
prediction. */
-#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
-#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
+#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
+#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
-#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
-#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
-#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
+#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
+#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
+#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
/* Additional section indeces. */
-#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared
+#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared
symbols in ANSI C. */
-#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
+#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
/* Legal values for sh_type field of Elf32_Shdr. */
-#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
-#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
-#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
+#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
+#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
+#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
/* Legal values for sh_flags field of Elf32_Shdr. */
-#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
-#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
-#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
+#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
+#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
+#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
/* Legal values for ST_TYPE subfield of st_info (symbol type). */
-#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
+#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
-#define STT_HP_OPAQUE (STT_LOOS + 0x1)
-#define STT_HP_STUB (STT_LOOS + 0x2)
+#define STT_HP_OPAQUE (STT_LOOS + 0x1)
+#define STT_HP_STUB (STT_LOOS + 0x2)
/* HPPA relocs. */
-#define R_PARISC_NONE 0 /* No reloc. */
-#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
-#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
-#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
-#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
-#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
-#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
-#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
-#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
-#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
-#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
-#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
-#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
-#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
-#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
-#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
-#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
-#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
-#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
-#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
-#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
-#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
-#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
-#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
-#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
-#define R_PARISC_FPTR64 64 /* 64 bits function address. */
-#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
-#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
-#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
-#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
-#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
-#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
-#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
-#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
-#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
-#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
-#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
-#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
-#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
-#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
-#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
-#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
-#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
-#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
-#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
-#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
-#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
-#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
-#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
-#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
-#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
-#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
-#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
-#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
-#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
-#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
-#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
-#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
-#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
-#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
-#define R_PARISC_LORESERVE 128
-#define R_PARISC_COPY 128 /* Copy relocation. */
-#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
-#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
-#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
-#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
-#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
-#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
-#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
-#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
-#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
-#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
-#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
-#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
-#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
-#define R_PARISC_HIRESERVE 255
+#define R_PARISC_NONE 0 /* No reloc. */
+#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
+#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
+#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
+#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
+#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
+#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
+#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
+#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
+#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
+#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
+#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
+#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
+#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
+#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
+#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
+#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
+#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
+#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
+#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
+#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
+#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
+#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
+#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
+#define R_PARISC_FPTR64 64 /* 64 bits function address. */
+#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
+#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
+#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
+#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
+#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
+#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
+#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
+#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
+#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
+#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
+#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
+#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
+#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
+#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
+#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
+#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
+#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
+#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
+#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LORESERVE 128
+#define R_PARISC_COPY 128 /* Copy relocation. */
+#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
+#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
+#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
+#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
+#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
+#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
+#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
+#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
+#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_HIRESERVE 255
/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
-#define PT_HP_TLS (PT_LOOS + 0x0)
-#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
-#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
-#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
-#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
-#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
-#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
-#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
-#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
-#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
-#define PT_HP_PARALLEL (PT_LOOS + 0x10)
-#define PT_HP_FASTBIND (PT_LOOS + 0x11)
-#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
-#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
-#define PT_HP_STACK (PT_LOOS + 0x14)
+#define PT_HP_TLS (PT_LOOS + 0x0)
+#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
+#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
+#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
+#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
+#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
+#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
+#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
+#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
+#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
+#define PT_HP_PARALLEL (PT_LOOS + 0x10)
+#define PT_HP_FASTBIND (PT_LOOS + 0x11)
+#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
+#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
+#define PT_HP_STACK (PT_LOOS + 0x14)
-#define PT_PARISC_ARCHEXT 0x70000000
-#define PT_PARISC_UNWIND 0x70000001
+#define PT_PARISC_ARCHEXT 0x70000000
+#define PT_PARISC_UNWIND 0x70000001
/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
-#define PF_PARISC_SBP 0x08000000
+#define PF_PARISC_SBP 0x08000000
-#define PF_HP_PAGE_SIZE 0x00100000
-#define PF_HP_FAR_SHARED 0x00200000
-#define PF_HP_NEAR_SHARED 0x00400000
-#define PF_HP_CODE 0x01000000
-#define PF_HP_MODIFY 0x02000000
-#define PF_HP_LAZYSWAP 0x04000000
-#define PF_HP_SBP 0x08000000
+#define PF_HP_PAGE_SIZE 0x00100000
+#define PF_HP_FAR_SHARED 0x00200000
+#define PF_HP_NEAR_SHARED 0x00400000
+#define PF_HP_CODE 0x01000000
+#define PF_HP_MODIFY 0x02000000
+#define PF_HP_LAZYSWAP 0x04000000
+#define PF_HP_SBP 0x08000000
/* IA-64 specific declarations. */
/* Processor specific flags for the Ehdr e_flags field. */
-#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
-#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
-#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
+#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
+#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
+#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
/* Processor specific values for the Phdr p_type field. */
-#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
-#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
+#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
+#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
/* Processor specific flags for the Phdr p_flags field. */
-#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
+#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
/* Processor specific values for the Shdr sh_type field. */
-#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
-#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
+#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
+#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
/* Processor specific flags for the Shdr sh_flags field. */
-#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
-#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
+#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
/* Processor specific values for the Dyn d_tag field. */
-#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
-#define DT_IA_64_NUM 1
+#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
+#define DT_IA_64_NUM 1
/* IA-64 relocations. */
-#define R_IA64_NONE 0x00 /* none */
-#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
-#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
-#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
-#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
-#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
-#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
-#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
-#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
-#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
-#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
-#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
-#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
-#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
-#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
-#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
-#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
-#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
-#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
-#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
-#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
-#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
-#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
-#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
-#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
-#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
-#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
-#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
-#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
-#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
-#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
-#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
-#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
-#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
-#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
-#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
-#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
-#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
-#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
-#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
-#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
-#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
-#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
-#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
-#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
-#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
-#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
-#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
-#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
-#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
-#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
-#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
-#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
-#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
-#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
-#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
-#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
-#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
-#define R_IA64_COPY 0x84 /* copy relocation */
-#define R_IA64_SUB 0x85 /* Addend and symbol difference */
-#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
-#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
-#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
-#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
-#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
-#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
-#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */
-#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
-#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
-#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
-#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
-#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
-#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
-#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
-#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
-#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
-#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
-#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* copy relocation */
+#define R_IA64_SUB 0x85 /* Addend and symbol difference */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
/* RISC-V relocations. */
#define R_RISCV_NONE 0
@@ -1421,47 +1421,47 @@
#define EF_RISCV_TSO 0x0010
typedef struct elf32_rel {
- Elf32_Addr r_offset;
- Elf32_Word r_info;
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
} Elf32_Rel;
typedef struct elf64_rel {
- Elf64_Addr r_offset; /* Location at which to apply the action */
- Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
} Elf64_Rel;
typedef struct elf32_rela{
- Elf32_Addr r_offset;
- Elf32_Word r_info;
- Elf32_Sword r_addend;
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
} Elf32_Rela;
typedef struct elf64_rela {
- Elf64_Addr r_offset; /* Location at which to apply the action */
- Elf64_Xword r_info; /* index and type of relocation */
- Elf64_Sxword r_addend; /* Constant addend used to compute value */
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Sxword r_addend; /* Constant addend used to compute value */
} Elf64_Rela;
typedef struct elf32_sym{
- Elf32_Word st_name;
- Elf32_Addr st_value;
- Elf32_Word st_size;
- unsigned char st_info;
- unsigned char st_other;
- Elf32_Half st_shndx;
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
} Elf32_Sym;
typedef struct elf64_sym {
- Elf64_Word st_name; /* Symbol name, index in string tbl */
- unsigned char st_info; /* Type and binding attributes */
- unsigned char st_other; /* No defined meaning, 0 */
- Elf64_Half st_shndx; /* Associated section index */
- Elf64_Addr st_value; /* Value of the symbol */
- Elf64_Xword st_size; /* Associated symbol size */
+ Elf64_Word st_name; /* Symbol name, index in string tbl */
+ unsigned char st_info; /* Type and binding attributes */
+ unsigned char st_other; /* No defined meaning, 0 */
+ Elf64_Half st_shndx; /* Associated section index */
+ Elf64_Addr st_value; /* Value of the symbol */
+ Elf64_Xword st_size; /* Associated symbol size */
} Elf64_Sym;
-#define EI_NIDENT 16
+#define EI_NIDENT 16
/* Special value for e_phnum. This indicates that the real number of
program headers is too large to fit into e_phnum. Instead the real
@@ -1469,30 +1469,30 @@
#define PN_XNUM 0xffff
typedef struct elf32_hdr{
- unsigned char e_ident[EI_NIDENT];
- Elf32_Half e_type;
- Elf32_Half e_machine;
- Elf32_Word e_version;
- Elf32_Addr e_entry; /* Entry point */
- Elf32_Off e_phoff;
- Elf32_Off e_shoff;
- Elf32_Word e_flags;
- Elf32_Half e_ehsize;
- Elf32_Half e_phentsize;
- Elf32_Half e_phnum;
- Elf32_Half e_shentsize;
- Elf32_Half e_shnum;
- Elf32_Half e_shstrndx;
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry; /* Entry point */
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
} Elf32_Ehdr;
typedef struct elf64_hdr {
- unsigned char e_ident[16]; /* ELF "magic number" */
+ unsigned char e_ident[16]; /* ELF "magic number" */
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
- Elf64_Addr e_entry; /* Entry point virtual address */
- Elf64_Off e_phoff; /* Program header table file offset */
- Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
@@ -1504,107 +1504,107 @@
/* These constants define the permissions on sections in the program
header, p_flags. */
-#define PF_R 0x4
-#define PF_W 0x2
-#define PF_X 0x1
+#define PF_R 0x4
+#define PF_W 0x2
+#define PF_X 0x1
typedef struct elf32_phdr{
- Elf32_Word p_type;
- Elf32_Off p_offset;
- Elf32_Addr p_vaddr;
- Elf32_Addr p_paddr;
- Elf32_Word p_filesz;
- Elf32_Word p_memsz;
- Elf32_Word p_flags;
- Elf32_Word p_align;
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
} Elf32_Phdr;
typedef struct elf64_phdr {
Elf64_Word p_type;
Elf64_Word p_flags;
- Elf64_Off p_offset; /* Segment file offset */
- Elf64_Addr p_vaddr; /* Segment virtual address */
- Elf64_Addr p_paddr; /* Segment physical address */
- Elf64_Xword p_filesz; /* Segment size in file */
- Elf64_Xword p_memsz; /* Segment size in memory */
- Elf64_Xword p_align; /* Segment alignment, file & memory */
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment, file & memory */
} Elf64_Phdr;
/* sh_type */
-#define SHT_NULL 0
-#define SHT_PROGBITS 1
-#define SHT_SYMTAB 2
-#define SHT_STRTAB 3
-#define SHT_RELA 4
-#define SHT_HASH 5
-#define SHT_DYNAMIC 6
-#define SHT_NOTE 7
-#define SHT_NOBITS 8
-#define SHT_REL 9
-#define SHT_SHLIB 10
-#define SHT_DYNSYM 11
-#define SHT_NUM 12
-#define SHT_LOPROC 0x70000000
-#define SHT_HIPROC 0x7fffffff
-#define SHT_LOUSER 0x80000000
-#define SHT_HIUSER 0xffffffff
-#define SHT_MIPS_LIST 0x70000000
-#define SHT_MIPS_CONFLICT 0x70000002
-#define SHT_MIPS_GPTAB 0x70000003
-#define SHT_MIPS_UCODE 0x70000004
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+#define SHT_NUM 12
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+#define SHT_MIPS_LIST 0x70000000
+#define SHT_MIPS_CONFLICT 0x70000002
+#define SHT_MIPS_GPTAB 0x70000003
+#define SHT_MIPS_UCODE 0x70000004
/* sh_flags */
-#define SHF_WRITE 0x1
-#define SHF_ALLOC 0x2
-#define SHF_EXECINSTR 0x4
-#define SHF_MASKPROC 0xf0000000
-#define SHF_MIPS_GPREL 0x10000000
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_MASKPROC 0xf0000000
+#define SHF_MIPS_GPREL 0x10000000
/* special section indexes */
-#define SHN_UNDEF 0
-#define SHN_LORESERVE 0xff00
-#define SHN_LOPROC 0xff00
-#define SHN_HIPROC 0xff1f
-#define SHN_ABS 0xfff1
-#define SHN_COMMON 0xfff2
-#define SHN_HIRESERVE 0xffff
-#define SHN_MIPS_ACCOMON 0xff00
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+#define SHN_MIPS_ACCOMON 0xff00
typedef struct elf32_shdr {
- Elf32_Word sh_name;
- Elf32_Word sh_type;
- Elf32_Word sh_flags;
- Elf32_Addr sh_addr;
- Elf32_Off sh_offset;
- Elf32_Word sh_size;
- Elf32_Word sh_link;
- Elf32_Word sh_info;
- Elf32_Word sh_addralign;
- Elf32_Word sh_entsize;
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
} Elf32_Shdr;
typedef struct elf64_shdr {
- Elf64_Word sh_name; /* Section name, index in string tbl */
- Elf64_Word sh_type; /* Type of section */
- Elf64_Xword sh_flags; /* Miscellaneous section attributes */
- Elf64_Addr sh_addr; /* Section virtual addr at execution */
- Elf64_Off sh_offset; /* Section file offset */
- Elf64_Xword sh_size; /* Size of section in bytes */
- Elf64_Word sh_link; /* Index of another section */
- Elf64_Word sh_info; /* Additional section information */
- Elf64_Xword sh_addralign; /* Section alignment */
- Elf64_Xword sh_entsize; /* Entry size if section holds table */
+ Elf64_Word sh_name; /* Section name, index in string tbl */
+ Elf64_Word sh_type; /* Type of section */
+ Elf64_Xword sh_flags; /* Miscellaneous section attributes */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Size of section in bytes */
+ Elf64_Word sh_link; /* Index of another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
} Elf64_Shdr;
-#define EI_MAG0 0 /* e_ident[] indexes */
-#define EI_MAG1 1
-#define EI_MAG2 2
-#define EI_MAG3 3
-#define EI_CLASS 4
-#define EI_DATA 5
-#define EI_VERSION 6
-#define EI_OSABI 7
-#define EI_PAD 8
+#define EI_MAG0 0 /* e_ident[] indexes */
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_OSABI 7
+#define EI_PAD 8
#define ELFOSABI_NONE 0 /* UNIX System V ABI */
#define ELFOSABI_SYSV 0 /* Alias. */
@@ -1619,56 +1619,57 @@
#define ELFOSABI_MODESTO 11 /* Novell Modesto. */
#define ELFOSABI_OPENBSD 12 /* OpenBSD. */
#define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC */
+#define ELFOSABI_XTENSA_FDPIC 65 /* Xtensa FDPIC */
#define ELFOSABI_ARM 97 /* ARM */
#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
-#define ELFMAG0 0x7f /* EI_MAG */
-#define ELFMAG1 'E'
-#define ELFMAG2 'L'
-#define ELFMAG3 'F'
-#define ELFMAG "\177ELF"
-#define SELFMAG 4
+#define ELFMAG0 0x7f /* EI_MAG */
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF"
+#define SELFMAG 4
-#define ELFCLASSNONE 0 /* EI_CLASS */
-#define ELFCLASS32 1
-#define ELFCLASS64 2
-#define ELFCLASSNUM 3
+#define ELFCLASSNONE 0 /* EI_CLASS */
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+#define ELFCLASSNUM 3
-#define ELFDATANONE 0 /* e_ident[EI_DATA] */
-#define ELFDATA2LSB 1
-#define ELFDATA2MSB 2
+#define ELFDATANONE 0 /* e_ident[EI_DATA] */
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
-#define EV_NONE 0 /* e_version, EI_VERSION */
-#define EV_CURRENT 1
-#define EV_NUM 2
+#define EV_NONE 0 /* e_version, EI_VERSION */
+#define EV_CURRENT 1
+#define EV_NUM 2
/* Notes used in ET_CORE */
-#define NT_PRSTATUS 1
-#define NT_FPREGSET 2
-#define NT_PRFPREG 2
-#define NT_PRPSINFO 3
-#define NT_TASKSTRUCT 4
-#define NT_AUXV 6
-#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
-#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
-#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
-#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
-#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
-#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */
-#define NT_S390_PREFIX 0x305 /* s390 prefix register */
-#define NT_S390_CTRS 0x304 /* s390 control registers */
-#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
-#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
-#define NT_S390_TIMER 0x301 /* s390 timer register */
-#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
-#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
-#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
-#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
-#define NT_ARM_TLS 0x401 /* ARM TLS register */
-#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
-#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
-#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
-#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */
+#define NT_PRSTATUS 1
+#define NT_FPREGSET 2
+#define NT_PRFPREG 2
+#define NT_PRPSINFO 3
+#define NT_TASKSTRUCT 4
+#define NT_AUXV 6
+#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
+#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
+#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
+#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
+#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */
+#define NT_S390_PREFIX 0x305 /* s390 prefix register */
+#define NT_S390_CTRS 0x304 /* s390 control registers */
+#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
+#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
+#define NT_S390_TIMER 0x301 /* s390 timer register */
+#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
+#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
+#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
+#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
+#define NT_ARM_TLS 0x401 /* ARM TLS register */
+#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
+#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
+#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
+#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */
/* Defined note types for GNU systems. */
@@ -1701,16 +1702,16 @@
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
- Elf32_Word n_namesz; /* Name size */
- Elf32_Word n_descsz; /* Content size */
- Elf32_Word n_type; /* Content type */
+ Elf32_Word n_namesz; /* Name size */
+ Elf32_Word n_descsz; /* Content size */
+ Elf32_Word n_type; /* Content type */
} Elf32_Nhdr;
/* Note header in a PT_NOTE section */
typedef struct elf64_note {
- Elf64_Word n_namesz; /* Name size */
- Elf64_Word n_descsz; /* Content size */
- Elf64_Word n_type; /* Content type */
+ Elf64_Word n_namesz; /* Name size */
+ Elf64_Word n_descsz; /* Content size */
+ Elf64_Word n_type; /* Content type */
} Elf64_Nhdr;
@@ -1735,13 +1736,13 @@
#ifdef ELF_CLASS
#if ELF_CLASS == ELFCLASS32
-#define elfhdr elf32_hdr
-#define elf_phdr elf32_phdr
-#define elf_note elf32_note
-#define elf_shdr elf32_shdr
-#define elf_sym elf32_sym
-#define elf_addr_t Elf32_Off
-#define elf_rela elf32_rela
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_note elf32_note
+#define elf_shdr elf32_shdr
+#define elf_sym elf32_sym
+#define elf_addr_t Elf32_Off
+#define elf_rela elf32_rela
#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf32_Rela
@@ -1751,13 +1752,13 @@
#else
-#define elfhdr elf64_hdr
-#define elf_phdr elf64_phdr
-#define elf_note elf64_note
-#define elf_shdr elf64_shdr
-#define elf_sym elf64_sym
-#define elf_addr_t Elf64_Off
-#define elf_rela elf64_rela
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_note elf64_note
+#define elf_shdr elf64_shdr
+#define elf_sym elf64_sym
+#define elf_addr_t Elf64_Off
+#define elf_rela elf64_rela
#ifdef ELF_USES_RELOCA
# define ELF_RELOC Elf64_Rela
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 2eb1176..548be9c 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -25,7 +25,7 @@
#include "hw/core/cpu.h"
#include "qemu/rcu.h"
-#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index be920d4..e1c498e 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -36,9 +36,6 @@
#ifndef TARGET_LONG_BITS
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
-#ifndef NB_MMU_MODES
-# error NB_MMU_MODES must be defined in cpu-param.h
-#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
@@ -55,24 +52,13 @@
# endif
#endif
-#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+#include "exec/target_long.h"
-/* target_ulong is the type of a virtual address */
-#if TARGET_LONG_SIZE == 4
-typedef int32_t target_long;
-typedef uint32_t target_ulong;
-#define TARGET_FMT_lx "%08x"
-#define TARGET_FMT_ld "%d"
-#define TARGET_FMT_lu "%u"
-#elif TARGET_LONG_SIZE == 8
-typedef int64_t target_long;
-typedef uint64_t target_ulong;
-#define TARGET_FMT_lx "%016" PRIx64
-#define TARGET_FMT_ld "%" PRId64
-#define TARGET_FMT_lu "%" PRIu64
-#else
-#error TARGET_LONG_SIZE undefined
-#endif
+/*
+ * Fix the number of mmu modes to 16, which is also the maximum
+ * supported by the softmmu tlb api.
+ */
+#define NB_MMU_MODES 16
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index e092543..ad9eb60 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -677,7 +677,6 @@
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
-void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
index f667014..7d743fe 100644
--- a/include/exec/gdbstub.h
+++ b/include/exec/gdbstub.h
@@ -10,122 +10,7 @@
#define GDB_WATCHPOINT_READ 3
#define GDB_WATCHPOINT_ACCESS 4
-/* For gdb file i/o remote protocol open flags. */
-#define GDB_O_RDONLY 0
-#define GDB_O_WRONLY 1
-#define GDB_O_RDWR 2
-#define GDB_O_APPEND 8
-#define GDB_O_CREAT 0x200
-#define GDB_O_TRUNC 0x400
-#define GDB_O_EXCL 0x800
-/* For gdb file i/o remote protocol errno values */
-#define GDB_EPERM 1
-#define GDB_ENOENT 2
-#define GDB_EINTR 4
-#define GDB_EBADF 9
-#define GDB_EACCES 13
-#define GDB_EFAULT 14
-#define GDB_EBUSY 16
-#define GDB_EEXIST 17
-#define GDB_ENODEV 19
-#define GDB_ENOTDIR 20
-#define GDB_EISDIR 21
-#define GDB_EINVAL 22
-#define GDB_ENFILE 23
-#define GDB_EMFILE 24
-#define GDB_EFBIG 27
-#define GDB_ENOSPC 28
-#define GDB_ESPIPE 29
-#define GDB_EROFS 30
-#define GDB_ENAMETOOLONG 91
-#define GDB_EUNKNOWN 9999
-
-/* For gdb file i/o remote protocol lseek whence. */
-#define GDB_SEEK_SET 0
-#define GDB_SEEK_CUR 1
-#define GDB_SEEK_END 2
-
-/* For gdb file i/o stat/fstat. */
-typedef uint32_t gdb_mode_t;
-typedef uint32_t gdb_time_t;
-
-struct gdb_stat {
- uint32_t gdb_st_dev; /* device */
- uint32_t gdb_st_ino; /* inode */
- gdb_mode_t gdb_st_mode; /* protection */
- uint32_t gdb_st_nlink; /* number of hard links */
- uint32_t gdb_st_uid; /* user ID of owner */
- uint32_t gdb_st_gid; /* group ID of owner */
- uint32_t gdb_st_rdev; /* device type (if inode device) */
- uint64_t gdb_st_size; /* total size, in bytes */
- uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */
- uint64_t gdb_st_blocks; /* number of blocks allocated */
- gdb_time_t gdb_st_atime; /* time of last access */
- gdb_time_t gdb_st_mtime; /* time of last modification */
- gdb_time_t gdb_st_ctime; /* time of last change */
-} QEMU_PACKED;
-
-struct gdb_timeval {
- gdb_time_t tv_sec; /* second */
- uint64_t tv_usec; /* microsecond */
-} QEMU_PACKED;
-
-#ifdef NEED_CPU_H
-#include "cpu.h"
-
-typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, uint64_t ret, int err);
-
-/**
- * gdb_do_syscall:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * ...: list of arguments to interpolate into @fmt
- *
- * Send a GDB syscall request. This function will return immediately;
- * the callback function will be called later when the remote system
- * call has completed.
- *
- * @fmt should be in the 'call-id,parameter,parameter...' format documented
- * for the F request packet in the GDB remote protocol. A limited set of
- * printf-style format specifiers is supported:
- * %x - target_ulong argument printed in hex
- * %lx - 64-bit argument printed in hex
- * %s - string pointer (target_ulong) and length (int) pair
- */
-void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
-/**
- * gdb_do_syscallv:
- * @cb: function to call when the system call has completed
- * @fmt: gdb syscall format string
- * @va: arguments to interpolate into @fmt
- *
- * As gdb_do_syscall, but taking a va_list rather than a variable
- * argument list.
- */
-void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va);
-int use_gdb_syscalls(void);
-
-#ifdef CONFIG_USER_ONLY
-/**
- * gdb_handlesig: yield control to gdb
- * @cpu: CPU
- * @sig: if non-zero, the signal number which caused us to stop
- *
- * This function yields control to gdb, when a user-mode-only target
- * needs to stop execution. If @sig is non-zero, then we will send a
- * stop packet to tell gdb that we have stopped because of this signal.
- *
- * This function will block (handling protocol requests from gdb)
- * until gdb tells us to continue target execution. When it does
- * return, the return value is a signal to deliver to the target,
- * or 0 if no signal should be delivered, ie the signal that caused
- * us to stop should be ignored.
- */
-int gdb_handlesig(CPUState *, int);
-void gdb_signalled(CPUArchState *, int);
-void gdbserver_fork(CPUState *);
-#endif
/* Get or set a register. Returns the size of the register. */
typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg);
typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
@@ -133,89 +18,6 @@
gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
int num_regs, const char *xml, int g_pos);
-/*
- * The GDB remote protocol transfers values in target byte order. As
- * the gdbstub may be batching up several register values we always
- * append to the array.
- */
-
-static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
-{
- g_byte_array_append(buf, &val, 1);
- return 1;
-}
-
-static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
-{
- uint16_t to_word = tswap16(val);
- g_byte_array_append(buf, (uint8_t *) &to_word, 2);
- return 2;
-}
-
-static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
-{
- uint32_t to_long = tswap32(val);
- g_byte_array_append(buf, (uint8_t *) &to_long, 4);
- return 4;
-}
-
-static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
-{
- uint64_t to_quad = tswap64(val);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- return 8;
-}
-
-static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
- uint64_t val_lo)
-{
- uint64_t to_quad;
-#if TARGET_BIG_ENDIAN
- to_quad = tswap64(val_hi);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- to_quad = tswap64(val_lo);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
-#else
- to_quad = tswap64(val_lo);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
- to_quad = tswap64(val_hi);
- g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
-#endif
- return 16;
-}
-
-static inline int gdb_get_zeroes(GByteArray *array, size_t len)
-{
- guint oldlen = array->len;
- g_byte_array_set_size(array, oldlen + len);
- memset(array->data + oldlen, 0, len);
-
- return len;
-}
-
-/**
- * gdb_get_reg_ptr: get pointer to start of last element
- * @len: length of element
- *
- * This is a helper function to extract the pointer to the last
- * element for additional processing. Some front-ends do additional
- * dynamic swapping of the elements based on CPU state.
- */
-static inline uint8_t * gdb_get_reg_ptr(GByteArray *buf, int len)
-{
- return buf->data + buf->len - len;
-}
-
-#if TARGET_LONG_BITS == 64
-#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
-#define ldtul_p(addr) ldq_p(addr)
-#else
-#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
-#define ldtul_p(addr) ldl_p(addr)
-#endif
-
-#endif /* NEED_CPU_H */
-
/**
* gdbserver_start: start the gdb server
* @port_or_device: connection spec for gdb
@@ -226,16 +28,6 @@
*/
int gdbserver_start(const char *port_or_device);
-/**
- * gdb_exit: exit gdb session, reporting inferior status
- * @code: exit code reported
- *
- * This closes the session and sends a final packet to GDB reporting
- * the exit status of the program. It also cleans up any connections
- * detritus before returning.
- */
-void gdb_exit(int code);
-
void gdb_set_stop_cpu(CPUState *cpu);
/**
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 6fa0b07..15ade91 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1738,7 +1738,7 @@
*
* @notifier: the notifier to be notified
*/
-void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *n);
+void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
/**
diff --git a/include/exec/target_long.h b/include/exec/target_long.h
new file mode 100644
index 0000000..93c9472
--- /dev/null
+++ b/include/exec/target_long.h
@@ -0,0 +1,42 @@
+/*
+ * Target Long Definitions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _TARGET_LONG_H_
+#define _TARGET_LONG_H_
+
+/*
+ * Usually this should only be included via cpu-defs.h; however, for
+ * certain cases where we want to build only two versions of a binary
+ * object we can include it directly. In that case the build system
+ * must ensure TARGET_LONG_BITS is already defined.
+ */
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS not defined
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#endif /* _TARGET_LONG_H_ */
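
As an illustrative sketch only (not part of the patch): target-dependent code consumes these definitions once TARGET_LONG_BITS is available, normally via cpu-defs.h; the helper name dump_vaddr below is hypothetical.

    #include "qemu/osdep.h"
    #include "qemu/qemu-print.h"
    #include "exec/target_long.h"   /* normally pulled in via cpu-defs.h */

    /* Print a guest virtual address with the width-correct format macro. */
    static void dump_vaddr(target_ulong addr)
    {
        /* TARGET_FMT_lx expands to "%08x" or "%016" PRIx64 as appropriate. */
        qemu_printf("vaddr=0x" TARGET_FMT_lx "\n", addr);
    }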
diff --git a/include/exec/tb-flush.h b/include/exec/tb-flush.h
new file mode 100644
index 0000000..d92d065
--- /dev/null
+++ b/include/exec/tb-flush.h
@@ -0,0 +1,26 @@
+/*
+ * tb-flush prototype for use by the rest of the system.
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef _TB_FLUSH_H_
+#define _TB_FLUSH_H_
+
+/**
+ * tb_flush() - flush all translation blocks
+ * @cs: CPUState (must be valid, but treated as anonymous pointer)
+ *
+ * Used to flush all the translation blocks in the system. Sometimes
+ * it is simpler to flush everything than work out which individual
+ * translations are now invalid and ensure they are not called
+ * anymore.
+ *
+ * tb_flush() takes care of running the flush in an exclusive context
+ * if it is not already running in one. This means no guest code will
+ * run until this completes.
+ */
+void tb_flush(CPUState *cs);
+
+#endif /* _TB_FLUSH_H_ */
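
A hedged usage sketch (not from the patch): callers that invalidate cached translations wholesale can now include only the new header instead of exec/exec-all.h.

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"
    #include "exec/tb-flush.h"

    /*
     * Drop every cached translation; safe to call from a vCPU context
     * because tb_flush() arranges an exclusive section internally.
     */
    static void invalidate_all_translations(CPUState *cs)
    {
        tb_flush(cs);
    }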
diff --git a/include/gdbstub/helpers.h b/include/gdbstub/helpers.h
new file mode 100644
index 0000000..c573aef
--- /dev/null
+++ b/include/gdbstub/helpers.h
@@ -0,0 +1,103 @@
+/*
+ * gdbstub helpers
+ *
+ * These are all used by the various frontends and have to be host
+ * aware to ensure things are stored in target order.
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _GDBSTUB_HELPERS_H_
+#define _GDBSTUB_HELPERS_H_
+
+#ifdef NEED_CPU_H
+#include "cpu.h"
+
+/*
+ * The GDB remote protocol transfers values in target byte order. As
+ * the gdbstub may be batching up several register values we always
+ * append to the array.
+ */
+
+static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
+{
+ g_byte_array_append(buf, &val, 1);
+ return 1;
+}
+
+static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
+{
+ uint16_t to_word = tswap16(val);
+ g_byte_array_append(buf, (uint8_t *) &to_word, 2);
+ return 2;
+}
+
+static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
+{
+ uint32_t to_long = tswap32(val);
+ g_byte_array_append(buf, (uint8_t *) &to_long, 4);
+ return 4;
+}
+
+static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
+{
+ uint64_t to_quad = tswap64(val);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ return 8;
+}
+
+static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
+ uint64_t val_lo)
+{
+ uint64_t to_quad;
+#if TARGET_BIG_ENDIAN
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#else
+ to_quad = tswap64(val_lo);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+ to_quad = tswap64(val_hi);
+ g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
+#endif
+ return 16;
+}
+
+static inline int gdb_get_zeroes(GByteArray *array, size_t len)
+{
+ guint oldlen = array->len;
+ g_byte_array_set_size(array, oldlen + len);
+ memset(array->data + oldlen, 0, len);
+
+ return len;
+}
+
+/**
+ * gdb_get_reg_ptr: get pointer to start of last element
+ * @len: length of element
+ *
+ * This is a helper function to extract the pointer to the last
+ * element for additional processing. Some front-ends do additional
+ * dynamic swapping of the elements based on CPU state.
+ */
+static inline uint8_t *gdb_get_reg_ptr(GByteArray *buf, int len)
+{
+ return buf->data + buf->len - len;
+}
+
+#if TARGET_LONG_BITS == 64
+#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
+#define ldtul_p(addr) ldq_p(addr)
+#else
+#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
+#define ldtul_p(addr) ldl_p(addr)
+#endif
+
+#else
+#error "gdbstub helpers should only be included by target specific code"
+#endif
+
+#endif /* _GDBSTUB_HELPERS_H_ */
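
A brief sketch of how a target front-end's read-register callback might use these helpers; the env fields and register layout are illustrative only, included after "cpu.h" and "gdbstub/helpers.h" in target code.

    /*
     * Hypothetical target: expose a 32-bit general register file plus a
     * 64-bit counter to gdb, appending values in target byte order.
     */
    static int demo_gdb_read_register(CPUArchState *env, GByteArray *buf, int n)
    {
        if (n < 16) {
            return gdb_get_reg32(buf, env->regs[n]);   /* assumed field */
        } else if (n == 16) {
            return gdb_get_reg64(buf, env->counter);   /* assumed field */
        }
        return 0;  /* unknown register: contribute nothing */
    }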
diff --git a/include/gdbstub/syscalls.h b/include/gdbstub/syscalls.h
new file mode 100644
index 0000000..243eaf8
--- /dev/null
+++ b/include/gdbstub/syscalls.h
@@ -0,0 +1,113 @@
+/*
+ * GDB Syscall support
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef _SYSCALLS_H_
+#define _SYSCALLS_H_
+
+/* For gdb file i/o remote protocol open flags. */
+#define GDB_O_RDONLY 0
+#define GDB_O_WRONLY 1
+#define GDB_O_RDWR 2
+#define GDB_O_APPEND 8
+#define GDB_O_CREAT 0x200
+#define GDB_O_TRUNC 0x400
+#define GDB_O_EXCL 0x800
+
+/* For gdb file i/o remote protocol errno values */
+#define GDB_EPERM 1
+#define GDB_ENOENT 2
+#define GDB_EINTR 4
+#define GDB_EBADF 9
+#define GDB_EACCES 13
+#define GDB_EFAULT 14
+#define GDB_EBUSY 16
+#define GDB_EEXIST 17
+#define GDB_ENODEV 19
+#define GDB_ENOTDIR 20
+#define GDB_EISDIR 21
+#define GDB_EINVAL 22
+#define GDB_ENFILE 23
+#define GDB_EMFILE 24
+#define GDB_EFBIG 27
+#define GDB_ENOSPC 28
+#define GDB_ESPIPE 29
+#define GDB_EROFS 30
+#define GDB_ENAMETOOLONG 91
+#define GDB_EUNKNOWN 9999
+
+/* For gdb file i/o remote protocol lseek whence. */
+#define GDB_SEEK_SET 0
+#define GDB_SEEK_CUR 1
+#define GDB_SEEK_END 2
+
+/* For gdb file i/o stat/fstat. */
+typedef uint32_t gdb_mode_t;
+typedef uint32_t gdb_time_t;
+
+struct gdb_stat {
+ uint32_t gdb_st_dev; /* device */
+ uint32_t gdb_st_ino; /* inode */
+ gdb_mode_t gdb_st_mode; /* protection */
+ uint32_t gdb_st_nlink; /* number of hard links */
+ uint32_t gdb_st_uid; /* user ID of owner */
+ uint32_t gdb_st_gid; /* group ID of owner */
+ uint32_t gdb_st_rdev; /* device type (if inode device) */
+ uint64_t gdb_st_size; /* total size, in bytes */
+ uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */
+ uint64_t gdb_st_blocks; /* number of blocks allocated */
+ gdb_time_t gdb_st_atime; /* time of last access */
+ gdb_time_t gdb_st_mtime; /* time of last modification */
+ gdb_time_t gdb_st_ctime; /* time of last change */
+} QEMU_PACKED;
+
+struct gdb_timeval {
+ gdb_time_t tv_sec; /* second */
+ uint64_t tv_usec; /* microsecond */
+} QEMU_PACKED;
+
+typedef void (*gdb_syscall_complete_cb)(CPUState *cpu, uint64_t ret, int err);
+
+/**
+ * gdb_do_syscall:
+ * @cb: function to call when the system call has completed
+ * @fmt: gdb syscall format string
+ * ...: list of arguments to interpolate into @fmt
+ *
+ * Send a GDB syscall request. This function will return immediately;
+ * the callback function will be called later when the remote system
+ * call has completed.
+ *
+ * @fmt should be in the 'call-id,parameter,parameter...' format documented
+ * for the F request packet in the GDB remote protocol. A limited set of
+ * printf-style format specifiers is supported:
+ * %x - target_ulong argument printed in hex
+ * %lx - 64-bit argument printed in hex
+ * %s - string pointer (target_ulong) and length (int) pair
+ */
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+
+/**
+ * use_gdb_syscalls() - report if GDB should be used for syscalls
+ *
+ * This is mostly driven by the semihosting mode the user configures,
+ * but assuming GDB is allowed by that mode we report true if GDB is
+ * connected to the stub.
+ */
+int use_gdb_syscalls(void);
+
+/**
+ * gdb_exit: exit gdb session, reporting inferior status
+ * @code: exit code reported
+ *
+ * This closes the session and sends a final packet to GDB reporting
+ * the exit status of the program. It also cleans up any connection
+ * detritus before returning.
+ */
+void gdb_exit(int code);
+
+#endif /* _SYSCALLS_H_ */
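
For example, a semihosting-style write could be issued through the documented format string roughly as follows; the callback and function names are placeholders, not code from this series.

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"
    #include "gdbstub/syscalls.h"

    static void demo_write_done(CPUState *cpu, uint64_t ret, int err)
    {
        /* ret is the remote write() result; err is a GDB_E* value on failure. */
    }

    static void demo_semihost_write(target_ulong fd, target_ulong buf,
                                    target_ulong len)
    {
        if (use_gdb_syscalls()) {
            /* "write,fd,buf,len" per the F-packet syntax described above. */
            gdb_do_syscall(demo_write_done, "write,%x,%x,%x", fd, buf, len);
        }
    }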
diff --git a/include/gdbstub/user.h b/include/gdbstub/user.h
new file mode 100644
index 0000000..d392e51
--- /dev/null
+++ b/include/gdbstub/user.h
@@ -0,0 +1,43 @@
+/*
+ * gdbstub user-mode only APIs
+ *
+ * Copyright (c) 2022 Linaro Ltd
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef GDBSTUB_USER_H
+#define GDBSTUB_USER_H
+
+/**
+ * gdb_handlesig() - yield control to gdb
+ * @cpu: CPU
+ * @sig: if non-zero, the signal number which caused us to stop
+ *
+ * This function yields control to gdb, when a user-mode-only target
+ * needs to stop execution. If @sig is non-zero, then we will send a
+ * stop packet to tell gdb that we have stopped because of this signal.
+ *
+ * This function will block (handling protocol requests from gdb)
+ * until gdb tells us to continue target execution. When it does
+ * return, the return value is a signal to deliver to the target,
+ * or 0 if no signal should be delivered, ie the signal that caused
+ * us to stop should be ignored.
+ */
+int gdb_handlesig(CPUState *, int);
+
+/**
+ * gdb_signalled() - inform remote gdb of sig exit
+ * @as: current CPUArchState
+ * @sig: signal number
+ */
+void gdb_signalled(CPUArchState *as, int sig);
+
+/**
+ * gdbserver_fork() - disable gdb stub for child processes.
+ * @cs: CPU
+ */
+void gdbserver_fork(CPUState *cs);
+
+
+#endif /* GDBSTUB_USER_H */
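
A hedged sketch of the usual calling pattern in a user-mode signal path (simplified; real linux-user code does more bookkeeping, and the function names here are placeholders).

    /*
     * When the guest stops on a signal, hand control to gdb and deliver
     * whatever signal gdb decides on (0 means swallow it).
     */
    static void demo_stop_on_signal(CPUState *cs, int host_sig)
    {
        int deliver = gdb_handlesig(cs, host_sig);
        if (deliver) {
            /* queue the signal for the guest here (queue_signal() in real code) */
        }
    }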
diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h
index cc0d370..e0e51e8 100644
--- a/include/hw/acpi/acpi.h
+++ b/include/hw/acpi/acpi.h
@@ -66,7 +66,7 @@
#define ACPI_BITMASK_POWER_BUTTON_STATUS 0x0100
#define ACPI_BITMASK_SLEEP_BUTTON_STATUS 0x0200
#define ACPI_BITMASK_RT_CLOCK_STATUS 0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
+#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_WAKE_STATUS 0x8000
#define ACPI_BITMASK_ALL_FIXED_STATUS (\
@@ -84,7 +84,7 @@
#define ACPI_BITMASK_POWER_BUTTON_ENABLE 0x0100
#define ACPI_BITMASK_SLEEP_BUTTON_ENABLE 0x0200
#define ACPI_BITMASK_RT_CLOCK_ENABLE 0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */
+#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_PM1_COMMON_ENABLED ( \
ACPI_BITMASK_RT_CLOCK_ENABLE | \
diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h
index 57a542c..2faf7f0 100644
--- a/include/hw/acpi/ich9.h
+++ b/include/hw/acpi/ich9.h
@@ -87,6 +87,7 @@
DeviceState *dev, Error **errp);
void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
+bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus);
void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
#endif /* HW_ACPI_ICH9_H */
diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h
index 7e268c2..ef59810 100644
--- a/include/hw/acpi/pcihp.h
+++ b/include/hw/acpi/pcihp.h
@@ -49,15 +49,16 @@
uint32_t acpi_index;
PCIBus *root;
MemoryRegion io;
- bool legacy_piix;
uint16_t io_base;
uint16_t io_len;
+ bool use_acpi_hotplug_bridge;
+ bool use_acpi_root_pci_hotplug;
} AcpiPciHpState;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root,
- MemoryRegion *address_space_io, bool bridges_enabled,
- uint16_t io_base);
+ MemoryRegion *address_space_io, uint16_t io_base);
+bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus);
void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp);
void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
@@ -69,7 +70,9 @@
Error **errp);
/* Called on reset */
-void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off);
+void acpi_pcihp_reset(AcpiPciHpState *s);
+
+void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus);
extern const VMStateDescription vmstate_acpi_pcihp_pci_status;
diff --git a/include/hw/acpi/piix4.h b/include/hw/acpi/piix4.h
index be1f8ea..eb1c122 100644
--- a/include/hw/acpi/piix4.h
+++ b/include/hw/acpi/piix4.h
@@ -57,8 +57,6 @@
Notifier powerdown_notifier;
AcpiPciHpState acpi_pci_hotplug;
- bool use_acpi_hotplug_bridge;
- bool use_acpi_root_pci_hotplug;
bool not_migrate_acpi_index;
uint8_t disable_s3;
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 75689bf..821e937 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -30,7 +30,7 @@
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
-#include "qemu/plugin.h"
+#include "qemu/plugin-event.h"
#include "qom/object.h"
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h
index b161be5..b2cffbb 100644
--- a/include/hw/cxl/cxl.h
+++ b/include/hw/cxl/cxl.h
@@ -49,6 +49,7 @@
PCIHostState parent_obj;
CXLComponentState cxl_cstate;
+ bool passthrough;
};
#define TYPE_PXB_CXL_HOST "pxb-cxl-host"
diff --git a/include/hw/cxl/cxl_component.h b/include/hw/cxl/cxl_component.h
index 692d7a5..42c7e58 100644
--- a/include/hw/cxl/cxl_component.h
+++ b/include/hw/cxl/cxl_component.h
@@ -65,11 +65,37 @@
#define CXL_RAS_REGISTERS_OFFSET 0x80
#define CXL_RAS_REGISTERS_SIZE 0x58
REG32(CXL_RAS_UNC_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET)
+#define CXL_RAS_UNC_ERR_CACHE_DATA_PARITY 0
+#define CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY 1
+#define CXL_RAS_UNC_ERR_CACHE_BE_PARITY 2
+#define CXL_RAS_UNC_ERR_CACHE_DATA_ECC 3
+#define CXL_RAS_UNC_ERR_MEM_DATA_PARITY 4
+#define CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY 5
+#define CXL_RAS_UNC_ERR_MEM_BE_PARITY 6
+#define CXL_RAS_UNC_ERR_MEM_DATA_ECC 7
+#define CXL_RAS_UNC_ERR_REINIT_THRESHOLD 8
+#define CXL_RAS_UNC_ERR_RSVD_ENCODING 9
+#define CXL_RAS_UNC_ERR_POISON_RECEIVED 10
+#define CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW 11
+#define CXL_RAS_UNC_ERR_INTERNAL 14
+#define CXL_RAS_UNC_ERR_CXL_IDE_TX 15
+#define CXL_RAS_UNC_ERR_CXL_IDE_RX 16
+#define CXL_RAS_UNC_ERR_CXL_UNUSED 63 /* Magic value */
REG32(CXL_RAS_UNC_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x4)
REG32(CXL_RAS_UNC_ERR_SEVERITY, CXL_RAS_REGISTERS_OFFSET + 0x8)
REG32(CXL_RAS_COR_ERR_STATUS, CXL_RAS_REGISTERS_OFFSET + 0xc)
+#define CXL_RAS_COR_ERR_CACHE_DATA_ECC 0
+#define CXL_RAS_COR_ERR_MEM_DATA_ECC 1
+#define CXL_RAS_COR_ERR_CRC_THRESHOLD 2
+#define CXL_RAS_COR_ERR_RETRY_THRESHOLD 3
+#define CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED 4
+#define CXL_RAS_COR_ERR_MEM_POISON_RECEIVED 5
+#define CXL_RAS_COR_ERR_PHYSICAL 6
REG32(CXL_RAS_COR_ERR_MASK, CXL_RAS_REGISTERS_OFFSET + 0x10)
REG32(CXL_RAS_ERR_CAP_CTRL, CXL_RAS_REGISTERS_OFFSET + 0x14)
+ FIELD(CXL_RAS_ERR_CAP_CTRL, FIRST_ERROR_POINTER, 0, 6)
+REG32(CXL_RAS_ERR_HEADER0, CXL_RAS_REGISTERS_OFFSET + 0x18)
+#define CXL_RAS_ERR_HEADER_NUM 32
/* Offset 0x18 - 0x58 reserved for RAS logs */
/* 8.2.5.10 - CXL Security Capability Structure */
@@ -221,6 +247,7 @@
}
CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb);
+bool cxl_get_hb_passthrough(PCIHostState *hb);
void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp);
void cxl_doe_cdat_release(CXLComponentState *cxl_cstate);
diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
index 7e5ad65..d589f78 100644
--- a/include/hw/cxl/cxl_device.h
+++ b/include/hw/cxl/cxl_device.h
@@ -232,6 +232,14 @@
FIELD(CXL_MEM_DEV_STS, MBOX_READY, 4, 1)
FIELD(CXL_MEM_DEV_STS, RESET_NEEDED, 5, 3)
+typedef struct CXLError {
+ QTAILQ_ENTRY(CXLError) node;
+ int type; /* Error code as per FE definition */
+ uint32_t header[32];
+} CXLError;
+
+typedef QTAILQ_HEAD(, CXLError) CXLErrorList;
+
struct CXLType3Dev {
/* Private */
PCIDevice parent_obj;
@@ -248,6 +256,9 @@
/* DOE */
DOECap doe_cdat;
+
+ /* Error injection */
+ CXLErrorList error_list;
};
#define TYPE_CXL_TYPE3 "cxl-type3"
diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h
index fbe0b1e..dffb0e7 100644
--- a/include/hw/elf_ops.h
+++ b/include/hw/elf_ops.h
@@ -1,30 +1,30 @@
static void glue(bswap_ehdr, SZ)(struct elfhdr *ehdr)
{
- bswap16s(&ehdr->e_type); /* Object file type */
- bswap16s(&ehdr->e_machine); /* Architecture */
- bswap32s(&ehdr->e_version); /* Object file version */
- bswapSZs(&ehdr->e_entry); /* Entry point virtual address */
- bswapSZs(&ehdr->e_phoff); /* Program header table file offset */
- bswapSZs(&ehdr->e_shoff); /* Section header table file offset */
- bswap32s(&ehdr->e_flags); /* Processor-specific flags */
- bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
- bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
- bswap16s(&ehdr->e_phnum); /* Program header table entry count */
- bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
- bswap16s(&ehdr->e_shnum); /* Section header table entry count */
- bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
+ bswap16s(&ehdr->e_type); /* Object file type */
+ bswap16s(&ehdr->e_machine); /* Architecture */
+ bswap32s(&ehdr->e_version); /* Object file version */
+ bswapSZs(&ehdr->e_entry); /* Entry point virtual address */
+ bswapSZs(&ehdr->e_phoff); /* Program header table file offset */
+ bswapSZs(&ehdr->e_shoff); /* Section header table file offset */
+ bswap32s(&ehdr->e_flags); /* Processor-specific flags */
+ bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
+ bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
+ bswap16s(&ehdr->e_phnum); /* Program header table entry count */
+ bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
+ bswap16s(&ehdr->e_shnum); /* Section header table entry count */
+ bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
}
static void glue(bswap_phdr, SZ)(struct elf_phdr *phdr)
{
- bswap32s(&phdr->p_type); /* Segment type */
- bswapSZs(&phdr->p_offset); /* Segment file offset */
- bswapSZs(&phdr->p_vaddr); /* Segment virtual address */
- bswapSZs(&phdr->p_paddr); /* Segment physical address */
- bswapSZs(&phdr->p_filesz); /* Segment size in file */
- bswapSZs(&phdr->p_memsz); /* Segment size in memory */
- bswap32s(&phdr->p_flags); /* Segment flags */
- bswapSZs(&phdr->p_align); /* Segment alignment */
+ bswap32s(&phdr->p_type); /* Segment type */
+ bswapSZs(&phdr->p_offset); /* Segment file offset */
+ bswapSZs(&phdr->p_vaddr); /* Segment virtual address */
+ bswapSZs(&phdr->p_paddr); /* Segment physical address */
+ bswapSZs(&phdr->p_filesz); /* Segment size in file */
+ bswapSZs(&phdr->p_memsz); /* Segment size in memory */
+ bswap32s(&phdr->p_flags); /* Segment flags */
+ bswapSZs(&phdr->p_align); /* Segment alignment */
}
static void glue(bswap_shdr, SZ)(struct elf_shdr *shdr)
diff --git a/include/hw/hotplug.h b/include/hw/hotplug.h
index e15f59c..a9840ed 100644
--- a/include/hw/hotplug.h
+++ b/include/hw/hotplug.h
@@ -48,6 +48,7 @@
* @unplug: unplug callback.
* Used for device removal with devices that implement
* asynchronous and synchronous (surprise) removal.
+ * @is_hotpluggable_bus: called to check whether the bus or its parent allows hotplug on the bus
*/
struct HotplugHandlerClass {
/* <private> */
@@ -58,6 +59,7 @@
hotplug_fn plug;
hotplug_fn unplug_request;
hotplug_fn unplug;
+ bool (*is_hotpluggable_bus)(HotplugHandler *plug_handler, BusState *bus);
};
/**
diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h
index 0b337a0..da19ae1 100644
--- a/include/hw/i386/x86.h
+++ b/include/hw/i386/x86.h
@@ -18,10 +18,8 @@
#define HW_I386_X86_H
#include "exec/hwaddr.h"
-#include "qemu/notify.h"
#include "hw/boards.h"
-#include "hw/nmi.h"
#include "hw/intc/ioapic.h"
#include "hw/isa/isa.h"
#include "qom/object.h"
diff --git a/include/hw/intc/mips_gic.h b/include/hw/intc/mips_gic.h
index eeb136e..5e4c71e 100644
--- a/include/hw/intc/mips_gic.h
+++ b/include/hw/intc/mips_gic.h
@@ -211,8 +211,8 @@
/* GIC VP Timer */
MIPSGICTimerState *gic_timer;
- int32_t num_vps;
- int32_t num_irq;
+ uint32_t num_vps;
+ uint32_t num_irq;
};
#endif /* MIPS_GIC_H */
diff --git a/include/hw/isa/i8259_internal.h b/include/hw/isa/i8259_internal.h
index 155b098..f9dcc41 100644
--- a/include/hw/isa/i8259_internal.h
+++ b/include/hw/isa/i8259_internal.h
@@ -61,6 +61,7 @@
uint8_t single_mode; /* true if slave pic is not initialized */
uint8_t elcr; /* PIIX edge/trigger selection*/
uint8_t elcr_mask;
+ uint8_t ltim; /* Edge/Level Bank Select (pre-PIIX, chip-wide) */
qemu_irq int_out[1];
uint32_t master; /* reflects /SP input pin */
uint32_t iobase;
diff --git a/include/hw/isa/vt82c686.h b/include/hw/isa/vt82c686.h
index e273cd3..da1722d 100644
--- a/include/hw/isa/vt82c686.h
+++ b/include/hw/isa/vt82c686.h
@@ -1,6 +1,8 @@
#ifndef HW_VT82C686_H
#define HW_VT82C686_H
+#include "hw/pci/pci_device.h"
+#include "audio/audio.h"
#define TYPE_VT82C686B_ISA "vt82c686b-isa"
#define TYPE_VT82C686B_USB_UHCI "vt82c686b-usb-uhci"
@@ -9,6 +11,29 @@
#define TYPE_VIA_IDE "via-ide"
#define TYPE_VIA_MC97 "via-mc97"
+typedef struct {
+ uint8_t stat;
+ uint8_t type;
+ uint32_t base;
+ uint32_t curr;
+ uint32_t addr;
+ uint32_t clen;
+} ViaAC97SGDChannel;
+
+OBJECT_DECLARE_SIMPLE_TYPE(ViaAC97State, VIA_AC97);
+
+struct ViaAC97State {
+ PCIDevice dev;
+ QEMUSoundCard card;
+ MemoryRegion sgd;
+ MemoryRegion fm;
+ MemoryRegion midi;
+ SWVoiceOut *vo;
+ ViaAC97SGDChannel aur;
+ uint16_t codec_regs[128];
+ uint32_t ac97_cmd;
+};
+
void via_isa_set_irq(PCIDevice *d, int n, int level);
#endif
diff --git a/include/hw/misc/mips_cmgcr.h b/include/hw/misc/mips_cmgcr.h
index 9fa5894..db4bf5f 100644
--- a/include/hw/misc/mips_cmgcr.h
+++ b/include/hw/misc/mips_cmgcr.h
@@ -75,7 +75,7 @@
SysBusDevice parent_obj;
int32_t gcr_rev;
- int32_t num_vps;
+ uint32_t num_vps;
hwaddr gcr_base;
MemoryRegion iomem;
MemoryRegion *cpc_mr;
diff --git a/include/hw/misc/mips_itu.h b/include/hw/misc/mips_itu.h
index 50d9611..35218b2 100644
--- a/include/hw/misc/mips_itu.h
+++ b/include/hw/misc/mips_itu.h
@@ -57,8 +57,8 @@
SysBusDevice parent_obj;
/*< public >*/
- int32_t num_fifo;
- int32_t num_semaphores;
+ uint32_t num_fifo;
+ uint32_t num_semaphores;
/* ITC Storage */
ITCStorageCell *cell;
@@ -72,9 +72,8 @@
uint64_t icr0;
/* SAAR */
- bool saar_present;
- void *saar;
-
+ uint64_t *saar;
+ MIPSCPU *cpu0;
};
/* Get ITC Configuration Tag memory region. */
diff --git a/include/hw/net/mii.h b/include/hw/net/mii.h
index 4ae4dcc..ed1bb52 100644
--- a/include/hw/net/mii.h
+++ b/include/hw/net/mii.h
@@ -55,6 +55,7 @@
#define MII_BMCR_CTST (1 << 7) /* Collision test */
#define MII_BMCR_SPEED1000 (1 << 6) /* MSB of Speed (1000) */
+#define MII_BMSR_100T4 (1 << 15) /* Can do 100mbps T4 */
#define MII_BMSR_100TX_FD (1 << 14) /* Can do 100mbps, full-duplex */
#define MII_BMSR_100TX_HD (1 << 13) /* Can do 100mbps, half-duplex */
#define MII_BMSR_10T_FD (1 << 12) /* Can do 10mbps, full-duplex */
@@ -81,20 +82,31 @@
#define MII_ANLPAR_ACK (1 << 14)
#define MII_ANLPAR_PAUSEASY (1 << 11) /* can pause asymmetrically */
#define MII_ANLPAR_PAUSE (1 << 10) /* can pause */
+#define MII_ANLPAR_T4 (1 << 9)
#define MII_ANLPAR_TXFD (1 << 8)
#define MII_ANLPAR_TX (1 << 7)
#define MII_ANLPAR_10FD (1 << 6)
#define MII_ANLPAR_10 (1 << 5)
#define MII_ANLPAR_CSMACD (1 << 0)
-#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */
+#define MII_ANER_NP (1 << 2) /* Next Page Able */
+#define MII_ANER_NWAY (1 << 0) /* Can do N-way auto-nego */
+#define MII_ANNP_MP (1 << 13) /* Message Page */
+
+#define MII_CTRL1000_MASTER (1 << 11) /* MASTER-SLAVE Manual Configuration Value */
+#define MII_CTRL1000_PORT (1 << 10) /* T2_Repeater/DTE bit */
#define MII_CTRL1000_FULL (1 << 9) /* 1000BASE-T full duplex */
#define MII_CTRL1000_HALF (1 << 8) /* 1000BASE-T half duplex */
+#define MII_STAT1000_LOK (1 << 13) /* Local Receiver Status */
+#define MII_STAT1000_ROK (1 << 12) /* Remote Receiver Status */
#define MII_STAT1000_FULL (1 << 11) /* 1000BASE-T full duplex */
#define MII_STAT1000_HALF (1 << 10) /* 1000BASE-T half duplex */
+#define MII_EXTSTAT_1000T_FD (1 << 13) /* 1000BASE-T Full Duplex */
+#define MII_EXTSTAT_1000T_HD (1 << 12) /* 1000BASE-T Half Duplex */
+
/* List of vendor identifiers */
/* RealTek 8201 */
#define RTL8201CP_PHYID1 0x0000
diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h
index 63a7521..1677176 100644
--- a/include/hw/pci/pci_bridge.h
+++ b/include/hw/pci/pci_bridge.h
@@ -92,6 +92,7 @@
uint8_t bus_nr;
uint16_t numa_node;
bool bypass_iommu;
+ bool hdm_for_passthrough;
struct cxl_dev {
CXLHost *cxl_host_bridge; /* Pointer to a CXLHost */
} cxl;
@@ -136,11 +137,11 @@
pci_map_irq_fn map_irq);
/* TODO: add this define to pci_regs.h in linux and then in qemu. */
-#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */
-#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */
-#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */
-#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */
-#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */
+#define PCI_BRIDGE_CTL_VGA_16BIT 0x10 /* VGA 16-bit decode */
+#define PCI_BRIDGE_CTL_DISCARD 0x100 /* Primary discard timer */
+#define PCI_BRIDGE_CTL_SEC_DISCARD 0x200 /* Secondary discard timer */
+#define PCI_BRIDGE_CTL_DISCARD_STATUS 0x400 /* Discard timer status */
+#define PCI_BRIDGE_CTL_DISCARD_SERR 0x800 /* Discard timer SERR# enable */
typedef struct PCIBridgeQemuCap {
uint8_t id; /* Standard PCI capability header field */
diff --git a/include/hw/pci/pcie_aer.h b/include/hw/pci/pcie_aer.h
index 65e71d9..1234fdc 100644
--- a/include/hw/pci/pcie_aer.h
+++ b/include/hw/pci/pcie_aer.h
@@ -100,4 +100,5 @@
uint32_t addr, uint32_t val, int len,
uint32_t root_cmd_prev);
+int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err);
#endif /* QEMU_PCIE_AER_H */
diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h
index 6c40e37..90e6cf4 100644
--- a/include/hw/pci/pcie_port.h
+++ b/include/hw/pci/pcie_port.h
@@ -41,6 +41,8 @@
void pcie_port_init_reg(PCIDevice *d);
PCIDevice *pcie_find_port_by_pn(PCIBus *bus, uint8_t pn);
+PCIDevice *pcie_find_port_first(PCIBus *bus);
+int pcie_count_ds_ports(PCIBus *bus);
#define TYPE_PCIE_SLOT "pcie-slot"
OBJECT_DECLARE_SIMPLE_TYPE(PCIESlot, PCIE_SLOT)
diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h
index 1fe0bdd..4972106 100644
--- a/include/hw/pci/pcie_regs.h
+++ b/include/hw/pci/pcie_regs.h
@@ -141,6 +141,9 @@
PCI_ERR_UNC_ATOP_EBLOCKED | \
PCI_ERR_UNC_TLP_PRF_BLOCKED)
+#define PCI_ERR_UNC_MASK_DEFAULT (PCI_ERR_UNC_INTN | \
+ PCI_ERR_UNC_TLP_PRF_BLOCKED)
+
#define PCI_ERR_UNC_SEVERITY_DEFAULT (PCI_ERR_UNC_DLP | \
PCI_ERR_UNC_SDN | \
PCI_ERR_UNC_FCP | \
diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h
index 96cc743..095fb0c 100644
--- a/include/hw/pci/pcie_sriov.h
+++ b/include/hw/pci/pcie_sriov.h
@@ -76,4 +76,7 @@
*/
PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n);
+/* Returns the current number of virtual functions. */
+uint16_t pcie_sriov_num_vfs(PCIDevice *dev);
+
#endif /* QEMU_PCIE_SRIOV_H */
diff --git a/include/hw/pcmcia.h b/include/hw/pcmcia.h
index e3ba44e..ab26802 100644
--- a/include/hw/pcmcia.h
+++ b/include/hw/pcmcia.h
@@ -43,22 +43,22 @@
void (*io_write)(PCMCIACardState *card, uint32_t address, uint16_t value);
};
-#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */
-#define CISTPL_NO_LINK 0x14 /* No Link Tuple */
-#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */
-#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */
-#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */
-#define CISTPL_CONFIG 0x1a /* Configuration Tuple */
-#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */
-#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */
-#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */
-#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */
-#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */
-#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */
-#define CISTPL_FUNCID 0x21 /* Function ID Tuple */
-#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */
-#define CISTPL_END 0xff /* Tuple End */
-#define CISTPL_ENDMARK 0xff
+#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */
+#define CISTPL_NO_LINK 0x14 /* No Link Tuple */
+#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */
+#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */
+#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */
+#define CISTPL_CONFIG 0x1a /* Configuration Tuple */
+#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */
+#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */
+#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */
+#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */
+#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */
+#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */
+#define CISTPL_FUNCID 0x21 /* Function ID Tuple */
+#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */
+#define CISTPL_END 0xff /* Tuple End */
+#define CISTPL_ENDMARK 0xff
/* dscm1xxxx.c */
PCMCIACardState *dscm1xxxx_init(DriveInfo *bdrv);
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index f5b3b2f..bd50ad5 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -812,7 +812,18 @@
static inline bool qbus_is_hotpluggable(BusState *bus)
{
- return bus->hotplug_handler;
+ HotplugHandler *plug_handler = bus->hotplug_handler;
+ bool ret = !!plug_handler;
+
+ if (plug_handler) {
+ HotplugHandlerClass *hdc;
+
+ hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler);
+ if (hdc->is_hotpluggable_bus) {
+ ret = hdc->is_hotpluggable_bus(plug_handler, bus);
+ }
+ }
+ return ret;
}
/**
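
To illustrate the new hook (all names below are illustrative, not from the patch): a hotplug handler can veto hotplug per bus by filling in is_hotpluggable_bus, and qbus_is_hotpluggable() above will consult it.

    static bool demo_is_hotpluggable_bus(HotplugHandler *plug_handler,
                                         BusState *bus)
    {
        /* e.g. only the root bus of this handler accepts hotplug */
        return bus == demo_get_root_bus(plug_handler);  /* assumed helper */
    }

    static void demo_hotplug_class_init(ObjectClass *oc, void *data)
    {
        HotplugHandlerClass *hhc = HOTPLUG_HANDLER_CLASS(oc);

        hhc->plug = demo_plug_cb;                        /* assumed callback */
        hhc->is_hotpluggable_bus = demo_is_hotpluggable_bus;
    }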
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index 6ea4b64..6f23a7a 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -8,7 +8,7 @@
#include "qemu/notify.h"
#include "qom/object.h"
-#define MAX_SCSI_DEVS 255
+#define MAX_SCSI_DEVS 255
typedef struct SCSIBus SCSIBus;
typedef struct SCSIBusInfo SCSIBusInfo;
diff --git a/include/hw/sd/sd.h b/include/hw/sd/sd.h
index 47360ba..3047adb 100644
--- a/include/hw/sd/sd.h
+++ b/include/hw/sd/sd.h
@@ -77,10 +77,10 @@
typedef enum {
sd_none = -1,
- sd_bc = 0, /* broadcast -- no response */
- sd_bcr, /* broadcast with response */
- sd_ac, /* addressed -- no data transfer */
- sd_adtc, /* addressed with data transfer */
+ sd_bc = 0, /* broadcast -- no response */
+ sd_bcr, /* broadcast with response */
+ sd_ac, /* addressed -- no data transfer */
+ sd_adtc, /* addressed with data transfer */
} sd_cmd_type_t;
typedef struct {
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 87524c6..eed244f 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -143,6 +143,8 @@
VFIOMigration *migration;
Error *migration_blocker;
OnOffAuto pre_copy_dirty_page_tracking;
+ bool dirty_pages_supported;
+ bool dirty_tracking;
} VFIODevice;
struct VFIODeviceOps {
@@ -220,6 +222,7 @@
bool vfio_mig_active(void);
int vfio_block_multiple_devices_migration(Error **errp);
void vfio_unblock_multiple_devices_migration(void);
+int vfio_block_giommu_migration(Error **errp);
int64_t vfio_mig_bytes_transferred(void);
#ifdef CONFIG_LINUX
@@ -243,7 +246,8 @@
int vfio_spapr_remove_window(VFIOContainer *container,
hwaddr offset_within_address_space);
-int vfio_migration_probe(VFIODevice *vbasedev, Error **errp);
-void vfio_migration_finalize(VFIODevice *vbasedev);
+int vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
+void vfio_migration_exit(VFIODevice *vbasedev);
+void vfio_migration_finalize(void);
#endif /* HW_VFIO_VFIO_COMMON_H */
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index c5ab490..ec3fbae 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -130,6 +130,9 @@
typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
int fd);
+
+typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
+
typedef struct VhostOps {
VhostBackendType backend_type;
vhost_backend_init vhost_backend_init;
@@ -177,6 +180,7 @@
vhost_get_device_id_op vhost_get_device_id;
vhost_force_iommu_op vhost_force_iommu;
vhost_set_config_call_op vhost_set_config_call;
+ vhost_reset_status_op vhost_reset_status;
} VhostOps;
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 7997f09..c278a2a 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -42,12 +42,15 @@
bool shadow_vqs_enabled;
/* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
bool shadow_data;
+ /* Device suspended successfully */
+ bool suspended;
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
GPtrArray *shadow_vqs;
const VhostShadowVirtqueueOps *shadow_vq_ops;
void *shadow_vq_ops_opaque;
struct vhost_dev *dev;
+ Error *migration_blocker;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
} VhostVDPA;
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 77c6c55..f236e94 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -29,7 +29,7 @@
* vhost-user to advertise VHOST_USER_F_PROTOCOL_FEATURES between QEMU
* and a vhost-user backend.
*/
-#define VIRTIO_F_BAD_FEATURE 30
+#define VIRTIO_F_BAD_FEATURE 30
#define VIRTIO_LEGACY_FEATURES ((0x1ULL << VIRTIO_F_BAD_FEATURE) | \
(0x1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
diff --git a/include/hw/xen/xen-bus-helper.h b/include/hw/xen/xen-bus-helper.h
index 8782f30..d8dcc2f 100644
--- a/include/hw/xen/xen-bus-helper.h
+++ b/include/hw/xen/xen-bus-helper.h
@@ -8,40 +8,40 @@
#ifndef HW_XEN_BUS_HELPER_H
#define HW_XEN_BUS_HELPER_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
const char *xs_strstate(enum xenbus_state state);
-void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid,
- const char *node, struct xs_permissions perms[],
- unsigned int nr_perms, Error **errp);
-void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_create(struct qemu_xs_handle *h, xs_transaction_t tid,
+ const char *node, unsigned int owner, unsigned int domid,
+ unsigned int perms, Error **errp);
+void xs_node_destroy(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, Error **errp);
/* Write to node/key unless node is empty, in which case write to key */
-void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_vprintf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
G_GNUC_PRINTF(6, 0);
-void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid,
+void xs_node_printf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
G_GNUC_PRINTF(6, 7);
/* Read from node/key unless node is empty, in which case read from key */
-int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid,
+int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, va_list ap)
G_GNUC_SCANF(6, 0);
-int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid,
+int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid,
const char *node, const char *key, Error **errp,
const char *fmt, ...)
G_GNUC_SCANF(6, 7);
/* Watch node/key unless node is empty, in which case watch key */
-void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key,
- char *token, Error **errp);
-void xs_node_unwatch(struct xs_handle *xsh, const char *node, const char *key,
- const char *token, Error **errp);
+struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node,
+ const char *key, xs_watch_fn fn,
+ void *opaque, Error **errp);
+void xs_node_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
#endif /* HW_XEN_BUS_HELPER_H */
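
A small sketch of the callback-based watch API declared above; the callback body and node names are placeholders that only follow the new xs_node_watch() signature.

    static void demo_backend_changed(void *opaque, const char *path)
    {
        /* re-read the backend state node here */
    }

    static struct qemu_xs_watch *demo_watch_state(struct qemu_xs_handle *h,
                                                  const char *backend_path,
                                                  void *opaque, Error **errp)
    {
        /* Watches <backend_path>/state; the handle is later passed to xs_node_unwatch(). */
        return xs_node_watch(h, backend_path, "state",
                             demo_backend_changed, opaque, errp);
    }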
diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h
index 4d966a2..f435898 100644
--- a/include/hw/xen/xen-bus.h
+++ b/include/hw/xen/xen-bus.h
@@ -8,31 +8,25 @@
#ifndef HW_XEN_BUS_H
#define HW_XEN_BUS_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
#include "hw/sysbus.h"
#include "qemu/notify.h"
#include "qom/object.h"
-typedef void (*XenWatchHandler)(void *opaque);
-
-typedef struct XenWatchList XenWatchList;
-typedef struct XenWatch XenWatch;
typedef struct XenEventChannel XenEventChannel;
struct XenDevice {
DeviceState qdev;
domid_t frontend_id;
char *name;
- struct xs_handle *xsh;
- XenWatchList *watch_list;
+ struct qemu_xs_handle *xsh;
char *backend_path, *frontend_path;
enum xenbus_state backend_state, frontend_state;
Notifier exit;
- XenWatch *backend_state_watch, *frontend_state_watch;
+ struct qemu_xs_watch *backend_state_watch, *frontend_state_watch;
bool backend_online;
- XenWatch *backend_online_watch;
+ struct qemu_xs_watch *backend_online_watch;
xengnttab_handle *xgth;
- bool feature_grant_copy;
bool inactive;
QLIST_HEAD(, XenEventChannel) event_channels;
QLIST_ENTRY(XenDevice) list;
@@ -64,10 +58,9 @@
struct XenBus {
BusState qbus;
domid_t backend_id;
- struct xs_handle *xsh;
- XenWatchList *watch_list;
+ struct qemu_xs_handle *xsh;
unsigned int backend_types;
- XenWatch **backend_watch;
+ struct qemu_xs_watch **backend_watch;
QLIST_HEAD(, XenDevice) inactive_devices;
};
@@ -102,7 +95,7 @@
void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot,
Error **errp);
-void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
+void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, uint32_t *refs,
unsigned int nr_refs, Error **errp);
typedef struct XenDeviceGrantCopySegment {
diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h
index e31cd3a..6c307c5 100644
--- a/include/hw/xen/xen-legacy-backend.h
+++ b/include/hw/xen/xen-legacy-backend.h
@@ -1,7 +1,7 @@
#ifndef HW_XEN_LEGACY_BACKEND_H
#define HW_XEN_LEGACY_BACKEND_H
-#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
#include "hw/xen/xen_pvdev.h"
#include "net/net.h"
#include "qom/object.h"
@@ -15,7 +15,7 @@
TYPE_XENBACKEND)
/* variables */
-extern struct xs_handle *xenstore;
+extern struct qemu_xs_handle *xenstore;
extern const char *xen_protocol;
extern DeviceState *xen_sysdev;
extern BusState *xen_sysbus;
@@ -30,9 +30,6 @@
char *xenstore_read_be_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_be_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
-void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev);
-void xenstore_update_be(char *watch, char *type, int dom,
- struct XenDevOps *ops);
char *xenstore_read_fe_str(struct XenLegacyDevice *xendev, const char *node);
int xenstore_read_fe_int(struct XenLegacyDevice *xendev, const char *node,
int *ival);
@@ -51,18 +48,7 @@
void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs,
unsigned int nr_refs, int prot);
void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr,
- unsigned int nr_refs);
-
-typedef struct XenGrantCopySegment {
- union {
- void *virt;
- struct {
- uint32_t ref;
- off_t offset;
- } foreign;
- } source, dest;
- size_t len;
-} XenGrantCopySegment;
+ uint32_t *refs, unsigned int nr_refs);
int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev,
bool to_domain, XenGrantCopySegment segs[],
@@ -75,9 +61,9 @@
}
static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev,
- void *ptr)
+ void *ptr, uint32_t ref)
{
- return xen_be_unmap_grant_refs(xendev, ptr, 1);
+ return xen_be_unmap_grant_refs(xendev, ptr, &ref, 1);
}
/* actual backend drivers */
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index 0398393..2bd8ec7 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -8,15 +8,21 @@
#define QEMU_HW_XEN_H
/*
- * As a temporary measure while the headers are being untangled, define
- * __XEN_TOOLS__ here before any Xen headers are included. Otherwise, if
- * the Xen toolstack library headers are later included, they will find
- * some of the "internal" definitions missing and the build will fail. In
- * later commits, we'll end up with a rule that the native libraries have
- * to be included first, which will ensure that the libraries get the
- * version of Xen libraries that they expect.
+ * C files using Xen toolstack libraries will have included those headers
+ * already via xen_native.h, and having __XEN_TOOLS__ defined will have
+ * automatically set __XEN_INTERFACE_VERSION__ to the latest supported
+ * by the *system* Xen headers which were transitively included.
+ *
+ * C files which are part of the internal emulation, and which did not
+ * include xen_native.h, may need this defined so that the Xen headers
+ * imported to include/hw/xen/interface/ will expose the appropriate API
+ * version.
+ *
+ * This is why there's a rule that xen_native.h must be included first.
*/
-#define __XEN_TOOLS__ 1
+#ifndef __XEN_INTERFACE_VERSION__
+#define __XEN_INTERFACE_VERSION__ 0x00040e00
+#endif
#include "exec/cpu-common.h"
@@ -39,8 +45,6 @@
qemu_irq *xen_interrupt_controller_init(void);
-void xenstore_store_pv_console_info(int i, Chardev *chr);
-
void xen_register_framebuffer(struct MemoryRegion *mr);
#endif /* QEMU_HW_XEN_H */
diff --git a/include/hw/xen/xen_backend_ops.h b/include/hw/xen/xen_backend_ops.h
new file mode 100644
index 0000000..90cca85
--- /dev/null
+++ b/include/hw/xen/xen_backend_ops.h
@@ -0,0 +1,408 @@
+/*
+ * QEMU Xen backend support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_BACKEND_OPS_H
+#define QEMU_XEN_BACKEND_OPS_H
+
+#include "hw/xen/xen.h"
+#include "hw/xen/interface/xen.h"
+#include "hw/xen/interface/io/xenbus.h"
+
+/*
+ * For the time being, these operations map fairly closely to the API of
+ * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
+ * from XenLegacyDevice back ends to the new XenDevice model, they may
+ * evolve to slightly higher-level APIs.
+ *
+ * The internal emulations do not emulate the Xen APIs entirely faithfully;
+ * only enough to be used by the Xen backend devices. For example, only one
+ * event channel can be bound to each handle, since that's sufficient for
+ * the device support (only the true Xen HVM backend uses more). And the
+ * behaviour of unmask() and pending() is different too because the device
+ * backends don't care.
+ */
+
+typedef struct xenevtchn_handle xenevtchn_handle;
+typedef int xenevtchn_port_or_error_t;
+typedef uint32_t evtchn_port_t;
+typedef uint16_t domid_t;
+typedef uint32_t grant_ref_t;
+
+#define XEN_PAGE_SHIFT 12
+#define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
+#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1))
+
+#ifndef xen_rmb
+#define xen_rmb() smp_rmb()
+#endif
+#ifndef xen_wmb
+#define xen_wmb() smp_wmb()
+#endif
+#ifndef xen_mb
+#define xen_mb() smp_mb()
+#endif
+
+struct evtchn_backend_ops {
+ xenevtchn_handle *(*open)(void);
+ int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
+ evtchn_port_t guest_port);
+ int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
+ int (*close)(struct xenevtchn_handle *xc);
+ int (*get_fd)(struct xenevtchn_handle *xc);
+ int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
+ int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
+ int (*pending)(struct xenevtchn_handle *xc);
+};
+
+extern struct evtchn_backend_ops *xen_evtchn_ops;
+
+static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
+{
+ if (!xen_evtchn_ops) {
+ return NULL;
+ }
+ return xen_evtchn_ops->open();
+}
+
+static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
+ uint32_t domid,
+ evtchn_port_t guest_port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
+}
+
+static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->unbind(xc, port);
+}
+
+static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->close(xc);
+}
+
+static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->get_fd(xc);
+}
+
+static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->notify(xc, port);
+}
+
+static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
+ evtchn_port_t port)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->unmask(xc, port);
+}
+
+static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
+{
+ if (!xen_evtchn_ops) {
+ return -ENOSYS;
+ }
+ return xen_evtchn_ops->pending(xc);
+}
+
+typedef struct xengntdev_handle xengnttab_handle;
+
+typedef struct XenGrantCopySegment {
+ union {
+ void *virt;
+ struct {
+ uint32_t ref;
+ off_t offset;
+ } foreign;
+ } source, dest;
+ size_t len;
+} XenGrantCopySegment;
+
+#define XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE (1U << 0)
+
+struct gnttab_backend_ops {
+ uint32_t features;
+ xengnttab_handle *(*open)(void);
+ int (*close)(xengnttab_handle *xgt);
+ int (*grant_copy)(xengnttab_handle *xgt, bool to_domain, uint32_t domid,
+ XenGrantCopySegment *segs, uint32_t nr_segs,
+ Error **errp);
+ int (*set_max_grants)(xengnttab_handle *xgt, uint32_t nr_grants);
+ void *(*map_refs)(xengnttab_handle *xgt, uint32_t count, uint32_t domid,
+ uint32_t *refs, int prot);
+ int (*unmap)(xengnttab_handle *xgt, void *start_address, uint32_t *refs,
+ uint32_t count);
+};
+
+extern struct gnttab_backend_ops *xen_gnttab_ops;
+
+static inline bool qemu_xen_gnttab_can_map_multi(void)
+{
+ return xen_gnttab_ops &&
+ !!(xen_gnttab_ops->features & XEN_GNTTAB_OP_FEATURE_MAP_MULTIPLE);
+}
+
+static inline xengnttab_handle *qemu_xen_gnttab_open(void)
+{
+ if (!xen_gnttab_ops) {
+ return NULL;
+ }
+ return xen_gnttab_ops->open();
+}
+
+static inline int qemu_xen_gnttab_close(xengnttab_handle *xgt)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->close(xgt);
+}
+
+static inline int qemu_xen_gnttab_grant_copy(xengnttab_handle *xgt,
+ bool to_domain, uint32_t domid,
+ XenGrantCopySegment *segs,
+ uint32_t nr_segs, Error **errp)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+
+ return xen_gnttab_ops->grant_copy(xgt, to_domain, domid, segs, nr_segs,
+ errp);
+}
+
+static inline int qemu_xen_gnttab_set_max_grants(xengnttab_handle *xgt,
+ uint32_t nr_grants)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->set_max_grants(xgt, nr_grants);
+}
+
+static inline void *qemu_xen_gnttab_map_refs(xengnttab_handle *xgt,
+ uint32_t count, uint32_t domid,
+ uint32_t *refs, int prot)
+{
+ if (!xen_gnttab_ops) {
+ return NULL;
+ }
+ return xen_gnttab_ops->map_refs(xgt, count, domid, refs, prot);
+}
+
+static inline int qemu_xen_gnttab_unmap(xengnttab_handle *xgt,
+ void *start_address, uint32_t *refs,
+ uint32_t count)
+{
+ if (!xen_gnttab_ops) {
+ return -ENOSYS;
+ }
+ return xen_gnttab_ops->unmap(xgt, start_address, refs, count);
+}
+
+struct foreignmem_backend_ops {
+ void *(*map)(uint32_t dom, void *addr, int prot, size_t pages,
+ xen_pfn_t *pfns, int *errs);
+ int (*unmap)(void *addr, size_t pages);
+};
+
+extern struct foreignmem_backend_ops *xen_foreignmem_ops;
+
+static inline void *qemu_xen_foreignmem_map(uint32_t dom, void *addr, int prot,
+ size_t pages, xen_pfn_t *pfns,
+ int *errs)
+{
+ if (!xen_foreignmem_ops) {
+ return NULL;
+ }
+ return xen_foreignmem_ops->map(dom, addr, prot, pages, pfns, errs);
+}
+
+static inline int qemu_xen_foreignmem_unmap(void *addr, size_t pages)
+{
+ if (!xen_foreignmem_ops) {
+ return -ENOSYS;
+ }
+ return xen_foreignmem_ops->unmap(addr, pages);
+}
+
+typedef void (*xs_watch_fn)(void *opaque, const char *path);
+
+struct qemu_xs_handle;
+struct qemu_xs_watch;
+typedef uint32_t xs_transaction_t;
+
+#define XBT_NULL 0
+
+#define XS_PERM_NONE 0x00
+#define XS_PERM_READ 0x01
+#define XS_PERM_WRITE 0x02
+
+struct xenstore_backend_ops {
+ struct qemu_xs_handle *(*open)(void);
+ void (*close)(struct qemu_xs_handle *h);
+ char *(*get_domain_path)(struct qemu_xs_handle *h, unsigned int domid);
+ char **(*directory)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *num);
+ void *(*read)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *len);
+ bool (*write)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path, const void *data, unsigned int len);
+ bool (*create)(struct qemu_xs_handle *h, xs_transaction_t t,
+ unsigned int owner, unsigned int domid,
+ unsigned int perms, const char *path);
+ bool (*destroy)(struct qemu_xs_handle *h, xs_transaction_t t,
+ const char *path);
+ struct qemu_xs_watch *(*watch)(struct qemu_xs_handle *h, const char *path,
+ xs_watch_fn fn, void *opaque);
+ void (*unwatch)(struct qemu_xs_handle *h, struct qemu_xs_watch *w);
+ xs_transaction_t (*transaction_start)(struct qemu_xs_handle *h);
+ bool (*transaction_end)(struct qemu_xs_handle *h, xs_transaction_t t,
+ bool abort);
+};
+
+extern struct xenstore_backend_ops *xen_xenstore_ops;
+
+static inline struct qemu_xs_handle *qemu_xen_xs_open(void)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->open();
+}
+
+static inline void qemu_xen_xs_close(struct qemu_xs_handle *h)
+{
+ if (!xen_xenstore_ops) {
+ return;
+ }
+ xen_xenstore_ops->close(h);
+}
+
+static inline char *qemu_xen_xs_get_domain_path(struct qemu_xs_handle *h,
+ unsigned int domid)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->get_domain_path(h, domid);
+}
+
+static inline char **qemu_xen_xs_directory(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ unsigned int *num)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->directory(h, t, path, num);
+}
+
+static inline void *qemu_xen_xs_read(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ unsigned int *len)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->read(h, t, path, len);
+}
+
+static inline bool qemu_xen_xs_write(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path,
+ const void *data, unsigned int len)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->write(h, t, path, data, len);
+}
+
+static inline bool qemu_xen_xs_create(struct qemu_xs_handle *h,
+ xs_transaction_t t, unsigned int owner,
+ unsigned int domid, unsigned int perms,
+ const char *path)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->create(h, t, owner, domid, perms, path);
+}
+
+static inline bool qemu_xen_xs_destroy(struct qemu_xs_handle *h,
+ xs_transaction_t t, const char *path)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->destroy(h, t, path);
+}
+
+static inline struct qemu_xs_watch *qemu_xen_xs_watch(struct qemu_xs_handle *h,
+ const char *path,
+ xs_watch_fn fn,
+ void *opaque)
+{
+ if (!xen_xenstore_ops) {
+ return NULL;
+ }
+ return xen_xenstore_ops->watch(h, path, fn, opaque);
+}
+
+static inline void qemu_xen_xs_unwatch(struct qemu_xs_handle *h,
+ struct qemu_xs_watch *w)
+{
+ if (!xen_xenstore_ops) {
+ return;
+ }
+ xen_xenstore_ops->unwatch(h, w);
+}
+
+static inline xs_transaction_t qemu_xen_xs_transaction_start(struct qemu_xs_handle *h)
+{
+ if (!xen_xenstore_ops) {
+ return XBT_NULL;
+ }
+ return xen_xenstore_ops->transaction_start(h);
+}
+
+static inline bool qemu_xen_xs_transaction_end(struct qemu_xs_handle *h,
+ xs_transaction_t t, bool abort)
+{
+ if (!xen_xenstore_ops) {
+ return false;
+ }
+ return xen_xenstore_ops->transaction_end(h, t, abort);
+}
+
+void setup_xen_backend_ops(void);
+
+#endif /* QEMU_XEN_BACKEND_OPS_H */
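Every accessor above follows the same shape: if no backend has registered an ops table, the wrapper fails gracefully with NULL or -ENOSYS instead of crashing. A minimal caller-side sketch, assuming only this header and <errno.h>; notify_port() is an invented helper used purely for illustration:

static int notify_port(evtchn_port_t port)
{
    xenevtchn_handle *xc = qemu_xen_evtchn_open();
    int ret;

    if (!xc) {
        /* either no backend registered yet, or open() itself failed */
        return -ENOSYS;
    }
    ret = qemu_xen_evtchn_notify(xc, port);
    qemu_xen_evtchn_close(xc);
    return ret;
}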
diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_native.h
similarity index 88%
rename from include/hw/xen/xen_common.h
rename to include/hw/xen/xen_native.h
index 9a13a75..6bcc83b 100644
--- a/include/hw/xen/xen_common.h
+++ b/include/hw/xen/xen_native.h
@@ -1,5 +1,9 @@
-#ifndef QEMU_HW_XEN_COMMON_H
-#define QEMU_HW_XEN_COMMON_H
+#ifndef QEMU_HW_XEN_NATIVE_H
+#define QEMU_HW_XEN_NATIVE_H
+
+#ifdef __XEN_INTERFACE_VERSION__
+#error In Xen native files, include xen_native.h before other Xen headers
+#endif
/*
* If we have new enough libxenctrl then we do not want/need these compat
@@ -12,7 +16,6 @@
#include <xenctrl.h>
#include <xenstore.h>
-#include "hw/xen/interface/io/xenbus.h"
#include "hw/xen/xen.h"
#include "hw/pci/pci_device.h"
@@ -28,49 +31,12 @@
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
typedef xc_interface xenforeignmemory_handle;
-typedef xc_evtchn xenevtchn_handle;
-typedef xc_gnttab xengnttab_handle;
-typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
-
-#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
-#define xenevtchn_close(h) xc_evtchn_close(h)
-#define xenevtchn_fd(h) xc_evtchn_fd(h)
-#define xenevtchn_pending(h) xc_evtchn_pending(h)
-#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
-#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
-#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
-#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
-
-#define xengnttab_open(l, f) xc_gnttab_open(l, f)
-#define xengnttab_close(h) xc_gnttab_close(h)
-#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
-#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
-#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
-#define xengnttab_map_grant_refs(h, c, d, r, p) \
- xc_gnttab_map_grant_refs(h, c, d, r, p)
-#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
- xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
-static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
- int prot, size_t pages,
- const xen_pfn_t arr[/*pages*/],
- int err[/*pages*/])
-{
- if (err)
- return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
- else
- return xc_map_foreign_pages(h, dom, prot, arr, pages);
-}
-
-#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
-
#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
-#include <xenevtchn.h>
-#include <xengnttab.h>
#include <xenforeignmemory.h>
#endif
@@ -660,31 +626,4 @@
#endif
-/* Xen before 4.8 */
-
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
-
-struct xengnttab_grant_copy_segment {
- union xengnttab_copy_ptr {
- void *virt;
- struct {
- uint32_t ref;
- uint16_t offset;
- uint16_t domid;
- } foreign;
- } source, dest;
- uint16_t len;
- uint16_t flags;
- int16_t status;
-};
-
-typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
-
-static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
- xengnttab_grant_copy_segment_t *segs)
-{
- return -ENOSYS;
-}
-#endif
-
-#endif /* QEMU_HW_XEN_COMMON_H */
+#endif /* QEMU_HW_XEN_NATIVE_H */
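A hedged sketch of the include ordering that the new #error guard enforces: a Xen "native" source file must pull in xen_native.h before anything that ends up defining __XEN_INTERFACE_VERSION__. The second Xen include below is only a stand-in for "whatever Xen header comes later".

#include "qemu/osdep.h"
#include "hw/xen/xen_native.h"   /* must come before other Xen headers */
#include "hw/xen/xen-bus.h"      /* stand-in for a later Xen-related header */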
diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h
index 7cd4bc2..ddad4b9 100644
--- a/include/hw/xen/xen_pvdev.h
+++ b/include/hw/xen/xen_pvdev.h
@@ -1,7 +1,9 @@
#ifndef QEMU_HW_XEN_PVDEV_H
#define QEMU_HW_XEN_PVDEV_H
-#include "hw/xen/xen_common.h"
+#include "hw/qdev-core.h"
+#include "hw/xen/xen_backend_ops.h"
+
/* ------------------------------------------------------------- */
#define XEN_BUFSIZE 1024
@@ -38,6 +40,7 @@
char name[64];
int debug;
+ struct qemu_xs_watch *watch;
enum xenbus_state be_state;
enum xenbus_state fe_state;
int online;
@@ -63,7 +66,6 @@
char *xenstore_read_str(const char *base, const char *node);
int xenstore_read_int(const char *base, const char *node, int *ival);
int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval);
-void xenstore_update(void *unused);
const char *xenbus_strstate(enum xenbus_state state);
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
index efae6b0..fdb69b7 100644
--- a/include/monitor/hmp.h
+++ b/include/monitor/hmp.h
@@ -180,5 +180,6 @@
void hmp_ioport_write(Monitor *mon, const QDict *qdict);
void hmp_boot_set(Monitor *mon, const QDict *qdict);
void hmp_info_mtree(Monitor *mon, const QDict *qdict);
+void hmp_info_cryptodev(Monitor *mon, const QDict *qdict);
#endif
diff --git a/include/net/eth.h b/include/net/eth.h
index 6e699b0..c5ae449 100644
--- a/include/net/eth.h
+++ b/include/net/eth.h
@@ -381,18 +381,24 @@
bool fragment;
} eth_ip4_hdr_info;
+typedef enum EthL4HdrProto {
+ ETH_L4_HDR_PROTO_INVALID,
+ ETH_L4_HDR_PROTO_TCP,
+ ETH_L4_HDR_PROTO_UDP
+} EthL4HdrProto;
+
typedef struct eth_l4_hdr_info_st {
union {
struct tcp_header tcp;
struct udp_header udp;
} hdr;
+ EthL4HdrProto proto;
bool has_tcp_data;
} eth_l4_hdr_info;
void eth_get_protocols(const struct iovec *iov, int iovcnt,
- bool *isip4, bool *isip6,
- bool *isudp, bool *istcp,
+ bool *hasip4, bool *hasip6,
size_t *l3hdr_off,
size_t *l4hdr_off,
size_t *l5hdr_off,
@@ -400,11 +406,6 @@
eth_ip4_hdr_info *ip4hdr_info,
eth_l4_hdr_info *l4hdr_info);
-void eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
- void *l3hdr, size_t l3hdr_len,
- size_t l3payload_len,
- size_t frag_offset, bool more_frags);
-
void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len);
diff --git a/include/net/net.h b/include/net/net.h
index 1d88621..1448d00 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -56,8 +56,10 @@
typedef bool (HasUfo)(NetClientState *);
typedef bool (HasVnetHdr)(NetClientState *);
typedef bool (HasVnetHdrLen)(NetClientState *, int);
+typedef bool (GetUsingVnetHdr)(NetClientState *);
typedef void (UsingVnetHdr)(NetClientState *, bool);
typedef void (SetOffload)(NetClientState *, int, int, int, int, int);
+typedef int (GetVnetHdrLen)(NetClientState *);
typedef void (SetVnetHdrLen)(NetClientState *, int);
typedef int (SetVnetLE)(NetClientState *, bool);
typedef int (SetVnetBE)(NetClientState *, bool);
@@ -84,8 +86,10 @@
HasUfo *has_ufo;
HasVnetHdr *has_vnet_hdr;
HasVnetHdrLen *has_vnet_hdr_len;
+ GetUsingVnetHdr *get_using_vnet_hdr;
UsingVnetHdr *using_vnet_hdr;
SetOffload *set_offload;
+ GetVnetHdrLen *get_vnet_hdr_len;
SetVnetHdrLen *set_vnet_hdr_len;
SetVnetLE *set_vnet_le;
SetVnetBE *set_vnet_be;
@@ -185,9 +189,11 @@
bool qemu_has_ufo(NetClientState *nc);
bool qemu_has_vnet_hdr(NetClientState *nc);
bool qemu_has_vnet_hdr_len(NetClientState *nc, int len);
+bool qemu_get_using_vnet_hdr(NetClientState *nc);
void qemu_using_vnet_hdr(NetClientState *nc, bool enable);
void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
int ecn, int ufo);
+int qemu_get_vnet_hdr_len(NetClientState *nc);
void qemu_set_vnet_hdr_len(NetClientState *nc, int len);
int qemu_set_vnet_le(NetClientState *nc, bool is_le);
int qemu_set_vnet_be(NetClientState *nc, bool is_be);
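A hedged sketch of how the two new getters pair with the existing setters, for example to save and restore a peer's vnet-header state around a temporary change; with_vnet_hdr_len() is an invented helper:

static void with_vnet_hdr_len(NetClientState *nc, int tmp_len)
{
    bool was_using = qemu_get_using_vnet_hdr(nc);
    int old_len = qemu_get_vnet_hdr_len(nc);

    qemu_using_vnet_hdr(nc, true);
    qemu_set_vnet_hdr_len(nc, tmp_len);
    /* ... send or receive packets with the temporary header length ... */
    qemu_set_vnet_hdr_len(nc, old_len);
    qemu_using_vnet_hdr(nc, was_using);
}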
diff --git a/include/qapi/error.h b/include/qapi/error.h
index d798fae..f21a231 100644
--- a/include/qapi/error.h
+++ b/include/qapi/error.h
@@ -520,6 +520,12 @@
G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(ErrorPropagator, error_propagator_cleanup);
/*
+ * Special error destination to warn on error.
+ * See error_setg() and error_propagate() for details.
+ */
+extern Error *error_warn;
+
+/*
* Special error destination to abort on error.
* See error_setg() and error_propagate() for details.
*/
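A hedged sketch of the new destination: passing &error_warn makes error_setg() (or anything that eventually propagates into it) print a warning and carry on instead of failing the caller. The property name below is purely illustrative.

static void apply_optional_tweak(Object *obj)
{
    /* "x-tweak" is a hypothetical property; with &error_warn a failure to
     * set it is reported as a warning rather than an error. */
    object_property_set_bool(obj, "x-tweak", true, &error_warn);
}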
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 874134f..f85834e 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -245,6 +245,20 @@
#define smp_wmb() smp_mb_release()
#define smp_rmb() smp_mb_acquire()
+/*
+ * SEQ_CST is weaker than the older __sync_* builtins and Linux
+ * kernel read-modify-write atomics. Provide a macro to obtain
+ * the same semantics.
+ */
+#if !defined(QEMU_SANITIZE_THREAD) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
+# define smp_mb__before_rmw() signal_barrier()
+# define smp_mb__after_rmw() signal_barrier()
+#else
+# define smp_mb__before_rmw() smp_mb()
+# define smp_mb__after_rmw() smp_mb()
+#endif
+
/* qatomic_mb_read/set semantics map Java volatile variables. They are
* less expensive on some platforms (notably POWER) than fully
* sequentially consistent operations.
@@ -259,7 +273,8 @@
#if !defined(QEMU_SANITIZE_THREAD) && \
(defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
/* This is more efficient than a store plus a fence. */
-# define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
+# define qatomic_mb_set(ptr, i) \
+ ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
#else
# define qatomic_mb_set(ptr, i) \
({ qatomic_store_release(ptr, i); smp_mb(); })
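A minimal sketch of where the new barriers fit, assuming qemu/atomic.h: a Dekker-style handshake in which a read-modify-write store must be ordered before a later load. On x86 and s390x the RMW already implies the full barrier, so smp_mb__after_rmw() degrades to a compiler barrier there.

static bool try_enter(int *my_flag, const int *other_flag)
{
    (void)qatomic_xchg(my_flag, 1);   /* RMW store: announce intent */
    smp_mb__after_rmw();              /* order the RMW before the load below */
    return qatomic_read(other_flag) == 0;
}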
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 3ccb008..9780681 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -22,23 +22,23 @@
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_to_le(dst, src, nbits) Convert bitmap to little endian
* bitmap_from_le(dst, src, nbits) Convert bitmap from little endian
* bitmap_copy_with_src_offset(dst, src, offset, nbits)
@@ -50,17 +50,17 @@
/*
* Also the following operations apply to bitmaps.
*
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index f20a76e..c2f49df 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -33,8 +33,8 @@
#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
-#define stringify(s) tostring(s)
-#define tostring(s) #s
+#define stringify(s) tostring(s)
+#define tostring(s) #s
#endif
#ifndef likely
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index c25f390..b3e54e0 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -387,8 +387,6 @@
/* internal interfaces */
-void qemu_fd_register(int fd);
-
#define qemu_bh_new(cb, opaque) \
qemu_bh_new_full((cb), (opaque), (stringify(cb)))
QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name);
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 88c9fac..9eff0be 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -237,7 +237,7 @@
* supports QEMU_ERROR, this will be reported at compile time; otherwise
* this will be reported at link time due to the missing symbol.
*/
-extern G_NORETURN
+G_NORETURN extern
void QEMU_ERROR("code path is reachable")
qemu_build_not_reached_always(void);
#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
@@ -665,20 +665,6 @@
*/
char *qemu_get_pid_name(pid_t pid);
-/**
- * qemu_fork:
- *
- * A version of fork that avoids signal handler race
- * conditions that can lead to child process getting
- * signals that are otherwise only expected by the
- * parent. It also resets all signal handlers to the
- * default settings.
- *
- * Returns 0 to child process, pid number to parent
- * or -1 on failure.
- */
-pid_t qemu_fork(Error **errp);
-
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
* when intptr_t is 32-bit and we are aligning a long long.
*/
diff --git a/include/qemu/plugin-event.h b/include/qemu/plugin-event.h
new file mode 100644
index 0000000..7056d84
--- /dev/null
+++ b/include/qemu/plugin-event.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_PLUGIN_EVENT_H
+#define QEMU_PLUGIN_EVENT_H
+
+/*
+ * Events that plugins can subscribe to.
+ */
+enum qemu_plugin_event {
+ QEMU_PLUGIN_EV_VCPU_INIT,
+ QEMU_PLUGIN_EV_VCPU_EXIT,
+ QEMU_PLUGIN_EV_VCPU_TB_TRANS,
+ QEMU_PLUGIN_EV_VCPU_IDLE,
+ QEMU_PLUGIN_EV_VCPU_RESUME,
+ QEMU_PLUGIN_EV_VCPU_SYSCALL,
+ QEMU_PLUGIN_EV_VCPU_SYSCALL_RET,
+ QEMU_PLUGIN_EV_FLUSH,
+ QEMU_PLUGIN_EV_ATEXIT,
+ QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */
+};
+
+#endif /* QEMU_PLUGIN_EVENT_H */
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
index fb338ba..bc0781c 100644
--- a/include/qemu/plugin.h
+++ b/include/qemu/plugin.h
@@ -12,23 +12,9 @@
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/option.h"
+#include "qemu/plugin-event.h"
#include "exec/memopidx.h"
-
-/*
- * Events that plugins can subscribe to.
- */
-enum qemu_plugin_event {
- QEMU_PLUGIN_EV_VCPU_INIT,
- QEMU_PLUGIN_EV_VCPU_EXIT,
- QEMU_PLUGIN_EV_VCPU_TB_TRANS,
- QEMU_PLUGIN_EV_VCPU_IDLE,
- QEMU_PLUGIN_EV_VCPU_RESUME,
- QEMU_PLUGIN_EV_VCPU_SYSCALL,
- QEMU_PLUGIN_EV_VCPU_SYSCALL_RET,
- QEMU_PLUGIN_EV_FLUSH,
- QEMU_PLUGIN_EV_ATEXIT,
- QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */
-};
+#include "hw/core/cpu.h"
/*
* Option parsing/processing.
@@ -59,8 +45,6 @@
#ifdef CONFIG_PLUGIN
extern QemuOptsList qemu_plugin_opts;
-#define QEMU_PLUGIN_ASSERT(cond) g_assert(cond)
-
static inline void qemu_plugin_add_opts(void)
{
qemu_add_opts(&qemu_plugin_opts);
@@ -221,7 +205,10 @@
void qemu_plugin_add_dyn_cb_arr(GArray *arr);
-void qemu_plugin_disable_mem_helpers(CPUState *cpu);
+static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu)
+{
+ cpu->plugin_mem_cbs = NULL;
+}
/**
* qemu_plugin_user_exit(): clean-up callbacks before calling exit callbacks
@@ -252,8 +239,6 @@
#else /* !CONFIG_PLUGIN */
-#define QEMU_PLUGIN_ASSERT(cond)
-
static inline void qemu_plugin_add_opts(void)
{ }
diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
index d0e9d03..50a9957 100644
--- a/include/qemu/qemu-plugin.h
+++ b/include/qemu/qemu-plugin.h
@@ -481,17 +481,56 @@
*/
const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h);
-typedef void
-(*qemu_plugin_vcpu_mem_cb_t)(unsigned int vcpu_index,
- qemu_plugin_meminfo_t info, uint64_t vaddr,
- void *userdata);
+/**
+ * typedef qemu_plugin_vcpu_mem_cb_t - memory callback function type
+ * @vcpu_index: the executing vCPU
+ * @info: an opaque handle for further queries about the memory
+ * @vaddr: the virtual address of the transaction
+ * @userdata: any user data attached to the callback
+ */
+typedef void (*qemu_plugin_vcpu_mem_cb_t) (unsigned int vcpu_index,
+ qemu_plugin_meminfo_t info,
+ uint64_t vaddr,
+ void *userdata);
+/**
+ * qemu_plugin_register_vcpu_mem_cb() - register memory access callback
+ * @insn: handle for instruction to instrument
+ * @cb: callback of type qemu_plugin_vcpu_mem_cb_t
+ * @flags: (currently unused) callback flags
+ * @rw: monitor reads, writes or both
+ * @userdata: opaque pointer for userdata
+ *
+ * This registers a full callback for every memory access generated by
+ * an instruction. If the instruction doesn't access memory, no
+ * callback will be made.
+ *
+ * The callback reports the vCPU the access took place on, the virtual
+ * address of the access and a handle for further queries. The user
+ * can attach some userdata to the callback for additional purposes.
+ *
+ * Other execution threads will continue to execute during the
+ * callback so the plugin is responsible for ensuring it doesn't get
+ * confused by making appropriate use of locking if required.
+ */
void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
qemu_plugin_vcpu_mem_cb_t cb,
enum qemu_plugin_cb_flags flags,
enum qemu_plugin_mem_rw rw,
void *userdata);
+/**
+ * qemu_plugin_register_vcpu_mem_inline() - register an inline op for every memory access
+ * @insn: handle for instruction to instrument
+ * @rw: apply to reads, writes or both
+ * @op: the op, of type qemu_plugin_op
+ * @ptr: pointer memory for the op
+ * @imm: immediate data for @op
+ *
+ * This registers an inline op for every memory access generated by
+ * the instruction. It provides a lightweight but not thread-safe
+ * way of counting the number of operations done.
+ */
void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
enum qemu_plugin_mem_rw rw,
enum qemu_plugin_op op, void *ptr,
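A minimal sketch of a TCG plugin wired through the callback documented above. It assumes the standard <qemu-plugin.h> entry points (qemu_plugin_install and the tb-translation hook) and deliberately keeps the counter non-atomic, matching the doc comment's warning that other threads keep running during the callback.

#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static uint64_t mem_accesses;   /* not thread-safe on purpose; see above */

static void mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                   uint64_t vaddr, void *userdata)
{
    mem_accesses++;
}

static void tb_trans_cb(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_mem_cb(insn, mem_cb, QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_cb);
    return 0;
}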
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index 2b0698a..d935fd8 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -15,7 +15,6 @@
bool fd_is_socket(int fd);
int qemu_socket(int domain, int type, int protocol);
-#ifndef WIN32
/**
* qemu_socketpair:
* @domain: specifies a communication domain, such as PF_UNIX
@@ -30,7 +29,6 @@
* Return 0 on success.
*/
int qemu_socketpair(int domain, int type, int protocol, int sv[2]);
-#endif
int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen);
/*
diff --git a/include/qemu/uri.h b/include/qemu/uri.h
index 3ad211d..b43f35a 100644
--- a/include/qemu/uri.h
+++ b/include/qemu/uri.h
@@ -59,16 +59,16 @@
* as described in RFC 2396 but separated for further processing.
*/
typedef struct URI {
- char *scheme; /* the URI scheme */
- char *opaque; /* opaque part */
- char *authority; /* the authority part */
- char *server; /* the server part */
- char *user; /* the user part */
- int port; /* the port number */
- char *path; /* the path string */
- char *fragment; /* the fragment identifier */
- int cleanup; /* parsing potentially unclean URI */
- char *query; /* the query string (as it appears in the URI) */
+ char *scheme; /* the URI scheme */
+ char *opaque; /* opaque part */
+ char *authority; /* the authority part */
+ char *server; /* the server part */
+ char *user; /* the user part */
+ int port; /* the port number */
+ char *path; /* the path string */
+ char *fragment; /* the fragment identifier */
+ int cleanup; /* parsing potentially unclean URI */
+ char *query; /* the query string (as it appears in the URI) */
} URI;
URI *uri_new(void);
@@ -84,16 +84,16 @@
/* Single web service query parameter 'name=value'. */
typedef struct QueryParam {
- char *name; /* Name (unescaped). */
- char *value; /* Value (unescaped). */
- int ignore; /* Ignore this field in qparam_get_query */
+ char *name; /* Name (unescaped). */
+ char *value; /* Value (unescaped). */
+ int ignore; /* Ignore this field in qparam_get_query */
} QueryParam;
/* Set of parameters. */
typedef struct QueryParams {
- int n; /* number of parameters used */
- int alloc; /* allocated space */
- QueryParam *p; /* array of parameters */
+ int n; /* number of parameters used */
+ int alloc; /* allocated space */
+ QueryParam *p; /* array of parameters */
} QueryParams;
struct QueryParams *query_params_new (int init_alloc);
diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h
index 30690c7..3c1fab4 100644
--- a/include/sysemu/accel-ops.h
+++ b/include/sysemu/accel-ops.h
@@ -48,6 +48,7 @@
/* gdbstub hooks */
bool (*supports_guest_debug)(void);
+ int (*update_guest_debug)(CPUState *cpu);
int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len);
void (*remove_all_breakpoints)(CPUState *cpu);
diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h
index cf9b3f0..bc021ce 100644
--- a/include/sysemu/cryptodev.h
+++ b/include/sysemu/cryptodev.h
@@ -24,7 +24,9 @@
#define CRYPTODEV_H
#include "qemu/queue.h"
+#include "qemu/throttle.h"
#include "qom/object.h"
+#include "qapi/qapi-types-cryptodev.h"
/**
* CryptoDevBackend:
@@ -48,12 +50,6 @@
typedef struct CryptoDevBackendClient
CryptoDevBackendClient;
-enum CryptoDevBackendAlgType {
- CRYPTODEV_BACKEND_ALG_SYM,
- CRYPTODEV_BACKEND_ALG_ASYM,
- CRYPTODEV_BACKEND_ALG__MAX,
-};
-
/**
* CryptoDevBackendSymSessionInfo:
*
@@ -179,17 +175,22 @@
uint8_t *dst;
} CryptoDevBackendAsymOpInfo;
+typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret);
+
typedef struct CryptoDevBackendOpInfo {
- enum CryptoDevBackendAlgType algtype;
+ QCryptodevBackendAlgType algtype;
uint32_t op_code;
+ uint32_t queue_index;
+ CryptoDevCompletionFunc cb;
+ void *opaque; /* argument for cb */
uint64_t session_id;
union {
CryptoDevBackendSymOpInfo *sym_op_info;
CryptoDevBackendAsymOpInfo *asym_op_info;
} u;
+ QTAILQ_ENTRY(CryptoDevBackendOpInfo) next;
} CryptoDevBackendOpInfo;
-typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret);
struct CryptoDevBackendClass {
ObjectClass parent_class;
@@ -209,24 +210,11 @@
void *opaque);
int (*do_op)(CryptoDevBackend *backend,
- CryptoDevBackendOpInfo *op_info,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque);
+ CryptoDevBackendOpInfo *op_info);
};
-typedef enum CryptoDevBackendOptionsType {
- CRYPTODEV_BACKEND_TYPE_NONE = 0,
- CRYPTODEV_BACKEND_TYPE_BUILTIN = 1,
- CRYPTODEV_BACKEND_TYPE_VHOST_USER = 2,
- CRYPTODEV_BACKEND_TYPE_LKCF = 3,
- CRYPTODEV_BACKEND_TYPE__MAX,
-} CryptoDevBackendOptionsType;
-
struct CryptoDevBackendClient {
- CryptoDevBackendOptionsType type;
- char *model;
- char *name;
+ QCryptodevBackendType type;
char *info_str;
unsigned int queue_index;
int vring_enable;
@@ -260,6 +248,24 @@
uint64_t max_size;
};
+typedef struct CryptodevBackendSymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+} CryptodevBackendSymStat;
+
+typedef struct CryptodevBackendAsymStat {
+ int64_t encrypt_ops;
+ int64_t decrypt_ops;
+ int64_t sign_ops;
+ int64_t verify_ops;
+ int64_t encrypt_bytes;
+ int64_t decrypt_bytes;
+ int64_t sign_bytes;
+ int64_t verify_bytes;
+} CryptodevBackendAsymStat;
+
struct CryptoDevBackend {
Object parent_obj;
@@ -267,15 +273,48 @@
/* Tag the cryptodev backend is used by virtio-crypto or not */
bool is_used;
CryptoDevBackendConf conf;
+ CryptodevBackendSymStat *sym_stat;
+ CryptodevBackendAsymStat *asym_stat;
+
+ ThrottleState ts;
+ ThrottleTimers tt;
+ ThrottleConfig tc;
+ QTAILQ_HEAD(, CryptoDevBackendOpInfo) opinfos;
};
+#define CryptodevSymStatInc(be, op, bytes) do { \
+ be->sym_stat->op##_bytes += (bytes); \
+ be->sym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevSymStatIncEncrypt(be, bytes) \
+ CryptodevSymStatInc(be, encrypt, bytes)
+
+#define CryptodevSymStatIncDecrypt(be, bytes) \
+ CryptodevSymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatInc(be, op, bytes) do { \
+ be->asym_stat->op##_bytes += (bytes); \
+ be->asym_stat->op##_ops += 1; \
+} while (/*CONSTCOND*/0)
+
+#define CryptodevAsymStatIncEncrypt(be, bytes) \
+ CryptodevAsymStatInc(be, encrypt, bytes)
+
+#define CryptodevAsymStatIncDecrypt(be, bytes) \
+ CryptodevAsymStatInc(be, decrypt, bytes)
+
+#define CryptodevAsymStatIncSign(be, bytes) \
+ CryptodevAsymStatInc(be, sign, bytes)
+
+#define CryptodevAsymStatIncVerify(be, bytes) \
+ CryptodevAsymStatInc(be, verify, bytes)
+
+
/**
* cryptodev_backend_new_client:
- * @model: the cryptodev backend model
- * @name: the cryptodev backend name, can be NULL
*
- * Creates a new cryptodev backend client object
- * with the @name in the model @model.
+ * Creates a new cryptodev backend client object.
*
* The returned object must be released with
* cryptodev_backend_free_client() when no
@@ -283,9 +322,8 @@
*
* Returns: a new cryptodev backend client object
*/
-CryptoDevBackendClient *
-cryptodev_backend_new_client(const char *model,
- const char *name);
+CryptoDevBackendClient *cryptodev_backend_new_client(void);
+
/**
* cryptodev_backend_free_client:
* @cc: the cryptodev backend client object
@@ -354,24 +392,17 @@
/**
* cryptodev_backend_crypto_operation:
* @backend: the cryptodev backend object
- * @opaque1: pointer to a VirtIOCryptoReq object
- * @queue_index: queue index of cryptodev backend client
- * @errp: pointer to a NULL-initialized error object
- * @cb: callbacks when operation is completed
- * @opaque2: parameter passed to cb
+ * @op_info: pointer to a CryptoDevBackendOpInfo object
*
- * Do crypto operation, such as encryption and
- * decryption
+ * Do a crypto operation, such as encryption, decryption, signing or
+ * verification
*
* Returns: 0 for success and cb will be called when creation is completed,
* negative value for error, and cb will not be called.
*/
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
- void *opaque1,
- uint32_t queue_index,
- CryptoDevCompletionFunc cb,
- void *opaque2);
+ CryptoDevBackendOpInfo *op_info);
/**
* cryptodev_backend_set_used:
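A hedged usage sketch for the stat macros above: the ## pasting selects the field pair, so CryptodevSymStatIncEncrypt(b, n) bumps encrypt_bytes by n and encrypt_ops by one. account_sym_op() is an invented helper and assumes the backend's sym_stat has already been allocated.

static void account_sym_op(CryptoDevBackend *b, bool encrypt, uint32_t nbytes)
{
    if (encrypt) {
        CryptodevSymStatIncEncrypt(b, nbytes);
    } else {
        CryptodevSymStatIncDecrypt(b, nbytes);
    }
}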
diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
index 58de7c9..1030d39 100644
--- a/include/sysemu/os-posix.h
+++ b/include/sysemu/os-posix.h
@@ -51,9 +51,6 @@
void os_setup_post(void);
int os_mlock(void);
-#define closesocket(s) close(s)
-#define ioctlsocket(s, r, v) ioctl(s, r, v)
-
int os_set_daemonize(bool d);
bool is_daemonized(void);
diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h
index 97d0243..15c296e 100644
--- a/include/sysemu/os-win32.h
+++ b/include/sysemu/os-win32.h
@@ -29,6 +29,7 @@
#include <winsock2.h>
#include <windows.h>
#include <ws2tcpip.h>
+#include "qemu/typedefs.h"
#ifdef HAVE_AFUNIX_H
#include <afunix.h>
@@ -164,10 +165,31 @@
#endif
}
-/* We wrap all the sockets functions so that we can
- * set errno based on WSAGetLastError()
+/* Helper for WSAEventSelect, to report errors */
+bool qemu_socket_select(int sockfd, WSAEVENT hEventObject,
+ long lNetworkEvents, Error **errp);
+
+bool qemu_socket_unselect(int sockfd, Error **errp);
+
+/* We wrap all the socket functions so that we can set errno based on
+ * WSAGetLastError(), and use file descriptors instead of SOCKET handles.
*/
+/*
+ * qemu_close_socket_osfhandle:
+ * @fd: a file descriptor associated with a SOCKET
+ *
+ * Close only the C run-time file descriptor; leave the SOCKET open.
+ *
+ * Returns zero on success. On error, -1 is returned, and errno is set to
+ * indicate the error.
+ */
+int qemu_close_socket_osfhandle(int fd);
+
+#undef close
+#define close qemu_close_wrap
+int qemu_close_wrap(int fd);
+
#undef connect
#define connect qemu_connect_wrap
int qemu_connect_wrap(int sockfd, const struct sockaddr *addr,
@@ -199,10 +221,6 @@
#define ioctlsocket qemu_ioctlsocket_wrap
int qemu_ioctlsocket_wrap(int fd, int req, void *val);
-#undef closesocket
-#define closesocket qemu_closesocket_wrap
-int qemu_closesocket_wrap(int fd);
-
#undef getsockopt
#define getsockopt qemu_getsockopt_wrap
int qemu_getsockopt_wrap(int sockfd, int level, int optname,
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 7085614..dff17c7 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -1089,9 +1089,7 @@
#define tcg_gen_extract_tl tcg_gen_extract_i64
#define tcg_gen_sextract_tl tcg_gen_sextract_i64
#define tcg_gen_extract2_tl tcg_gen_extract2_i64
-#define tcg_const_tl tcg_const_i64
#define tcg_constant_tl tcg_constant_i64
-#define tcg_const_local_tl tcg_const_local_i64
#define tcg_gen_movcond_tl tcg_gen_movcond_i64
#define tcg_gen_add2_tl tcg_gen_add2_i64
#define tcg_gen_sub2_tl tcg_gen_sub2_i64
@@ -1205,9 +1203,7 @@
#define tcg_gen_extract_tl tcg_gen_extract_i32
#define tcg_gen_sextract_tl tcg_gen_sextract_i32
#define tcg_gen_extract2_tl tcg_gen_extract2_i32
-#define tcg_const_tl tcg_const_i32
#define tcg_constant_tl tcg_constant_i32
-#define tcg_const_local_tl tcg_const_local_i32
#define tcg_gen_movcond_tl tcg_gen_movcond_i32
#define tcg_gen_add2_tl tcg_gen_add2_i32
#define tcg_gen_sub2_tl tcg_gen_sub2_i32
diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h
new file mode 100644
index 0000000..dded291
--- /dev/null
+++ b/include/tcg/tcg-temp-internal.h
@@ -0,0 +1,83 @@
+/*
+ * TCG internals related to TCG temp allocation
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_TEMP_INTERNAL_H
+#define TCG_TEMP_INTERNAL_H
+
+/*
+ * Allocation and freeing of EBB temps is reserved to TCG internals
+ */
+
+void tcg_temp_free_internal(TCGTemp *);
+
+static inline void tcg_temp_free_i32(TCGv_i32 arg)
+{
+ tcg_temp_free_internal(tcgv_i32_temp(arg));
+}
+
+static inline void tcg_temp_free_i64(TCGv_i64 arg)
+{
+ tcg_temp_free_internal(tcgv_i64_temp(arg));
+}
+
+static inline void tcg_temp_free_i128(TCGv_i128 arg)
+{
+ tcg_temp_free_internal(tcgv_i128_temp(arg));
+}
+
+static inline void tcg_temp_free_ptr(TCGv_ptr arg)
+{
+ tcg_temp_free_internal(tcgv_ptr_temp(arg));
+}
+
+static inline void tcg_temp_free_vec(TCGv_vec arg)
+{
+ tcg_temp_free_internal(tcgv_vec_temp(arg));
+}
+
+static inline TCGv_i32 tcg_temp_ebb_new_i32(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB);
+ return temp_tcgv_i32(t);
+}
+
+static inline TCGv_i64 tcg_temp_ebb_new_i64(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB);
+ return temp_tcgv_i64(t);
+}
+
+static inline TCGv_i128 tcg_temp_ebb_new_i128(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB);
+ return temp_tcgv_i128(t);
+}
+
+static inline TCGv_ptr tcg_temp_ebb_new_ptr(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB);
+ return temp_tcgv_ptr(t);
+}
+
+#endif /* TCG_TEMP_INTERNAL_H */
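A minimal sketch of how TCG-internal code (the only intended user of these helpers) pairs an EBB temp with an explicit free; gen_mask_low_byte() is an invented name and assumes the usual tcg-op.h generators.

static void gen_mask_low_byte(TCGv_i32 dst, TCGv_i32 src)
{
    TCGv_i32 t = tcg_temp_ebb_new_i32();   /* valid only within this EBB */

    tcg_gen_andi_i32(t, src, 0xff);
    tcg_gen_mov_i32(dst, t);
    tcg_temp_free_i32(t);
}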
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index a5cf21b..5cfaa53 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -862,35 +862,9 @@
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, TCGTempKind);
-void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
-static inline void tcg_temp_free_i32(TCGv_i32 arg)
-{
- tcg_temp_free_internal(tcgv_i32_temp(arg));
-}
-
-static inline void tcg_temp_free_i64(TCGv_i64 arg)
-{
- tcg_temp_free_internal(tcgv_i64_temp(arg));
-}
-
-static inline void tcg_temp_free_i128(TCGv_i128 arg)
-{
- tcg_temp_free_internal(tcgv_i128_temp(arg));
-}
-
-static inline void tcg_temp_free_ptr(TCGv_ptr arg)
-{
- tcg_temp_free_internal(tcgv_ptr_temp(arg));
-}
-
-static inline void tcg_temp_free_vec(TCGv_vec arg)
-{
- tcg_temp_free_internal(tcgv_vec_temp(arg));
-}
-
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
const char *name)
{
@@ -898,13 +872,6 @@
return temp_tcgv_i32(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_i32 tcg_temp_ebb_new_i32(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB);
- return temp_tcgv_i32(t);
-}
-
static inline TCGv_i32 tcg_temp_new_i32(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB);
@@ -918,26 +885,12 @@
return temp_tcgv_i64(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_i64 tcg_temp_ebb_new_i64(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB);
- return temp_tcgv_i64(t);
-}
-
static inline TCGv_i64 tcg_temp_new_i64(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB);
return temp_tcgv_i64(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_i128 tcg_temp_ebb_new_i128(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB);
- return temp_tcgv_i128(t);
-}
-
static inline TCGv_i128 tcg_temp_new_i128(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB);
@@ -951,13 +904,6 @@
return temp_tcgv_ptr(t);
}
-/* Used only by tcg infrastructure: tcg-op.c or plugin-gen.c */
-static inline TCGv_ptr tcg_temp_ebb_new_ptr(void)
-{
- TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB);
- return temp_tcgv_ptr(t);
-}
-
static inline TCGv_ptr tcg_temp_new_ptr(void)
{
TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB);
@@ -1050,14 +996,6 @@
void tcg_optimize(TCGContext *s);
-/* Allocate a new temporary and initialize it with a constant. */
-TCGv_i32 tcg_const_i32(int32_t val);
-TCGv_i64 tcg_const_i64(int64_t val);
-TCGv_vec tcg_const_zeros_vec(TCGType);
-TCGv_vec tcg_const_ones_vec(TCGType);
-TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
-TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
-
/*
* Locate or create a read-only temporary that is a constant.
* This kind of temporary need not be freed, but for convenience
@@ -1079,10 +1017,8 @@
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
#if UINTPTR_MAX == UINT32_MAX
-# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i32((intptr_t)(x)))
#else
-# define tcg_const_ptr(x) ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_constant_ptr(x) ((TCGv_ptr)tcg_constant_i64((intptr_t)(x)))
#endif
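With tcg_const_* removed, the usual replacement is tcg_constant_*, which returns a read-only constant temp that the caller never frees or writes. A hedged sketch; gen_add_imm() is an invented name.

static void gen_add_imm(TCGv_i32 dst, TCGv_i32 src, int32_t imm)
{
    /* the constant temp may be shared; do not free or overwrite it */
    tcg_gen_add_i32(dst, src, tcg_constant_i32(imm));
}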
diff --git a/include/ui/console.h b/include/ui/console.h
index 1cb53ac..2a8fab0 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -151,8 +151,8 @@
} QEMUCursor;
QEMUCursor *cursor_alloc(int width, int height);
-void cursor_get(QEMUCursor *c);
-void cursor_put(QEMUCursor *c);
+QEMUCursor *cursor_ref(QEMUCursor *c);
+void cursor_unref(QEMUCursor *c);
QEMUCursor *cursor_builtin_hidden(void);
QEMUCursor *cursor_builtin_left_ptr(void);
void cursor_print_ascii_art(QEMUCursor *c, const char *prefix);
@@ -459,6 +459,7 @@
QemuConsole *qemu_console_lookup_by_device_name(const char *device_id,
uint32_t head, Error **errp);
QemuConsole *qemu_console_lookup_unused(void);
+QEMUCursor *qemu_console_get_cursor(QemuConsole *con);
bool qemu_console_is_visible(QemuConsole *con);
bool qemu_console_is_graphic(QemuConsole *con);
bool qemu_console_is_fixedsize(QemuConsole *con);
diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h
index 2fb6e0d..53d953d 100644
--- a/include/ui/egl-helpers.h
+++ b/include/ui/egl-helpers.h
@@ -22,6 +22,8 @@
QemuDmaBuf *dmabuf;
} egl_fb;
+#define EGL_FB_INIT { 0, }
+
void egl_fb_destroy(egl_fb *fb);
void egl_fb_setup_default(egl_fb *fb, int width, int height);
void egl_fb_setup_for_tex(egl_fb *fb, int width, int height,
@@ -63,4 +65,6 @@
EGLContext qemu_egl_init_ctx(void);
bool qemu_egl_has_dmabuf(void);
+bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp);
+
#endif /* EGL_HELPERS_H */
diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h
index c5a220d..90bda76 100644
--- a/include/user/syscall-trace.h
+++ b/include/user/syscall-trace.h
@@ -11,6 +11,7 @@
#define SYSCALL_TRACE_H
#include "exec/user/abitypes.h"
+#include "qemu/plugin.h"
#include "trace/trace-root.h"
/*
diff --git a/io/channel-socket.c b/io/channel-socket.c
index 7aca84f..b0ea7d4 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -442,9 +442,9 @@
}
}
#ifdef WIN32
- WSAEventSelect(ioc->fd, NULL, 0);
+ qemu_socket_unselect(ioc->fd, NULL);
#endif
- closesocket(ioc->fd);
+ close(ioc->fd);
ioc->fd = -1;
}
}
@@ -846,13 +846,13 @@
if (sioc->fd != -1) {
#ifdef WIN32
- WSAEventSelect(sioc->fd, NULL, 0);
+ qemu_socket_unselect(sioc->fd, NULL);
#endif
if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_LISTEN)) {
socket_listen_cleanup(sioc->fd, errp);
}
- if (closesocket(sioc->fd) < 0) {
+ if (close(sioc->fd) < 0) {
sioc->fd = -1;
error_setg_errno(&err, errno, "Unable to close socket");
error_propagate(errp, err);
diff --git a/io/channel-tls.c b/io/channel-tls.c
index 8052945..5a7a3d4 100644
--- a/io/channel-tls.c
+++ b/io/channel-tls.c
@@ -446,6 +446,7 @@
object_ref(OBJECT(tioc));
g_source_add_child_source(source, child);
+ g_source_unref(child);
}
static GSource *qio_channel_tls_create_watch(QIOChannel *ioc,
diff --git a/io/channel-watch.c b/io/channel-watch.c
index ad7c568..64b486e 100644
--- a/io/channel-watch.c
+++ b/io/channel-watch.c
@@ -275,15 +275,15 @@
#ifdef CONFIG_WIN32
GSource *qio_channel_create_socket_watch(QIOChannel *ioc,
- int socket,
+ int sockfd,
GIOCondition condition)
{
GSource *source;
QIOChannelSocketSource *ssource;
- WSAEventSelect(socket, ioc->event,
- FD_READ | FD_ACCEPT | FD_CLOSE |
- FD_CONNECT | FD_WRITE | FD_OOB);
+ qemu_socket_select(sockfd, ioc->event,
+ FD_READ | FD_ACCEPT | FD_CLOSE |
+ FD_CONNECT | FD_WRITE | FD_OOB, NULL);
source = g_source_new(&qio_channel_socket_source_funcs,
sizeof(QIOChannelSocketSource));
@@ -293,7 +293,7 @@
object_ref(OBJECT(ioc));
ssource->condition = condition;
- ssource->socket = socket;
+ ssource->socket = _get_osfhandle(sockfd);
ssource->revents = 0;
ssource->fd.fd = (gintptr)ioc->event;
diff --git a/linux-user/alpha/target_mman.h b/linux-user/alpha/target_mman.h
index cd6e3d7..051544f 100644
--- a/linux-user/alpha/target_mman.h
+++ b/linux-user/alpha/target_mman.h
@@ -3,6 +3,10 @@
#define TARGET_MADV_DONTNEED 6
+#define TARGET_MS_ASYNC 1
+#define TARGET_MS_SYNC 2
+#define TARGET_MS_INVALIDATE 4
+
#include "../generic/target_mman.h"
#endif
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 5928c14..1dbc1f0 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -18,6 +18,7 @@
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "target_signal.h"
#include "accel/tcg/debuginfo.h"
@@ -1748,6 +1749,15 @@
regs->windowstart = 1;
regs->areg[1] = infop->start_stack;
regs->pc = infop->entry;
+ if (info_is_fdpic(infop)) {
+ regs->areg[4] = infop->loadmap_addr;
+ regs->areg[5] = infop->interpreter_loadmap_addr;
+ if (infop->interpreter_loadmap_addr) {
+ regs->areg[6] = infop->interpreter_pt_dynamic_addr;
+ } else {
+ regs->areg[6] = infop->pt_dynamic_addr;
+ }
+ }
}
/* See linux kernel: arch/xtensa/include/asm/elf.h. */
@@ -2207,11 +2217,16 @@
}
}
-#ifdef TARGET_ARM
+#if defined(TARGET_ARM)
static int elf_is_fdpic(struct elfhdr *exec)
{
return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
}
+#elif defined(TARGET_XTENSA)
+static int elf_is_fdpic(struct elfhdr *exec)
+{
+ return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
+}
#else
/* Default implementation, always false. */
static int elf_is_fdpic(struct elfhdr *exec)
diff --git a/linux-user/exit.c b/linux-user/exit.c
index 607b6da..3017d28 100644
--- a/linux-user/exit.c
+++ b/linux-user/exit.c
@@ -18,9 +18,10 @@
*/
#include "qemu/osdep.h"
#include "accel/tcg/perf.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
#include "qemu.h"
#include "user-internals.h"
+#include "qemu/plugin.h"
#ifdef CONFIG_GPROF
#include <sys/gmon.h>
#endif
diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c
index 7b25468..c04a97c 100644
--- a/linux-user/fd-trans.c
+++ b/linux-user/fd-trans.c
@@ -1284,6 +1284,49 @@
return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
+static abi_long target_to_host_for_each_nlattr(struct nlattr *nlattr,
+ size_t len,
+ abi_long (*target_to_host_nlattr)
+ (struct nlattr *))
+{
+ unsigned short aligned_nla_len;
+ abi_long ret;
+
+ while (len > sizeof(struct nlattr)) {
+ if (tswap16(nlattr->nla_len) < sizeof(struct rtattr) ||
+ tswap16(nlattr->nla_len) > len) {
+ break;
+ }
+ nlattr->nla_len = tswap16(nlattr->nla_len);
+ nlattr->nla_type = tswap16(nlattr->nla_type);
+ ret = target_to_host_nlattr(nlattr);
+ if (ret < 0) {
+ return ret;
+ }
+
+ aligned_nla_len = NLA_ALIGN(nlattr->nla_len);
+ if (aligned_nla_len >= len) {
+ break;
+ }
+ len -= aligned_nla_len;
+ nlattr = (struct nlattr *)(((char *)nlattr) + aligned_nla_len);
+ }
+ return 0;
+}
+
+static abi_long target_to_host_data_inet6_nlattr(struct nlattr *nlattr)
+{
+ switch (nlattr->nla_type) {
+ /* uint8_t */
+ case QEMU_IFLA_INET6_ADDR_GEN_MODE:
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown target AF_INET6 type: %d\n",
+ nlattr->nla_type);
+ }
+ return 0;
+}
+
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
size_t len,
abi_long (*target_to_host_rtattr)
@@ -1314,16 +1357,35 @@
return 0;
}
+static abi_long target_to_host_data_spec_nlattr(struct nlattr *nlattr)
+{
+ switch (nlattr->nla_type & NLA_TYPE_MASK) {
+ case AF_INET6:
+ return target_to_host_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
+ target_to_host_data_inet6_nlattr);
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown target AF_SPEC type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
uint32_t *u32;
- switch (rtattr->rta_type) {
+ switch (rtattr->rta_type & NLA_TYPE_MASK) {
/* uint32_t */
+ case QEMU_IFLA_MTU:
+ case QEMU_IFLA_TXQLEN:
case QEMU_IFLA_EXT_MASK:
u32 = RTA_DATA(rtattr);
*u32 = tswap32(*u32);
break;
+ case QEMU_IFLA_AF_SPEC:
+ return target_to_host_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ target_to_host_data_spec_nlattr);
default:
qemu_log_mask(LOG_UNIMP, "Unknown target QEMU_IFLA type: %d\n",
rtattr->rta_type);
@@ -1622,7 +1684,7 @@
.host_to_target_data = host_to_target_data_signalfd,
};
-static abi_long swap_data_eventfd(void *buf, size_t len)
+static abi_long swap_data_u64(void *buf, size_t len)
{
uint64_t *counter = buf;
int i;
@@ -1640,8 +1702,12 @@
}
TargetFdTrans target_eventfd_trans = {
- .host_to_target_data = swap_data_eventfd,
- .target_to_host_data = swap_data_eventfd,
+ .host_to_target_data = swap_data_u64,
+ .target_to_host_data = swap_data_u64,
+};
+
+TargetFdTrans target_timerfd_trans = {
+ .host_to_target_data = swap_data_u64,
};
#if defined(CONFIG_INOTIFY) && (defined(TARGET_NR_inotify_init) || \
diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h
index 1b9fa20..910faaf 100644
--- a/linux-user/fd-trans.h
+++ b/linux-user/fd-trans.h
@@ -130,6 +130,7 @@
extern TargetFdTrans target_netlink_audit_trans;
extern TargetFdTrans target_signalfd_trans;
extern TargetFdTrans target_eventfd_trans;
+extern TargetFdTrans target_timerfd_trans;
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
(defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
defined(__NR_inotify_init1))
diff --git a/linux-user/generic/target_mman.h b/linux-user/generic/target_mman.h
index 1436a3c..32bf1a5 100644
--- a/linux-user/generic/target_mman.h
+++ b/linux-user/generic/target_mman.h
@@ -89,4 +89,17 @@
#define TARGET_MADV_DONTNEED_LOCKED 24
#endif
+
+#ifndef TARGET_MS_ASYNC
+#define TARGET_MS_ASYNC 1
+#endif
+
+#ifndef TARGET_MS_INVALIDATE
+#define TARGET_MS_INVALIDATE 2
+#endif
+
+#ifndef TARGET_MS_SYNC
+#define TARGET_MS_SYNC 4
+#endif
+
#endif
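A hedged sketch of why per-target TARGET_MS_* values exist at all: alpha and hppa number the msync(2) flags differently from the generic defaults above, so syscall emulation has to remap them onto the host's MS_* bits. target_to_host_msync_flags() is an invented helper name.

#include <sys/mman.h>

static int target_to_host_msync_flags(int target_flags)
{
    int host_flags = 0;

    if (target_flags & TARGET_MS_ASYNC) {
        host_flags |= MS_ASYNC;
    }
    if (target_flags & TARGET_MS_SYNC) {
        host_flags |= MS_SYNC;
    }
    if (target_flags & TARGET_MS_INVALIDATE) {
        host_flags |= MS_INVALIDATE;
    }
    return host_flags;
}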
diff --git a/linux-user/generic/target_resource.h b/linux-user/generic/target_resource.h
index 539d8c4..37d3eb0 100644
--- a/linux-user/generic/target_resource.h
+++ b/linux-user/generic/target_resource.h
@@ -12,8 +12,8 @@
};
struct target_rlimit64 {
- uint64_t rlim_cur;
- uint64_t rlim_max;
+ abi_ullong rlim_cur;
+ abi_ullong rlim_max;
};
#define TARGET_RLIM_INFINITY ((abi_ulong)-1)
diff --git a/linux-user/hppa/target_mman.h b/linux-user/hppa/target_mman.h
index 66dd9f7..f9b6b97 100644
--- a/linux-user/hppa/target_mman.h
+++ b/linux-user/hppa/target_mman.h
@@ -10,6 +10,10 @@
#define TARGET_MADV_WIPEONFORK 71
#define TARGET_MADV_KEEPONFORK 72
+#define TARGET_MS_SYNC 1
+#define TARGET_MS_ASYNC 2
+#define TARGET_MS_INVALIDATE 4
+
#include "../generic/target_mman.h"
#endif
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
index 865413c..2d0918a 100644
--- a/linux-user/i386/cpu_loop.c
+++ b/linux-user/i386/cpu_loop.c
@@ -314,8 +314,17 @@
}
}
+static void target_cpu_free(void *obj)
+{
+ CPUArchState *env = ((CPUState *)obj)->env_ptr;
+ target_munmap(env->gdt.base, sizeof(uint64_t) * TARGET_GDT_ENTRIES);
+ g_free(obj);
+}
+
void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
+ CPUState *cpu = env_cpu(env);
+ OBJECT(cpu)->free = target_cpu_free;
env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
env->hflags |= HF_PE_MASK | HF_CPL_MASK;
if (env->features[FEAT_1_EDX] & CPUID_SSE) {
diff --git a/linux-user/main.c b/linux-user/main.c
index 4ff30ff..4b18461 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -40,6 +40,7 @@
#include "qemu/plugin.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
+#include "gdbstub/user.h"
#include "tcg/tcg.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
@@ -65,6 +66,7 @@
#endif
char *exec_path;
+char real_exec_path[PATH_MAX];
int singlestep;
static const char *argv0;
@@ -237,6 +239,14 @@
new_cpu->tcg_cflags = cpu->tcg_cflags;
memcpy(new_env, env, sizeof(CPUArchState));
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
+ new_env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ memcpy(g2h_untagged(new_env->gdt.base), g2h_untagged(env->gdt.base),
+ sizeof(uint64_t) * TARGET_GDT_ENTRIES);
+ OBJECT(new_cpu)->free = OBJECT(cpu)->free;
+#endif
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
@@ -739,6 +749,11 @@
}
}
+ /* Resolve executable file name to full path name */
+ if (realpath(exec_path, real_exec_path)) {
+ exec_path = real_exec_path;
+ }
+
/*
* get binfmt_misc flags
*/
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 098f3a7..748a98f 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -18,7 +18,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/bitops.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/user.h"
#include "hw/core/tcg-cpu-ops.h"
#include <sys/ucontext.h>
diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c
index c120c42..b36bb25 100644
--- a/linux-user/sparc/cpu_loop.c
+++ b/linux-user/sparc/cpu_loop.c
@@ -149,6 +149,69 @@
#endif
}
+static void next_instruction(CPUSPARCState *env)
+{
+ env->pc = env->npc;
+ env->npc = env->npc + 4;
+}
+
+static uint32_t do_getcc(CPUSPARCState *env)
+{
+#ifdef TARGET_SPARC64
+ return cpu_get_ccr(env) & 0xf;
+#else
+ return extract32(cpu_get_psr(env), 20, 4);
+#endif
+}
+
+static void do_setcc(CPUSPARCState *env, uint32_t icc)
+{
+#ifdef TARGET_SPARC64
+ cpu_put_ccr(env, (cpu_get_ccr(env) & 0xf0) | (icc & 0xf));
+#else
+ cpu_put_psr(env, deposit32(cpu_get_psr(env), 20, 4, icc));
+#endif
+}
+
+static uint32_t do_getpsr(CPUSPARCState *env)
+{
+#ifdef TARGET_SPARC64
+ const uint64_t TSTATE_CWP = 0x1f;
+ const uint64_t TSTATE_ICC = 0xfull << 32;
+ const uint64_t TSTATE_XCC = 0xfull << 36;
+ const uint32_t PSR_S = 0x00000080u;
+ const uint32_t PSR_V8PLUS = 0xff000000u;
+ uint64_t tstate = sparc64_tstate(env);
+
+ /* See <asm/psrcompat.h>, tstate_to_psr. */
+ return ((tstate & TSTATE_CWP) |
+ PSR_S |
+ ((tstate & TSTATE_ICC) >> 12) |
+ ((tstate & TSTATE_XCC) >> 20) |
+ PSR_V8PLUS);
+#else
+ return (cpu_get_psr(env) & (PSR_ICC | PSR_CWP)) | PSR_S;
+#endif
+}
+
+/* Avoid ifdefs below for the abi32 and abi64 paths. */
+#ifdef TARGET_ABI32
+#define TARGET_TT_SYSCALL (TT_TRAP + 0x10) /* t_linux */
+#define syscall_cc psr
+#else
+#define TARGET_TT_SYSCALL (TT_TRAP + 0x6d) /* tl0_linux64 */
+#define syscall_cc xcc
+#endif
+
+/* Avoid ifdefs below for the v9 and pre-v9 hw traps. */
+#ifdef TARGET_SPARC64
+#define TARGET_TT_SPILL TT_SPILL
+#define TARGET_TT_FILL TT_FILL
+#else
+#define TARGET_TT_SPILL TT_WIN_OVF
+#define TARGET_TT_FILL TT_WIN_UNF
+#endif
+
void cpu_loop (CPUSPARCState *env)
{
CPUState *cs = env_cpu(env);
@@ -167,13 +230,7 @@
}
switch (trapnr) {
-#ifndef TARGET_SPARC64
- case 0x88:
- case 0x90:
-#else
- case 0x110:
- case 0x16d:
-#endif
+ case TARGET_TT_SYSCALL:
ret = do_syscall (env, env->gregs[1],
env->regwptr[0], env->regwptr[1],
env->regwptr[2], env->regwptr[3],
@@ -183,67 +240,110 @@
break;
}
if ((abi_ulong)ret >= (abi_ulong)(-515)) {
-#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
- env->xcc |= PSR_CARRY;
-#else
- env->psr |= PSR_CARRY;
-#endif
+ env->syscall_cc |= PSR_CARRY;
ret = -ret;
} else {
-#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
- env->xcc &= ~PSR_CARRY;
-#else
- env->psr &= ~PSR_CARRY;
-#endif
+ env->syscall_cc &= ~PSR_CARRY;
}
env->regwptr[0] = ret;
/* next instruction */
env->pc = env->npc;
env->npc = env->npc + 4;
break;
- case 0x83: /* flush windows */
-#ifdef TARGET_ABI32
- case 0x103:
-#endif
+
+ case TT_TRAP + 0x01: /* breakpoint */
+ case EXCP_DEBUG:
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
+ break;
+
+ case TT_TRAP + 0x02: /* div0 */
+ case TT_DIV_ZERO:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->pc);
+ break;
+
+ case TT_TRAP + 0x03: /* flush windows */
flush_windows(env);
- /* next instruction */
- env->pc = env->npc;
- env->npc = env->npc + 4;
+ next_instruction(env);
break;
-#ifndef TARGET_SPARC64
- case TT_WIN_OVF: /* window overflow */
- save_window(env);
+
+ case TT_TRAP + 0x20: /* getcc */
+ env->gregs[1] = do_getcc(env);
+ next_instruction(env);
break;
- case TT_WIN_UNF: /* window underflow */
- restore_window(env);
+ case TT_TRAP + 0x21: /* setcc */
+ do_setcc(env, env->gregs[1]);
+ next_instruction(env);
break;
-#else
- case TT_SPILL: /* window overflow */
- save_window(env);
+ case TT_TRAP + 0x22: /* getpsr */
+ env->gregs[1] = do_getpsr(env);
+ next_instruction(env);
break;
- case TT_FILL: /* window underflow */
- restore_window(env);
- break;
-#ifndef TARGET_ABI32
- case 0x16e:
+
+#ifdef TARGET_SPARC64
+ case TT_TRAP + 0x6e:
flush_windows(env);
sparc64_get_context(env);
break;
- case 0x16f:
+ case TT_TRAP + 0x6f:
flush_windows(env);
sparc64_set_context(env);
break;
#endif
-#endif
+
+ case TARGET_TT_SPILL: /* window overflow */
+ save_window(env);
+ break;
+ case TARGET_TT_FILL: /* window underflow */
+ restore_window(env);
+ break;
+
+ case TT_FP_EXCP:
+ {
+ int code = TARGET_FPE_FLTUNK;
+ target_ulong fsr = env->fsr;
+
+ if ((fsr & FSR_FTT_MASK) == FSR_FTT_IEEE_EXCP) {
+ if (fsr & FSR_NVC) {
+ code = TARGET_FPE_FLTINV;
+ } else if (fsr & FSR_OFC) {
+ code = TARGET_FPE_FLTOVF;
+ } else if (fsr & FSR_UFC) {
+ code = TARGET_FPE_FLTUND;
+ } else if (fsr & FSR_DZC) {
+ code = TARGET_FPE_FLTDIV;
+ } else if (fsr & FSR_NXC) {
+ code = TARGET_FPE_FLTRES;
+ }
+ }
+ force_sig_fault(TARGET_SIGFPE, code, env->pc);
+ }
+ break;
+
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
case TT_ILL_INSN:
force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPC, env->pc);
break;
- case EXCP_DEBUG:
- force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
+ case TT_PRIV_INSN:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->pc);
break;
+ case TT_TOVF:
+ force_sig_fault(TARGET_SIGEMT, TARGET_EMT_TAGOVF, env->pc);
+ break;
+#ifdef TARGET_SPARC64
+ case TT_PRIV_ACT:
+ /* Note do_privact defers to do_privop. */
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_PRVOPC, env->pc);
+ break;
+#else
+ case TT_NCP_INSN:
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_COPROC, env->pc);
+ break;
+ case TT_UNIMP_FLUSH:
+ next_instruction(env);
+ break;
+#endif
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
break;
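For readers unfamiliar with the bit-field helpers used by do_getcc()/do_setcc() above: extract32() pulls a field out of a 32-bit word and deposit32() writes one back without disturbing the other bits. A self-contained sketch with the helpers reimplemented locally (the real ones live in qemu/bitops.h), extracting the 4-bit icc field at bit 20 of a PSR-like value:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local reimplementations with the usual extract/deposit semantics. */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t field)
    {
        uint32_t mask = (~0u >> (32 - length)) << start;
        return (value & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint32_t psr = 0x00A000C0u;         /* icc = 0xA in bits [23:20] */
        assert(extract32(psr, 20, 4) == 0xA);
        psr = deposit32(psr, 20, 4, 0x5);   /* replace icc, keep other bits */
        printf("icc=%x psr=%08x\n", extract32(psr, 20, 4), psr);
        return 0;
    }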
diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c
index b501750..2be9000 100644
--- a/linux-user/sparc/signal.c
+++ b/linux-user/sparc/signal.c
@@ -503,7 +503,23 @@
return -QEMU_ESIGRETURN;
}
-#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
+#ifdef TARGET_ABI32
+void setup_sigtramp(abi_ulong sigtramp_page)
+{
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
+ assert(tramp != NULL);
+
+ default_sigreturn = sigtramp_page;
+ install_sigtramp(tramp, TARGET_NR_sigreturn);
+
+ default_rt_sigreturn = sigtramp_page + 8;
+ install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn);
+
+ unlock_user(tramp, sigtramp_page, 2 * 8);
+}
+#endif
+
+#ifdef TARGET_SPARC64
#define SPARC_MC_TSTATE 0
#define SPARC_MC_PC 1
#define SPARC_MC_NPC 2
@@ -575,7 +591,7 @@
struct target_ucontext *ucp;
target_mc_gregset_t *grp;
target_mc_fpu_t *fpup;
- abi_ulong pc, npc, tstate;
+ target_ulong pc, npc, tstate;
unsigned int i;
unsigned char fenab;
@@ -773,18 +789,4 @@
unlock_user_struct(ucp, ucp_addr, 1);
force_sig(TARGET_SIGSEGV);
}
-#else
-void setup_sigtramp(abi_ulong sigtramp_page)
-{
- uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
- assert(tramp != NULL);
-
- default_sigreturn = sigtramp_page;
- install_sigtramp(tramp, TARGET_NR_sigreturn);
-
- default_rt_sigreturn = sigtramp_page + 8;
- install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn);
-
- unlock_user(tramp, sigtramp_page, 2 * 8);
-}
-#endif
+#endif /* TARGET_SPARC64 */
diff --git a/linux-user/sparc/target_signal.h b/linux-user/sparc/target_signal.h
index 87757f0..f223eb4 100644
--- a/linux-user/sparc/target_signal.h
+++ b/linux-user/sparc/target_signal.h
@@ -8,7 +8,7 @@
#define TARGET_SIGTRAP 5
#define TARGET_SIGABRT 6
#define TARGET_SIGIOT 6
-#define TARGET_SIGSTKFLT 7 /* actually EMT */
+#define TARGET_SIGEMT 7
#define TARGET_SIGFPE 8
#define TARGET_SIGKILL 9
#define TARGET_SIGBUS 10
diff --git a/linux-user/strace.c b/linux-user/strace.c
index 3400106..aad2b62 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -81,6 +81,7 @@
UNUSED static void print_string(abi_long, int);
UNUSED static void print_buf(abi_long addr, abi_long len, int last);
UNUSED static void print_raw_param(const char *, abi_long, int);
+UNUSED static void print_raw_param64(const char *, long long, int last);
UNUSED static void print_timeval(abi_ulong, int);
UNUSED static void print_timespec(abi_ulong, int);
UNUSED static void print_timespec64(abi_ulong, int);
@@ -1110,11 +1111,16 @@
FLAG_END,
};
+#ifndef CLONE_PIDFD
+# define CLONE_PIDFD 0x00001000
+#endif
+
UNUSED static const struct flags clone_flags[] = {
FLAG_GENERIC(CLONE_VM),
FLAG_GENERIC(CLONE_FS),
FLAG_GENERIC(CLONE_FILES),
FLAG_GENERIC(CLONE_SIGHAND),
+ FLAG_GENERIC(CLONE_PIDFD),
FLAG_GENERIC(CLONE_PTRACE),
FLAG_GENERIC(CLONE_VFORK),
FLAG_GENERIC(CLONE_PARENT),
@@ -1642,6 +1648,19 @@
qemu_log(format, param);
}
+/*
+ * Same as print_raw_param() but prints out raw 64-bit parameter.
+ */
+static void
+print_raw_param64(const char *fmt, long long param, int last)
+{
+ char format[64];
+
+ (void)snprintf(format, sizeof(format), "%s%s", fmt, get_comma(last));
+ qemu_log(format, param);
+}
+
+
static void
print_pointer(abi_long p, int last)
{
@@ -1718,10 +1737,8 @@
print_pointer(ts_addr, last);
return;
}
- qemu_log("{tv_sec = %lld"
- ",tv_nsec = %lld}%s",
- (long long)tswap64(ts->tv_sec), (long long)tswap64(ts->tv_nsec),
- get_comma(last));
+ print_raw_param64("{tv_sec=%" PRId64, tswap64(ts->tv_sec), 0);
+ print_raw_param64("tv_nsec=%" PRId64 "}", tswap64(ts->tv_nsec), last);
unlock_user(ts, ts_addr, 0);
} else {
qemu_log("NULL%s", get_comma(last));
@@ -3854,6 +3871,94 @@
}
#endif
+#ifdef TARGET_NR_prlimit64
+static const char *target_ressource_string(abi_ulong r)
+{
+ #define RET_RES_ENTRY(res) case TARGET_##res: return #res;
+ switch (r) {
+ RET_RES_ENTRY(RLIMIT_AS);
+ RET_RES_ENTRY(RLIMIT_CORE);
+ RET_RES_ENTRY(RLIMIT_CPU);
+ RET_RES_ENTRY(RLIMIT_DATA);
+ RET_RES_ENTRY(RLIMIT_FSIZE);
+ RET_RES_ENTRY(RLIMIT_LOCKS);
+ RET_RES_ENTRY(RLIMIT_MEMLOCK);
+ RET_RES_ENTRY(RLIMIT_MSGQUEUE);
+ RET_RES_ENTRY(RLIMIT_NICE);
+ RET_RES_ENTRY(RLIMIT_NOFILE);
+ RET_RES_ENTRY(RLIMIT_NPROC);
+ RET_RES_ENTRY(RLIMIT_RSS);
+ RET_RES_ENTRY(RLIMIT_RTPRIO);
+#ifdef RLIMIT_RTTIME
+ RET_RES_ENTRY(RLIMIT_RTTIME);
+#endif
+ RET_RES_ENTRY(RLIMIT_SIGPENDING);
+ RET_RES_ENTRY(RLIMIT_STACK);
+ default:
+ return NULL;
+ }
+ #undef RET_RES_ENTRY
+}
+
+static void
+print_rlimit64(abi_ulong rlim_addr, int last)
+{
+ if (rlim_addr) {
+ struct target_rlimit64 *rl;
+
+ rl = lock_user(VERIFY_READ, rlim_addr, sizeof(*rl), 1);
+ if (!rl) {
+ print_pointer(rlim_addr, last);
+ return;
+ }
+ print_raw_param64("{rlim_cur=%" PRId64, tswap64(rl->rlim_cur), 0);
+ print_raw_param64("rlim_max=%" PRId64 "}", tswap64(rl->rlim_max),
+ last);
+ unlock_user(rl, rlim_addr, 0);
+ } else {
+ qemu_log("NULL%s", get_comma(last));
+ }
+}
+
+static void
+print_prlimit64(CPUArchState *cpu_env, const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ const char *rlim_name;
+
+ print_syscall_prologue(name);
+ print_raw_param("%d", arg0, 0);
+ rlim_name = target_ressource_string(arg1);
+ if (rlim_name) {
+ qemu_log("%s,", rlim_name);
+ } else {
+ print_raw_param("%d", arg1, 0);
+ }
+ print_rlimit64(arg2, 0);
+ print_pointer(arg3, 1);
+ print_syscall_epilogue(name);
+}
+
+static void
+print_syscall_ret_prlimit64(CPUArchState *cpu_env,
+ const struct syscallname *name,
+ abi_long ret, abi_long arg0, abi_long arg1,
+ abi_long arg2, abi_long arg3, abi_long arg4,
+ abi_long arg5)
+{
+ if (!print_syscall_err(ret)) {
+ qemu_log(TARGET_ABI_FMT_ld, ret);
+ if (arg3) {
+ qemu_log(" (");
+ print_rlimit64(arg3, 1);
+ qemu_log(")");
+ }
+ }
+ qemu_log("\n");
+}
+#endif
+
#ifdef TARGET_NR_kill
static void
print_kill(CPUArchState *cpu_env, const struct syscallname *name,
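As a quick illustration of the print_raw_param64() helper added above, which builds the final format string with snprintf() and logs a 64-bit value via PRId64, here is a hedged standalone sketch with qemu_log() replaced by printf() and get_comma() reimplemented locally:

    #include <inttypes.h>
    #include <stdio.h>

    /* Emit "," unless this is the last field of the traced call. */
    static const char *get_comma(int last)
    {
        return last ? "" : ",";
    }

    /* Simplified local version of print_raw_param64(). */
    static void print_raw_param64(const char *fmt, long long param, int last)
    {
        char format[64];

        snprintf(format, sizeof(format), "%s%s", fmt, get_comma(last));
        printf(format, param);
    }

    int main(void)
    {
        /* Prints: {tv_sec=1,tv_nsec=500000000} */
        print_raw_param64("{tv_sec=%" PRId64, 1LL, 0);
        print_raw_param64("tv_nsec=%" PRId64 "}", 500000000LL, 1);
        printf("\n");
        return 0;
    }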
diff --git a/linux-user/strace.list b/linux-user/strace.list
index d8acbee..c7808ea 100644
--- a/linux-user/strace.list
+++ b/linux-user/strace.list
@@ -656,7 +656,7 @@
{ TARGET_NR_msgsnd, "msgsnd" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_msync
-{ TARGET_NR_msync, "msync" , NULL, NULL, NULL },
+{ TARGET_NR_msync, "msync" , "%s(%p,%u,%d)", NULL, NULL },
#endif
#ifdef TARGET_NR_multiplexer
{ TARGET_NR_multiplexer, "multiplexer" , NULL, NULL, NULL },
@@ -1074,7 +1074,8 @@
{ TARGET_NR_preadv, "preadv" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_prlimit64
-{ TARGET_NR_prlimit64, "prlimit64" , NULL, NULL, NULL },
+{ TARGET_NR_prlimit64, "prlimit64" , NULL, print_prlimit64,
+ print_syscall_ret_prlimit64 },
#endif
#ifdef TARGET_NR_process_vm_readv
{ TARGET_NR_process_vm_readv, "process_vm_readv" , NULL, NULL, NULL },
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index a6c426d..2787164 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -22,6 +22,8 @@
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
+#include "qemu/plugin.h"
+#include "target_mman.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
@@ -168,9 +170,13 @@
#define CLONE_IGNORED_FLAGS \
(CLONE_DETACHED | CLONE_IO)
+#ifndef CLONE_PIDFD
+# define CLONE_PIDFD 0x00001000
+#endif
+
/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS \
- (CLONE_SETTLS | CLONE_PARENT_SETTID | \
+ (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
/* Flags for thread creation which we can implement within QEMU itself */
@@ -795,49 +801,52 @@
}
static abi_ulong target_brk;
-static abi_ulong target_original_brk;
static abi_ulong brk_page;
void target_set_brk(abi_ulong new_brk)
{
- target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
+ target_brk = new_brk;
brk_page = HOST_PAGE_ALIGN(target_brk);
}
-//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
-#define DEBUGF_BRK(message, args...)
-
/* do_brk() must return target values and target errnos. */
-abi_long do_brk(abi_ulong new_brk)
+abi_long do_brk(abi_ulong brk_val)
{
abi_long mapped_addr;
abi_ulong new_alloc_size;
+ abi_ulong new_brk, new_host_brk_page;
/* brk pointers are always untagged */
- DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
-
- if (!new_brk) {
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
- return target_brk;
- }
- if (new_brk < target_original_brk) {
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
- target_brk);
+ /* return old brk value if brk_val unchanged or zero */
+ if (!brk_val || brk_val == target_brk) {
return target_brk;
}
- /* If the new brk is less than the highest page reserved to the
- * target heap allocation, set it and we're almost done... */
- if (new_brk <= brk_page) {
- /* Heap contents are initialized to zero, as for anonymous
- * mapped pages. */
- if (new_brk > target_brk) {
- memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
+ new_brk = TARGET_PAGE_ALIGN(brk_val);
+ new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
+
+ /* brk_val and old target_brk might be on the same page */
+ if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
+ if (brk_val > target_brk) {
+ /* empty remaining bytes in (possibly larger) host page */
+ memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
}
- target_brk = new_brk;
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
- return target_brk;
+ target_brk = brk_val;
+ return target_brk;
+ }
+
+ /* Release heap if necessary */
+ if (new_brk < target_brk) {
+ /* empty remaining bytes in (possibly larger) host page */
+ memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);
+
+ /* free unused host pages and set new brk_page */
+ target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
+ brk_page = new_host_brk_page;
+
+ target_brk = brk_val;
+ return target_brk;
}
/* We need to allocate more memory after the brk... Note that
@@ -846,10 +855,14 @@
* itself); instead we treat "mapped but at wrong address" as
* a failure and unmap again.
*/
- new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
- mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
+ new_alloc_size = new_host_brk_page - brk_page;
+ if (new_alloc_size) {
+ mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
PROT_READ|PROT_WRITE,
MAP_ANON|MAP_PRIVATE, 0, 0));
+ } else {
+ mapped_addr = brk_page;
+ }
if (mapped_addr == brk_page) {
/* Heap contents are initialized to zero, as for anonymous
@@ -861,10 +874,8 @@
* then shrunken). */
memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
- target_brk = new_brk;
- brk_page = HOST_PAGE_ALIGN(target_brk);
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
- target_brk);
+ target_brk = brk_val;
+ brk_page = new_host_brk_page;
return target_brk;
} else if (mapped_addr != -1) {
/* Mapped but at wrong address, meaning there wasn't actually
@@ -872,10 +883,6 @@
*/
target_munmap(mapped_addr, new_alloc_size);
mapped_addr = -1;
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
- }
- else {
- DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
}
#if defined(TARGET_ALPHA)
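To make the control flow of the rewritten do_brk() above easier to follow, here is a sketch of the page-alignment arithmetic it relies on, using hypothetical 4 KiB guest pages and 16 KiB host pages (the real sizes come from TARGET_PAGE_SIZE and the host page size) and classifying a requested brk value into the unchanged, same-guest-page, shrink and grow cases:

    #include <inttypes.h>
    #include <stdio.h>

    /* Hypothetical page sizes, for illustration only. */
    #define GUEST_PAGE_SIZE 0x1000u      /* 4 KiB guest pages */
    #define HOST_PAGE_SIZE  0x4000u      /* 16 KiB host pages */
    #define GUEST_PAGE_ALIGN(a) \
        (((a) + GUEST_PAGE_SIZE - 1) & ~(uint64_t)(GUEST_PAGE_SIZE - 1))
    #define HOST_PAGE_ALIGN(a) \
        (((a) + HOST_PAGE_SIZE - 1) & ~(uint64_t)(HOST_PAGE_SIZE - 1))

    static void classify_brk(uint64_t target_brk, uint64_t brk_val)
    {
        uint64_t new_brk = GUEST_PAGE_ALIGN(brk_val);
        uint64_t new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

        if (!brk_val || brk_val == target_brk) {
            printf("0x%" PRIx64 ": unchanged, return old brk\n", brk_val);
        } else if (new_brk == GUEST_PAGE_ALIGN(target_brk)) {
            printf("0x%" PRIx64 ": same guest page, zero tail, move brk\n",
                   brk_val);
        } else if (new_brk < target_brk) {
            printf("0x%" PRIx64 ": shrink, munmap back to host page 0x%"
                   PRIx64 "\n", brk_val, new_host_brk_page);
        } else {
            printf("0x%" PRIx64 ": grow, mmap 0x%" PRIx64 " more host bytes\n",
                   brk_val, new_host_brk_page - HOST_PAGE_ALIGN(target_brk));
        }
    }

    int main(void)
    {
        uint64_t target_brk = 0x10002300;     /* current guest brk */
        classify_brk(target_brk, 0);          /* query */
        classify_brk(target_brk, 0x10002800); /* same guest page */
        classify_brk(target_brk, 0x10000800); /* shrink */
        classify_brk(target_brk, 0x10010000); /* grow */
        return 0;
    }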
@@ -1713,6 +1720,11 @@
lladdr = (struct target_sockaddr_ll *)addr;
lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
+ } else if (sa_family == AF_INET6) {
+ struct sockaddr_in6 *in6addr;
+
+ in6addr = (struct sockaddr_in6 *)addr;
+ in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
}
unlock_user(target_saddr, target_addr, 0);
@@ -6723,6 +6735,17 @@
return -TARGET_EINVAL;
}
+#if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
+ if (flags & CLONE_PIDFD) {
+ return -TARGET_EINVAL;
+ }
+#endif
+
+ /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
+ if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
+ return -TARGET_EINVAL;
+ }
+
if (block_signals()) {
return -QEMU_ERESTARTSYS;
}
@@ -6750,6 +6773,20 @@
ts->child_tidptr = child_tidptr;
} else {
cpu_clone_regs_parent(env, flags);
+ if (flags & CLONE_PIDFD) {
+ int pid_fd = 0;
+#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
+ int pid_child = ret;
+ pid_fd = pidfd_open(pid_child, 0);
+ if (pid_fd >= 0) {
+ fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
+ | FD_CLOEXEC);
+ } else {
+ pid_fd = 0;
+ }
+#endif
+ put_user_u32(pid_fd, parent_tidptr);
+ }
fork_end(0);
}
g_assert(!cpu_in_exclusive_context(cpu));
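The CLONE_PIDFD path above opens a pidfd for the new child in the parent and marks it close-on-exec. A hedged standalone sketch of the same sequence using the raw syscall (SYS_pidfd_open needs Linux 5.3+ headers; note the conventional read side for FD_CLOEXEC is F_GETFD):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t child = fork();
        if (child == 0) {
            _exit(0);                   /* child does nothing */
        }

    #ifdef SYS_pidfd_open
        int pid_fd = syscall(SYS_pidfd_open, child, 0);
        if (pid_fd >= 0) {
            /* Mark close-on-exec; F_GETFD is the "get" that pairs with F_SETFD. */
            fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD) | FD_CLOEXEC);
            printf("pidfd for %d is fd %d\n", (int)child, pid_fd);
            close(pid_fd);
        } else {
            perror("pidfd_open");
        }
    #else
        puts("SYS_pidfd_open not available on this host");
    #endif

        waitpid(child, NULL, 0);
        return 0;
    }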
@@ -7606,6 +7643,14 @@
}
#endif
+static inline int target_to_host_msync_arg(abi_long arg)
+{
+ return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
+ ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
+ ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
+ (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
+}
+
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
defined(TARGET_NR_newfstatat))
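Because the guest's MS_* values need not match the host's, the new target_to_host_msync_arg() above remaps each known bit individually and passes unknown bits straight through so the host kernel can reject them. A minimal sketch of that pattern with made-up guest constants (the real TARGET_MS_* values live in the per-target mman headers):

    #include <stdio.h>
    #include <sys/mman.h>

    /* Hypothetical guest-side flag values, for illustration only. */
    #define GUEST_MS_ASYNC      1
    #define GUEST_MS_INVALIDATE 2
    #define GUEST_MS_SYNC       4

    static int guest_to_host_msync_arg(int arg)
    {
        return ((arg & GUEST_MS_ASYNC) ? MS_ASYNC : 0) |
               ((arg & GUEST_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
               ((arg & GUEST_MS_SYNC) ? MS_SYNC : 0) |
               /* unknown bits pass through so the host can return EINVAL */
               (arg & ~(GUEST_MS_ASYNC | GUEST_MS_INVALIDATE | GUEST_MS_SYNC));
    }

    int main(void)
    {
        int guest = GUEST_MS_SYNC | GUEST_MS_INVALIDATE;
        printf("guest 0x%x -> host 0x%x\n", guest,
               guest_to_host_msync_arg(guest));
        return 0;
    }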
@@ -8079,6 +8124,9 @@
gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
bin = bin ? bin + 1 : ts->bprm->argv[0];
g_string_printf(buf, "(%.15s) ", bin);
+ } else if (i == 2) {
+ /* task state */
+ g_string_assign(buf, "R "); /* we are running right now */
} else if (i == 3) {
/* ppid */
g_string_printf(buf, FMT_pid " ", getppid());
@@ -9989,18 +10037,13 @@
/* Short circuit this for the magic exe check. */
ret = -TARGET_EINVAL;
} else if (is_proc_myself((const char *)p, "exe")) {
- char real[PATH_MAX], *temp;
- temp = realpath(exec_path, real);
- /* Return value is # of bytes that we wrote to the buffer. */
- if (temp == NULL) {
- ret = get_errno(-1);
- } else {
- /* Don't worry about sign mismatch as earlier mapping
- * logic would have thrown a bad address error. */
- ret = MIN(strlen(real), arg3);
- /* We cannot NUL terminate the string. */
- memcpy(p2, real, ret);
- }
+ /*
+ * Don't worry about sign mismatch as earlier mapping
+ * logic would have thrown a bad address error.
+ */
+ ret = MIN(strlen(exec_path), arg3);
+ /* We cannot NUL terminate the string. */
+ memcpy(p2, exec_path, ret);
} else {
ret = get_errno(readlink(path(p), p2, arg3));
}
@@ -10021,18 +10064,13 @@
/* Short circuit this for the magic exe check. */
ret = -TARGET_EINVAL;
} else if (is_proc_myself((const char *)p, "exe")) {
- char real[PATH_MAX], *temp;
- temp = realpath(exec_path, real);
- /* Return value is # of bytes that we wrote to the buffer. */
- if (temp == NULL) {
- ret = get_errno(-1);
- } else {
- /* Don't worry about sign mismatch as earlier mapping
- * logic would have thrown a bad address error. */
- ret = MIN(strlen(real), arg4);
- /* We cannot NUL terminate the string. */
- memcpy(p2, real, ret);
- }
+ /*
+ * Don't worry about sign mismatch as earlier mapping
+ * logic would have thrown a bad address error.
+ */
+ ret = MIN(strlen(exec_path), arg4);
+ /* We cannot NUL terminate the string. */
+ memcpy(p2, exec_path, ret);
} else {
ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
}
@@ -10129,7 +10167,8 @@
/* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
case TARGET_NR_msync:
- return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
+ return get_errno(msync(g2h(cpu, arg1), arg2,
+ target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
case TARGET_NR_mlock:
@@ -12886,8 +12925,8 @@
if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
return -TARGET_EFAULT;
}
- rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
- rnew.rlim_max = tswap64(target_rnew->rlim_max);
+ __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
+ __get_user(rnew.rlim_max, &target_rnew->rlim_max);
unlock_user_struct(target_rnew, arg3, 0);
rnewp = &rnew;
}
@@ -12897,8 +12936,8 @@
if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
return -TARGET_EFAULT;
}
- target_rold->rlim_cur = tswap64(rold.rlim_cur);
- target_rold->rlim_max = tswap64(rold.rlim_max);
+ __put_user(rold.rlim_cur, &target_rold->rlim_cur);
+ __put_user(rold.rlim_max, &target_rold->rlim_max);
unlock_user_struct(target_rold, arg4, 1);
}
return ret;
@@ -13118,8 +13157,12 @@
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
case TARGET_NR_timerfd_create:
- return get_errno(timerfd_create(arg1,
- target_to_host_bitmask(arg2, fcntl_flags_tbl)));
+ ret = get_errno(timerfd_create(arg1,
+ target_to_host_bitmask(arg2, fcntl_flags_tbl)));
+ if (ret >= 0) {
+ fd_trans_register(ret, &target_timerfd_trans);
+ }
+ return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 77864de..614a1cb 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -717,6 +717,11 @@
#define TARGET_TRAP_HWBKPT (4) /* hardware breakpoint/watchpoint */
#define TARGET_TRAP_UNK (5) /* undiagnosed trap */
+/*
+ * SIGEMT si_codes
+ */
+#define TARGET_EMT_TAGOVF 1 /* tag overflow */
+
#include "target_resource.h"
struct target_pollfd {
diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h
index 3576da4..9333db4 100644
--- a/linux-user/user-internals.h
+++ b/linux-user/user-internals.h
@@ -20,6 +20,7 @@
#include "exec/user/thunk.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "qemu/log.h"
extern char *exec_path;
diff --git a/meson.build b/meson.build
index 6bcab8b..29f8644 100644
--- a/meson.build
+++ b/meson.build
@@ -1746,8 +1746,8 @@
error_message: '-display dbus requires glib>=2.64') \
.require(gdbus_codegen.found(),
error_message: gdbus_codegen_error.format('-display dbus')) \
- .require(opengl.found() and gbm.found(),
- error_message: '-display dbus requires epoxy/egl and gbm') \
+ .require(targetos != 'windows',
+ error_message: '-display dbus is not available on Windows') \
.allowed()
have_virtfs = get_option('virtfs') \
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 575d48c..180ba38 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
diff --git a/migration/exec.c b/migration/exec.c
index 38604d7..2bf882b 100644
--- a/migration/exec.c
+++ b/migration/exec.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "channel.h"
#include "exec.h"
#include "migration.h"
diff --git a/migration/multifd.c b/migration/multifd.c
index 5e85c3e..cbc0dfe 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -677,7 +677,7 @@
if (p->pending_job) {
uint64_t packet_num = p->packet_num;
- uint32_t flags = p->flags;
+ uint32_t flags;
p->normal_num = 0;
if (use_zero_copy_send) {
@@ -699,6 +699,7 @@
}
}
multifd_send_fill_packet(p);
+ flags = p->flags;
p->flags = 0;
p->num_packets++;
p->total_normal_pages += p->normal_num;
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index f54f44d..41c0713 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -1198,11 +1198,6 @@
if (migrate_postcopy_preempt()) {
/*
- * The preempt channel is established in asynchronous way. Wait
- * for its completion.
- */
- qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
- /*
* This thread needs to be created after the temp pages because
* it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
*/
@@ -1668,6 +1663,12 @@
qemu_sem_post(&mis->thread_sync_sem);
+ /*
+ * The preempt channel is established in asynchronous way. Wait
+ * for its completion.
+ */
+ qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
+
/* Sending RAM_SAVE_FLAG_EOS to terminate this thread */
qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
while (1) {
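The two hunks above move the wait for the asynchronously established preempt channel out of the setup path and into the thread that actually consumes the channel. The primitive underneath is just a counting semaphore; a generic sketch of the pattern with plain POSIX semaphores (not QEMU's qemu_sem_* wrappers):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <unistd.h>

    static sem_t channel_ready;

    /* Stands in for the asynchronously created preempt channel. */
    static void *establish_channel(void *arg)
    {
        usleep(10000);              /* pretend the connection takes a while */
        sem_post(&channel_ready);   /* the "qemufile_dst_done" signal */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&channel_ready, 0, 0);
        pthread_create(&t, NULL, establish_channel, NULL);

        /* Consumer thread: block until the channel is actually usable. */
        sem_wait(&channel_ready);
        puts("preempt channel ready, start servicing requests");

        pthread_join(t, NULL);
        sem_destroy(&channel_ready);
        return 0;
    }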
diff --git a/migration/rdma.c b/migration/rdma.c
index 288eadc..df646be 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3373,7 +3373,8 @@
* initialize the RDMAContext for return path for postcopy after first
* connection request reached.
*/
- if (migrate_postcopy() && !rdma->is_return_path) {
+ if ((migrate_postcopy() || migrate_use_return_path())
+ && !rdma->is_return_path) {
rdma_return_path = qemu_rdma_data_init(rdma->host_port, NULL);
if (rdma_return_path == NULL) {
rdma_ack_cm_event(cm_event);
@@ -3455,7 +3456,8 @@
}
/* Accept the second connection request for return path */
- if (migrate_postcopy() && !rdma->is_return_path) {
+ if ((migrate_postcopy() || migrate_use_return_path())
+ && !rdma->is_return_path) {
qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
NULL,
(void *)(intptr_t)rdma->return_path);
@@ -4109,7 +4111,7 @@
void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
int ret;
- RDMAContext *rdma, *rdma_return_path = NULL;
+ RDMAContext *rdma;
Error *local_err = NULL;
trace_rdma_start_incoming_migration();
@@ -4155,7 +4157,6 @@
g_free(rdma->host_port);
}
g_free(rdma);
- g_free(rdma_return_path);
}
void rdma_start_outgoing_migration(void *opaque,
@@ -4192,7 +4193,7 @@
}
/* RDMA postcopy need a separate queue pair for return path */
- if (migrate_postcopy()) {
+ if (migrate_postcopy() || migrate_use_return_path()) {
rdma_return_path = qemu_rdma_data_init(host_port, errp);
if (rdma_return_path == NULL) {
diff --git a/migration/target.c b/migration/target.c
index 907ebf0..00ca007 100644
--- a/migration/target.c
+++ b/migration/target.c
@@ -8,6 +8,7 @@
#include "qemu/osdep.h"
#include "qapi/qapi-types-migration.h"
#include "migration.h"
+#include CONFIG_DEVICES
#ifdef CONFIG_VFIO
#include "hw/vfio/vfio-common.h"
@@ -17,7 +18,6 @@
{
#ifdef CONFIG_VFIO
if (vfio_mig_active()) {
- info->has_vfio = true;
info->vfio = g_malloc0(sizeof(*info->vfio));
info->vfio->transferred = vfio_mig_bytes_transferred();
}
diff --git a/migration/xbzrle.c b/migration/xbzrle.c
index 05366e8..c6f8b20 100644
--- a/migration/xbzrle.c
+++ b/migration/xbzrle.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/cutils.h"
+#include "qemu/host-utils.h"
#include "xbzrle.h"
/*
@@ -196,10 +197,6 @@
__m512i r = _mm512_set1_epi32(0);
while (count512s) {
- if (d + 2 > dlen) {
- return -1;
- }
-
int bytes_to_check = 64;
uint64_t mask = 0xffffffffffffffff;
if (count512s == 1) {
@@ -215,6 +212,9 @@
bool is_same = (comp & 0x1);
while (bytes_to_check) {
+ if (d + 2 > dlen) {
+ return -1;
+ }
if (is_same) {
if (nzrun_len) {
d += uleb128_encode_small(dst + d, nzrun_len);
@@ -233,7 +233,7 @@
break;
}
never_same = false;
- num = __builtin_ctzll(~comp);
+ num = ctz64(~comp);
num = (num < bytes_to_check) ? num : bytes_to_check;
zrun_len += num;
bytes_to_check -= num;
@@ -262,7 +262,7 @@
nzrun_len += 64;
break;
}
- num = __builtin_ctzll(comp);
+ num = ctz64(comp);
num = (num < bytes_to_check) ? num : bytes_to_check;
nzrun_len += num;
bytes_to_check -= num;
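The switch from __builtin_ctzll() to ctz64() above trades a compiler builtin for QEMU's portable helper from qemu/host-utils.h; both count trailing zero bits, but the helper is also defined for a zero input. A quick standalone check of the equivalence for nonzero words (ctz64 reimplemented locally here):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for qemu/host-utils.h's ctz64(). */
    static int ctz64(uint64_t val)
    {
        if (val == 0) {
            return 64;              /* defined, unlike the builtin on 0 */
        }
        int n = 0;
        while (!(val & 1)) {
            val >>= 1;
            n++;
        }
        return n;
    }

    int main(void)
    {
        uint64_t comp = 0x00000000ffff0000ull;  /* e.g. a 64-lane compare mask */
        assert(ctz64(comp) == __builtin_ctzll(comp));
        printf("trailing zeros: %d\n", ctz64(comp));
        printf("ctz64(0) = %d\n", ctz64(0));
        return 0;
    }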
diff --git a/monitor/fds.c b/monitor/fds.c
index 26b39a0..d86c2c6 100644
--- a/monitor/fds.c
+++ b/monitor/fds.c
@@ -61,11 +61,48 @@
static QemuMutex mon_fdsets_lock;
static QLIST_HEAD(, MonFdset) mon_fdsets;
+static bool monitor_add_fd(Monitor *mon, int fd, const char *fdname, Error **errp)
+{
+ mon_fd_t *monfd;
+
+ if (qemu_isdigit(fdname[0])) {
+ close(fd);
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdname",
+ "a name not starting with a digit");
+ return false;
+ }
+
+ /* See close() call below. */
+ qemu_mutex_lock(&mon->mon_lock);
+ QLIST_FOREACH(monfd, &mon->fds, next) {
+ int tmp_fd;
+
+ if (strcmp(monfd->name, fdname) != 0) {
+ continue;
+ }
+
+ tmp_fd = monfd->fd;
+ monfd->fd = fd;
+ qemu_mutex_unlock(&mon->mon_lock);
+ /* Make sure close() is outside critical section */
+ close(tmp_fd);
+ return true;
+ }
+
+ monfd = g_new0(mon_fd_t, 1);
+ monfd->name = g_strdup(fdname);
+ monfd->fd = fd;
+
+ QLIST_INSERT_HEAD(&mon->fds, monfd, next);
+ qemu_mutex_unlock(&mon->mon_lock);
+ return true;
+}
+
+#ifdef CONFIG_POSIX
void qmp_getfd(const char *fdname, Error **errp)
{
Monitor *cur_mon = monitor_cur();
- mon_fd_t *monfd;
- int fd, tmp_fd;
+ int fd;
fd = qemu_chr_fe_get_msgfd(&cur_mon->chr);
if (fd == -1) {
@@ -73,32 +110,9 @@
return;
}
- if (qemu_isdigit(fdname[0])) {
- close(fd);
- error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdname",
- "a name not starting with a digit");
- return;
- }
-
- QEMU_LOCK_GUARD(&cur_mon->mon_lock);
- QLIST_FOREACH(monfd, &cur_mon->fds, next) {
- if (strcmp(monfd->name, fdname) != 0) {
- continue;
- }
-
- tmp_fd = monfd->fd;
- monfd->fd = fd;
- /* Make sure close() is outside critical section */
- close(tmp_fd);
- return;
- }
-
- monfd = g_new0(mon_fd_t, 1);
- monfd->name = g_strdup(fdname);
- monfd->fd = fd;
-
- QLIST_INSERT_HEAD(&cur_mon->fds, monfd, next);
+ monitor_add_fd(cur_mon, fd, fdname, errp);
}
+#endif
void qmp_closefd(const char *fdname, Error **errp)
{
@@ -211,6 +225,41 @@
return NULL;
}
+#ifdef WIN32
+void qmp_get_win32_socket(const char *infos, const char *fdname, Error **errp)
+{
+ g_autofree WSAPROTOCOL_INFOW *info = NULL;
+ gsize len;
+ SOCKET sk;
+ int fd;
+
+ info = (void *)g_base64_decode(infos, &len);
+ if (len != sizeof(*info)) {
+ error_setg(errp, "Invalid WSAPROTOCOL_INFOW value");
+ return;
+ }
+
+ sk = WSASocketW(FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ info, 0, 0);
+ if (sk == INVALID_SOCKET) {
+ error_setg_win32(errp, WSAGetLastError(), "Couldn't import socket");
+ return;
+ }
+
+ fd = _open_osfhandle(sk, _O_BINARY);
+ if (fd < 0) {
+ error_setg_errno(errp, errno, "Failed to associate a FD with the SOCKET");
+ closesocket(sk);
+ return;
+ }
+
+ monitor_add_fd(monitor_cur(), fd, fdname, errp);
+}
+#endif
+
+
void qmp_remove_fd(int64_t fdset_id, bool has_fd, int64_t fd, Error **errp)
{
MonFdset *mon_fdset;
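The monitor_add_fd() helper factored out above is careful to swap the stored descriptor while holding mon_lock but to call close() only after dropping it, since close() may block. A generic sketch of that swap-under-lock, close-outside pattern with a pthread mutex (the names here are hypothetical, not QEMU APIs):

    #include <pthread.h>
    #include <unistd.h>

    struct named_fd {
        pthread_mutex_t lock;
        int fd;                      /* -1 if unset */
    };

    /* Replace the stored fd; any previous fd is closed outside the lock. */
    static void named_fd_replace(struct named_fd *n, int new_fd)
    {
        int old_fd;

        pthread_mutex_lock(&n->lock);
        old_fd = n->fd;
        n->fd = new_fd;
        pthread_mutex_unlock(&n->lock);

        if (old_fd >= 0) {
            close(old_fd);           /* potentially slow; done unlocked */
        }
    }

    int main(void)
    {
        struct named_fd n = { PTHREAD_MUTEX_INITIALIZER, -1 };
        named_fd_replace(&n, dup(0));
        named_fd_replace(&n, dup(1)); /* closes the earlier duplicate */
        return 0;
    }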
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index 34bd8c6..6c559b4 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -192,6 +192,7 @@
hmp_handle_error(mon, err);
}
+#ifdef CONFIG_POSIX
void hmp_getfd(Monitor *mon, const QDict *qdict)
{
const char *fdname = qdict_get_str(qdict, "fdname");
@@ -200,6 +201,7 @@
qmp_getfd(fdname, &err);
hmp_handle_error(mon, err);
}
+#endif
void hmp_closefd(Monitor *mon, const QDict *qdict)
{
diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c
index 859012a..b0f948d 100644
--- a/monitor/qmp-cmds.c
+++ b/monitor/qmp-cmds.c
@@ -14,6 +14,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/sockets.h"
#include "monitor-internal.h"
#include "monitor/qdev.h"
#include "monitor/qmp-helpers.h"
@@ -139,6 +140,12 @@
return;
}
+ if (!fd_is_socket(fd)) {
+ error_setg(errp, "parameter @fdname must name a socket");
+ close(fd);
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(protocol_table); i++) {
if (!strcmp(protocol, protocol_table[i].name)) {
if (!protocol_table[i].add_client(fd, has_skipauth, skipauth,
diff --git a/net/dgram.c b/net/dgram.c
index 9f7bf38..48f653b 100644
--- a/net/dgram.c
+++ b/net/dgram.c
@@ -230,7 +230,7 @@
return fd;
fail:
if (fd >= 0) {
- closesocket(fd);
+ close(fd);
}
return -1;
}
@@ -352,7 +352,7 @@
if (convert_host_port(saddr, local->u.inet.host, local->u.inet.port,
errp) < 0) {
g_free(saddr);
- closesocket(fd);
+ close(fd);
return -1;
}
@@ -360,14 +360,14 @@
if (saddr->sin_addr.s_addr == 0) {
error_setg(errp, "can't setup multicast destination address");
g_free(saddr);
- closesocket(fd);
+ close(fd);
return -1;
}
/* clone dgram socket */
newfd = net_dgram_mcast_create(saddr, NULL, errp);
if (newfd < 0) {
g_free(saddr);
- closesocket(fd);
+ close(fd);
return -1;
}
/* clone newfd to fd, close newfd */
@@ -494,14 +494,14 @@
if (ret < 0) {
error_setg_errno(errp, errno,
"can't set socket option SO_REUSEADDR");
- closesocket(fd);
+ close(fd);
return -1;
}
ret = bind(fd, (struct sockaddr *)&laddr_in, sizeof(laddr_in));
if (ret < 0) {
error_setg_errno(errp, errno, "can't bind ip=%s to socket",
inet_ntoa(laddr_in.sin_addr));
- closesocket(fd);
+ close(fd);
return -1;
}
qemu_socket_set_nonblock(fd);
@@ -548,7 +548,7 @@
if (ret < 0) {
error_setg_errno(errp, errno, "can't bind unix=%s to socket",
laddr_un.sun_path);
- closesocket(fd);
+ close(fd);
return -1;
}
qemu_socket_set_nonblock(fd);
diff --git a/net/dump.c b/net/dump.c
index 6a63b15..7d05f16 100644
--- a/net/dump.c
+++ b/net/dump.c
@@ -61,12 +61,13 @@
uint32_t len;
};
-static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt)
+static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt,
+ int offset)
{
struct pcap_sf_pkthdr hdr;
int64_t ts;
int caplen;
- size_t size = iov_size(iov, cnt);
+ size_t size = iov_size(iov, cnt) - offset;
struct iovec dumpiov[cnt + 1];
/* Early return in case of previous error. */
@@ -84,7 +85,7 @@
dumpiov[0].iov_base = &hdr;
dumpiov[0].iov_len = sizeof(hdr);
- cnt = iov_copy(&dumpiov[1], cnt, iov, cnt, 0, caplen);
+ cnt = iov_copy(&dumpiov[1], cnt, iov, cnt, offset, caplen);
if (writev(s->fd, dumpiov, cnt + 1) != sizeof(hdr) + caplen) {
error_report("network dump write error - stopping dump");
@@ -153,8 +154,10 @@
int iovcnt, NetPacketSent *sent_cb)
{
NetFilterDumpState *nfds = FILTER_DUMP(nf);
+ int offset = qemu_get_using_vnet_hdr(nf->netdev) ?
+ qemu_get_vnet_hdr_len(nf->netdev) : 0;
- dump_receive_iov(&nfds->ds, iov, iovcnt);
+ dump_receive_iov(&nfds->ds, iov, iovcnt, offset);
return 0;
}
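The filter-dump change above skips a leading virtio-net header, when the peer uses one, by subtracting its length from the iovec size and starting the copy at that offset. A small sketch of the size computation, with iov_size() reimplemented locally as the sum of iov_len and the header length standing in for qemu_get_vnet_hdr_len():

    #include <stdio.h>
    #include <sys/uio.h>

    /* Local stand-in for QEMU's iov_size(): total bytes in the iovec. */
    static size_t iov_size(const struct iovec *iov, int cnt)
    {
        size_t total = 0;
        for (int i = 0; i < cnt; i++) {
            total += iov[i].iov_len;
        }
        return total;
    }

    int main(void)
    {
        char vnet_hdr[12] = { 0 };          /* hypothetical virtio-net header */
        char payload[60]  = "ethernet frame bytes ...";
        struct iovec iov[2] = {
            { .iov_base = vnet_hdr, .iov_len = sizeof(vnet_hdr) },
            { .iov_base = payload,  .iov_len = sizeof(payload)  },
        };
        int offset = sizeof(vnet_hdr);

        /* Only the bytes after the header belong in the pcap record. */
        printf("capture size = %zu\n", iov_size(iov, 2) - offset);
        return 0;
    }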
diff --git a/net/eth.c b/net/eth.c
index f074b2f..70bcd8e 100644
--- a/net/eth.c
+++ b/net/eth.c
@@ -137,8 +137,7 @@
}
void eth_get_protocols(const struct iovec *iov, int iovcnt,
- bool *isip4, bool *isip6,
- bool *isudp, bool *istcp,
+ bool *hasip4, bool *hasip6,
size_t *l3hdr_off,
size_t *l4hdr_off,
size_t *l5hdr_off,
@@ -151,8 +150,10 @@
size_t l2hdr_len = eth_get_l2_hdr_length_iov(iov, iovcnt);
size_t input_size = iov_size(iov, iovcnt);
size_t copied;
+ uint8_t ip_p;
- *isip4 = *isip6 = *isudp = *istcp = false;
+ *hasip4 = *hasip6 = false;
+ l4hdr_info->proto = ETH_L4_HDR_PROTO_INVALID;
proto = eth_get_l3_proto(iov, iovcnt, l2hdr_len);
@@ -166,68 +167,62 @@
}
copied = iov_to_buf(iov, iovcnt, l2hdr_len, iphdr, sizeof(*iphdr));
-
- *isip4 = true;
-
- if (copied < sizeof(*iphdr)) {
+ if (copied < sizeof(*iphdr) ||
+ IP_HEADER_VERSION(iphdr) != IP_HEADER_VERSION_4) {
return;
}
- if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
- if (iphdr->ip_p == IP_PROTO_TCP) {
- *istcp = true;
- } else if (iphdr->ip_p == IP_PROTO_UDP) {
- *isudp = true;
- }
- }
-
+ *hasip4 = true;
+ ip_p = iphdr->ip_p;
ip4hdr_info->fragment = IP4_IS_FRAGMENT(iphdr);
*l4hdr_off = l2hdr_len + IP_HDR_GET_LEN(iphdr);
fragment = ip4hdr_info->fragment;
} else if (proto == ETH_P_IPV6) {
-
- *isip6 = true;
- if (eth_parse_ipv6_hdr(iov, iovcnt, l2hdr_len,
- ip6hdr_info)) {
- if (ip6hdr_info->l4proto == IP_PROTO_TCP) {
- *istcp = true;
- } else if (ip6hdr_info->l4proto == IP_PROTO_UDP) {
- *isudp = true;
- }
- } else {
+ if (!eth_parse_ipv6_hdr(iov, iovcnt, l2hdr_len, ip6hdr_info)) {
return;
}
+ *hasip6 = true;
+ ip_p = ip6hdr_info->l4proto;
*l4hdr_off = l2hdr_len + ip6hdr_info->full_hdr_len;
fragment = ip6hdr_info->fragment;
+ } else {
+ return;
}
- if (!fragment) {
- if (*istcp) {
- *istcp = _eth_copy_chunk(input_size,
- iov, iovcnt,
- *l4hdr_off, sizeof(l4hdr_info->hdr.tcp),
- &l4hdr_info->hdr.tcp);
+ if (fragment) {
+ return;
+ }
- if (*istcp) {
- *l5hdr_off = *l4hdr_off +
- TCP_HEADER_DATA_OFFSET(&l4hdr_info->hdr.tcp);
+ switch (ip_p) {
+ case IP_PROTO_TCP:
+ if (_eth_copy_chunk(input_size,
+ iov, iovcnt,
+ *l4hdr_off, sizeof(l4hdr_info->hdr.tcp),
+ &l4hdr_info->hdr.tcp)) {
+ l4hdr_info->proto = ETH_L4_HDR_PROTO_TCP;
+ *l5hdr_off = *l4hdr_off +
+ TCP_HEADER_DATA_OFFSET(&l4hdr_info->hdr.tcp);
- l4hdr_info->has_tcp_data =
- _eth_tcp_has_data(proto == ETH_P_IP,
- &ip4hdr_info->ip4_hdr,
- &ip6hdr_info->ip6_hdr,
- *l4hdr_off - *l3hdr_off,
- &l4hdr_info->hdr.tcp);
- }
- } else if (*isudp) {
- *isudp = _eth_copy_chunk(input_size,
- iov, iovcnt,
- *l4hdr_off, sizeof(l4hdr_info->hdr.udp),
- &l4hdr_info->hdr.udp);
+ l4hdr_info->has_tcp_data =
+ _eth_tcp_has_data(proto == ETH_P_IP,
+ &ip4hdr_info->ip4_hdr,
+ &ip6hdr_info->ip6_hdr,
+ *l4hdr_off - *l3hdr_off,
+ &l4hdr_info->hdr.tcp);
+ }
+ break;
+
+ case IP_PROTO_UDP:
+ if (_eth_copy_chunk(input_size,
+ iov, iovcnt,
+ *l4hdr_off, sizeof(l4hdr_info->hdr.udp),
+ &l4hdr_info->hdr.udp)) {
+ l4hdr_info->proto = ETH_L4_HDR_PROTO_UDP;
*l5hdr_off = *l4hdr_off + sizeof(l4hdr_info->hdr.udp);
}
+ break;
}
}
@@ -315,33 +310,6 @@
}
void
-eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
- void *l3hdr, size_t l3hdr_len,
- size_t l3payload_len,
- size_t frag_offset, bool more_frags)
-{
- const struct iovec l2vec = {
- .iov_base = (void *) l2hdr,
- .iov_len = l2hdr_len
- };
-
- if (eth_get_l3_proto(&l2vec, 1, l2hdr_len) == ETH_P_IP) {
- uint16_t orig_flags;
- struct ip_header *iphdr = (struct ip_header *) l3hdr;
- uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;
- uint16_t new_ip_off;
-
- assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
- assert((frag_off_units & ~IP_OFFMASK) == 0);
-
- orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK|IP_MF);
- new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
- iphdr->ip_off = cpu_to_be16(new_ip_off);
- iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
- }
-}
-
-void
eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
{
struct ip_header *iphdr = (struct ip_header *) l3hdr;
diff --git a/net/net.c b/net/net.c
index ebc7ce0..6492ad5 100644
--- a/net/net.c
+++ b/net/net.c
@@ -513,6 +513,15 @@
return nc->info->has_vnet_hdr_len(nc, len);
}
+bool qemu_get_using_vnet_hdr(NetClientState *nc)
+{
+ if (!nc || !nc->info->get_using_vnet_hdr) {
+ return false;
+ }
+
+ return nc->info->get_using_vnet_hdr(nc);
+}
+
void qemu_using_vnet_hdr(NetClientState *nc, bool enable)
{
if (!nc || !nc->info->using_vnet_hdr) {
@@ -532,6 +541,15 @@
nc->info->set_offload(nc, csum, tso4, tso6, ecn, ufo);
}
+int qemu_get_vnet_hdr_len(NetClientState *nc)
+{
+ if (!nc || !nc->info->get_vnet_hdr_len) {
+ return 0;
+ }
+
+ return nc->info->get_vnet_hdr_len(nc);
+}
+
void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
{
if (!nc || !nc->info->set_vnet_hdr_len) {
diff --git a/net/slirp.c b/net/slirp.c
index 2ee3f1a..c33b3e0 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -248,12 +248,24 @@
static void net_slirp_register_poll_fd(int fd, void *opaque)
{
- qemu_fd_register(fd);
+#ifdef WIN32
+ AioContext *ctxt = qemu_get_aio_context();
+
+ if (WSAEventSelect(fd, event_notifier_get_handle(&ctxt->notifier),
+ FD_READ | FD_ACCEPT | FD_CLOSE |
+ FD_CONNECT | FD_WRITE | FD_OOB) != 0) {
+ error_setg_win32(&error_warn, WSAGetLastError(), "failed to WSAEventSelect()");
+ }
+#endif
}
static void net_slirp_unregister_poll_fd(int fd, void *opaque)
{
- /* no qemu_fd_unregister */
+#ifdef WIN32
+ if (WSAEventSelect(fd, NULL, 0) != 0) {
+ error_setg_win32(&error_warn, WSAGetLastError(), "failed to WSAEventSelect()");
+ }
+#endif
}
static void net_slirp_notify(void *opaque)
diff --git a/net/socket.c b/net/socket.c
index 2fc5696..ba6e5b0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -172,7 +172,7 @@
if (s->listen_fd != -1) {
qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
}
- closesocket(s->fd);
+ close(s->fd);
s->fd = -1;
net_socket_rs_init(&s->rs, net_socket_rs_finalize, false);
@@ -299,7 +299,7 @@
return fd;
fail:
if (fd >= 0)
- closesocket(fd);
+ close(fd);
return -1;
}
@@ -314,7 +314,7 @@
}
if (s->listen_fd != -1) {
qemu_set_fd_handler(s->listen_fd, NULL, NULL, NULL);
- closesocket(s->listen_fd);
+ close(s->listen_fd);
s->listen_fd = -1;
}
}
@@ -399,7 +399,7 @@
return s;
err:
- closesocket(fd);
+ close(fd);
return NULL;
}
@@ -456,7 +456,7 @@
if(getsockopt(fd, SOL_SOCKET, SO_TYPE, (char *)&so_type,
(socklen_t *)&optlen)< 0) {
error_setg(errp, "can't get socket option SO_TYPE");
- closesocket(fd);
+ close(fd);
return NULL;
}
switch(so_type) {
@@ -468,7 +468,7 @@
default:
error_setg(errp, "socket type=%d for fd=%d must be either"
" SOCK_DGRAM or SOCK_STREAM", so_type, fd);
- closesocket(fd);
+ close(fd);
}
return NULL;
}
@@ -526,13 +526,13 @@
if (ret < 0) {
error_setg_errno(errp, errno, "can't bind ip=%s to socket",
inet_ntoa(saddr.sin_addr));
- closesocket(fd);
+ close(fd);
return -1;
}
ret = listen(fd, 0);
if (ret < 0) {
error_setg_errno(errp, errno, "can't listen on socket");
- closesocket(fd);
+ close(fd);
return -1;
}
@@ -579,7 +579,7 @@
break;
} else {
error_setg_errno(errp, errno, "can't connect socket");
- closesocket(fd);
+ close(fd);
return -1;
}
} else {
@@ -671,14 +671,14 @@
if (ret < 0) {
error_setg_errno(errp, errno,
"can't set socket option SO_REUSEADDR");
- closesocket(fd);
+ close(fd);
return -1;
}
ret = bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
if (ret < 0) {
error_setg_errno(errp, errno, "can't bind ip=%s to socket",
inet_ntoa(laddr.sin_addr));
- closesocket(fd);
+ close(fd);
return -1;
}
qemu_socket_set_nonblock(fd);
diff --git a/net/tap.c b/net/tap.c
index 7d7bc1d..1bf085d 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -255,6 +255,13 @@
return !!tap_probe_vnet_hdr_len(s->fd, len);
}
+static int tap_get_vnet_hdr_len(NetClientState *nc)
+{
+ TAPState *s = DO_UPCAST(TAPState, nc, nc);
+
+ return s->host_vnet_hdr_len;
+}
+
static void tap_set_vnet_hdr_len(NetClientState *nc, int len)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@@ -268,6 +275,13 @@
s->host_vnet_hdr_len = len;
}
+static bool tap_get_using_vnet_hdr(NetClientState *nc)
+{
+ TAPState *s = DO_UPCAST(TAPState, nc, nc);
+
+ return s->using_vnet_hdr;
+}
+
static void tap_using_vnet_hdr(NetClientState *nc, bool using_vnet_hdr)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@@ -372,8 +386,10 @@
.has_ufo = tap_has_ufo,
.has_vnet_hdr = tap_has_vnet_hdr,
.has_vnet_hdr_len = tap_has_vnet_hdr_len,
+ .get_using_vnet_hdr = tap_get_using_vnet_hdr,
.using_vnet_hdr = tap_using_vnet_hdr,
.set_offload = tap_set_offload,
+ .get_vnet_hdr_len = tap_get_vnet_hdr_len,
.set_vnet_hdr_len = tap_set_vnet_hdr_len,
.set_vnet_le = tap_set_vnet_le,
.set_vnet_be = tap_set_vnet_be,
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index de5ed8f..99904a0 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -26,12 +26,15 @@
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
+#include "migration/migration.h"
+#include "migration/misc.h"
#include "hw/virtio/vhost.h"
/* Todo:need to add the multiqueue support here */
typedef struct VhostVDPAState {
NetClientState nc;
struct vhost_vdpa vhost_vdpa;
+ Notifier migration_state;
VHostNetState *vhost_net;
/* Control commands shadow buffers */
@@ -98,6 +101,8 @@
BIT_ULL(VIRTIO_NET_F_MQ) |
BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
+ /* VHOST_F_LOG_ALL is exposed by SVQ */
+ BIT_ULL(VHOST_F_LOG_ALL) |
BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
BIT_ULL(VIRTIO_NET_F_STANDBY);
@@ -178,13 +183,9 @@
static void vhost_vdpa_cleanup(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
- struct vhost_dev *dev = &s->vhost_net->dev;
qemu_vfree(s->cvq_cmd_out_buffer);
qemu_vfree(s->status);
- if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
- g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
- }
if (s->vhost_net) {
vhost_net_cleanup(s->vhost_net);
g_free(s->vhost_net);
@@ -234,10 +235,126 @@
return size;
}
+/** From any vdpa net client, get the netclient of the first queue pair */
+static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
+{
+ NICState *nic = qemu_get_nic(s->nc.peer);
+ NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
+
+ return DO_UPCAST(VhostVDPAState, nc, nc0);
+}
+
+static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
+{
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+ VirtIONet *n;
+ VirtIODevice *vdev;
+ int data_queue_pairs, cvq, r;
+
+ /* We are only called on the first data vqs and only if x-svq is not set */
+ if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
+ return;
+ }
+
+ vdev = v->dev->vdev;
+ n = VIRTIO_NET(vdev);
+ if (!n->vhost_started) {
+ return;
+ }
+
+ data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
+ cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
+ n->max_ncs - n->max_queue_pairs : 0;
+ /*
+ * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
+ * in the future and resume the device if read-only operations between
+ * suspend and reset goes wrong.
+ */
+ vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);
+
+ /* Start will check migration setup_or_active to configure or not SVQ */
+ r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
+ if (unlikely(r < 0)) {
+ error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
+ }
+}
+
+static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
+{
+ MigrationState *migration = data;
+ VhostVDPAState *s = container_of(notifier, VhostVDPAState,
+ migration_state);
+
+ if (migration_in_setup(migration)) {
+ vhost_vdpa_net_log_global_enable(s, true);
+ } else if (migration_has_failed(migration)) {
+ vhost_vdpa_net_log_global_enable(s, false);
+ }
+}
+
+static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
+{
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+
+ add_migration_state_change_notifier(&s->migration_state);
+ if (v->shadow_vqs_enabled) {
+ v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
+ }
+}
+
+static int vhost_vdpa_net_data_start(NetClientState *nc)
+{
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+
+ assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+ if (s->always_svq ||
+ migration_is_setup_or_active(migrate_get_current()->state)) {
+ v->shadow_vqs_enabled = true;
+ v->shadow_data = true;
+ } else {
+ v->shadow_vqs_enabled = false;
+ v->shadow_data = false;
+ }
+
+ if (v->index == 0) {
+ vhost_vdpa_net_data_start_first(s);
+ return 0;
+ }
+
+ if (v->shadow_vqs_enabled) {
+ VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
+ v->iova_tree = s0->vhost_vdpa.iova_tree;
+ }
+
+ return 0;
+}
+
+static void vhost_vdpa_net_client_stop(NetClientState *nc)
+{
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_dev *dev;
+
+ assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+ if (s->vhost_vdpa.index == 0) {
+ remove_migration_state_change_notifier(&s->migration_state);
+ }
+
+ dev = s->vhost_vdpa.dev;
+ if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
+ g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+ }
+}
+
static NetClientInfo net_vhost_vdpa_info = {
.type = NET_CLIENT_DRIVER_VHOST_VDPA,
.size = sizeof(VhostVDPAState),
.receive = vhost_vdpa_receive,
+ .start = vhost_vdpa_net_data_start,
+ .stop = vhost_vdpa_net_client_stop,
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
.has_ufo = vhost_vdpa_has_ufo,
@@ -351,7 +468,7 @@
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
- VhostVDPAState *s;
+ VhostVDPAState *s, *s0;
struct vhost_vdpa *v;
uint64_t backend_features;
int64_t cvq_group;
@@ -362,11 +479,12 @@
s = DO_UPCAST(VhostVDPAState, nc, nc);
v = &s->vhost_vdpa;
- v->shadow_data = s->always_svq;
+ s0 = vhost_vdpa_net_first_nc_vdpa(s);
+ v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
v->shadow_vqs_enabled = s->always_svq;
s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
- if (s->always_svq) {
+ if (s->vhost_vdpa.shadow_data) {
/* SVQ is already configured for all virtqueues */
goto out;
}
@@ -415,8 +533,6 @@
return r;
}
- v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
- v->iova_range.last);
v->shadow_vqs_enabled = true;
s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
@@ -425,6 +541,26 @@
return 0;
}
+ if (s0->vhost_vdpa.iova_tree) {
+ /*
+ * SVQ is already configured for all virtqueues. Reuse IOVA tree for
+ * simplicity, whether CVQ shares ASID with guest or not, because:
+ * - Memory listener needs access to guest's memory addresses allocated
+ * in the IOVA tree.
+ * - There should be plenty of IOVA address space for both ASID not to
+ * worry about collisions between them. Guest's translations are
+ * still validated with virtio virtqueue_pop so there is no risk for
+ * the guest to access memory that it shouldn't.
+ *
+ * To allocate an iova tree per ASID is doable but it complicates the
+ * code and it is not worth it for the moment.
+ */
+ v->iova_tree = s0->vhost_vdpa.iova_tree;
+ } else {
+ v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
+ }
+
r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
vhost_vdpa_net_cvq_cmd_page_len(), false);
if (unlikely(r < 0)) {
@@ -449,15 +585,9 @@
if (s->vhost_vdpa.shadow_vqs_enabled) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
- if (!s->always_svq) {
- /*
- * If only the CVQ is shadowed we can delete this safely.
- * If all the VQs are shadows this will be needed by the time the
- * device is started again to register SVQ vrings and similar.
- */
- g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
- }
}
+
+ vhost_vdpa_net_client_stop(nc);
}
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
@@ -668,7 +798,7 @@
bool is_datapath,
bool svq,
struct vhost_vdpa_iova_range iova_range,
- VhostIOVATree *iova_tree)
+ uint64_t features)
{
NetClientState *nc = NULL;
VhostVDPAState *s;
@@ -687,11 +817,14 @@
s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
s->always_svq = svq;
+ s->migration_state.notify = vdpa_net_migration_state_notifier;
s->vhost_vdpa.shadow_vqs_enabled = svq;
s->vhost_vdpa.iova_range = iova_range;
s->vhost_vdpa.shadow_data = svq;
- s->vhost_vdpa.iova_tree = iova_tree;
- if (!is_datapath) {
+ if (queue_pair_index == 0) {
+ vhost_vdpa_net_valid_svq_features(features,
+ &s->vhost_vdpa.migration_blocker);
+ } else if (!is_datapath) {
s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
vhost_vdpa_net_cvq_cmd_page_len());
memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
@@ -701,6 +834,15 @@
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
s->vhost_vdpa.shadow_vq_ops_opaque = s;
+
+ /*
+ * TODO: We cannot migrate devices with CVQ as there is no way to set
+ * the device state (MAC, MQ, etc) before starting the datapath.
+ *
+ * Migration blocker ownership now belongs to s->vhost_vdpa.
+ */
+ error_setg(&s->vhost_vdpa.migration_blocker,
+ "net vdpa cannot migrate with CVQ feature");
}
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {
@@ -760,7 +902,6 @@
uint64_t features;
int vdpa_device_fd;
g_autofree NetClientState **ncs = NULL;
- g_autoptr(VhostIOVATree) iova_tree = NULL;
struct vhost_vdpa_iova_range iova_range;
NetClientState *nc;
int queue_pairs, r, i = 0, has_cvq = 0;
@@ -812,12 +953,8 @@
goto err;
}
- if (opts->x_svq) {
- if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
- goto err_svq;
- }
-
- iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
+ if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
+ goto err;
}
ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
@@ -825,7 +962,7 @@
for (i = 0; i < queue_pairs; i++) {
ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 2, true, opts->x_svq,
- iova_range, iova_tree);
+ iova_range, features);
if (!ncs[i])
goto err;
}
@@ -833,13 +970,11 @@
if (has_cvq) {
nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
vdpa_device_fd, i, 1, false,
- opts->x_svq, iova_range, iova_tree);
+ opts->x_svq, iova_range, features);
if (!nc)
goto err;
}
- /* iova_tree ownership belongs to last NetClientState */
- g_steal_pointer(&iova_tree);
return 0;
err:
@@ -849,7 +984,6 @@
}
}
-err_svq:
qemu_close(vdpa_device_fd);
return -1;
diff --git a/pc-bios/README b/pc-bios/README
index 3702ed4..c555dd3 100644
--- a/pc-bios/README
+++ b/pc-bios/README
@@ -50,8 +50,8 @@
variable store templates built from the TianoCore community's EFI Development
Kit II project
<https://github.com/tianocore/tianocore.github.io/wiki/EDK-II>. The images
- were built at git tag "edk2-stable202008". The firmware binaries bundle parts
- of the OpenSSL project, at git tag "OpenSSL_1_1_1g" (the OpenSSL tag is a
+ were built at git tag "edk2-stable202302". The firmware binaries bundle parts
+ of the OpenSSL project, at git tag "OpenSSL_1_1_1s" (the OpenSSL tag is a
function of the edk2 tag). Parts of the Berkeley SoftFloat library are
bundled as well, at Release 3e plus a subsequent typo fix (commit
b64af41c3276f97f0e181920400ee056b9c88037), as an OpenSSL dependency on 32-bit
diff --git a/pc-bios/bios-256k.bin b/pc-bios/bios-256k.bin
index 211b2a4..f70aa72 100644
--- a/pc-bios/bios-256k.bin
+++ b/pc-bios/bios-256k.bin
Binary files differ
diff --git a/pc-bios/bios-microvm.bin b/pc-bios/bios-microvm.bin
index 6204a71..94792cf 100644
--- a/pc-bios/bios-microvm.bin
+++ b/pc-bios/bios-microvm.bin
Binary files differ
diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin
index 12d6a03..6a196cf 100644
--- a/pc-bios/bios.bin
+++ b/pc-bios/bios.bin
Binary files differ
diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2
index 0262f5b..4bc824e 100644
--- a/pc-bios/edk2-aarch64-code.fd.bz2
+++ b/pc-bios/edk2-aarch64-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2
index 4ca97b4..7899fca 100644
--- a/pc-bios/edk2-arm-code.fd.bz2
+++ b/pc-bios/edk2-arm-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2
index 6e02c9b..a68ae4f 100644
--- a/pc-bios/edk2-i386-code.fd.bz2
+++ b/pc-bios/edk2-i386-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2
index a4b1cc9..91936eb 100644
--- a/pc-bios/edk2-i386-secure-code.fd.bz2
+++ b/pc-bios/edk2-i386-secure-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-riscv.fd.bz2 b/pc-bios/edk2-riscv.fd.bz2
new file mode 100644
index 0000000..ef566b3
--- /dev/null
+++ b/pc-bios/edk2-riscv.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2
index 37bfb0d..35c2340 100644
--- a/pc-bios/edk2-x86_64-code.fd.bz2
+++ b/pc-bios/edk2-x86_64-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2
index 1d65c61..742abf0 100644
--- a/pc-bios/edk2-x86_64-microvm.fd.bz2
+++ b/pc-bios/edk2-x86_64-microvm.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2
index 76dc6d5..311b7b9 100644
--- a/pc-bios/edk2-x86_64-secure-code.fd.bz2
+++ b/pc-bios/edk2-x86_64-secure-code.fd.bz2
Binary files differ
diff --git a/pc-bios/openbios-ppc b/pc-bios/openbios-ppc
index d8203a5..4af6002 100644
--- a/pc-bios/openbios-ppc
+++ b/pc-bios/openbios-ppc
Binary files differ
diff --git a/pc-bios/openbios-sparc32 b/pc-bios/openbios-sparc32
index 118b1cc..41b6a60 100644
--- a/pc-bios/openbios-sparc32
+++ b/pc-bios/openbios-sparc32
Binary files differ
diff --git a/pc-bios/openbios-sparc64 b/pc-bios/openbios-sparc64
index 846cb48..902b4b3 100644
--- a/pc-bios/openbios-sparc64
+++ b/pc-bios/openbios-sparc64
Binary files differ
diff --git a/pc-bios/optionrom/optionrom.h b/pc-bios/optionrom/optionrom.h
index 8d74c0d..7bcdf0e 100644
--- a/pc-bios/optionrom/optionrom.h
+++ b/pc-bios/optionrom/optionrom.h
@@ -34,8 +34,8 @@
#define FW_CFG_SETUP_SIZE 0x17
#define FW_CFG_SETUP_DATA 0x18
-#define BIOS_CFG_IOPORT_CFG 0x510
-#define BIOS_CFG_IOPORT_DATA 0x511
+#define BIOS_CFG_IOPORT_CFG 0x510
+#define BIOS_CFG_IOPORT_DATA 0x511
#define FW_CFG_DMA_CTL_ERROR 0x01
#define FW_CFG_DMA_CTL_READ 0x02
@@ -49,65 +49,65 @@
#define BIOS_CFG_DMA_ADDR_LOW 0x518
/* Break the translation block flow so -d cpu shows us values */
-#define DEBUG_HERE \
- jmp 1f; \
- 1:
-
+#define DEBUG_HERE \
+ jmp 1f; \
+ 1:
+
/*
* Read a variable from the fw_cfg device.
- * Clobbers: %edx
- * Out: %eax
+ * Clobbers: %edx
+ * Out: %eax
*/
.macro read_fw VAR
- mov $\VAR, %ax
- mov $BIOS_CFG_IOPORT_CFG, %dx
- outw %ax, (%dx)
- mov $BIOS_CFG_IOPORT_DATA, %dx
- inb (%dx), %al
- shl $8, %eax
- inb (%dx), %al
- shl $8, %eax
- inb (%dx), %al
- shl $8, %eax
- inb (%dx), %al
- bswap %eax
+ mov $\VAR, %ax
+ mov $BIOS_CFG_IOPORT_CFG, %dx
+ outw %ax, (%dx)
+ mov $BIOS_CFG_IOPORT_DATA, %dx
+ inb (%dx), %al
+ shl $8, %eax
+ inb (%dx), %al
+ shl $8, %eax
+ inb (%dx), %al
+ shl $8, %eax
+ inb (%dx), %al
+ bswap %eax
.endm
/*
* Read data from the fw_cfg device using DMA.
- * Clobbers: %edx, %eax, ADDR, SIZE, memory[%esp-16] to memory[%esp]
+ * Clobbers: %edx, %eax, ADDR, SIZE, memory[%esp-16] to memory[%esp]
*/
.macro read_fw_dma VAR, SIZE, ADDR
/* Address */
- bswapl \ADDR
- pushl \ADDR
+ bswapl \ADDR
+ pushl \ADDR
- /* We only support 32 bit target addresses */
- xorl %eax, %eax
- pushl %eax
- mov $BIOS_CFG_DMA_ADDR_HIGH, %dx
- outl %eax, (%dx)
+ /* We only support 32 bit target addresses */
+ xorl %eax, %eax
+ pushl %eax
+ mov $BIOS_CFG_DMA_ADDR_HIGH, %dx
+ outl %eax, (%dx)
- /* Size */
- bswapl \SIZE
- pushl \SIZE
+ /* Size */
+ bswapl \SIZE
+ pushl \SIZE
/* Control */
- movl $(\VAR << 16) | (FW_CFG_DMA_CTL_READ | FW_CFG_DMA_CTL_SELECT), %eax
- bswapl %eax
- pushl %eax
+ movl $(\VAR << 16) | (FW_CFG_DMA_CTL_READ | FW_CFG_DMA_CTL_SELECT), %eax
+ bswapl %eax
+ pushl %eax
- movl %esp, %eax /* Address of the struct we generated */
- bswapl %eax
- mov $BIOS_CFG_DMA_ADDR_LOW, %dx
- outl %eax, (%dx) /* Initiate DMA */
+ movl %esp, %eax /* Address of the struct we generated */
+ bswapl %eax
+ mov $BIOS_CFG_DMA_ADDR_LOW, %dx
+ outl %eax, (%dx) /* Initiate DMA */
-1: mov (%esp), %eax /* Wait for completion */
- bswapl %eax
- testl $~FW_CFG_DMA_CTL_ERROR, %eax
- jnz 1b
- addl $16, %esp
+1: mov (%esp), %eax /* Wait for completion */
+ bswapl %eax
+ testl $~FW_CFG_DMA_CTL_ERROR, %eax
+ jnz 1b
+ addl $16, %esp
.endm
@@ -115,116 +115,116 @@
* Read a blob from the fw_cfg device using DMA
* Requires _ADDR, _SIZE and _DATA values for the parameter.
*
- * Clobbers: %eax, %edx, %es, %ecx, %edi and adresses %esp-20 to %esp
+ * Clobbers: %eax, %edx, %es, %ecx, %edi and adresses %esp-20 to %esp
*/
#ifdef USE_FW_CFG_DMA
-#define read_fw_blob_dma(var) \
- read_fw var ## _SIZE; \
- mov %eax, %ecx; \
- read_fw var ## _ADDR; \
- mov %eax, %edi ;\
- read_fw_dma var ## _DATA, %ecx, %edi
+#define read_fw_blob_dma(var) \
+ read_fw var ## _SIZE; \
+ mov %eax, %ecx; \
+ read_fw var ## _ADDR; \
+ mov %eax, %edi ; \
+ read_fw_dma var ## _DATA, %ecx, %edi
#else
#define read_fw_blob_dma(var) read_fw_blob(var)
#endif
-#define read_fw_blob_pre(var) \
- read_fw var ## _SIZE; \
- mov %eax, %ecx; \
- mov $var ## _DATA, %ax; \
- mov $BIOS_CFG_IOPORT_CFG, %edx; \
- outw %ax, (%dx); \
- mov $BIOS_CFG_IOPORT_DATA, %dx; \
- cld
+#define read_fw_blob_pre(var) \
+ read_fw var ## _SIZE; \
+ mov %eax, %ecx; \
+ mov $var ## _DATA, %ax; \
+ mov $BIOS_CFG_IOPORT_CFG, %edx; \
+ outw %ax, (%dx); \
+ mov $BIOS_CFG_IOPORT_DATA, %dx; \
+ cld
/*
* Read a blob from the fw_cfg device.
* Requires _ADDR, _SIZE and _DATA values for the parameter.
*
- * Clobbers: %eax, %edx, %es, %ecx, %edi
+ * Clobbers: %eax, %edx, %es, %ecx, %edi
*/
-#define read_fw_blob(var) \
- read_fw var ## _ADDR; \
- mov %eax, %edi; \
- read_fw_blob_pre(var); \
- /* old as(1) doesn't like this insn so emit the bytes instead: \
- rep insb (%dx), %es:(%edi); \
- */ \
- .dc.b 0xf3,0x6c
+#define read_fw_blob(var) \
+ read_fw var ## _ADDR; \
+ mov %eax, %edi; \
+ read_fw_blob_pre(var); \
+ /* old as(1) doesn't like this insn so emit the bytes instead: \
+ rep insb (%dx), %es:(%edi); \
+ */ \
+ .dc.b 0xf3,0x6c
/*
* Read a blob from the fw_cfg device in forced addr32 mode.
* Requires _ADDR, _SIZE and _DATA values for the parameter.
*
- * Clobbers: %eax, %edx, %es, %ecx, %edi
+ * Clobbers: %eax, %edx, %es, %ecx, %edi
*/
-#define read_fw_blob_addr32(var) \
- read_fw var ## _ADDR; \
- mov %eax, %edi; \
- read_fw_blob_pre(var); \
- /* old as(1) doesn't like this insn so emit the bytes instead: \
- addr32 rep insb (%dx), %es:(%edi); \
- */ \
- .dc.b 0x67,0xf3,0x6c
+#define read_fw_blob_addr32(var) \
+ read_fw var ## _ADDR; \
+ mov %eax, %edi; \
+ read_fw_blob_pre(var); \
+ /* old as(1) doesn't like this insn so emit the bytes instead: \
+ addr32 rep insb (%dx), %es:(%edi); \
+ */ \
+ .dc.b 0x67,0xf3,0x6c
/*
* Read a blob from the fw_cfg device in forced addr32 mode, address is in %edi.
* Requires _SIZE and _DATA values for the parameter.
*
- * Clobbers: %eax, %edx, %edi, %es, %ecx
+ * Clobbers: %eax, %edx, %edi, %es, %ecx
*/
-#define read_fw_blob_addr32_edi(var) \
- read_fw_blob_pre(var); \
- /* old as(1) doesn't like this insn so emit the bytes instead: \
- addr32 rep insb (%dx), %es:(%edi); \
- */ \
- .dc.b 0x67,0xf3,0x6c
+#define read_fw_blob_addr32_edi(var) \
+ read_fw_blob_pre(var); \
+ /* old as(1) doesn't like this insn so emit the bytes instead: \
+ addr32 rep insb (%dx), %es:(%edi); \
+ */ \
+ .dc.b 0x67,0xf3,0x6c
-#define OPTION_ROM_START \
- .code16; \
- .text; \
- .global _start; \
- _start:; \
- .short 0xaa55; \
- .byte (_end - _start) / 512;
+#define OPTION_ROM_START \
+ .code16; \
+ .text; \
+ .global _start; \
+ _start:; \
+ .short 0xaa55; \
+ .byte (_end - _start) / 512;
-#define BOOT_ROM_START \
- OPTION_ROM_START \
- lret; \
- .org 0x18; \
- .short 0; \
- .short _pnph; \
- _pnph: \
- .ascii "$PnP"; \
- .byte 0x01; \
- .byte ( _pnph_len / 16 ); \
- .short 0x0000; \
- .byte 0x00; \
- .byte 0x00; \
- .long 0x00000000; \
- .short _manufacturer; \
- .short _product; \
- .long 0x00000000; \
- .short 0x0000; \
- .short 0x0000; \
- .short _bev; \
- .short 0x0000; \
- .short 0x0000; \
- .equ _pnph_len, . - _pnph; \
- _bev:; \
- /* DS = CS */ \
- movw %cs, %ax; \
- movw %ax, %ds;
+#define BOOT_ROM_START \
+ OPTION_ROM_START \
+ lret; \
+ .org 0x18; \
+ .short 0; \
+ .short _pnph; \
+ _pnph: \
+ .ascii "$PnP"; \
+ .byte 0x01; \
+ .byte ( _pnph_len / 16 ); \
+ .short 0x0000; \
+ .byte 0x00; \
+ .byte 0x00; \
+ .long 0x00000000; \
+ .short _manufacturer; \
+ .short _product; \
+ .long 0x00000000; \
+ .short 0x0000; \
+ .short 0x0000; \
+ .short _bev; \
+ .short 0x0000; \
+ .short 0x0000; \
+ .equ _pnph_len, . - _pnph; \
+ _bev:; \
+ /* DS = CS */ \
+ movw %cs, %ax; \
+ movw %ax, %ds;
-#define OPTION_ROM_END \
- .byte 0; \
- .align 512, 0; \
+#define OPTION_ROM_END \
+ .byte 0; \
+ .align 512, 0; \
_end:
-#define BOOT_ROM_END \
- _manufacturer:; \
- .asciz "QEMU"; \
- _product:; \
- .asciz BOOT_ROM_PRODUCT; \
- OPTION_ROM_END
+#define BOOT_ROM_END \
+ _manufacturer:; \
+ .asciz "QEMU"; \
+ _product:; \
+ .asciz BOOT_ROM_PRODUCT; \
+ OPTION_ROM_END
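The read_fw_dma macro above assembles a 16-byte fw_cfg DMA descriptor on the stack: a big-endian control word with the fw_cfg item selector in its upper 16 bits, a big-endian length, and a big-endian 64-bit address (only 32-bit target addresses are supported, so the high half is written as zero). The following is a minimal sketch of that layout, in Python and outside the ROM itself; the selector, size and address are made-up values, and FW_CFG_DMA_CTL_SELECT is taken from the fw_cfg DMA interface rather than from this hunk.

import struct

FW_CFG_DMA_CTL_SELECT = 0x08   # selector bit per the fw_cfg DMA interface (assumption, not shown above)
FW_CFG_DMA_CTL_READ   = 0x02   # matches FW_CFG_DMA_CTL_READ in optionrom.h

def fw_cfg_dma_descriptor(selector, length, address):
    # control word: fw_cfg item selector in the upper 16 bits, command bits below
    control = (selector << 16) | FW_CFG_DMA_CTL_READ | FW_CFG_DMA_CTL_SELECT
    # FWCfgDmaAccess layout: be32 control, be32 length, be64 address (16 bytes)
    return struct.pack(">IIQ", control, length, address)

# hypothetical values: item 0x19, 512 bytes, load address 0x7c00
assert len(fw_cfg_dma_descriptor(0x19, 512, 0x7c00)) == 16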
diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img
index 554fcbd..c9a5a21 100644
--- a/pc-bios/s390-ccw.img
+++ b/pc-bios/s390-ccw.img
Binary files differ
diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c
index 994e59c..a213744 100644
--- a/pc-bios/s390-ccw/bootmap.c
+++ b/pc-bios/s390-ccw/bootmap.c
@@ -72,42 +72,74 @@
"Bad block size in zIPL section of the 1st record.");
}
-static block_number_t eckd_block_num(EckdCHS *chs)
+static void eckd_format_chs(ExtEckdBlockPtr *ptr, bool ldipl,
+ uint64_t *c,
+ uint64_t *h,
+ uint64_t *s)
+{
+ if (ldipl) {
+ *c = ptr->ldptr.chs.cylinder;
+ *h = ptr->ldptr.chs.head;
+ *s = ptr->ldptr.chs.sector;
+ } else {
+ *c = ptr->bptr.chs.cylinder;
+ *h = ptr->bptr.chs.head;
+ *s = ptr->bptr.chs.sector;
+ }
+}
+
+static block_number_t eckd_chs_to_block(uint64_t c, uint64_t h, uint64_t s)
{
const uint64_t sectors = virtio_get_sectors();
const uint64_t heads = virtio_get_heads();
- const uint64_t cylinder = chs->cylinder
- + ((chs->head & 0xfff0) << 12);
- const uint64_t head = chs->head & 0x000f;
+ const uint64_t cylinder = c + ((h & 0xfff0) << 12);
+ const uint64_t head = h & 0x000f;
const block_number_t block = sectors * heads * cylinder
+ sectors * head
- + chs->sector
- - 1; /* block nr starts with zero */
+ + s - 1; /* block nr starts with zero */
return block;
}
-static bool eckd_valid_address(BootMapPointer *p)
+static block_number_t eckd_block_num(EckdCHS *chs)
{
- const uint64_t head = p->eckd.chs.head & 0x000f;
+ return eckd_chs_to_block(chs->cylinder, chs->head, chs->sector);
+}
+static block_number_t gen_eckd_block_num(ExtEckdBlockPtr *ptr, bool ldipl)
+{
+ uint64_t cyl, head, sec;
+ eckd_format_chs(ptr, ldipl, &cyl, &head, &sec);
+ return eckd_chs_to_block(cyl, head, sec);
+}
+
+static bool eckd_valid_chs(uint64_t cyl, uint64_t head, uint64_t sector)
+{
if (head >= virtio_get_heads()
- || p->eckd.chs.sector > virtio_get_sectors()
- || p->eckd.chs.sector <= 0) {
+ || sector > virtio_get_sectors()
+ || sector <= 0) {
return false;
}
if (!virtio_guessed_disk_nature() &&
- eckd_block_num(&p->eckd.chs) >= virtio_get_blocks()) {
+ eckd_chs_to_block(cyl, head, sector) >= virtio_get_blocks()) {
return false;
}
return true;
}
-static block_number_t load_eckd_segments(block_number_t blk, uint64_t *address)
+static bool eckd_valid_address(ExtEckdBlockPtr *ptr, bool ldipl)
+{
+ uint64_t cyl, head, sec;
+ eckd_format_chs(ptr, ldipl, &cyl, &head, &sec);
+ return eckd_valid_chs(cyl, head, sec);
+}
+
+static block_number_t load_eckd_segments(block_number_t blk, bool ldipl,
+ uint64_t *address)
{
block_number_t block_nr;
- int j, rc;
+ int j, rc, count;
BootMapPointer *bprs = (void *)_bprs;
bool more_data;
@@ -117,7 +149,7 @@
do {
more_data = false;
for (j = 0;; j++) {
- block_nr = eckd_block_num(&bprs[j].xeckd.bptr.chs);
+ block_nr = gen_eckd_block_num(&bprs[j].xeckd, ldipl);
if (is_null_block_number(block_nr)) { /* end of chunk */
break;
}
@@ -129,11 +161,26 @@
break;
}
- IPL_assert(block_size_ok(bprs[j].xeckd.bptr.size),
+ /* List directed pointer does not store block size */
+ IPL_assert(ldipl || block_size_ok(bprs[j].xeckd.bptr.size),
"bad chunk block size");
- IPL_assert(eckd_valid_address(&bprs[j]), "bad chunk ECKD addr");
- if ((bprs[j].xeckd.bptr.count == 0) && unused_space(&(bprs[j+1]),
+ if (!eckd_valid_address(&bprs[j].xeckd, ldipl)) {
+ /*
+ * If an invalid address is found during LD-IPL then break and
+ * retry as CCW
+ */
+ IPL_assert(ldipl, "bad chunk ECKD addr");
+ break;
+ }
+
+ if (ldipl) {
+ count = bprs[j].xeckd.ldptr.count;
+ } else {
+ count = bprs[j].xeckd.bptr.count;
+ }
+
+ if (count == 0 && unused_space(&bprs[j + 1],
sizeof(EckdBlockPtr))) {
/* This is a "continue" pointer.
* This ptr should be the last one in the current
@@ -149,11 +196,10 @@
/* Load (count+1) blocks of code at (block_nr)
* to memory (address).
*/
- rc = virtio_read_many(block_nr, (void *)(*address),
- bprs[j].xeckd.bptr.count+1);
+ rc = virtio_read_many(block_nr, (void *)(*address), count + 1);
IPL_assert(rc == 0, "code chunk read failed");
- *address += (bprs[j].xeckd.bptr.count+1) * virtio_get_block_size();
+ *address += (count + 1) * virtio_get_block_size();
}
} while (more_data);
return block_nr;
@@ -237,8 +283,10 @@
uint64_t address;
BootMapTable *bmt = (void *)sec;
BootMapScript *bms = (void *)sec;
+ /* The S1B block number is NULL_BLOCK_NR if and only if it's an LD-IPL */
+ bool ldipl = (s1b_block_nr == NULL_BLOCK_NR);
- if (menu_is_enabled_zipl()) {
+ if (menu_is_enabled_zipl() && !ldipl) {
loadparm = eckd_get_boot_menu_index(s1b_block_nr);
}
@@ -249,7 +297,7 @@
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
read_block(bmt_block_nr, sec, "Cannot read Boot Map Table");
- block_nr = eckd_block_num(&bmt->entry[loadparm].xeckd.bptr.chs);
+ block_nr = gen_eckd_block_num(&bmt->entry[loadparm].xeckd, ldipl);
IPL_assert(block_nr != -1, "Cannot find Boot Map Table Entry");
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
@@ -264,13 +312,18 @@
}
address = bms->entry[i].address.load_address;
- block_nr = eckd_block_num(&bms->entry[i].blkptr.xeckd.bptr.chs);
+ block_nr = gen_eckd_block_num(&bms->entry[i].blkptr.xeckd, ldipl);
do {
- block_nr = load_eckd_segments(block_nr, &address);
+ block_nr = load_eckd_segments(block_nr, ldipl, &address);
} while (block_nr != -1);
}
+ if (ldipl && bms->entry[i].type != BOOT_SCRIPT_EXEC) {
+ /* Abort LD-IPL and retry as CCW-IPL */
+ return;
+ }
+
IPL_assert(bms->entry[i].type == BOOT_SCRIPT_EXEC,
"Unknown script entry type");
write_reset_psw(bms->entry[i].address.load_address); /* no return */
@@ -380,6 +433,23 @@
/* no return */
}
+static block_number_t eckd_find_bmt(ExtEckdBlockPtr *ptr)
+{
+ block_number_t blockno;
+ uint8_t tmp_sec[MAX_SECTOR_SIZE];
+ BootRecord *br;
+
+ blockno = gen_eckd_block_num(ptr, 0);
+ read_block(blockno, tmp_sec, "Cannot read boot record");
+ br = (BootRecord *)tmp_sec;
+ if (!magic_match(br->magic, ZIPL_MAGIC)) {
+ /* If the boot record is invalid, return and try CCW-IPL instead */
+ return NULL_BLOCK_NR;
+ }
+
+ return gen_eckd_block_num(&br->pgt.xeckd, 1);
+}
+
static void print_eckd_msg(void)
{
char msg[] = "Using ECKD scheme (block size *****), ";
@@ -401,28 +471,43 @@
static void ipl_eckd(void)
{
- XEckdMbr *mbr = (void *)sec;
- LDL_VTOC *vlbl = (void *)sec;
+ IplVolumeLabel *vlbl = (void *)sec;
+ LDL_VTOC *vtoc = (void *)sec;
+ block_number_t ldipl_bmt; /* Boot Map Table for List-Directed IPL */
print_eckd_msg();
- /* Grab the MBR again */
- memset(sec, FREE_SPACE_FILLER, sizeof(sec));
- read_block(0, mbr, "Cannot read block 0 on DASD");
-
- if (magic_match(mbr->magic, IPL1_MAGIC)) {
- ipl_eckd_cdl(); /* only returns in case of error */
- return;
- }
-
- /* LDL/CMS? */
+ /* Block 2 can contain either the CDL VOL1 label or the LDL VTOC */
memset(sec, FREE_SPACE_FILLER, sizeof(sec));
read_block(2, vlbl, "Cannot read block 2");
- if (magic_match(vlbl->magic, CMS1_MAGIC)) {
+ /*
+ * First check for a list-directed-format pointer which would
+ * supersede the CCW pointer.
+ */
+ if (eckd_valid_address((ExtEckdBlockPtr *)&vlbl->f.br, 0)) {
+ ldipl_bmt = eckd_find_bmt((ExtEckdBlockPtr *)&vlbl->f.br);
+ if (ldipl_bmt) {
+ sclp_print("List-Directed\n");
+ /* LD-IPL does not use the S1B block, just make it NULL */
+ run_eckd_boot_script(ldipl_bmt, NULL_BLOCK_NR);
+ /* Only returns on error; retry as CCW-IPL */
+ sclp_print("Retrying IPL ");
+ print_eckd_msg();
+ }
+ memset(sec, FREE_SPACE_FILLER, sizeof(sec));
+ read_block(2, vtoc, "Cannot read block 2");
+ }
+
+ /* Not list-directed */
+ if (magic_match(vtoc->magic, VOL1_MAGIC)) {
+ ipl_eckd_cdl(); /* may return on error */
+ }
+
+ if (magic_match(vtoc->magic, CMS1_MAGIC)) {
ipl_eckd_ldl(ECKD_CMS); /* no return */
}
- if (magic_match(vlbl->magic, LNX1_MAGIC)) {
+ if (magic_match(vtoc->magic, LNX1_MAGIC)) {
ipl_eckd_ldl(ECKD_LDL); /* no return */
}
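The refactoring above splits CHS decoding (eckd_format_chs) from the cylinder/head/sector-to-block arithmetic (eckd_chs_to_block), so the CCW-type bptr and the new list-directed ldptr share one conversion. A minimal sketch of that arithmetic, with made-up disk geometry:

def eckd_chs_to_block(c, h, s, heads, sectors):
    # the upper 12 bits of the head field extend the cylinder number
    cylinder = c + ((h & 0xfff0) << 12)
    head = h & 0x000f
    # block numbers start at zero, CHS sector numbers at one
    return sectors * heads * cylinder + sectors * head + s - 1

# made-up geometry: 15 heads, 12 sectors per track
assert eckd_chs_to_block(c=0, h=0, s=1, heads=15, sectors=12) == 0
assert eckd_chs_to_block(c=1, h=2, s=3, heads=15, sectors=12) == 12 * 15 + 12 * 2 + 2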
diff --git a/pc-bios/s390-ccw/bootmap.h b/pc-bios/s390-ccw/bootmap.h
index 3946aa3..d4690a8 100644
--- a/pc-bios/s390-ccw/bootmap.h
+++ b/pc-bios/s390-ccw/bootmap.h
@@ -45,9 +45,23 @@
* it's 0 for TablePtr, ScriptPtr, and SectionPtr */
} __attribute__ ((packed)) EckdBlockPtr;
-typedef struct ExtEckdBlockPtr {
+typedef struct LdEckdCHS {
+ uint32_t cylinder;
+ uint8_t head;
+ uint8_t sector;
+} __attribute__ ((packed)) LdEckdCHS;
+
+typedef struct LdEckdBlockPtr {
+ LdEckdCHS chs; /* cylinder/head/sector is an address of the block */
+ uint8_t reserved[4];
+ uint16_t count;
+ uint32_t pad;
+} __attribute__ ((packed)) LdEckdBlockPtr;
+
+/* bptr is used for CCW type IPL, while ldptr is for list-directed IPL */
+typedef union ExtEckdBlockPtr {
EckdBlockPtr bptr;
- uint8_t reserved[8];
+ LdEckdBlockPtr ldptr;
} __attribute__ ((packed)) ExtEckdBlockPtr;
typedef union BootMapPointer {
@@ -57,6 +71,15 @@
ExtEckdBlockPtr xeckd;
} __attribute__ ((packed)) BootMapPointer;
+typedef struct BootRecord {
+ uint8_t magic[4];
+ uint32_t version;
+ uint64_t res1;
+ BootMapPointer pgt;
+ uint8_t reserved[510 - 32];
+ uint16_t os_id;
+} __attribute__ ((packed)) BootRecord;
+
/* aka Program Table */
typedef struct BootMapTable {
uint8_t magic[4];
@@ -292,7 +315,8 @@
struct {
unsigned char key[4]; /* == "VOL1" */
unsigned char volser[6];
- unsigned char reserved[6];
+ unsigned char reserved[64];
+ EckdCHS br; /* Location of Boot Record for list-directed IPL */
} f;
};
} __attribute__((packed)) IplVolumeLabel;
diff --git a/pc-bios/vgabios-ati.bin b/pc-bios/vgabios-ati.bin
index 39b2405..9fb8627 100644
--- a/pc-bios/vgabios-ati.bin
+++ b/pc-bios/vgabios-ati.bin
Binary files differ
diff --git a/pc-bios/vgabios-bochs-display.bin b/pc-bios/vgabios-bochs-display.bin
index b20d67c..91969ae 100644
--- a/pc-bios/vgabios-bochs-display.bin
+++ b/pc-bios/vgabios-bochs-display.bin
Binary files differ
diff --git a/pc-bios/vgabios-cirrus.bin b/pc-bios/vgabios-cirrus.bin
index ebe5336..c429540 100644
--- a/pc-bios/vgabios-cirrus.bin
+++ b/pc-bios/vgabios-cirrus.bin
Binary files differ
diff --git a/pc-bios/vgabios-qxl.bin b/pc-bios/vgabios-qxl.bin
index 4b5573a..088385f 100644
--- a/pc-bios/vgabios-qxl.bin
+++ b/pc-bios/vgabios-qxl.bin
Binary files differ
diff --git a/pc-bios/vgabios-ramfb.bin b/pc-bios/vgabios-ramfb.bin
index d458ec74..134c751 100644
--- a/pc-bios/vgabios-ramfb.bin
+++ b/pc-bios/vgabios-ramfb.bin
Binary files differ
diff --git a/pc-bios/vgabios-stdvga.bin b/pc-bios/vgabios-stdvga.bin
index 797e103..4cd0d52 100644
--- a/pc-bios/vgabios-stdvga.bin
+++ b/pc-bios/vgabios-stdvga.bin
Binary files differ
diff --git a/pc-bios/vgabios-virtio.bin b/pc-bios/vgabios-virtio.bin
index 3f8fe9d..976c786 100644
--- a/pc-bios/vgabios-virtio.bin
+++ b/pc-bios/vgabios-virtio.bin
Binary files differ
diff --git a/pc-bios/vgabios-vmware.bin b/pc-bios/vgabios-vmware.bin
index d5f263a..119a2b1 100644
--- a/pc-bios/vgabios-vmware.bin
+++ b/pc-bios/vgabios-vmware.bin
Binary files differ
diff --git a/pc-bios/vgabios.bin b/pc-bios/vgabios.bin
index d26af41..cac6131 100644
--- a/pc-bios/vgabios.bin
+++ b/pc-bios/vgabios.bin
Binary files differ
diff --git a/plugins/core.c b/plugins/core.c
index e04ffa1..9912f2c 100644
--- a/plugins/core.c
+++ b/plugins/core.c
@@ -24,6 +24,7 @@
#include "exec/cpu-common.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "exec/helper-proto.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
@@ -552,17 +553,6 @@
}
}
-
-/*
- * Call this function after longjmp'ing to the main loop. It's possible that the
- * last instruction of a TB might have used helpers, and therefore the
- * "disable" instruction will never execute because it ended up as dead code.
- */
-void qemu_plugin_disable_mem_helpers(CPUState *cpu)
-{
- cpu->plugin_mem_cbs = NULL;
-}
-
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
return ap == bp;
diff --git a/plugins/loader.c b/plugins/loader.c
index 88c30bd..809f3f9 100644
--- a/plugins/loader.c
+++ b/plugins/loader.c
@@ -29,7 +29,7 @@
#include "qemu/plugin.h"
#include "qemu/memalign.h"
#include "hw/core/cpu.h"
-#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif
diff --git a/qapi/cryptodev.json b/qapi/cryptodev.json
new file mode 100644
index 0000000..f33f96a
--- /dev/null
+++ b/qapi/cryptodev.json
@@ -0,0 +1,89 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+##
+# @QCryptodevBackendAlgType:
+#
+# The supported algorithm types of a crypto device.
+#
+# @sym: symmetric encryption
+# @asym: asymmetric encryption
+#
+# Since: 8.0
+##
+{ 'enum': 'QCryptodevBackendAlgType',
+ 'prefix': 'QCRYPTODEV_BACKEND_ALG',
+ 'data': ['sym', 'asym']}
+
+##
+# @QCryptodevBackendServiceType:
+#
+# The supported service types of a crypto device.
+#
+# Since: 8.0
+##
+{ 'enum': 'QCryptodevBackendServiceType',
+ 'prefix': 'QCRYPTODEV_BACKEND_SERVICE',
+ 'data': ['cipher', 'hash', 'mac', 'aead', 'akcipher']}
+
+##
+# @QCryptodevBackendType:
+#
+# The crypto device backend type
+#
+# @builtin: the QEMU builtin support
+# @vhost-user: vhost-user
+# @lkcf: Linux kernel cryptographic framework
+#
+# Since: 8.0
+##
+{ 'enum': 'QCryptodevBackendType',
+ 'prefix': 'QCRYPTODEV_BACKEND_TYPE',
+ 'data': ['builtin', 'vhost-user', 'lkcf']}
+
+##
+# @QCryptodevBackendClient:
+#
+# Information about a queue of a crypto device.
+#
+# @queue: the queue index of the crypto device
+#
+# @type: the type of the crypto device
+#
+# Since: 8.0
+##
+{ 'struct': 'QCryptodevBackendClient',
+ 'data': { 'queue': 'uint32',
+ 'type': 'QCryptodevBackendType' } }
+
+##
+# @QCryptodevInfo:
+#
+# Information about a crypto device.
+#
+# @id: the id of the crypto device
+#
+# @service: supported service types of a crypto device
+#
+# @client: additional information about the crypto device
+#
+# Since: 8.0
+##
+{ 'struct': 'QCryptodevInfo',
+ 'data': { 'id': 'str',
+ 'service': ['QCryptodevBackendServiceType'],
+ 'client': ['QCryptodevBackendClient'] } }
+
+##
+# @query-cryptodev:
+#
+# Returns information about current crypto devices.
+#
+# Returns: a list of @QCryptodevInfo
+#
+# Since: 8.0
+##
+{ 'command': 'query-cryptodev', 'returns': ['QCryptodevInfo']}
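query-cryptodev is a plain QMP command with no arguments. A minimal sketch of issuing it over a QMP UNIX socket using only the Python standard library; the socket path is an assumption and error handling is omitted:

import json, socket

def qmp_query_cryptodev(path="/tmp/qmp.sock"):   # hypothetical -qmp unix socket path
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(path)
    f = s.makefile("rw")
    json.loads(f.readline())                                  # QMP greeting
    f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n")
    f.flush()
    json.loads(f.readline())                                  # empty return
    f.write(json.dumps({"execute": "query-cryptodev"}) + "\n")
    f.flush()
    return json.loads(f.readline())["return"]                 # list of QCryptodevInfo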
diff --git a/qapi/cxl.json b/qapi/cxl.json
new file mode 100644
index 0000000..4be7d46
--- /dev/null
+++ b/qapi/cxl.json
@@ -0,0 +1,128 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+
+##
+# = CXL devices
+##
+
+##
+# @CxlUncorErrorType:
+#
+# Type of uncorrectable CXL error to inject. These errors are reported via
+# an AER uncorrectable internal error with additional information logged at
+# the CXL device.
+#
+# @cache-data-parity: Data error such as data parity or data ECC error on CXL.cache
+# @cache-address-parity: Address parity or other errors associated with the
+# address field on CXL.cache
+# @cache-be-parity: Byte enable parity or other byte enable errors on CXL.cache
+# @cache-data-ecc: ECC error on CXL.cache
+# @mem-data-parity: Data error such as data parity or data ECC error on CXL.mem
+# @mem-address-parity: Address parity or other errors associated with the
+# address field on CXL.mem
+# @mem-be-parity: Byte enable parity or other byte enable errors on CXL.mem.
+# @mem-data-ecc: Data ECC error on CXL.mem.
+# @reinit-threshold: REINIT threshold hit.
+# @rsvd-encoding: Received unrecognized encoding.
+# @poison-received: Received poison from the peer.
+# @receiver-overflow: Buffer overflows (first 3 bits of header log indicate which)
+# @internal: Component specific error
+# @cxl-ide-tx: Integrity and data encryption tx error.
+# @cxl-ide-rx: Integrity and data encryption rx error.
+#
+# Since: 8.0
+##
+
+{ 'enum': 'CxlUncorErrorType',
+ 'data': ['cache-data-parity',
+ 'cache-address-parity',
+ 'cache-be-parity',
+ 'cache-data-ecc',
+ 'mem-data-parity',
+ 'mem-address-parity',
+ 'mem-be-parity',
+ 'mem-data-ecc',
+ 'reinit-threshold',
+ 'rsvd-encoding',
+ 'poison-received',
+ 'receiver-overflow',
+ 'internal',
+ 'cxl-ide-tx',
+ 'cxl-ide-rx'
+ ]
+ }
+
+##
+# @CXLUncorErrorRecord:
+#
+# Record of a single error including header log.
+#
+# @type: Type of error
+# @header: 16 DWORD of header.
+#
+# Since: 8.0
+##
+{ 'struct': 'CXLUncorErrorRecord',
+ 'data': {
+ 'type': 'CxlUncorErrorType',
+ 'header': [ 'uint32' ]
+ }
+}
+
+##
+# @cxl-inject-uncorrectable-errors:
+#
+# Command to allow injection of multiple errors in one go. This allows
+# testing of how the OS handles multiple header logs.
+#
+# @path: CXL Type 3 device canonical QOM path
+# @errors: Errors to inject
+#
+# Since: 8.0
+##
+{ 'command': 'cxl-inject-uncorrectable-errors',
+ 'data': { 'path': 'str',
+ 'errors': [ 'CXLUncorErrorRecord' ] }}
+
+##
+# @CxlCorErrorType:
+#
+# Type of CXL correctable error to inject
+#
+# @cache-data-ecc: Data ECC error on CXL.cache
+# @mem-data-ecc: Data ECC error on CXL.mem
+# @crc-threshold: Component specific and applicable to 68 byte Flit mode only.
+# @cache-poison-received: Received poison from a peer on CXL.cache.
+# @mem-poison-received: Received poison from a peer on CXL.mem
+# @physical: Received error indication from the physical layer.
+#
+# Since: 8.0
+##
+{ 'enum': 'CxlCorErrorType',
+ 'data': ['cache-data-ecc',
+ 'mem-data-ecc',
+ 'crc-threshold',
+ 'retry-threshold',
+ 'cache-poison-received',
+ 'mem-poison-received',
+ 'physical']
+}
+
+##
+# @cxl-inject-correctable-error:
+#
+# Command to inject a single correctable error. Multiple error injection
+# of this error type is not interesting as there is no associated header log.
+# These errors are reported via AER as a correctable internal error, with
+# additional detail available from the CXL device.
+#
+# @path: CXL Type 3 device canonical QOM path
+# @type: Type of error.
+#
+# Since: 8.0
+##
+{ 'command': 'cxl-inject-correctable-error',
+ 'data': { 'path': 'str',
+ 'type': 'CxlCorErrorType'
+ }
+}
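Per the schema above, cxl-inject-uncorrectable-errors takes a QOM path and a list of CXLUncorErrorRecord entries, each carrying a type and 16 DWORDs of header log. A sketch of the argument shape; the QOM path and header contents are placeholder values:

# illustrative only: the device path and header words are hypothetical
inject_args = {
    "execute": "cxl-inject-uncorrectable-errors",
    "arguments": {
        "path": "/machine/peripheral/cxl-mem0",   # canonical QOM path of a CXL type 3 device
        "errors": [
            {"type": "cache-data-parity", "header": [0] * 16},  # 16 DWORDs of header log
        ],
    },
}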
diff --git a/qapi/meson.build b/qapi/meson.build
index fbdb442..9fd480c 100644
--- a/qapi/meson.build
+++ b/qapi/meson.build
@@ -31,6 +31,7 @@
'compat',
'control',
'crypto',
+ 'cxl',
'dump',
'error',
'introspect',
@@ -56,6 +57,7 @@
qapi_all_modules += [
'acpi',
'audio',
+ 'cryptodev',
'qdev',
'pci',
'rdma',
diff --git a/qapi/misc.json b/qapi/misc.json
index 27ef5a2..6ddd16e 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -14,6 +14,9 @@
# Allow client connections for VNC, Spice and socket based
# character devices to be passed in to QEMU via SCM_RIGHTS.
#
+# If the FD associated with @fdname is not a socket, the command will fail and
+# the FD will be closed.
+#
# @protocol: protocol name. Valid names are "vnc", "spice", "@dbus-display" or
# the name of a character device (eg. from -chardev id=XXXX)
#
@@ -270,7 +273,38 @@
# <- { "return": {} }
#
##
-{ 'command': 'getfd', 'data': {'fdname': 'str'} }
+{ 'command': 'getfd', 'data': {'fdname': 'str'}, 'if': 'CONFIG_POSIX' }
+
+##
+# @get-win32-socket:
+#
+# Add a socket that was duplicated to the QEMU process with
+# WSADuplicateSocketW(): the given WSAPROTOCOL_INFOW structure is passed
+# to WSASocket() to recreate the SOCKET, which is then assigned a name
+# (the SOCKET is associated with a CRT file descriptor)
+#
+# @info: the WSAPROTOCOL_INFOW structure (encoded in base64)
+#
+# @fdname: file descriptor name
+#
+# Returns: Nothing on success
+#
+# Since: 8.0
+#
+# Notes: If @fdname already exists, the file descriptor assigned to
+# it will be closed and replaced by the received file
+# descriptor.
+#
+# The 'closefd' command can be used to explicitly close the
+# file descriptor when it is no longer needed.
+#
+# Example:
+#
+# -> { "execute": "get-win32-socket", "arguments": { "info": "abcd123..", fdname": "skclient" } }
+# <- { "return": {} }
+#
+##
+{ 'command': 'get-win32-socket', 'data': {'info': 'str', 'fdname': 'str'}, 'if': 'CONFIG_WIN32' }
##
# @closefd:
diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json
index f000b90..7c09af5 100644
--- a/qapi/qapi-schema.json
+++ b/qapi/qapi-schema.json
@@ -95,3 +95,5 @@
{ 'include': 'pci.json' }
{ 'include': 'stats.json' }
{ 'include': 'virtio.json' }
+{ 'include': 'cryptodev.json' }
+{ 'include': 'cxl.json' }
diff --git a/qapi/qom.json b/qapi/qom.json
index 30e7665..a877b87 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -278,10 +278,16 @@
# cryptodev-backend and must be 1 for cryptodev-backend-builtin.
# (default: 1)
#
+# @throttle-bps: limit total bytes per second (Since 8.0)
+#
+# @throttle-ops: limit total operations per second (Since 8.0)
+#
# Since: 2.8
##
{ 'struct': 'CryptodevBackendProperties',
- 'data': { '*queues': 'uint32' } }
+ 'data': { '*queues': 'uint32',
+ '*throttle-bps': 'uint64',
+ '*throttle-ops': 'uint64' } }
##
# @CryptodevVhostUserProperties:
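With the new properties, a cryptodev backend can be rate-limited when it is created via object-add. A sketch of the corresponding arguments; the id and limits are arbitrary example values:

# illustrative object-add arguments; id and throttle values are made up
object_add = {
    "execute": "object-add",
    "arguments": {
        "qom-type": "cryptodev-backend-builtin",
        "id": "cryptodev0",
        "queues": 1,                         # must be 1 for the builtin backend
        "throttle-bps": 10 * 1024 * 1024,    # cap total throughput at ~10 MiB/s
        "throttle-ops": 1000,                # cap at 1000 crypto operations/s
    },
}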
diff --git a/qapi/stats.json b/qapi/stats.json
index 57db5b1..1f5d3c5 100644
--- a/qapi/stats.json
+++ b/qapi/stats.json
@@ -50,10 +50,14 @@
#
# Enumeration of statistics providers.
#
+# @kvm: since 7.1
+#
+# @cryptodev: since 8.0
+#
# Since: 7.1
##
{ 'enum': 'StatsProvider',
- 'data': [ 'kvm' ] }
+ 'data': [ 'kvm', 'cryptodev' ] }
##
# @StatsTarget:
@@ -65,10 +69,12 @@
#
# @vcpu: statistics that apply to a single virtual CPU.
#
+# @cryptodev: statistics that apply to a crypto device (since 8.0)
+#
# Since: 7.1
##
{ 'enum': 'StatsTarget',
- 'data': [ 'vm', 'vcpu' ] }
+ 'data': [ 'vm', 'vcpu', 'cryptodev' ] }
##
# @StatsRequest:
diff --git a/qapi/ui.json b/qapi/ui.json
index 0abba3e..9832234 100644
--- a/qapi/ui.json
+++ b/qapi/ui.json
@@ -886,6 +886,19 @@
# @lang1: since 6.1
# @lang2: since 6.1
#
+# @f13: since 8.0
+# @f14: since 8.0
+# @f15: since 8.0
+# @f16: since 8.0
+# @f17: since 8.0
+# @f18: since 8.0
+# @f19: since 8.0
+# @f20: since 8.0
+# @f21: since 8.0
+# @f22: since 8.0
+# @f23: since 8.0
+# @f24: since 8.0
+#
# 'sysrq' was mistakenly added to hack around the fact that
# the ps2 driver was not generating correct scancodes sequences
# when 'alt+print' was pressed. This flaw is now fixed and the
@@ -918,7 +931,7 @@
'volumeup', 'volumedown', 'mediaselect',
'mail', 'calculator', 'computer',
'ac_home', 'ac_back', 'ac_forward', 'ac_refresh', 'ac_bookmarks',
- 'lang1', 'lang2' ] }
+ 'lang1', 'lang2', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24' ] }
##
# @KeyValueKind:
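The new F13-F24 QKeyCodes can be delivered to the guest, for example with the existing send-key command. A sketch of the argument shape for one of them, purely illustrative:

# illustrative send-key arguments using one of the newly added QKeyCodes
send_f13 = {
    "execute": "send-key",
    "arguments": {
        "keys": [{"type": "qcode", "data": "f13"}],
    },
}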
diff --git a/qemu-options.hx b/qemu-options.hx
index d42f60f..59bdf67 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1606,7 +1606,7 @@
.. parsed-literal::
- |qemu_system_x86| -drive file=a -drive file=b"
+ |qemu_system_x86| -drive file=a -drive file=b
is interpreted like:
diff --git a/qga/installer/qemu-ga.wxs b/qga/installer/qemu-ga.wxs
index 51340f7..df572ad 100644
--- a/qga/installer/qemu-ga.wxs
+++ b/qga/installer/qemu-ga.wxs
@@ -31,6 +31,7 @@
/>
<Media Id="1" Cabinet="qemu_ga.$(var.QEMU_GA_VERSION).cab" EmbedCab="yes" />
<Property Id="WHSLogo">1</Property>
+ <Property Id="ARPNOMODIFY" Value="yes" Secure="yes" />
<MajorUpgrade
DowngradeErrorMessage="Error: A newer version of QEMU guest agent is already installed."
/>
@@ -121,27 +122,31 @@
<RegistryValue Type="integer" Name="TypesSupported" Value="7" />
<RegistryValue Type="string" Name="EventMessageFile" Value="[qemu_ga_directory]qemu-ga.exe" />
</RegistryKey>
+ <RegistryKey Root="HKLM"
+ Key="System\CurrentControlSet\Services\QEMU Guest Agent VSS Provider">
+ <RegistryValue Type="integer" Name="VssOption" Value="1" />
+ </RegistryKey>
</Component>
</Directory>
</Directory>
</Directory>
- <Property Id="cmd" Value="cmd.exe"/>
+ <Property Id="rundll" Value="rundll32.exe"/>
<Property Id="REINSTALLMODE" Value="amus"/>
<?ifdef var.InstallVss?>
<CustomAction Id="RegisterCom"
- ExeCommand='/c "[qemu_ga_directory]qemu-ga.exe" -s vss-install'
+ ExeCommand='"[qemu_ga_directory]qga-vss.dll",DLLCOMRegister'
Execute="deferred"
- Property="cmd"
+ Property="rundll"
Impersonate="no"
Return="check"
>
</CustomAction>
<CustomAction Id="UnRegisterCom"
- ExeCommand='/c "[qemu_ga_directory]qemu-ga.exe" -s vss-uninstall'
+ ExeCommand='"[qemu_ga_directory]qga-vss.dll",DLLCOMUnregister'
Execute="deferred"
- Property="cmd"
+ Property="rundll"
Impersonate="no"
Return="check"
>
diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp
index b8087e5..ff93b08 100644
--- a/qga/vss-win32/install.cpp
+++ b/qga/vss-win32/install.cpp
@@ -357,6 +357,15 @@
return hr;
}
+STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int)
+{
+ COMRegister();
+}
+
+STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int)
+{
+ COMUnregister();
+}
static BOOL CreateRegistryKey(LPCTSTR key, LPCTSTR value, LPCTSTR data)
{
diff --git a/qga/vss-win32/qga-vss.def b/qga/vss-win32/qga-vss.def
index 927782c..ee97a81 100644
--- a/qga/vss-win32/qga-vss.def
+++ b/qga/vss-win32/qga-vss.def
@@ -1,6 +1,8 @@
LIBRARY "QGA-PROVIDER.DLL"
EXPORTS
+ DLLCOMRegister
+ DLLCOMUnregister
COMRegister PRIVATE
COMUnregister PRIVATE
DllCanUnloadNow PRIVATE
diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp
index b371aff..3e998af 100644
--- a/qga/vss-win32/requester.cpp
+++ b/qga/vss-win32/requester.cpp
@@ -23,6 +23,8 @@
/* Call QueryStatus every 10 ms while waiting for frozen event */
#define VSS_TIMEOUT_EVENT_MSEC 10
+#define DEFAULT_VSS_BACKUP_TYPE VSS_BT_FULL
+
#define err_set(e, err, fmt, ...) \
((e)->error_setg_win32_wrapper((e)->errp, __FILE__, __LINE__, __func__, \
err, fmt, ## __VA_ARGS__))
@@ -234,6 +236,42 @@
}
}
+DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName,
+ DWORD defaultData)
+{
+ DWORD regGetValueError;
+ DWORD dwordData;
+ DWORD dataSize = sizeof(DWORD);
+
+ regGetValueError = RegGetValue(baseKey, subKey, valueName, RRF_RT_DWORD,
+ NULL, &dwordData, &dataSize);
+ if (regGetValueError != ERROR_SUCCESS) {
+ return defaultData;
+ }
+ return dwordData;
+}
+
+bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT)
+{
+ return (vssBT > VSS_BT_UNDEFINED && vssBT < VSS_BT_OTHER);
+}
+
+VSS_BACKUP_TYPE get_vss_backup_type(
+ VSS_BACKUP_TYPE defaultVssBT = DEFAULT_VSS_BACKUP_TYPE)
+{
+ VSS_BACKUP_TYPE vssBackupType;
+
+ vssBackupType = static_cast<VSS_BACKUP_TYPE>(
+ get_reg_dword_value(HKEY_LOCAL_MACHINE,
+ QGA_PROVIDER_REGISTRY_ADDRESS,
+ "VssOption",
+ defaultVssBT));
+ if (!is_valid_vss_backup_type(vssBackupType)) {
+ return defaultVssBT;
+ }
+ return vssBackupType;
+}
+
void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset)
{
COMPointer<IVssAsync> pAsync;
@@ -247,6 +285,7 @@
DWORD wait_status;
int num_fixed_drives = 0, i;
int num_mount_points = 0;
+ VSS_BACKUP_TYPE vss_bt = get_vss_backup_type();
if (vss_ctx.pVssbc) { /* already frozen */
*num_vols = 0;
@@ -294,7 +333,7 @@
goto out;
}
- hr = vss_ctx.pVssbc->SetBackupState(true, true, VSS_BT_FULL, false);
+ hr = vss_ctx.pVssbc->SetBackupState(true, true, vss_bt, false);
if (FAILED(hr)) {
err_set(errset, hr, "failed to set backup state");
goto out;
diff --git a/qga/vss-win32/vss-handles.h b/qga/vss-win32/vss-handles.h
index 0f8a741..1a7d842 100644
--- a/qga/vss-win32/vss-handles.h
+++ b/qga/vss-win32/vss-handles.h
@@ -6,6 +6,9 @@
#define QGA_PROVIDER_NAME "QEMU Guest Agent VSS Provider"
#define QGA_PROVIDER_LNAME L(QGA_PROVIDER_NAME)
#define QGA_PROVIDER_VERSION L(QEMU_VERSION)
+#define QGA_PROVIDER_REGISTRY_ADDRESS "SYSTEM\\CurrentControlSet"\
+ "\\Services"\
+ "\\" QGA_PROVIDER_NAME
#define EVENT_NAME_FROZEN "Global\\QGAVSSEvent-frozen"
#define EVENT_NAME_THAW "Global\\QGAVSSEvent-thaw"
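With the change above, requester_freeze() reads an optional VssOption DWORD from the provider's service key (the installer seeds it with 1, i.e. a full backup) and falls back to VSS_BT_FULL when the value is missing or out of range. A guest-side sketch for switching it, assuming the standard Windows SDK numbering in which VSS_BT_FULL is 1 and VSS_BT_COPY is 5:

# run inside the Windows guest; requires administrative rights
import winreg

KEY = r"SYSTEM\CurrentControlSet\Services\QEMU Guest Agent VSS Provider"

with winreg.CreateKeyEx(winreg.HKEY_LOCAL_MACHINE, KEY, 0,
                        winreg.KEY_SET_VALUE) as key:
    # 5 = VSS_BT_COPY in the Windows SDK (assumption based on vss.h numbering)
    winreg.SetValueEx(key, "VssOption", 0, winreg.REG_DWORD, 5)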
diff --git a/roms/Makefile b/roms/Makefile
index 955f922..6859685 100644
--- a/roms/Makefile
+++ b/roms/Makefile
@@ -126,25 +126,6 @@
CROSS_COMPILE=$(x86_64_cross_prefix) \
$(patsubst %,bin-x86_64-efi/%.efidrv,$(pxerom_targets))
-# Build scripts can pass compiler/linker flags to the EDK2
-# build tools via the EDK2_BASETOOLS_OPTFLAGS (CFLAGS) and
-# EDK2_BASETOOLS_LDFLAGS (LDFLAGS) environment variables.
-#
-# Example:
-#
-# make -C roms \
-# EDK2_BASETOOLS_OPTFLAGS='...' \
-# EDK2_BASETOOLS_LDFLAGS='...' \
-# efirom
-#
-edk2-basetools:
- cd edk2/BaseTools && git submodule update --init --force \
- Source/C/BrotliCompress/brotli
- $(MAKE) -C edk2/BaseTools \
- PYTHON_COMMAND=$${EDK2_PYTHON_COMMAND:-python3} \
- EXTRA_OPTFLAGS='$(EDK2_BASETOOLS_OPTFLAGS)' \
- EXTRA_LDFLAGS='$(EDK2_BASETOOLS_LDFLAGS)'
-
slof:
$(MAKE) -C SLOF CROSS=$(powerpc64_cross_prefix) qemu
cp SLOF/boot_rom.bin ../pc-bios/slof.bin
@@ -165,8 +146,12 @@
$(MAKE) -C skiboot CROSS=$(powerpc64_cross_prefix)
cp skiboot/skiboot.lid ../pc-bios/skiboot.lid
-efi: edk2-basetools
- $(MAKE) -f Makefile.edk2
+efi:
+ python3 edk2-build.py --config edk2-build.config \
+ --version-override "edk2-stable202302-for-qemu" \
+ --release-date "03/01/2023"
+ rm -f ../pc-bios/edk2-*.fd.bz2
+ bzip2 --verbose ../pc-bios/edk2-*.fd
opensbi32-generic:
$(MAKE) -C opensbi \
@@ -200,7 +185,7 @@
rm -rf u-boot/build-e500
$(MAKE) -C u-boot-sam460ex distclean
$(MAKE) -C skiboot clean
- $(MAKE) -f Makefile.edk2 clean
+ rm -rf Build
$(MAKE) -C opensbi clean
$(MAKE) -C qboot clean
$(MAKE) -C vbootrom clean
diff --git a/roms/Makefile.edk2 b/roms/Makefile.edk2
deleted file mode 100644
index 485f224..0000000
--- a/roms/Makefile.edk2
+++ /dev/null
@@ -1,178 +0,0 @@
-# Makefile for building firmware binaries and variable store templates for a
-# number of virtual platforms in edk2.
-#
-# Copyright (C) 2019 Red Hat, Inc.
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License that accompanies this
-# distribution. The full text of the license may be found at
-# <http://opensource.org/licenses/bsd-license.php>.
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT
-# WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-SHELL = /bin/bash
-
-target = RELEASE
-toolchain = $(shell source ./edk2-funcs.sh && qemu_edk2_get_toolchain $(1))
-
-licenses := \
- edk2/License.txt \
- edk2/License-History.txt \
- edk2/OvmfPkg/License.txt \
- edk2/ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3/COPYING.txt \
- edk2/CryptoPkg/Library/OpensslLib/openssl/LICENSE
-
-# The "edk2-arm-vars.fd" varstore template is suitable for aarch64 as well.
-# Similarly, the "edk2-i386-vars.fd" varstore template is suitable for x86_64
-# as well, independently of "secure" too.
-flashdevs := \
- aarch64-code \
- arm-code \
- i386-code \
- i386-secure-code \
- x86_64-code \
- x86_64-secure-code \
- x86_64-microvm \
- \
- arm-vars \
- i386-vars
-
-all: $(foreach flashdev,$(flashdevs),../pc-bios/edk2-$(flashdev).fd.bz2) \
- ../pc-bios/edk2-licenses.txt
-
-../pc-bios/edk2-%.fd.bz2: ../pc-bios/edk2-%.fd
- bzip2 -9 -c $< > $@
-
-# When the build completes, we need not keep the uncompressed flash device
-# files.
-.INTERMEDIATE: $(foreach flashdev,$(flashdevs),../pc-bios/edk2-$(flashdev).fd)
-
-# Fetch edk2 submodule's submodules. If it is not in a git tree, assume
-# we're building from a tarball and that they've already been fetched by
-# make-release/tarball scripts.
-submodules:
- if test -e edk2/.git; then \
- cd edk2 && git submodule update --init --force -- \
- ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \
- BaseTools/Source/C/BrotliCompress/brotli \
- CryptoPkg/Library/OpensslLib/openssl \
- MdeModulePkg/Library/BrotliCustomDecompressLib/brotli \
- ; \
- fi
-
-# See notes on the ".NOTPARALLEL" target and the "+" indicator in
-# "tests/uefi-test-tools/Makefile".
-.NOTPARALLEL:
-
-../pc-bios/edk2-aarch64-code.fd: submodules
- +./edk2-build.sh \
- aarch64 \
- --arch=AARCH64 \
- --platform=ArmVirtPkg/ArmVirtQemu.dsc \
- -D NETWORK_IP6_ENABLE \
- -D NETWORK_HTTP_BOOT_ENABLE \
- -D NETWORK_TLS_ENABLE \
- -D TPM2_ENABLE \
- -D TPM2_CONFIG_ENABLE
- cp edk2/Build/ArmVirtQemu-AARCH64/$(target)_$(call toolchain,aarch64)/FV/QEMU_EFI.fd \
- $@
- truncate --size=64M $@
-
-../pc-bios/edk2-arm-code.fd: submodules
- +./edk2-build.sh \
- arm \
- --arch=ARM \
- --platform=ArmVirtPkg/ArmVirtQemu.dsc \
- -D NETWORK_IP6_ENABLE \
- -D NETWORK_HTTP_BOOT_ENABLE \
- -D NETWORK_TLS_ENABLE \
- -D TPM2_ENABLE \
- -D TPM2_CONFIG_ENABLE
- cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_EFI.fd \
- $@
- truncate --size=64M $@
-
-../pc-bios/edk2-i386-code.fd: submodules
- +./edk2-build.sh \
- i386 \
- --arch=IA32 \
- --platform=OvmfPkg/OvmfPkgIa32.dsc \
- -D NETWORK_IP6_ENABLE \
- -D NETWORK_HTTP_BOOT_ENABLE \
- -D NETWORK_TLS_ENABLE \
- -D TPM_ENABLE \
- -D TPM_CONFIG_ENABLE
- cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@
-
-../pc-bios/edk2-i386-secure-code.fd: submodules
- +./edk2-build.sh \
- i386 \
- --arch=IA32 \
- --platform=OvmfPkg/OvmfPkgIa32.dsc \
- -D NETWORK_IP6_ENABLE \
- -D NETWORK_HTTP_BOOT_ENABLE \
- -D NETWORK_TLS_ENABLE \
- -D TPM_ENABLE \
- -D TPM_CONFIG_ENABLE \
- -D SECURE_BOOT_ENABLE \
- -D SMM_REQUIRE
- cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@
-
-../pc-bios/edk2-x86_64-code.fd: submodules
- +./edk2-build.sh \
- x86_64 \
- --arch=X64 \
- --platform=OvmfPkg/OvmfPkgX64.dsc \
- -D NETWORK_IP6_ENABLE \
- -D NETWORK_HTTP_BOOT_ENABLE \
- -D NETWORK_TLS_ENABLE \
- -D TPM_ENABLE \
- -D TPM_CONFIG_ENABLE
- cp edk2/Build/OvmfX64/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@
-
-../pc-bios/edk2-x86_64-secure-code.fd: submodules
- +./edk2-build.sh \
- x86_64 \
- --arch=IA32 \
- --arch=X64 \
- --platform=OvmfPkg/OvmfPkgIa32X64.dsc \
- -D NETWORK_IP6_ENABLE \
- -D NETWORK_HTTP_BOOT_ENABLE \
- -D NETWORK_TLS_ENABLE \
- -D TPM_ENABLE \
- -D TPM_CONFIG_ENABLE \
- -D SECURE_BOOT_ENABLE \
- -D SMM_REQUIRE
- cp edk2/Build/Ovmf3264/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@
-
-../pc-bios/edk2-x86_64-microvm.fd: submodules
- +./edk2-build.sh \
- x86_64 \
- --arch=X64 \
- --platform=OvmfPkg/Microvm/MicrovmX64.dsc \
- -D NETWORK_IP6_ENABLE \
- -D NETWORK_HTTP_BOOT_ENABLE \
- -D NETWORK_TLS_ENABLE
- cp edk2/Build/MicrovmX64/$(target)_$(call toolchain,x86_64)/FV/MICROVM.fd $@
-
-../pc-bios/edk2-arm-vars.fd: ../pc-bios/edk2-arm-code.fd
- cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_VARS.fd \
- $@
- truncate --size=64M $@
-
-../pc-bios/edk2-i386-vars.fd: ../pc-bios/edk2-i386-code.fd
- cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_VARS.fd $@
-
-# The license file accumulates several individual licenses from under edk2,
-# prefixing each individual license with a header (generated by "tail") that
-# states its pathname.
-../pc-bios/edk2-licenses.txt: submodules
- tail -n $(shell cat $(licenses) | wc -l) $(licenses) > $@
- dos2unix $@
-
-clean:
- rm -rf edk2/Build
- cd edk2/Conf && \
- rm -rf .cache BuildEnv.sh build_rule.txt target.txt \
- tools_def.txt
diff --git a/roms/edk2 b/roms/edk2
index b24306f..f80f052 160000
--- a/roms/edk2
+++ b/roms/edk2
@@ -1 +1 @@
-Subproject commit b24306f15daa2ff8510b06702114724b33895d3c
+Subproject commit f80f052277c88a67c55e107b550f504eeea947d3
diff --git a/roms/edk2-build.config b/roms/edk2-build.config
new file mode 100644
index 0000000..66ef9ff
--- /dev/null
+++ b/roms/edk2-build.config
@@ -0,0 +1,124 @@
+[global]
+core = edk2
+
+####################################################################################
+# options
+
+[opts.common]
+NETWORK_HTTP_BOOT_ENABLE = TRUE
+NETWORK_IP6_ENABLE = TRUE
+NETWORK_TLS_ENABLE = TRUE
+NETWORK_ISCSI_ENABLE = TRUE
+NETWORK_ALLOW_HTTP_CONNECTIONS = TRUE
+TPM2_ENABLE = TRUE
+TPM2_CONFIG_ENABLE = TRUE
+TPM1_ENABLE = TRUE
+CAVIUM_ERRATUM_27456 = TRUE
+
+[opts.ovmf.sb.smm]
+SECURE_BOOT_ENABLE = TRUE
+SMM_REQUIRE = TRUE
+
+[opts.armvirt.silent]
+DEBUG_PRINT_ERROR_LEVEL = 0x80000000
+
+[pcds.nx.broken.grub]
+# grub.efi uses EfiLoaderData for code
+PcdDxeNxMemoryProtectionPolicy = 0xC000000000007FD1
+
+####################################################################################
+# i386
+
+[build.ovmf.i386]
+desc = ovmf build (32-bit)
+conf = OvmfPkg/OvmfPkgIa32.dsc
+arch = IA32
+opts = common
+plat = OvmfIa32
+dest = ../pc-bios
+cpy1 = FV/OVMF_CODE.fd edk2-i386-code.fd
+cpy2 = FV/OVMF_VARS.fd edk2-i386-vars.fd
+
+[build.ovmf.i386.secure]
+desc = ovmf build (32-bit, secure boot)
+conf = OvmfPkg/OvmfPkgIa32.dsc
+arch = IA32
+opts = common
+ ovmf.sb.smm
+plat = OvmfIa32
+dest = ../pc-bios
+cpy1 = FV/OVMF_CODE.fd edk2-i386-secure-code.fd
+
+####################################################################################
+# x86_64
+
+[build.ovmf.x86_64]
+desc = ovmf build (64-bit)
+conf = OvmfPkg/OvmfPkgX64.dsc
+arch = X64
+opts = common
+plat = OvmfX64
+dest = ../pc-bios
+cpy1 = FV/OVMF_CODE.fd edk2-x86_64-code.fd
+
+[build.ovmf.x86_64.secure]
+desc = ovmf build (64-bit, secure boot)
+conf = OvmfPkg/OvmfPkgIa32X64.dsc
+arch = IA32 X64
+opts = common
+ ovmf.sb.smm
+plat = Ovmf3264
+dest = ../pc-bios
+cpy1 = FV/OVMF_CODE.fd edk2-x86_64-secure-code.fd
+
+[build.ovmf.microvm]
+desc = ovmf build for microvm
+conf = OvmfPkg/Microvm/MicrovmX64.dsc
+arch = X64
+opts = common
+plat = MicrovmX64
+dest = ../pc-bios
+cpy1 = FV/MICROVM.fd edk2-x86_64-microvm.fd
+
+####################################################################################
+# arm
+
+[build.armvirt.arm]
+desc = ArmVirt build, 32-bit (arm v7)
+conf = ArmVirtPkg/ArmVirtQemu.dsc
+arch = ARM
+opts = common
+ armvirt.silent
+pcds = nx.broken.grub
+plat = ArmVirtQemu-ARM
+dest = ../pc-bios
+cpy1 = FV/QEMU_EFI.fd edk2-arm-code.fd
+cpy2 = FV/QEMU_VARS.fd edk2-arm-vars.fd
+pad1 = edk2-arm-code.fd 64m
+pad2 = edk2-arm-vars.fd 64m
+
+####################################################################################
+# aarch64
+
+[build.armvirt.aa64]
+desc = ArmVirt build, 64-bit (arm v8)
+conf = ArmVirtPkg/ArmVirtQemu.dsc
+arch = AARCH64
+opts = common
+ armvirt.silent
+pcds = nx.broken.grub
+plat = ArmVirtQemu-AARCH64
+dest = ../pc-bios
+cpy1 = FV/QEMU_EFI.fd edk2-aarch64-code.fd
+pad1 = edk2-aarch64-code.fd 64m
+
+####################################################################################
+# riscv64
+
+[build.riscv.qemu]
+conf = OvmfPkg/RiscVVirt/RiscVVirtQemu.dsc
+arch = RISCV64
+plat = RiscVVirtQemu
+dest = ../pc-bios
+cpy1 = FV/RISCV_VIRT.fd edk2-riscv.fd
+pad1 = edk2-riscv.fd 32m
diff --git a/roms/edk2-build.py b/roms/edk2-build.py
new file mode 100755
index 0000000..870893f
--- /dev/null
+++ b/roms/edk2-build.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python3
+"""
+build helper script for edk2, see
+https://gitlab.com/kraxel/edk2-build-config
+
+"""
+import os
+import sys
+import shutil
+import argparse
+import subprocess
+import configparser
+
+rebase_prefix = ""
+version_override = None
+release_date = None
+
+# pylint: disable=unused-variable
+def check_rebase():
+ """ detect 'git rebase -x edk2-build.py master' testbuilds """
+ global rebase_prefix
+ global version_override
+ gitdir = '.git'
+
+ if os.path.isfile(gitdir):
+ with open(gitdir, 'r', encoding = 'utf-8') as f:
+ (unused, gitdir) = f.read().split()
+
+ if not os.path.exists(f'{gitdir}/rebase-merge/msgnum'):
+ return
+ with open(f'{gitdir}/rebase-merge/msgnum', 'r', encoding = 'utf-8') as f:
+ msgnum = int(f.read())
+ with open(f'{gitdir}/rebase-merge/end', 'r', encoding = 'utf-8') as f:
+ end = int(f.read())
+ with open(f'{gitdir}/rebase-merge/head-name', 'r', encoding = 'utf-8') as f:
+ head = f.read().strip().split('/')
+
+ rebase_prefix = f'[ {int(msgnum/2)} / {int(end/2)} - {head[-1]} ] '
+ if msgnum != end and not version_override:
+ # fixed version speeds up builds
+ version_override = "test-build-patch-series"
+
+def get_coredir(cfg):
+ if cfg.has_option('global', 'core'):
+ return os.path.abspath(cfg['global']['core'])
+ return os.getcwd()
+
+def get_version(cfg):
+ coredir = get_coredir(cfg)
+ if version_override:
+ version = version_override
+ print('')
+ print(f'### version [override]: {version}')
+ return version
+ if os.environ.get('RPM_PACKAGE_NAME'):
+ version = os.environ.get('RPM_PACKAGE_NAME')
+ version += '-' + os.environ.get('RPM_PACKAGE_VERSION')
+ version += '-' + os.environ.get('RPM_PACKAGE_RELEASE')
+ print('')
+ print(f'### version [rpmbuild]: {version}')
+ return version
+ if os.path.exists(coredir + '/.git'):
+ cmdline = [ 'git', 'describe', '--tags', '--abbrev=8',
+ '--match=edk2-stable*' ]
+ result = subprocess.run(cmdline, cwd = coredir,
+ stdout = subprocess.PIPE,
+ check = True)
+ version = result.stdout.decode().strip()
+ print('')
+ print(f'### version [git]: {version}')
+ return version
+ return None
+
+def pcd_string(name, value):
+ return f'{name}=L{value}\\0'
+
+def pcd_version(cfg):
+ version = get_version(cfg)
+ if version is None:
+ return []
+ return [ '--pcd', pcd_string('PcdFirmwareVersionString', version) ]
+
+def pcd_release_date():
+ if release_date is None:
+ return []
+ return [ '--pcd', pcd_string('PcdFirmwareReleaseDateString', release_date) ]
+
+def build_message(line, line2 = None):
+ if os.environ.get('TERM') in [ 'xterm', 'xterm-256color' ]:
+ # set xterm title
+ start = '\x1b]2;'
+ end = '\x07'
+ print(f'{start}{rebase_prefix}{line}{end}', end = '')
+
+ print('')
+ print('###')
+ print(f'### {rebase_prefix}{line}')
+ if line2:
+ print(f'### {line2}')
+ print('###', flush = True)
+
+def build_run(cmdline, name, section, silent = False):
+ print(cmdline, flush = True)
+ if silent:
+ print('### building in silent mode ...', flush = True)
+ result = subprocess.run(cmdline, check = False,
+ stdout = subprocess.PIPE,
+ stderr = subprocess.STDOUT)
+
+ logfile = f'{section}.log'
+ print(f'### writing log to {logfile} ...')
+ with open(logfile, 'wb') as f:
+ f.write(result.stdout)
+
+ if result.returncode:
+ print('### BUILD FAILURE')
+ print('### output')
+ print(result.stdout.decode())
+ print(f'### exit code: {result.returncode}')
+ else:
+ print('### OK')
+ else:
+ result = subprocess.run(cmdline, check = False)
+ if result.returncode:
+ print(f'ERROR: {cmdline[0]} exited with {result.returncode}'
+ f' while building {name}')
+ sys.exit(result.returncode)
+
+def build_copy(plat, tgt, dstdir, copy):
+ srcdir = f'Build/{plat}/{tgt}_GCC5'
+ names = copy.split()
+ srcfile = names[0]
+ if len(names) > 1:
+ dstfile = names[1]
+ else:
+ dstfile = os.path.basename(srcfile)
+ print(f'# copy: {srcdir} / {srcfile} => {dstdir} / {dstfile}')
+
+ src = srcdir + '/' + srcfile
+ dst = dstdir + '/' + dstfile
+ os.makedirs(os.path.dirname(dst), exist_ok = True)
+ shutil.copy(src, dst)
+
+def pad_file(dstdir, pad):
+ args = pad.split()
+ if len(args) < 2:
+ raise RuntimeError(f'missing arg for pad ({args})')
+ name = args[0]
+ size = args[1]
+ cmdline = [
+ 'truncate',
+ '--size', size,
+ dstdir + '/' + name,
+ ]
+ print(f'# padding: {dstdir} / {name} => {size}')
+ subprocess.run(cmdline, check = True)
+
+# pylint: disable=too-many-branches
+def build_one(cfg, build, jobs = None, silent = False):
+ cmdline = [ 'build' ]
+ cmdline += [ '-t', 'GCC5' ]
+ cmdline += [ '-p', cfg[build]['conf'] ]
+
+ if (cfg[build]['conf'].startswith('OvmfPkg/') or
+ cfg[build]['conf'].startswith('ArmVirtPkg/')):
+ cmdline += pcd_version(cfg)
+ cmdline += pcd_release_date()
+
+ if jobs:
+ cmdline += [ '-n', jobs ]
+ for arch in cfg[build]['arch'].split():
+ cmdline += [ '-a', arch ]
+ if 'opts' in cfg[build]:
+ for name in cfg[build]['opts'].split():
+ section = 'opts.' + name
+ for opt in cfg[section]:
+ cmdline += [ '-D', opt + '=' + cfg[section][opt] ]
+ if 'pcds' in cfg[build]:
+ for name in cfg[build]['pcds'].split():
+ section = 'pcds.' + name
+ for pcd in cfg[section]:
+ cmdline += [ '--pcd', pcd + '=' + cfg[section][pcd] ]
+ if 'tgts' in cfg[build]:
+ tgts = cfg[build]['tgts'].split()
+ else:
+ tgts = [ 'DEBUG' ]
+ for tgt in tgts:
+ desc = None
+ if 'desc' in cfg[build]:
+ desc = cfg[build]['desc']
+ build_message(f'building: {cfg[build]["conf"]} ({cfg[build]["arch"]}, {tgt})',
+ f'description: {desc}')
+ build_run(cmdline + [ '-b', tgt ],
+ cfg[build]['conf'],
+ build + '.' + tgt,
+ silent)
+
+ if 'plat' in cfg[build]:
+ # copy files
+ for cpy in cfg[build]:
+ if not cpy.startswith('cpy'):
+ continue
+ build_copy(cfg[build]['plat'],
+ tgt,
+ cfg[build]['dest'],
+ cfg[build][cpy])
+ # pad builds
+ for pad in cfg[build]:
+ if not pad.startswith('pad'):
+ continue
+ pad_file(cfg[build]['dest'],
+ cfg[build][pad])
+
+def build_basetools(silent = False):
+ build_message('building: BaseTools')
+ basedir = os.environ['EDK_TOOLS_PATH']
+ cmdline = [ 'make', '-C', basedir ]
+ build_run(cmdline, 'BaseTools', 'build.basetools', silent)
+
+def binary_exists(name):
+ for pdir in os.environ['PATH'].split(':'):
+ if os.path.exists(pdir + '/' + name):
+ return True
+ return False
+
+def prepare_env(cfg):
+ """ mimic Conf/BuildEnv.sh """
+ workspace = os.getcwd()
+ packages = [ workspace, ]
+ path = os.environ['PATH'].split(':')
+ dirs = [
+ 'BaseTools/Bin/Linux-x86_64',
+ 'BaseTools/BinWrappers/PosixLike'
+ ]
+
+ if cfg.has_option('global', 'pkgs'):
+ for pkgdir in cfg['global']['pkgs'].split():
+ packages.append(os.path.abspath(pkgdir))
+ coredir = get_coredir(cfg)
+ if coredir != workspace:
+ packages.append(coredir)
+
+ # add basetools to path
+ for pdir in dirs:
+ p = coredir + '/' + pdir
+ if not os.path.exists(p):
+ continue
+ if p in path:
+ continue
+ path.insert(0, p)
+
+ # run edksetup if needed
+ toolsdef = coredir + '/Conf/tools_def.txt'
+ if not os.path.exists(toolsdef):
+ os.makedirs(os.path.dirname(toolsdef), exist_ok = True)
+ build_message('running BaseTools/BuildEnv')
+ cmdline = [ 'bash', 'BaseTools/BuildEnv' ]
+ subprocess.run(cmdline, cwd = coredir, check = True)
+
+ # set variables
+ os.environ['PATH'] = ':'.join(path)
+ os.environ['PACKAGES_PATH'] = ':'.join(packages)
+ os.environ['WORKSPACE'] = workspace
+ os.environ['EDK_TOOLS_PATH'] = coredir + '/BaseTools'
+ os.environ['CONF_PATH'] = coredir + '/Conf'
+ os.environ['PYTHON_COMMAND'] = '/usr/bin/python3'
+ os.environ['PYTHONHASHSEED'] = '1'
+
+ # for cross builds
+ if binary_exists('arm-linux-gnu-gcc'):
+ os.environ['GCC5_ARM_PREFIX'] = 'arm-linux-gnu-'
+ if binary_exists('loongarch64-linux-gnu-gcc'):
+ os.environ['GCC5_LOONGARCH64_PREFIX'] = 'loongarch64-linux-gnu-'
+
+ hostarch = os.uname().machine
+ if binary_exists('aarch64-linux-gnu-gcc') and hostarch != 'aarch64':
+ os.environ['GCC5_AARCH64_PREFIX'] = 'aarch64-linux-gnu-'
+ if binary_exists('riscv64-linux-gnu-gcc') and hostarch != 'riscv64':
+ os.environ['GCC5_RISCV64_PREFIX'] = 'riscv64-linux-gnu-'
+ if binary_exists('x86_64-linux-gnu-gcc') and hostarch != 'x86_64':
+ os.environ['GCC5_IA32_PREFIX'] = 'x86_64-linux-gnu-'
+ os.environ['GCC5_X64_PREFIX'] = 'x86_64-linux-gnu-'
+ os.environ['GCC5_BIN'] = 'x86_64-linux-gnu-'
+
+def build_list(cfg):
+ for build in cfg.sections():
+ if not build.startswith('build.'):
+ continue
+ name = build[len('build.'):]
+ desc = 'no description'
+ if 'desc' in cfg[build]:
+ desc = cfg[build]['desc']
+ print(f'# {name:20s} - {desc}')
+
+def main():
+ parser = argparse.ArgumentParser(prog = 'edk2-build',
+ description = 'edk2 build helper script')
+ parser.add_argument('-c', '--config', dest = 'configfile',
+ type = str, default = '.edk2.builds', metavar = 'FILE',
+ help = 'read configuration from FILE (default: .edk2.builds)')
+ parser.add_argument('-C', '--directory', dest = 'directory', type = str,
+ help = 'change to DIR before building', metavar = 'DIR')
+ parser.add_argument('-j', '--jobs', dest = 'jobs', type = str,
+ help = 'allow up to JOBS parallel build jobs',
+ metavar = 'JOBS')
+ parser.add_argument('-m', '--match', dest = 'match', type = str,
+ help = 'only run builds matching INCLUDE (substring)',
+ metavar = 'INCLUDE')
+ parser.add_argument('-x', '--exclude', dest = 'exclude', type = str,
+ help = 'skip builds matching EXCLUDE (substring)',
+ metavar = 'EXCLUDE')
+ parser.add_argument('-l', '--list', dest = 'list',
+ action = 'store_true', default = False,
+ help = 'list build configs available')
+ parser.add_argument('--silent', dest = 'silent',
+ action = 'store_true', default = False,
+ help = 'write build output to logfiles, '
+ 'write to console only on errors')
+ parser.add_argument('--core', dest = 'core', type = str, metavar = 'DIR',
+ help = 'location of the core edk2 repository '
+ '(i.e. where BuildTools are located)')
+ parser.add_argument('--pkg', '--package', dest = 'pkgs',
+ type = str, action = 'append', metavar = 'DIR',
+ help = 'location(s) of additional packages '
+ '(can be specified multiple times)')
+ parser.add_argument('--version-override', dest = 'version_override',
+ type = str, metavar = 'VERSION',
+ help = 'set firmware build version')
+ parser.add_argument('--release-date', dest = 'release_date',
+ type = str, metavar = 'DATE',
+ help = 'set firmware build release date (in MM/DD/YYYY format)')
+ options = parser.parse_args()
+
+ if options.directory:
+ os.chdir(options.directory)
+
+ if not os.path.exists(options.configfile):
+ print(f'config file "{options.configfile}" not found')
+ return 1
+
+ cfg = configparser.ConfigParser()
+ cfg.optionxform = str
+ cfg.read(options.configfile)
+
+ if options.list:
+ build_list(cfg)
+ return
+
+ if not cfg.has_section('global'):
+ cfg.add_section('global')
+ if options.core:
+ cfg.set('global', 'core', options.core)
+ if options.pkgs:
+ cfg.set('global', 'pkgs', ' '.join(options.pkgs))
+
+ global version_override
+ global release_date
+ check_rebase()
+ if options.version_override:
+ version_override = options.version_override
+ if options.release_date:
+ release_date = options.release_date
+
+ prepare_env(cfg)
+ build_basetools(options.silent)
+ for build in cfg.sections():
+ if not build.startswith('build.'):
+ continue
+ if options.match and options.match not in build:
+ print(f'# skipping "{build}" (not matching "{options.match}")')
+ continue
+ if options.exclude and options.exclude in build:
+ print(f'# skipping "{build}" (matching "{options.exclude}")')
+ continue
+ build_one(cfg, build, options.jobs, options.silent)
+
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/roms/edk2-build.sh b/roms/edk2-build.sh
deleted file mode 100755
index ea79dc2..0000000
--- a/roms/edk2-build.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-# Wrapper shell script for building a virtual platform firmware in edk2.
-#
-# Copyright (C) 2019 Red Hat, Inc.
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License that accompanies this
-# distribution. The full text of the license may be found at
-# <http://opensource.org/licenses/bsd-license.php>.
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT
-# WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-set -e -u -C
-
-# Save the command line arguments. We need to reset $# to 0 before sourcing
-# "edksetup.sh", as it will inherit $@.
-emulation_target=$1
-shift
-num_args=0
-args=()
-for arg in "$@"; do
- args[num_args++]="$arg"
-done
-shift $num_args
-
-cd edk2
-
-export PYTHON_COMMAND=${EDK2_PYTHON_COMMAND:-python3}
-
-# Source "edksetup.sh" carefully.
-set +e +u +C
-source ./edksetup.sh
-ret=$?
-set -e -u -C
-if [ $ret -ne 0 ]; then
- exit $ret
-fi
-
-# Fetch some option arguments, and set the cross-compilation environment (if
-# any), for the edk2 "build" utility.
-source ../edk2-funcs.sh
-edk2_toolchain=$(qemu_edk2_get_toolchain "$emulation_target")
-MAKEFLAGS=$(qemu_edk2_quirk_tianocore_1607 "$MAKEFLAGS")
-edk2_thread_count=$(qemu_edk2_get_thread_count "$MAKEFLAGS")
-qemu_edk2_set_cross_env "$emulation_target"
-
-# Build the platform firmware.
-build \
- --cmd-len=65536 \
- -n "$edk2_thread_count" \
- --buildtarget=RELEASE \
- --tagname="$edk2_toolchain" \
- "${args[@]}"
diff --git a/roms/edk2-funcs.sh b/roms/edk2-funcs.sh
deleted file mode 100644
index cd6e4f2..0000000
--- a/roms/edk2-funcs.sh
+++ /dev/null
@@ -1,273 +0,0 @@
-# Shell script that defines functions for determining some environmental
-# characteristics for the edk2 "build" utility.
-#
-# This script is meant to be sourced, in a bash environment.
-#
-# Copyright (C) 2019 Red Hat, Inc.
-#
-# This program and the accompanying materials are licensed and made available
-# under the terms and conditions of the BSD License that accompanies this
-# distribution. The full text of the license may be found at
-# <http://opensource.org/licenses/bsd-license.php>.
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT
-# WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-
-
-# Verify whether the QEMU system emulation target is supported by the UEFI spec
-# and edk2. Print a message to the standard error, and return with nonzero
-# status, if verification fails.
-#
-# Parameters:
-# $1: QEMU system emulation target
-qemu_edk2_verify_arch()
-{
- local emulation_target="$1"
- local program_name=$(basename -- "$0")
-
- case "$emulation_target" in
- (arm|aarch64|i386|x86_64)
- ;;
- (*)
- printf '%s: unknown/unsupported QEMU system emulation target "%s"\n' \
- "$program_name" "$emulation_target" >&2
- return 1
- ;;
- esac
-}
-
-
-# Translate the QEMU system emulation target to the edk2 architecture
-# identifier. Print the result to the standard output.
-#
-# Parameters:
-# $1: QEMU system emulation target
-qemu_edk2_get_arch()
-{
- local emulation_target="$1"
-
- if ! qemu_edk2_verify_arch "$emulation_target"; then
- return 1
- fi
-
- case "$emulation_target" in
- (arm)
- printf 'ARM\n'
- ;;
- (aarch64)
- printf 'AARCH64\n'
- ;;
- (i386)
- printf 'IA32\n'
- ;;
- (x86_64)
- printf 'X64\n'
- ;;
- esac
-}
-
-
-# Translate the QEMU system emulation target to the gcc cross-compilation
-# architecture identifier. Print the result to the standard output.
-#
-# Parameters:
-# $1: QEMU system emulation target
-qemu_edk2_get_gcc_arch()
-{
- local emulation_target="$1"
-
- if ! qemu_edk2_verify_arch "$emulation_target"; then
- return 1
- fi
-
- case "$emulation_target" in
- (arm|aarch64|x86_64)
- printf '%s\n' "$emulation_target"
- ;;
- (i386)
- printf 'i686\n'
- ;;
- esac
-}
-
-
-# Determine the gcc cross-compiler prefix (if any) for use with the edk2
-# toolchain. Print the result to the standard output.
-#
-# Parameters:
-# $1: QEMU system emulation target
-qemu_edk2_get_cross_prefix()
-{
- local emulation_target="$1"
- local gcc_arch
- local host_arch
-
- if ! gcc_arch=$(qemu_edk2_get_gcc_arch "$emulation_target"); then
- return 1
- fi
-
- host_arch=$(uname -m)
-
- if [ "$gcc_arch" == "$host_arch" ] ||
- ( [ "$gcc_arch" == i686 ] && [ "$host_arch" == x86_64 ] ); then
- # no cross-compiler needed
- :
- elif ( [ -e /etc/debian_version ] && [ "$gcc_arch" == arm ] ); then
- # force soft-float cross-compiler on Debian
- printf 'arm-linux-gnueabi-'
- else
- printf '%s-linux-gnu-\n' "$gcc_arch"
- fi
-}
-
-
-# Determine the edk2 toolchain tag for the QEMU system emulation target. Print
-# the result to the standard output. Print a message to the standard error, and
-# return with nonzero status, if the (conditional) gcc version check fails.
-#
-# Parameters:
-# $1: QEMU system emulation target
-qemu_edk2_get_toolchain()
-{
- local emulation_target="$1"
- local program_name=$(basename -- "$0")
- local cross_prefix
- local gcc_version
-
- if ! qemu_edk2_verify_arch "$emulation_target"; then
- return 1
- fi
-
- case "$emulation_target" in
- (arm|aarch64)
- printf 'GCC5\n'
- ;;
-
- (i386|x86_64)
- if ! cross_prefix=$(qemu_edk2_get_cross_prefix "$emulation_target"); then
- return 1
- fi
-
- gcc_version=$("${cross_prefix}gcc" -v 2>&1 | tail -1 | awk '{print $3}')
- # Run "git-blame" on "OvmfPkg/build.sh" in edk2 for more information on
- # the mapping below.
- case "$gcc_version" in
- ([1-3].*|4.[0-7].*)
- printf '%s: unsupported gcc version "%s"\n' \
- "$program_name" "$gcc_version" >&2
- return 1
- ;;
- (4.8.*)
- printf 'GCC48\n'
- ;;
- (4.9.*|6.[0-2].*)
- printf 'GCC49\n'
- ;;
- (*)
- printf 'GCC5\n'
- ;;
- esac
- ;;
- esac
-}
-
-
-# Determine the name of the environment variable that exposes the
-# cross-compiler prefix to the edk2 "build" utility. Print the result to the
-# standard output.
-#
-# Parameters:
-# $1: QEMU system emulation target
-qemu_edk2_get_cross_prefix_var()
-{
- local emulation_target="$1"
- local edk2_toolchain
- local edk2_arch
-
- if ! edk2_toolchain=$(qemu_edk2_get_toolchain "$emulation_target"); then
- return 1
- fi
-
- case "$emulation_target" in
- (arm|aarch64)
- if ! edk2_arch=$(qemu_edk2_get_arch "$emulation_target"); then
- return 1
- fi
- printf '%s_%s_PREFIX\n' "$edk2_toolchain" "$edk2_arch"
- ;;
- (i386|x86_64)
- printf '%s_BIN\n' "$edk2_toolchain"
- ;;
- esac
-}
-
-
-# Set and export the environment variable(s) necessary for cross-compilation,
-# whenever needed by the edk2 "build" utility.
-#
-# Parameters:
-# $1: QEMU system emulation target
-qemu_edk2_set_cross_env()
-{
- local emulation_target="$1"
- local cross_prefix
- local cross_prefix_var
-
- if ! cross_prefix=$(qemu_edk2_get_cross_prefix "$emulation_target"); then
- return 1
- fi
-
- if [ -z "$cross_prefix" ]; then
- # Nothing to do.
- return 0
- fi
-
- if ! cross_prefix_var=$(qemu_edk2_get_cross_prefix_var \
- "$emulation_target"); then
- return 1
- fi
-
- eval "export $cross_prefix_var=\$cross_prefix"
-}
-
-
-# Determine the "-n" option argument (that is, the number of modules to build
-# in parallel) for the edk2 "build" utility. Print the result to the standard
-# output.
-#
-# Parameters:
-# $1: the value of the MAKEFLAGS variable
-qemu_edk2_get_thread_count()
-{
- local makeflags="$1"
-
- if [[ "$makeflags" == *--jobserver-auth=* ]] ||
- [[ "$makeflags" == *--jobserver-fds=* ]]; then
- # If there is a job server, allow the edk2 "build" utility to parallelize
- # as many module builds as there are logical CPUs in the system. The "make"
- # instances forked by "build" are supposed to limit themselves through the
- # job server. The zero value below causes the edk2 "build" utility to fetch
- # the logical CPU count with Python's multiprocessing.cpu_count() method.
- printf '0\n'
- else
- # Build a single module at a time.
- printf '1\n'
- fi
-}
-
-
-# Work around <https://bugzilla.tianocore.org/show_bug.cgi?id=1607> by
-# filtering jobserver-related flags out of MAKEFLAGS. Print the result to the
-# standard output.
-#
-# Parameters:
-# $1: the value of the MAKEFLAGS variable
-qemu_edk2_quirk_tianocore_1607()
-{
- local makeflags="$1"
-
- printf %s "$makeflags" \
- | LC_ALL=C sed --regexp-extended \
- --expression='s/--jobserver-(auth|fds)=[0-9]+,[0-9]+//' \
- --expression='s/-j([0-9]+)?//'
-}
diff --git a/roms/openbios b/roms/openbios
index 0e0afae..af97fd7 160000
--- a/roms/openbios
+++ b/roms/openbios
@@ -1 +1 @@
-Subproject commit 0e0afae6579c1efe9f0d85505b75ffe989554133
+Subproject commit af97fd7af5e7c18f591a7b987291d3db4ffb28b5
diff --git a/roms/seabios b/roms/seabios
index 3208b09..ea1b7a0 160000
--- a/roms/seabios
+++ b/roms/seabios
@@ -1 +1 @@
-Subproject commit 3208b098f51a9ef96d0dfa71d5ec3a3eaec88f0a
+Subproject commit ea1b7a0733906b8425d948ae94fba63c32b1d425
diff --git a/scripts/ci/org.centos/stream/8/build-environment.yml b/scripts/ci/org.centos/stream/8/build-environment.yml
index 0d094d7..1ead77e 100644
--- a/scripts/ci/org.centos/stream/8/build-environment.yml
+++ b/scripts/ci/org.centos/stream/8/build-environment.yml
@@ -55,6 +55,7 @@
- librados-devel
- librbd-devel
- libseccomp-devel
+ - libslirp-devel
- libssh-devel
- libxkbcommon-devel
- lzo-devel
diff --git a/scripts/ci/org.centos/stream/8/x86_64/test-avocado b/scripts/ci/org.centos/stream/8/x86_64/test-avocado
index f403e4e..d2c0e5f 100755
--- a/scripts/ci/org.centos/stream/8/x86_64/test-avocado
+++ b/scripts/ci/org.centos/stream/8/x86_64/test-avocado
@@ -30,6 +30,7 @@
tests/avocado/cpu_queries.py:QueryCPUModelExpansion.test \
tests/avocado/empty_cpu_model.py:EmptyCPUModel.test \
tests/avocado/hotplug_cpu.py:HotPlugCPU.test \
+ tests/avocado/igb.py:IGB.test \
tests/avocado/info_usernet.py:InfoUsernet.test_hostfwd \
tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu \
tests/avocado/intel_iommu.py:IntelIOMMU.test_intel_iommu_pt \
diff --git a/scripts/ci/setup/gitlab-runner.yml b/scripts/ci/setup/gitlab-runner.yml
index 95d4199..1a1b270 100644
--- a/scripts/ci/setup/gitlab-runner.yml
+++ b/scripts/ci/setup/gitlab-runner.yml
@@ -48,13 +48,29 @@
- debug:
msg: gitlab-runner arch is {{ gitlab_runner_arch }}
- - name: Download the matching gitlab-runner
+ - name: Download the matching gitlab-runner (DEB)
get_url:
dest: "/root/"
url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/deb/gitlab-runner_{{ gitlab_runner_arch }}.deb"
+ when:
+ - ansible_facts['distribution'] == 'Ubuntu'
- - name: Install gitlab-runner via package manager
+ - name: Download the matching gitlab-runner (RPM)
+ get_url:
+ dest: "/root/"
+ url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/rpm/gitlab-runner_{{ gitlab_runner_arch }}.rpm"
+ when:
+ - ansible_facts['distribution'] == 'CentOS'
+
+ - name: Install gitlab-runner via package manager (DEB)
apt: deb="/root/gitlab-runner_{{ gitlab_runner_arch }}.deb"
+ when:
+ - ansible_facts['distribution'] == 'Ubuntu'
+
+ - name: Install gitlab-runner via package manager (RPM)
+ yum: name="/root/gitlab-runner_{{ gitlab_runner_arch }}.rpm"
+ when:
+ - ansible_facts['distribution'] == 'CentOS'
- name: Register the gitlab-runner
command: "/usr/bin/gitlab-runner register --non-interactive --url {{ gitlab_runner_server_url }} --registration-token {{ gitlab_runner_registration_token }} --executor shell --tag-list {{ ansible_facts[\"architecture\"] }},{{ ansible_facts[\"distribution\"]|lower }}_{{ ansible_facts[\"distribution_version\"] }} --description '{{ ansible_facts[\"distribution\"] }} {{ ansible_facts[\"distribution_version\"] }} {{ ansible_facts[\"architecture\"] }} ({{ ansible_facts[\"os_family\"] }})'"
diff --git a/scripts/probe-gdb-support.py b/scripts/probe-gdb-support.py
new file mode 100755
index 0000000..5755255
--- /dev/null
+++ b/scripts/probe-gdb-support.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+# coding: utf-8
+#
+# Probe gdb for supported architectures.
+#
+# This is required to support testing of the gdbstub as it's hard to
+# handle errors gracefully during the test. Instead, this script, when
+# passed a GDB binary, will probe its architecture support and return a
+# string of supported arches, stripped of guff.
+#
+# Copyright 2023 Linaro Ltd
+#
+# Author: Alex Bennée <alex.bennee@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import argparse
+import re
+from subprocess import check_output, STDOUT
+
+# mappings from gdb arch to QEMU target
+mappings = {
+ "alpha" : "alpha",
+ "aarch64" : ["aarch64", "aarch64_be"],
+ "armv7": "arm",
+ "armv8-a" : ["aarch64", "aarch64_be"],
+ "avr" : "avr",
+ "cris" : "cris",
+ # no hexagon in upstream gdb
+ "hppa1.0" : "hppa",
+ "i386" : "i386",
+ "i386:x86-64" : "x86_64",
+ "Loongarch64" : "loongarch64",
+ "m68k" : "m68k",
+ "MicroBlaze" : "microblaze",
+ "mips:isa64" : ["mips64", "mips64el"],
+ "nios2" : "nios2",
+ "or1k" : "or1k",
+ "powerpc:common" : "ppc",
+ "powerpc:common64" : ["ppc64", "ppc64le"],
+ "riscv:rv32" : "riscv32",
+ "riscv:rv64" : "riscv64",
+ "s390:64-bit" : "s390x",
+ "sh4" : ["sh4", "sh4eb"],
+ "sparc": "sparc",
+ "sparc:v8plus": "sparc32plus",
+ "sparc:v9a" : "sparc64",
+ # no tricore in upstream gdb
+ "xtensa" : ["xtensa", "xtensaeb"]
+}
+
+def do_probe(gdb):
+ gdb_out = check_output([gdb,
+ "-ex", "set architecture",
+ "-ex", "quit"], stderr=STDOUT)
+
+ m = re.search(r"Valid arguments are (.*)",
+ gdb_out.decode("utf-8"))
+
+ valid_arches = set()
+
+ if m.group(1):
+ for arch in m.group(1).split(", "):
+ if arch in mappings:
+ mapping = mappings[arch]
+ if isinstance(mapping, str):
+ valid_arches.add(mapping)
+ else:
+ for entry in mapping:
+ valid_arches.add(entry)
+
+ return valid_arches
+
+def main() -> None:
+ parser = argparse.ArgumentParser(description='Probe GDB Architectures')
+ parser.add_argument('gdb', help='Path to GDB binary.')
+
+ args = parser.parse_args()
+
+ supported = do_probe(args.gdb)
+
+ print(" ".join(supported))
+
+if __name__ == '__main__':
+ main()
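The probe in scripts/probe-gdb-support.py above boils down to parsing gdb's "Valid arguments are ..." reply to "set architecture" and folding it through the mapping table. Below is a minimal sketch of that parsing step, run against a canned reply so it needs no gdb installed; the sample reply and the trimmed mapping table are illustrative only.

import re

# trimmed-down copy of the gdb-arch -> QEMU-target table used above
mappings = {
    "i386": "i386",
    "i386:x86-64": "x86_64",
    "aarch64": ["aarch64", "aarch64_be"],
}

# illustrative reply; the real script captures this output from
# "gdb -ex 'set architecture' -ex quit"
sample = "Requires an argument. Valid arguments are i386, i386:x86-64, aarch64, auto."

m = re.search(r"Valid arguments are (.*)", sample)
arches = set()
for arch in m.group(1).split(", "):
    mapping = mappings.get(arch)
    if isinstance(mapping, str):
        arches.add(mapping)
    elif mapping:              # list of QEMU targets for one gdb arch
        arches.update(mapping)

print(" ".join(sorted(arches)))   # -> aarch64 aarch64_be i386 x86_64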
diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c
index 62d8bae..564fe17 100644
--- a/semihosting/arm-compat-semi.c
+++ b/semihosting/arm-compat-semi.c
@@ -34,6 +34,7 @@
#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
#include "semihosting/semihost.h"
#include "semihosting/console.h"
#include "semihosting/common-semi.h"
diff --git a/semihosting/guestfd.c b/semihosting/guestfd.c
index b05c52f..acb86b5 100644
--- a/semihosting/guestfd.c
+++ b/semihosting/guestfd.c
@@ -9,7 +9,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
#include "semihosting/semihost.h"
#include "semihosting/guestfd.h"
#ifdef CONFIG_USER_ONLY
diff --git a/semihosting/syscalls.c b/semihosting/syscalls.c
index e89992c..68899eb 100644
--- a/semihosting/syscalls.c
+++ b/semihosting/syscalls.c
@@ -7,7 +7,8 @@
*/
#include "qemu/osdep.h"
-#include "exec/gdbstub.h"
+#include "cpu.h"
+#include "gdbstub/syscalls.h"
#include "semihosting/guestfd.h"
#include "semihosting/syscalls.h"
#include "semihosting/console.h"
@@ -138,46 +139,48 @@
gdb_open_complete = complete;
gdb_do_syscall(gdb_open_cb, "open,%s,%x,%x",
- fname, len, (target_ulong)gdb_flags, (target_ulong)mode);
+ (uint64_t)fname, (uint32_t)len,
+ (uint32_t)gdb_flags, (uint32_t)mode);
}
static void gdb_close(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf)
{
- gdb_do_syscall(complete, "close,%x", (target_ulong)gf->hostfd);
+ gdb_do_syscall(complete, "close,%x", (uint32_t)gf->hostfd);
}
static void gdb_read(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong buf, target_ulong len)
{
- gdb_do_syscall(complete, "read,%x,%x,%x",
- (target_ulong)gf->hostfd, buf, len);
+ gdb_do_syscall(complete, "read,%x,%lx,%lx",
+ (uint32_t)gf->hostfd, (uint64_t)buf, (uint64_t)len);
}
static void gdb_write(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong buf, target_ulong len)
{
- gdb_do_syscall(complete, "write,%x,%x,%x",
- (target_ulong)gf->hostfd, buf, len);
+ gdb_do_syscall(complete, "write,%x,%lx,%lx",
+ (uint32_t)gf->hostfd, (uint64_t)buf, (uint64_t)len);
}
static void gdb_lseek(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, int64_t off, int gdb_whence)
{
gdb_do_syscall(complete, "lseek,%x,%lx,%x",
- (target_ulong)gf->hostfd, off, (target_ulong)gdb_whence);
+ (uint32_t)gf->hostfd, off, (uint32_t)gdb_whence);
}
static void gdb_isatty(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf)
{
- gdb_do_syscall(complete, "isatty,%x", (target_ulong)gf->hostfd);
+ gdb_do_syscall(complete, "isatty,%x", (uint32_t)gf->hostfd);
}
static void gdb_fstat(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong addr)
{
- gdb_do_syscall(complete, "fstat,%x,%x", (target_ulong)gf->hostfd, addr);
+ gdb_do_syscall(complete, "fstat,%x,%lx",
+ (uint32_t)gf->hostfd, (uint64_t)addr);
}
static void gdb_stat(CPUState *cs, gdb_syscall_complete_cb complete,
@@ -190,7 +193,8 @@
return;
}
- gdb_do_syscall(complete, "stat,%s,%x", fname, len, addr);
+ gdb_do_syscall(complete, "stat,%s,%lx",
+ (uint64_t)fname, (uint32_t)len, (uint64_t)addr);
}
static void gdb_remove(CPUState *cs, gdb_syscall_complete_cb complete,
@@ -202,7 +206,7 @@
return;
}
- gdb_do_syscall(complete, "unlink,%s", fname, len);
+ gdb_do_syscall(complete, "unlink,%s", (uint64_t)fname, (uint32_t)len);
}
static void gdb_rename(CPUState *cs, gdb_syscall_complete_cb complete,
@@ -222,7 +226,9 @@
return;
}
- gdb_do_syscall(complete, "rename,%s,%s", oname, olen, nname, nlen);
+ gdb_do_syscall(complete, "rename,%s,%s",
+ (uint64_t)oname, (uint32_t)olen,
+ (uint64_t)nname, (uint32_t)nlen);
}
static void gdb_system(CPUState *cs, gdb_syscall_complete_cb complete,
@@ -234,13 +240,14 @@
return;
}
- gdb_do_syscall(complete, "system,%s", cmd, len);
+ gdb_do_syscall(complete, "system,%s", (uint64_t)cmd, (uint32_t)len);
}
static void gdb_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete,
target_ulong tv_addr, target_ulong tz_addr)
{
- gdb_do_syscall(complete, "gettimeofday,%x,%x", tv_addr, tz_addr);
+ gdb_do_syscall(complete, "gettimeofday,%lx,%lx",
+ (uint64_t)tv_addr, (uint64_t)tz_addr);
}
/*
diff --git a/softmmu/globals.c b/softmmu/globals.c
index 0a44056..39678aa 100644
--- a/softmmu/globals.c
+++ b/softmmu/globals.c
@@ -65,3 +65,7 @@
uint32_t xen_domid;
enum xen_mode xen_mode = XEN_DISABLED;
bool xen_domid_restrict;
+struct evtchn_backend_ops *xen_evtchn_ops;
+struct gnttab_backend_ops *xen_gnttab_ops;
+struct foreignmem_backend_ops *xen_foreignmem_ops;
+struct xenstore_backend_ops *xen_xenstore_ops;
diff --git a/softmmu/memory.c b/softmmu/memory.c
index 4699ba5..5305aca 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -1996,17 +1996,17 @@
}
}
-void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *n)
+void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
{
IOMMUTLBEvent event;
event.type = IOMMU_NOTIFIER_UNMAP;
event.entry.target_as = &address_space_memory;
- event.entry.iova = n->start;
+ event.entry.iova = notifier->start;
event.entry.perm = IOMMU_NONE;
- event.entry.addr_mask = n->end - n->start;
+ event.entry.addr_mask = notifier->end - notifier->start;
- memory_region_notify_iommu_one(n, &event);
+ memory_region_notify_iommu_one(notifier, &event);
}
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 47143ed..e35061b 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -1126,15 +1126,21 @@
GString *buf = g_string_new("");
RCU_READ_LOCK_GUARD();
- g_string_append_printf(buf, "%24s %8s %18s %18s %18s\n",
- "Block Name", "PSize", "Offset", "Used", "Total");
+ g_string_append_printf(buf, "%24s %8s %18s %18s %18s %18s %3s\n",
+ "Block Name", "PSize", "Offset", "Used", "Total",
+ "HVA", "RO");
+
RAMBLOCK_FOREACH(block) {
psize = size_to_str(block->page_size);
g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
- " 0x%016" PRIx64 "\n", block->idstr, psize,
+ " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n",
+ block->idstr, psize,
(uint64_t)block->offset,
(uint64_t)block->used_length,
- (uint64_t)block->max_length);
+ (uint64_t)block->max_length,
+ (uint64_t)(uintptr_t)block->host,
+ block->mr->readonly ? "ro" : "rw");
+
g_free(psize);
}
@@ -2927,6 +2933,8 @@
qemu_mutex_lock(&map_client_list_lock);
client->bh = bh;
QLIST_INSERT_HEAD(&map_client_list, client, link);
+ /* Write map_client_list before reading in_use. */
+ smp_mb();
if (!qatomic_read(&bounce.in_use)) {
cpu_notify_map_clients_locked();
}
@@ -3116,6 +3124,7 @@
qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
memory_region_unref(bounce.mr);
+ /* Clear in_use before reading map_client_list. */
qatomic_mb_set(&bounce.in_use, false);
cpu_notify_map_clients();
}
@@ -3166,7 +3175,7 @@
* cache->xlat and the end of the section.
*/
diff = int128_sub(cache->mrs.size,
- int128_make64(cache->xlat - cache->mrs.offset_within_region));
+ int128_make64(cache->xlat - cache->mrs.offset_within_region));
l = int128_get64(int128_min(diff, int128_make64(l)));
mr = cache->mrs.mr;
diff --git a/softmmu/runstate.c b/softmmu/runstate.c
index 9b3611d..d1e0458 100644
--- a/softmmu/runstate.c
+++ b/softmmu/runstate.c
@@ -30,7 +30,7 @@
#include "crypto/cipher.h"
#include "crypto/init.h"
#include "exec/cpu-common.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
#include "hw/boards.h"
#include "migration/misc.h"
#include "migration/postcopy-ram.h"
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 3340f63..ea20b23 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -2465,10 +2465,11 @@
pid_file_realpath = g_malloc0(PATH_MAX);
if (!realpath(pid_file, pid_file_realpath)) {
- error_report("cannot resolve PID file path: %s: %s",
- pid_file, strerror(errno));
- unlink(pid_file);
- exit(1);
+ if (errno != ENOENT) {
+ warn_report("not removing PID file on exit: cannot resolve PID "
+ "file path: %s: %s", pid_file, strerror(errno));
+ }
+ return;
}
qemu_unlink_pidfile_notifier = (struct UnlinkPidfileNotifier) {
diff --git a/stats/stats-hmp-cmds.c b/stats/stats-hmp-cmds.c
index 531e35d..1f91bf8 100644
--- a/stats/stats-hmp-cmds.c
+++ b/stats/stats-hmp-cmds.c
@@ -155,6 +155,8 @@
filter->u.vcpu.vcpus = vcpu_list;
break;
}
+ case STATS_TARGET_CRYPTODEV:
+ break;
default:
break;
}
@@ -226,6 +228,9 @@
int cpu_index = monitor_get_cpu_index(mon);
filter = stats_filter(target, names, cpu_index, provider);
break;
+ case STATS_TARGET_CRYPTODEV:
+ filter = stats_filter(target, names, -1, provider);
+ break;
default:
abort();
}
diff --git a/stats/stats-qmp-cmds.c b/stats/stats-qmp-cmds.c
index bc97374..e214b96 100644
--- a/stats/stats-qmp-cmds.c
+++ b/stats/stats-qmp-cmds.c
@@ -64,6 +64,8 @@
targets = filter->u.vcpu.vcpus;
}
break;
+ case STATS_TARGET_CRYPTODEV:
+ break;
default:
abort();
}
diff --git a/stubs/meson.build b/stubs/meson.build
index 7657467..b2b5956 100644
--- a/stubs/meson.build
+++ b/stubs/meson.build
@@ -61,4 +61,5 @@
else
stub_ss.add(files('qdev.c'))
endif
+stub_ss.add(files('semihost-all.c'))
stub_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_false: files('vfio-user-obj.c'))
diff --git a/stubs/semihost-all.c b/stubs/semihost-all.c
new file mode 100644
index 0000000..a2a1fc9
--- /dev/null
+++ b/stubs/semihost-all.c
@@ -0,0 +1,17 @@
+/*
+ * Semihosting Stubs for all targets
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ *
+ * Stubs for all targets that don't actually do semihosting.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "semihosting/semihost.h"
+
+SemihostingTarget semihosting_get_target(void)
+{
+ return SEMIHOSTING_TARGET_AUTO;
+}
diff --git a/stubs/semihost.c b/stubs/semihost.c
index d65c9fd..aad7a70 100644
--- a/stubs/semihost.c
+++ b/stubs/semihost.c
@@ -28,11 +28,6 @@
return false;
}
-SemihostingTarget semihosting_get_target(void)
-{
- return SEMIHOSTING_TARGET_AUTO;
-}
-
/*
* All the rest are empty subs. We could g_assert_not_reached() but
* that adds extra weight to the final binary. Waste not want not.
diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h
index 17cd14e..68c46f7 100644
--- a/target/alpha/cpu-param.h
+++ b/target/alpha/cpu-param.h
@@ -15,6 +15,4 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 44
#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
-#define NB_MMU_MODES 3
-
#endif
diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c
index 7db14f4..0f8fa15 100644
--- a/target/alpha/gdbstub.c
+++ b/target/alpha/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c
index 25f6cb8..c83c92d 100644
--- a/target/alpha/sys_helper.c
+++ b/target/alpha/sys_helper.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "exec/helper-proto.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index b7bde18..b3b35f7 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -45,6 +45,4 @@
bool guarded;
#endif
-#define NB_MMU_MODES 12
-
#endif
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 4066950..0fb07cc 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
+#include "cpregs.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
@@ -1027,6 +1028,72 @@
/* TODO: Add A64FX specific HPC extension registers */
}
+static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
+ { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ATCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 7, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ATCR_EL12", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 5, .crn = 15, .crm = 7, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AVTCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /*
+ * Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU
+ * (and in particular its system registers).
+ */
+ { .name = "CPUCFR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 },
+ { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010 },
+ { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUPMR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 3,
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUPOR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 2,
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUPSELR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ERXPFGCDN_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ERXPFGCTL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ERXPFGF_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+};
+
+static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
+{
+ define_arm_cp_regs(cpu, neoverse_n1_cp_reginfo);
+}
+
static void aarch64_neoverse_n1_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -1094,6 +1161,8 @@
/* From D5.1 AArch64 PMU register summary */
cpu->isar.reset_pmcr_el0 = 0x410c3000;
+
+ define_neoverse_n1_cp_reginfo(cpu);
}
static void aarch64_host_initfn(Object *obj)
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index 3f799f5..3bd86ce 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "internals.h"
#include "cpregs.h"
@@ -519,12 +520,19 @@
aarch64_gdb_set_fpu_reg,
34, "aarch64-fpu.xml", 0);
}
+#if 0
+ /*
+ * GDB versions 9 through 12 have a bug which means they will
+ * crash if they see this XML from QEMU; disable it for the 8.0
+ * release, pending a better solution.
+ */
if (isar_feature_aa64_pauth(&cpu->isar)) {
gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg,
aarch64_gdb_set_pauth_reg,
4, "aarch64-pauth.xml", 0);
}
#endif
+#endif
} else {
if (arm_feature(env, ARM_FEATURE_NEON)) {
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index 3bee892..ec1e07f 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -20,7 +20,7 @@
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/arm/internals.h b/target/arm/internals.h
index b1ef059..673519a 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -193,16 +193,22 @@
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
-enum arm_fprounding {
+typedef enum ARMFPRounding {
FPROUNDING_TIEEVEN,
FPROUNDING_POSINF,
FPROUNDING_NEGINF,
FPROUNDING_ZERO,
FPROUNDING_TIEAWAY,
FPROUNDING_ODD
-};
+} ARMFPRounding;
-int arm_rmode_to_sf(int rmode);
+extern const FloatRoundMode arm_rmode_to_sf_map[6];
+
+static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
+{
+ assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
+ return arm_rmode_to_sf_map[rmode];
+}
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 0972a4b..c3edf16 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index 081fc3f..9758f22 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -9,6 +9,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
+#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 2c2ea45..dff391b 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -6146,13 +6146,12 @@
case 0xb: /* FRINTZ */
case 0xc: /* FRINTA */
{
- TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
+ TCGv_i32 tcg_rmode;
+
fpst = fpstatus_ptr(FPST_FPCR_F16);
-
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(opcode & 7, fpst);
gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
-
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
break;
}
case 0xe: /* FRINTX */
@@ -6202,7 +6201,7 @@
case 0xa: /* FRINTM */
case 0xb: /* FRINTZ */
case 0xc: /* FRINTA */
- rmode = arm_rmode_to_sf(opcode & 7);
+ rmode = opcode & 7;
gen_fpst = gen_helper_rints;
break;
case 0xe: /* FRINTX */
@@ -6212,14 +6211,14 @@
gen_fpst = gen_helper_rints;
break;
case 0x10: /* FRINT32Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint32_s;
break;
case 0x11: /* FRINT32X */
gen_fpst = gen_helper_frint32_s;
break;
case 0x12: /* FRINT64Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint64_s;
break;
case 0x13: /* FRINT64X */
@@ -6231,10 +6230,9 @@
fpst = fpstatus_ptr(FPST_FPCR);
if (rmode >= 0) {
- TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
gen_fpst(tcg_res, tcg_op, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
} else {
gen_fpst(tcg_res, tcg_op, fpst);
}
@@ -6275,7 +6273,7 @@
case 0xa: /* FRINTM */
case 0xb: /* FRINTZ */
case 0xc: /* FRINTA */
- rmode = arm_rmode_to_sf(opcode & 7);
+ rmode = opcode & 7;
gen_fpst = gen_helper_rintd;
break;
case 0xe: /* FRINTX */
@@ -6285,14 +6283,14 @@
gen_fpst = gen_helper_rintd;
break;
case 0x10: /* FRINT32Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint32_d;
break;
case 0x11: /* FRINT32X */
gen_fpst = gen_helper_frint32_d;
break;
case 0x12: /* FRINT64Z */
- rmode = float_round_to_zero;
+ rmode = FPROUNDING_ZERO;
gen_fpst = gen_helper_frint64_d;
break;
case 0x13: /* FRINT64X */
@@ -6304,10 +6302,9 @@
fpst = fpstatus_ptr(FPST_FPCR);
if (rmode >= 0) {
- TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
gen_fpst(tcg_res, tcg_op, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
} else {
gen_fpst(tcg_res, tcg_op, fpst);
}
@@ -6944,9 +6941,7 @@
rmode = FPROUNDING_TIEAWAY;
}
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
-
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
switch (type) {
case 1: /* float64 */
@@ -7023,7 +7018,7 @@
g_assert_not_reached();
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
@@ -7447,10 +7442,10 @@
bool part = extract32(insn, 14, 1);
bool is_q = extract32(insn, 30, 1);
int esize = 8 << size;
- int i, ofs;
+ int i;
int datasize = is_q ? 128 : 64;
int elements = datasize / esize;
- TCGv_i64 tcg_res, tcg_resl, tcg_resh;
+ TCGv_i64 tcg_res[2], tcg_ele;
if (opcode == 0 || (size == 3 && !is_q)) {
unallocated_encoding(s);
@@ -7461,37 +7456,39 @@
return;
}
- tcg_resl = tcg_const_i64(0);
- tcg_resh = is_q ? tcg_const_i64(0) : NULL;
- tcg_res = tcg_temp_new_i64();
+ tcg_res[0] = tcg_temp_new_i64();
+ tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
+ tcg_ele = tcg_temp_new_i64();
for (i = 0; i < elements; i++) {
+ int o, w;
+
switch (opcode) {
case 1: /* UZP1/2 */
{
int midpoint = elements / 2;
if (i < midpoint) {
- read_vec_element(s, tcg_res, rn, 2 * i + part, size);
+ read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
} else {
- read_vec_element(s, tcg_res, rm,
+ read_vec_element(s, tcg_ele, rm,
2 * (i - midpoint) + part, size);
}
break;
}
case 2: /* TRN1/2 */
if (i & 1) {
- read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
+ read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
} else {
- read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
+ read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
}
break;
case 3: /* ZIP1/2 */
{
int base = part * elements / 2;
if (i & 1) {
- read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
+ read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
} else {
- read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
+ read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
}
break;
}
@@ -7499,19 +7496,18 @@
g_assert_not_reached();
}
- ofs = i * esize;
- if (ofs < 64) {
- tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
- tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
+ w = (i * esize) / 64;
+ o = (i * esize) % 64;
+ if (o == 0) {
+ tcg_gen_mov_i64(tcg_res[w], tcg_ele);
} else {
- tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
- tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
+ tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
+ tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
}
}
- write_vec_element(s, tcg_resl, rd, 0, MO_64);
- if (is_q) {
- write_vec_element(s, tcg_resh, rd, 1, MO_64);
+ for (i = 0; i <= is_q; ++i) {
+ write_vec_element(s, tcg_res[i], rd, i, MO_64);
}
clear_vec_high(s, is_q, rd);
}
@@ -8463,7 +8459,7 @@
tcg_rn = tcg_temp_new_i64();
tcg_rd = tcg_temp_new_i64();
tcg_rd_narrowed = tcg_temp_new_i32();
- tcg_final = tcg_const_i64(0);
+ tcg_final = tcg_temp_new_i64();
if (round) {
tcg_round = tcg_constant_i64(1ULL << (shift - 1));
@@ -8477,7 +8473,11 @@
false, is_u_shift, size+1, shift);
narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
- tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ if (i == 0) {
+ tcg_gen_mov_i64(tcg_final, tcg_rd);
+ } else {
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
}
if (!is_q) {
@@ -8771,9 +8771,8 @@
assert(!(is_scalar && is_q));
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
fracbits = (16 << size) - immhb;
tcg_shift = tcg_constant_i32(fracbits);
@@ -8831,7 +8830,7 @@
}
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
/* AdvSIMD scalar shift by immediate
@@ -10219,12 +10218,11 @@
}
if (is_fcvt) {
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
} else {
- tcg_rmode = NULL;
tcg_fpstatus = NULL;
+ tcg_rmode = NULL;
}
if (size == 3) {
@@ -10276,7 +10274,7 @@
}
if (is_fcvt) {
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
@@ -12005,22 +12003,26 @@
int esize = 8 << size;
int elements = dsize / esize;
TCGv_i64 tcg_rn = tcg_temp_new_i64();
- TCGv_i64 tcg_rd = tcg_const_i64(0);
- TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
+ TCGv_i64 tcg_rd[2];
+
+ for (i = 0; i < 2; i++) {
+ tcg_rd[i] = tcg_temp_new_i64();
+ tcg_gen_movi_i64(tcg_rd[i], 0);
+ }
for (i = 0; i < elements; i++) {
int e_rev = (i & 0xf) ^ revmask;
- int off = e_rev * esize;
+ int w = (e_rev * esize) / 64;
+ int o = (e_rev * esize) % 64;
+
read_vec_element(s, tcg_rn, rn, i, size);
- if (off >= 64) {
- tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
- tcg_rn, off - 64, esize);
- } else {
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
- }
+ tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
}
- write_vec_element(s, tcg_rd, rd, 0, MO_64);
- write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
+
+ for (i = 0; i < 2; i++) {
+ write_vec_element(s, tcg_rd[i], rd, i, MO_64);
+ }
+ clear_vec_high(s, true, rd);
}
}
@@ -12133,7 +12135,6 @@
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
bool need_fpstatus = false;
- bool need_rmode = false;
int rmode = -1;
TCGv_i32 tcg_rmode;
TCGv_ptr tcg_fpstatus;
@@ -12283,7 +12284,6 @@
case 0x7a: /* FCVTPU */
case 0x7b: /* FCVTZU */
need_fpstatus = true;
- need_rmode = true;
rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
if (size == 3 && !is_q) {
unallocated_encoding(s);
@@ -12293,7 +12293,6 @@
case 0x5c: /* FCVTAU */
case 0x1c: /* FCVTAS */
need_fpstatus = true;
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
if (size == 3 && !is_q) {
unallocated_encoding(s);
@@ -12352,7 +12351,6 @@
case 0x19: /* FRINTM */
case 0x38: /* FRINTP */
case 0x39: /* FRINTZ */
- need_rmode = true;
rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
/* fall through */
case 0x59: /* FRINTX */
@@ -12364,7 +12362,6 @@
}
break;
case 0x58: /* FRINTA */
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
need_fpstatus = true;
if (size == 3 && !is_q) {
@@ -12380,7 +12377,6 @@
break;
case 0x1e: /* FRINT32Z */
case 0x1f: /* FRINT64Z */
- need_rmode = true;
rmode = FPROUNDING_ZERO;
/* fall through */
case 0x5e: /* FRINT32X */
@@ -12406,14 +12402,13 @@
return;
}
- if (need_fpstatus || need_rmode) {
+ if (need_fpstatus || rmode >= 0) {
tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
} else {
tcg_fpstatus = NULL;
}
- if (need_rmode) {
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ if (rmode >= 0) {
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
} else {
tcg_rmode = NULL;
}
@@ -12595,8 +12590,8 @@
}
clear_vec_high(s, is_q, rd);
- if (need_rmode) {
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ if (tcg_rmode) {
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
@@ -12625,9 +12620,8 @@
int pass;
TCGv_i32 tcg_rmode = NULL;
TCGv_ptr tcg_fpstatus = NULL;
- bool need_rmode = false;
bool need_fpst = true;
- int rmode;
+ int rmode = -1;
if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
@@ -12676,27 +12670,22 @@
case 0x3f: /* FRECPX */
break;
case 0x18: /* FRINTN */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_TIEEVEN;
break;
case 0x19: /* FRINTM */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_NEGINF;
break;
case 0x38: /* FRINTP */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_POSINF;
break;
case 0x39: /* FRINTZ */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_ZERO;
break;
case 0x58: /* FRINTA */
- need_rmode = true;
only_in_vector = true;
rmode = FPROUNDING_TIEAWAY;
break;
@@ -12706,43 +12695,33 @@
/* current rounding mode */
break;
case 0x1a: /* FCVTNS */
- need_rmode = true;
rmode = FPROUNDING_TIEEVEN;
break;
case 0x1b: /* FCVTMS */
- need_rmode = true;
rmode = FPROUNDING_NEGINF;
break;
case 0x1c: /* FCVTAS */
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
break;
case 0x3a: /* FCVTPS */
- need_rmode = true;
rmode = FPROUNDING_POSINF;
break;
case 0x3b: /* FCVTZS */
- need_rmode = true;
rmode = FPROUNDING_ZERO;
break;
case 0x5a: /* FCVTNU */
- need_rmode = true;
rmode = FPROUNDING_TIEEVEN;
break;
case 0x5b: /* FCVTMU */
- need_rmode = true;
rmode = FPROUNDING_NEGINF;
break;
case 0x5c: /* FCVTAU */
- need_rmode = true;
rmode = FPROUNDING_TIEAWAY;
break;
case 0x7a: /* FCVTPU */
- need_rmode = true;
rmode = FPROUNDING_POSINF;
break;
case 0x7b: /* FCVTZU */
- need_rmode = true;
rmode = FPROUNDING_ZERO;
break;
case 0x2f: /* FABS */
@@ -12775,13 +12754,12 @@
return;
}
- if (need_rmode || need_fpst) {
+ if (rmode >= 0 || need_fpst) {
tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
}
- if (need_rmode) {
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ if (rmode >= 0) {
+ tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
}
if (is_scalar) {
@@ -12881,7 +12859,7 @@
}
if (tcg_rmode) {
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
}
diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c
index 798b4fd..31fb211 100644
--- a/target/arm/tcg/translate-mve.c
+++ b/target/arm/tcg/translate-mve.c
@@ -588,7 +588,7 @@
DO_VCVT(VCVT_FU, vcvt_hu, vcvt_fu)
static bool do_vcvt_rmode(DisasContext *s, arg_1op *a,
- enum arm_fprounding rmode, bool u)
+ ARMFPRounding rmode, bool u)
{
/*
* Handle VCVT fp to int with specified rounding mode.
@@ -1150,7 +1150,7 @@
MVEGenLongDualAccOpFn *fn)
{
TCGv_ptr qn, qm;
- TCGv_i64 rda;
+ TCGv_i64 rda_i, rda_o;
TCGv_i32 rdalo, rdahi;
if (!dc_isar_feature(aa32_mve, s) ||
@@ -1177,21 +1177,22 @@
* of an A=0 (no-accumulate) insn which does not execute the first
* beat must start with the current rda value, not 0.
*/
+ rda_o = tcg_temp_new_i64();
if (a->a || mve_skip_first_beat(s)) {
- rda = tcg_temp_new_i64();
+ rda_i = rda_o;
rdalo = load_reg(s, a->rdalo);
rdahi = load_reg(s, a->rdahi);
- tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+ tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi);
} else {
- rda = tcg_const_i64(0);
+ rda_i = tcg_constant_i64(0);
}
- fn(rda, cpu_env, qn, qm, rda);
+ fn(rda_o, cpu_env, qn, qm, rda_i);
rdalo = tcg_temp_new_i32();
rdahi = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(rdalo, rda);
- tcg_gen_extrh_i64_i32(rdahi, rda);
+ tcg_gen_extrl_i64_i32(rdalo, rda_o);
+ tcg_gen_extrh_i64_i32(rdahi, rda_o);
store_reg(s, a->rdalo, rdalo);
store_reg(s, a->rdahi, rdahi);
mve_update_eci(s);
@@ -1258,7 +1259,7 @@
static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
{
TCGv_ptr qn, qm;
- TCGv_i32 rda;
+ TCGv_i32 rda_i, rda_o;
if (!dc_isar_feature(aa32_mve, s) ||
!mve_check_qreg_bank(s, a->qn) ||
@@ -1278,13 +1279,14 @@
* beat must start with the current rda value, not 0.
*/
if (a->a || mve_skip_first_beat(s)) {
- rda = load_reg(s, a->rda);
+ rda_o = rda_i = load_reg(s, a->rda);
} else {
- rda = tcg_const_i32(0);
+ rda_i = tcg_constant_i32(0);
+ rda_o = tcg_temp_new_i32();
}
- fn(rda, cpu_env, qn, qm, rda);
- store_reg(s, a->rda, rda);
+ fn(rda_o, cpu_env, qn, qm, rda_i);
+ store_reg(s, a->rda, rda_o);
mve_update_eci(s);
return true;
@@ -1396,7 +1398,7 @@
{ NULL, NULL }
};
TCGv_ptr qm;
- TCGv_i32 rda;
+ TCGv_i32 rda_i, rda_o;
if (!dc_isar_feature(aa32_mve, s) ||
a->size == 3) {
@@ -1413,15 +1415,16 @@
*/
if (a->a || mve_skip_first_beat(s)) {
/* Accumulate input from Rda */
- rda = load_reg(s, a->rda);
+ rda_o = rda_i = load_reg(s, a->rda);
} else {
/* Accumulate starting at zero */
- rda = tcg_const_i32(0);
+ rda_i = tcg_constant_i32(0);
+ rda_o = tcg_temp_new_i32();
}
qm = mve_qreg_ptr(a->qm);
- fns[a->size][a->u](rda, cpu_env, qm, rda);
- store_reg(s, a->rda, rda);
+ fns[a->size][a->u](rda_o, cpu_env, qm, rda_i);
+ store_reg(s, a->rda, rda_o);
mve_update_eci(s);
return true;
@@ -1436,7 +1439,7 @@
* No need to check Qm's bank: it is only 3 bits in decode.
*/
TCGv_ptr qm;
- TCGv_i64 rda;
+ TCGv_i64 rda_i, rda_o;
TCGv_i32 rdalo, rdahi;
if (!dc_isar_feature(aa32_mve, s)) {
@@ -1458,28 +1461,29 @@
* of an A=0 (no-accumulate) insn which does not execute the first
* beat must start with the current value of RdaHi:RdaLo, not zero.
*/
+ rda_o = tcg_temp_new_i64();
if (a->a || mve_skip_first_beat(s)) {
/* Accumulate input from RdaHi:RdaLo */
- rda = tcg_temp_new_i64();
+ rda_i = rda_o;
rdalo = load_reg(s, a->rdalo);
rdahi = load_reg(s, a->rdahi);
- tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+ tcg_gen_concat_i32_i64(rda_i, rdalo, rdahi);
} else {
/* Accumulate starting at zero */
- rda = tcg_const_i64(0);
+ rda_i = tcg_constant_i64(0);
}
qm = mve_qreg_ptr(a->qm);
if (a->u) {
- gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
+ gen_helper_mve_vaddlv_u(rda_o, cpu_env, qm, rda_i);
} else {
- gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
+ gen_helper_mve_vaddlv_s(rda_o, cpu_env, qm, rda_i);
}
rdalo = tcg_temp_new_i32();
rdahi = tcg_temp_new_i32();
- tcg_gen_extrl_i64_i32(rdalo, rda);
- tcg_gen_extrh_i64_i32(rdahi, rda);
+ tcg_gen_extrl_i64_i32(rdalo, rda_o);
+ tcg_gen_extrh_i64_i32(rdahi, rda_o);
store_reg(s, a->rdalo, rdalo);
store_reg(s, a->rdahi, rdahi);
mve_update_eci(s);
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index 5bf80b2..92ab290 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -4082,7 +4082,7 @@
a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
- int mode, gen_helper_gvec_3_ptr *fn)
+ ARMFPRounding mode, gen_helper_gvec_3_ptr *fn)
{
unsigned vsz;
TCGv_i32 tmode;
@@ -4096,30 +4096,28 @@
}
vsz = vec_full_reg_size(s);
- tmode = tcg_const_i32(mode);
status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
-
- gen_helper_set_rmode(tmode, tmode, status);
+ tmode = gen_set_rmode(mode, status);
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
pred_full_reg_offset(s, a->pg),
status, vsz, vsz, 0, fn);
- gen_helper_set_rmode(tmode, tmode, status);
+ gen_restore_rmode(tmode, status);
return true;
}
TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a,
- float_round_nearest_even, frint_fns[a->esz])
+ FPROUNDING_TIEEVEN, frint_fns[a->esz])
TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a,
- float_round_up, frint_fns[a->esz])
+ FPROUNDING_POSINF, frint_fns[a->esz])
TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a,
- float_round_down, frint_fns[a->esz])
+ FPROUNDING_NEGINF, frint_fns[a->esz])
TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a,
- float_round_to_zero, frint_fns[a->esz])
+ FPROUNDING_ZERO, frint_fns[a->esz])
TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a,
- float_round_ties_away, frint_fns[a->esz])
+ FPROUNDING_TIEAWAY, frint_fns[a->esz])
static gen_helper_gvec_3_ptr * const frecpx_fns[] = {
NULL, gen_helper_sve_frecpx_h,
@@ -4208,8 +4206,9 @@
}
} else {
TCGLabel *loop = gen_new_label();
- TCGv_ptr tp, i = tcg_const_ptr(0);
+ TCGv_ptr tp, i = tcg_temp_new_ptr();
+ tcg_gen_movi_ptr(i, 0);
gen_set_label(loop);
t0 = tcg_temp_new_i64();
@@ -4286,8 +4285,9 @@
}
} else {
TCGLabel *loop = gen_new_label();
- TCGv_ptr tp, i = tcg_const_ptr(0);
+ TCGv_ptr tp, i = tcg_temp_new_ptr();
+ tcg_gen_movi_ptr(i, 0);
gen_set_label(loop);
t0 = tcg_temp_new_i64();
@@ -7145,9 +7145,9 @@
gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR)
TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a,
- float_round_to_odd, gen_helper_sve_fcvt_ds)
+ FPROUNDING_ODD, gen_helper_sve_fcvt_ds)
TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a,
- float_round_to_odd, gen_helper_sve2_fcvtnt_ds)
+ FPROUNDING_ODD, gen_helper_sve2_fcvtnt_ds)
static gen_helper_gvec_3_ptr * const flogb_fns[] = {
NULL, gen_helper_flogb_h,
diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c
index 757a2bf..dd782aa 100644
--- a/target/arm/tcg/translate-vfp.c
+++ b/target/arm/tcg/translate-vfp.c
@@ -464,8 +464,7 @@
fpst = fpstatus_ptr(FPST_FPCR);
}
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(rounding, fpst);
if (sz == 3) {
TCGv_i64 tcg_op;
@@ -489,7 +488,7 @@
vfp_store_reg32(tcg_res, rd);
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
return true;
}
@@ -533,9 +532,7 @@
}
tcg_shift = tcg_constant_i32(0);
-
- tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(rounding, fpst);
if (sz == 3) {
TCGv_i64 tcg_double, tcg_res;
@@ -572,7 +569,7 @@
vfp_store_reg32(tcg_res, rd);
}
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
return true;
}
@@ -2783,10 +2780,9 @@
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR_F16);
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rinth(tmp, tmp, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
}
@@ -2808,10 +2804,9 @@
tmp = tcg_temp_new_i32();
vfp_load_reg32(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rints(tmp, tmp, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
}
@@ -2842,10 +2837,9 @@
tmp = tcg_temp_new_i64();
vfp_load_reg64(tmp, a->vm);
fpst = fpstatus_ptr(FPST_FPCR);
- tcg_rmode = tcg_const_i32(float_round_to_zero);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rintd(tmp, tmp, fpst);
- gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_restore_rmode(tcg_rmode, fpst);
vfp_store_reg64(tmp, a->vd);
return true;
}
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index b70b628..2cb9368 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -7261,8 +7261,8 @@
static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
{
- TCGv_i32 tmp;
int msb = a->msb, lsb = a->lsb;
+ TCGv_i32 t_in, t_rd;
int width;
if (!ENABLE_ARCH_6T2) {
@@ -7277,16 +7277,14 @@
width = msb + 1 - lsb;
if (a->rn == 15) {
/* BFC */
- tmp = tcg_const_i32(0);
+ t_in = tcg_constant_i32(0);
} else {
/* BFI */
- tmp = load_reg(s, a->rn);
+ t_in = load_reg(s, a->rn);
}
- if (width != 32) {
- TCGv_i32 tmp2 = load_reg(s, a->rd);
- tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
- }
- store_reg(s, a->rd, tmp);
+ t_rd = load_reg(s, a->rd);
+ tcg_gen_deposit_i32(t_rd, t_rd, t_in, lsb, width);
+ store_reg(s, a->rd, t_rd);
return true;
}
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 20f3ca7..f02d468 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -617,6 +617,23 @@
}
/*
+ * Set and reset rounding mode around another operation.
+ */
+static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst)
+{
+ TCGv_i32 new = tcg_constant_i32(arm_rmode_to_sf(rmode));
+ TCGv_i32 old = tcg_temp_new_i32();
+
+ gen_helper_set_rmode(old, new, fpst);
+ return old;
+}
+
+static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst)
+{
+ gen_helper_set_rmode(old, old, fpst);
+}
+
+/*
* Helpers for implementing sets of trans_* functions.
* Defer the implementation of NAME to FUNC, with optional extra arguments.
*/
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index 24e3d82..36906db 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -1104,33 +1104,14 @@
}
/* Convert ARM rounding mode to softfloat */
-int arm_rmode_to_sf(int rmode)
-{
- switch (rmode) {
- case FPROUNDING_TIEAWAY:
- rmode = float_round_ties_away;
- break;
- case FPROUNDING_ODD:
- /* FIXME: add support for TIEAWAY and ODD */
- qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
- rmode);
- /* fall through for now */
- case FPROUNDING_TIEEVEN:
- default:
- rmode = float_round_nearest_even;
- break;
- case FPROUNDING_POSINF:
- rmode = float_round_up;
- break;
- case FPROUNDING_NEGINF:
- rmode = float_round_down;
- break;
- case FPROUNDING_ZERO:
- rmode = float_round_to_zero;
- break;
- }
- return rmode;
-}
+const FloatRoundMode arm_rmode_to_sf_map[] = {
+ [FPROUNDING_TIEEVEN] = float_round_nearest_even,
+ [FPROUNDING_POSINF] = float_round_up,
+ [FPROUNDING_NEGINF] = float_round_down,
+ [FPROUNDING_ZERO] = float_round_to_zero,
+ [FPROUNDING_TIEAWAY] = float_round_ties_away,
+ [FPROUNDING_ODD] = float_round_to_odd,
+};
/*
* Implement float64 to int32_t conversion without saturation;
diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h
index 7ef4e7c..9a92bc7 100644
--- a/target/avr/cpu-param.h
+++ b/target/avr/cpu-param.h
@@ -31,6 +31,5 @@
#define TARGET_PAGE_BITS 8
#define TARGET_PHYS_ADDR_SPACE_BITS 24
#define TARGET_VIRT_ADDR_SPACE_BITS 24
-#define NB_MMU_MODES 2
#endif
diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c
index 1c1b908..150344d 100644
--- a/target/avr/gdbstub.c
+++ b/target/avr/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int avr_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/avr/translate.c b/target/avr/translate.c
index b9506a8..a6aeae6 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -400,7 +400,7 @@
static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv Rr = tcg_const_i32(a->imm);
+ TCGv Rr = tcg_constant_i32(a->imm);
TCGv R = tcg_temp_new_i32();
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */
@@ -425,7 +425,7 @@
TCGv Rd = cpu_r[a->rd];
TCGv Rr = cpu_r[a->rr];
TCGv R = tcg_temp_new_i32();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
tcg_gen_sub_tl(R, R, cpu_Cf);
@@ -453,9 +453,9 @@
static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv Rr = tcg_const_i32(a->imm);
+ TCGv Rr = tcg_constant_i32(a->imm);
TCGv R = tcg_temp_new_i32();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
tcg_gen_sub_tl(R, R, cpu_Cf);
@@ -637,7 +637,7 @@
static bool trans_NEG(DisasContext *ctx, arg_NEG *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
TCGv R = tcg_temp_new_i32();
tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */
@@ -930,19 +930,19 @@
static void gen_push_ret(DisasContext *ctx, int ret)
{
if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
- TCGv t0 = tcg_const_i32((ret & 0x0000ff));
+ TCGv t0 = tcg_constant_i32(ret & 0x0000ff);
tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
} else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
- TCGv t0 = tcg_const_i32((ret & 0x00ffff));
+ TCGv t0 = tcg_constant_i32(ret & 0x00ffff);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
} else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
- TCGv lo = tcg_const_i32((ret & 0x0000ff));
- TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8);
+ TCGv lo = tcg_constant_i32(ret & 0x0000ff);
+ TCGv hi = tcg_constant_i32((ret & 0xffff00) >> 8);
tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
tcg_gen_subi_tl(cpu_sp, cpu_sp, 2);
@@ -1211,7 +1211,7 @@
TCGv Rd = cpu_r[a->rd];
TCGv Rr = cpu_r[a->rr];
TCGv R = tcg_temp_new_i32();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
tcg_gen_sub_tl(R, R, cpu_Cf);
@@ -1238,7 +1238,7 @@
{
TCGv Rd = cpu_r[a->rd];
int Imm = a->imm;
- TCGv Rr = tcg_const_i32(Imm);
+ TCGv Rr = tcg_constant_i32(Imm);
TCGv R = tcg_temp_new_i32();
tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
@@ -1288,12 +1288,13 @@
*/
static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
{
- TCGv temp = tcg_const_i32(a->reg);
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(temp, cpu_env, temp);
- tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_EQ;
- ctx->skip_var0 = temp;
+ ctx->skip_var0 = data;
return true;
}
@@ -1305,12 +1306,13 @@
*/
static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a)
{
- TCGv temp = tcg_const_i32(a->reg);
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(temp, cpu_env, temp);
- tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_NE;
- ctx->skip_var0 = temp;
+ ctx->skip_var0 = data;
return true;
}
@@ -2122,7 +2124,7 @@
static bool trans_IN(DisasContext *ctx, arg_IN *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv port = tcg_const_i32(a->imm);
+ TCGv port = tcg_constant_i32(a->imm);
gen_helper_inb(Rd, cpu_env, port);
return true;
@@ -2135,7 +2137,7 @@
static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
{
TCGv Rd = cpu_r[a->rd];
- TCGv port = tcg_const_i32(a->imm);
+ TCGv port = tcg_constant_i32(a->imm);
gen_helper_outb(cpu_env, port, Rd);
return true;
@@ -2403,7 +2405,7 @@
static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_const_i32(a->reg);
+ TCGv port = tcg_constant_i32(a->reg);
gen_helper_inb(data, cpu_env, port);
tcg_gen_ori_tl(data, data, 1 << a->bit);
@@ -2418,7 +2420,7 @@
static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
{
TCGv data = tcg_temp_new_i32();
- TCGv port = tcg_const_i32(a->reg);
+ TCGv port = tcg_constant_i32(a->reg);
gen_helper_inb(data, cpu_env, port);
tcg_gen_andi_tl(data, data, ~(1 << a->bit));
diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h
index 12ec22d..b31b742 100644
--- a/target/cris/cpu-param.h
+++ b/target/cris/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 13
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 2
#endif
diff --git a/target/cris/gdbstub.c b/target/cris/gdbstub.c
index 2418d57..25c0ca3 100644
--- a/target/cris/gdbstub.c
+++ b/target/cris/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int crisv10_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/cris/translate.c b/target/cris/translate.c
index 5172c9b..b2beb99 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -175,10 +175,7 @@
#define t_gen_mov_env_TN(member, tn) \
tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member))
#define t_gen_movi_env_TN(member, c) \
- do { \
- TCGv tc = tcg_const_tl(c); \
- t_gen_mov_env_TN(member, tc); \
- } while (0)
+ t_gen_mov_env_TN(member, tcg_constant_tl(c))
static inline void t_gen_mov_TN_preg(TCGv tn, int r)
{
@@ -268,8 +265,7 @@
static inline void t_gen_raise_exception(uint32_t index)
{
- TCGv_i32 tmp = tcg_const_i32(index);
- gen_helper_raise_exception(cpu_env, tmp);
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
}
static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
@@ -277,7 +273,7 @@
TCGv t0, t_31;
t0 = tcg_temp_new();
- t_31 = tcg_const_tl(31);
+ t_31 = tcg_constant_tl(31);
tcg_gen_shl_tl(d, a, b);
tcg_gen_sub_tl(t0, t_31, b);
@@ -1250,7 +1246,7 @@
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(dc->op1);
+ c = tcg_constant_tl(dc->op1);
cris_alu(dc, CC_OP_ADD,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1274,7 +1270,7 @@
LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(dc->op1);
+ c = tcg_constant_tl(dc->op1);
cris_alu(dc, CC_OP_SUB,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1289,7 +1285,7 @@
LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_CMP,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1304,7 +1300,7 @@
LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_AND,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1318,7 +1314,7 @@
LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_OR,
cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
return 2;
@@ -1330,7 +1326,7 @@
LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
- c = tcg_const_tl(dc->op1);
+ c = tcg_constant_tl(dc->op1);
cris_evaluate_flags(dc);
gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
c, cpu_PR[PR_CCS]);
@@ -1945,8 +1941,8 @@
{
TCGv c2, c1;
LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
- c1 = tcg_const_tl(dc->op1);
- c2 = tcg_const_tl(dc->op2);
+ c1 = tcg_constant_tl(dc->op1);
+ c2 = tcg_constant_tl(dc->op2);
cris_cc_mask(dc, 0);
gen_helper_movl_sreg_reg(cpu_env, c2, c1);
return 2;
@@ -1955,8 +1951,8 @@
{
TCGv c2, c1;
LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
- c1 = tcg_const_tl(dc->op1);
- c2 = tcg_const_tl(dc->op2);
+ c1 = tcg_constant_tl(dc->op1);
+ c2 = tcg_constant_tl(dc->op2);
cris_cc_mask(dc, 0);
gen_helper_movl_reg_sreg(cpu_env, c1, c2);
return 2;
@@ -2237,7 +2233,7 @@
cris_cc_mask(dc, CC_MASK_NZ);
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
- c = tcg_const_tl(0);
+ c = tcg_constant_tl(0);
cris_alu(dc, CC_OP_CMP,
cpu_R[dc->op2], t[1], c, memsize_zz(dc));
do_postinc(dc, memsize);
@@ -2582,7 +2578,7 @@
if (dc->op2 > 15) {
abort();
}
- c = tcg_const_tl(dc->pc + 4);
+ c = tcg_constant_tl(dc->pc + 4);
t_gen_mov_preg_TN(dc, dc->op2, c);
cris_prepare_jmp(dc, JMP_INDIRECT);
@@ -2598,7 +2594,7 @@
LOG_DIS("jas 0x%x\n", imm);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 8);
+ c = tcg_constant_tl(dc->pc + 8);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2616,7 +2612,7 @@
LOG_DIS("jasc 0x%x\n", imm);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 8 + 4);
+ c = tcg_constant_tl(dc->pc + 8 + 4);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2632,7 +2628,7 @@
cris_cc_mask(dc, 0);
/* Store the return address in Pd. */
tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
- c = tcg_const_tl(dc->pc + 4 + 4);
+ c = tcg_constant_tl(dc->pc + 4 + 4);
t_gen_mov_preg_TN(dc, dc->op2, c);
cris_prepare_jmp(dc, JMP_INDIRECT);
return 2;
@@ -2664,7 +2660,7 @@
LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 8);
+ c = tcg_constant_tl(dc->pc + 8);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2681,7 +2677,7 @@
LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
cris_cc_mask(dc, 0);
- c = tcg_const_tl(dc->pc + 12);
+ c = tcg_constant_tl(dc->pc + 12);
/* Store the return address in Pd. */
t_gen_mov_preg_TN(dc, dc->op2, c);
@@ -2695,7 +2691,7 @@
cris_cc_mask(dc, 0);
if (dc->op2 == 15) {
- tcg_gen_st_i32(tcg_const_i32(1), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
-offsetof(CRISCPU, env) + offsetof(CPUState, halted));
tcg_gen_movi_tl(env_pc, dc->pc + 2);
t_gen_raise_exception(EXCP_HLT);
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
index b03b2ef..32338bb 100644
--- a/target/cris/translate_v10.c.inc
+++ b/target/cris/translate_v10.c.inc
@@ -251,7 +251,7 @@
LOG_DIS("moveq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -259,7 +259,7 @@
LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -267,7 +267,7 @@
LOG_DIS("addq %d, $r%d\n", imm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -275,7 +275,7 @@
LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_AND, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -285,7 +285,7 @@
cris_cc_mask(dc, CC_MASK_NZVC);
op = imm & (1 << 5);
imm &= 0x1f;
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
if (op) {
cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
@@ -305,7 +305,7 @@
}
imm &= 0x1f;
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, op, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -313,7 +313,7 @@
LOG_DIS("subq %d, $r%d\n", imm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(imm);
+ c = tcg_constant_tl(imm);
cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -321,7 +321,7 @@
LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
- c = tcg_const_tl(simm);
+ c = tcg_constant_tl(simm);
cris_alu(dc, CC_OP_OR, cpu_R[dc->dst],
cpu_R[dc->dst], c, 4);
break;
@@ -1014,7 +1014,7 @@
cris_alu_m_alloc_temps(t);
insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
- c = tcg_const_tl(0);
+ c = tcg_constant_tl(0);
cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
t[0], c, size);
break;
@@ -1111,7 +1111,7 @@
if (dc->mode == CRISV10_MODE_AUTOINC)
insn_len += size;
- c = tcg_const_tl(dc->pc + insn_len);
+ c = tcg_constant_tl(dc->pc + insn_len);
t_gen_mov_preg_TN(dc, dc->dst, c);
dc->jmp_pc = imm;
cris_prepare_jmp(dc, JMP_DIRECT);
@@ -1121,7 +1121,7 @@
LOG_DIS("break %d\n", dc->src);
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc + 2);
- c = tcg_const_tl(dc->src + 2);
+ c = tcg_constant_tl(dc->src + 2);
t_gen_mov_env_TN(trap_vector, c);
t_gen_raise_exception(EXCP_BREAK);
dc->base.is_jmp = DISAS_NORETURN;
@@ -1130,7 +1130,7 @@
LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
dc->opcode, dc->src, dc->dst);
t[0] = tcg_temp_new();
- c = tcg_const_tl(dc->pc + insn_len);
+ c = tcg_constant_tl(dc->pc + insn_len);
t_gen_mov_preg_TN(dc, dc->dst, c);
crisv10_prepare_memaddr(dc, t[0], size);
gen_load(dc, env_btarget, t[0], 4, 0);
@@ -1153,7 +1153,7 @@
LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n",
dc->pc, dc->opcode, dc->dst, dc->src);
tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]);
- c = tcg_const_tl(dc->pc + insn_len);
+ c = tcg_constant_tl(dc->pc + insn_len);
t_gen_mov_preg_TN(dc, dc->dst, c);
cris_prepare_jmp(dc, JMP_INDIRECT);
dc->delayed_branch--; /* v10 has no dslot here. */
diff --git a/target/hexagon/README b/target/hexagon/README
index 251960b..ebafc78 100644
--- a/target/hexagon/README
+++ b/target/hexagon/README
@@ -52,6 +52,7 @@
gen_tcg_func_table.py -> tcg_func_table_generated.c.inc
gen_helper_funcs.py -> helper_funcs_generated.c.inc
gen_idef_parser_funcs.py -> idef_parser_input.h
+ gen_analyze_funcs.py -> analyze_funcs_generated.c.inc
Qemu helper functions have 3 parts
DEF_HELPER declaration indicates the signature of the helper
@@ -87,7 +88,6 @@
TCGv RtV = hex_gpr[insn->regno[2]];
gen_helper_A2_add(RdV, cpu_env, RsV, RtV);
gen_log_reg_write(RdN, RdV);
- ctx_log_reg_write(ctx, RdN);
}
helper_funcs_generated.c.inc
@@ -136,12 +136,9 @@
won't fit in a TCGv or TCGv_i64, so we use TCGv_ptr variables to pass the
address to helper functions. Here's an example for an HVX vector-add-word
instruction.
- static void generate_V6_vaddw(
- CPUHexagonState *env,
- DisasContext *ctx,
- Insn *insn,
- Packet *pkt)
+ static void generate_V6_vaddw(DisasContext *ctx)
{
+ Insn *insn __attribute__((unused)) = ctx->insn;
const int VdN = insn->regno[0];
const intptr_t VdV_off =
ctx_future_vreg_off(ctx, VdN, 1, true);
@@ -157,10 +154,7 @@
TCGv_ptr VvV = tcg_temp_new_ptr();
tcg_gen_addi_ptr(VuV, cpu_env, VuV_off);
tcg_gen_addi_ptr(VvV, cpu_env, VvV_off);
- TCGv slot = tcg_constant_tl(insn->slot);
- gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV, slot);
- gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false);
- ctx_log_vreg_write(ctx, VdN, EXT_DFL, false);
+ gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV);
}
Notice that we also generate a variable named <operand>_off for each operand of
@@ -173,12 +167,9 @@
Finally, we notice that the override doesn't use the TCGv_ptr variables, so
we don't generate them when an override is present. Here is what we generate
when the override is present.
- static void generate_V6_vaddw(
- CPUHexagonState *env,
- DisasContext *ctx,
- Insn *insn,
- Packet *pkt)
+ static void generate_V6_vaddw(DisasContext *ctx)
{
+ Insn *insn __attribute__((unused)) = ctx->insn;
const int VdN = insn->regno[0];
const intptr_t VdV_off =
ctx_future_vreg_off(ctx, VdN, 1, true);
@@ -189,10 +180,14 @@
const intptr_t VvV_off =
vreg_src_off(ctx, VvN);
fGEN_TCG_V6_vaddw({ fHIDE(int i;) fVFOREACH(32, i) { VdV.w[i] = VuV.w[i] + VvV.w[i] ; } });
- gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false);
- ctx_log_vreg_write(ctx, VdN, EXT_DFL, false);
}
+We also generate an analyze_<tag> function for each instruction. Currently,
+these functions record the writes to registers by calling ctx_log_*. During
+gen_start_packet, we invoke the analyze_<tag> function for each instruction in
+the packet, and we mark the implicit writes. After the analysis is performed,
+we initialize hex_new_value for each of the predicated assignments.
+
In addition to instruction semantics, we use a generator to create the decode
tree. This generation is also a two step process. The first step is to run
target/hexagon/gen_dectree_import.c to produce
@@ -277,10 +272,8 @@
VRegs Vector registers
future_VRegs Registers to be stored during packet commit
tmp_VRegs Temporary registers *not* stored during commit
- VRegs_updated Mask of predicated vector writes
QRegs Q (vector predicate) registers
future_QRegs Registers to be stored during packet commit
- QRegs_updated Mask of predicated vector writes
*** Debugging ***
diff --git a/target/hexagon/attribs_def.h.inc b/target/hexagon/attribs_def.h.inc
index 5d2a102..9874d16 100644
--- a/target/hexagon/attribs_def.h.inc
+++ b/target/hexagon/attribs_def.h.inc
@@ -44,6 +44,7 @@
DEF_ATTRIB(MEMSIZE_2B, "Memory width is 2 bytes", "", "")
DEF_ATTRIB(MEMSIZE_4B, "Memory width is 4 bytes", "", "")
DEF_ATTRIB(MEMSIZE_8B, "Memory width is 8 bytes", "", "")
+DEF_ATTRIB(SCALAR_LOAD, "Load is scalar", "", "")
DEF_ATTRIB(SCALAR_STORE, "Store is scalar", "", "")
DEF_ATTRIB(REGWRSIZE_1B, "Memory width is 1 byte", "", "")
DEF_ATTRIB(REGWRSIZE_2B, "Memory width is 2 bytes", "", "")
diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h
index e8ed546..71b4a9b 100644
--- a/target/hexagon/cpu-param.h
+++ b/target/hexagon/cpu-param.h
@@ -24,6 +24,4 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 1
-
#endif
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index 34c0ae0..81b663e 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -111,11 +111,8 @@
MMVector future_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16);
MMVector tmp_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16);
- VRegMask VRegs_updated;
-
MMQReg QRegs[NUM_QREGS] QEMU_ALIGNED(16);
MMQReg future_QRegs[NUM_QREGS] QEMU_ALIGNED(16);
- QRegMask QRegs_updated;
/* Temporaries used within instructions */
MMVectorPair VuuV QEMU_ALIGNED(16);
diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c
index d152d01..46083da 100644
--- a/target/hexagon/gdbstub.c
+++ b/target/hexagon/gdbstub.c
@@ -16,7 +16,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "cpu.h"
#include "internal.h"
diff --git a/target/hexagon/gen_analyze_funcs.py b/target/hexagon/gen_analyze_funcs.py
new file mode 100755
index 0000000..ebd3e7a
--- /dev/null
+++ b/target/hexagon/gen_analyze_funcs.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+##
+## Helpers for gen_analyze_func
+##
+def is_predicated(tag):
+ return 'A_CONDEXEC' in hex_common.attribdict[tag]
+
+def analyze_opn_old(f, tag, regtype, regid, regno):
+ regN = "%s%sN" % (regtype, regid)
+ predicated = "true" if is_predicated(tag) else "false"
+ if (regtype == "R"):
+ if (regid in {"ss", "tt"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"dd", "ee", "xx", "yy"}):
+ f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
+ f.write(" ctx_log_reg_write_pair(ctx, %s, %s);\n" % \
+ (regN, predicated))
+ elif (regid in {"s", "t", "u", "v"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"d", "e", "x", "y"}):
+ f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
+ f.write(" ctx_log_reg_write(ctx, %s, %s);\n" % \
+ (regN, predicated))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid in {"s", "t", "u", "v"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"d", "e", "x"}):
+ f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
+ f.write(" ctx_log_pred_write(ctx, %s);\n" % (regN))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "C"):
+ if (regid == "ss"):
+ f.write("// const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regN, regno))
+ elif (regid == "dd"):
+ f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regN, regno))
+ f.write(" ctx_log_reg_write_pair(ctx, %s, %s);\n" % \
+ (regN, predicated))
+ elif (regid == "s"):
+ f.write("// const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regN, regno))
+ elif (regid == "d"):
+ f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regN, regno))
+ f.write(" ctx_log_reg_write(ctx, %s, %s);\n" % \
+ (regN, predicated))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "M"):
+ if (regid == "u"):
+ f.write("// const int %s = insn->regno[%d];\n"% \
+ (regN, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "V"):
+ newv = "EXT_DFL"
+ if (hex_common.is_new_result(tag)):
+ newv = "EXT_NEW"
+ elif (hex_common.is_tmp_result(tag)):
+ newv = "EXT_TMP"
+ if (regid in {"dd", "xx"}):
+ f.write(" const int %s = insn->regno[%d];\n" %\
+ (regN, regno))
+ f.write(" ctx_log_vreg_write_pair(ctx, %s, %s, %s);\n" % \
+ (regN, newv, predicated))
+ elif (regid in {"uu", "vv"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"s", "u", "v", "w"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"d", "x", "y"}):
+ f.write(" const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ f.write(" ctx_log_vreg_write(ctx, %s, %s, %s);\n" % \
+ (regN, newv, predicated))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "Q"):
+ if (regid in {"d", "e", "x"}):
+ f.write(" const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ f.write(" ctx_log_qreg_write(ctx, %s);\n" % (regN))
+ elif (regid in {"s", "t", "u", "v"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "G"):
+ if (regid in {"dd"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"d"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"ss"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"s"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "S"):
+ if (regid in {"dd"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"d"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"ss"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"s"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def analyze_opn_new(f, tag, regtype, regid, regno):
+ regN = "%s%sN" % (regtype, regid)
+ if (regtype == "N"):
+ if (regid in {"s", "t"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid in {"t", "u", "v"}):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "O"):
+ if (regid == "s"):
+ f.write("// const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def analyze_opn(f, tag, regtype, regid, toss, numregs, i):
+ if (hex_common.is_pair(regid)):
+ analyze_opn_old(f, tag, regtype, regid, i)
+ elif (hex_common.is_single(regid)):
+ if hex_common.is_old_val(regtype, regid, tag):
+ analyze_opn_old(f,tag, regtype, regid, i)
+ elif hex_common.is_new_val(regtype, regid, tag):
+ analyze_opn_new(f, tag, regtype, regid, i)
+ else:
+ print("Bad register parse: ", regtype, regid, toss, numregs)
+ else:
+ print("Bad register parse: ", regtype, regid, toss, numregs)
+
+##
+## Generate the code to analyze the instruction
+## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
+## We produce:
+## static void analyze_A2_add(DisasContext *ctx)
+## {
+## Insn *insn G_GNUC_UNUSED = ctx->insn;
+## const int RdN = insn->regno[0];
+## ctx_log_reg_write(ctx, RdN, false);
+## // const int RsN = insn->regno[1];
+## // const int RtN = insn->regno[2];
+## }
+##
+def gen_analyze_func(f, tag, regs, imms):
+ f.write("static void analyze_%s(DisasContext *ctx)\n" %tag)
+ f.write('{\n')
+
+ f.write(" Insn *insn G_GNUC_UNUSED = ctx->insn;\n")
+
+ i=0
+ ## Analyze all the registers
+ for regtype, regid, toss, numregs in regs:
+ analyze_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+
+ has_generated_helper = (not hex_common.skip_qemu_helper(tag) and
+ not hex_common.is_idef_parser_enabled(tag))
+ if (has_generated_helper and
+ 'A_SCALAR_LOAD' in hex_common.attribdict[tag]):
+ f.write(" ctx->need_pkt_has_store_s1 = true;\n")
+
+ f.write("}\n\n")
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ hex_common.read_overrides_file(sys.argv[3])
+ hex_common.read_overrides_file(sys.argv[4])
+ ## Whether or not idef-parser is enabled is
+ ## determined by the number of arguments to
+ ## this script:
+ ##
+ ## 5 args. -> not enabled,
+ ## 6 args. -> idef-parser enabled.
+ ##
+ ## The 6th arg. then holds a list of the successfully
+ ## parsed instructions.
+ is_idef_parser_enabled = len(sys.argv) > 6
+ if is_idef_parser_enabled:
+ hex_common.read_idef_parser_enabled_file(sys.argv[5])
+ hex_common.calculate_attribs()
+ tagregs = hex_common.get_tagregs()
+ tagimms = hex_common.get_tagimms()
+
+ with open(sys.argv[-1], 'w') as f:
+ f.write("#ifndef HEXAGON_TCG_FUNCS_H\n")
+ f.write("#define HEXAGON_TCG_FUNCS_H\n\n")
+
+ for tag in hex_common.tags:
+ gen_analyze_func(f, tag, tagregs[tag], tagimms[tag])
+
+ f.write("#endif /* HEXAGON_TCG_FUNCS_H */\n")
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py
index 19e9883..7a224b6 100755
--- a/target/hexagon/gen_helper_funcs.py
+++ b/target/hexagon/gen_helper_funcs.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
##
-## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
@@ -226,6 +226,14 @@
print("Bad register parse: ",regtype,regid,toss,numregs)
i += 1
+ ## For conditional instructions, we pass in the destination register
+ if 'A_CONDEXEC' in hex_common.attribdict[tag]:
+ for regtype, regid, toss, numregs in regs:
+ if (hex_common.is_writeonly(regid) and
+ not hex_common.is_hvx_reg(regtype)):
+ gen_helper_arg_opn(f, regtype, regid, i, tag)
+ i += 1
+
## Arguments to the helper function are the source regs and immediates
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
@@ -262,10 +270,11 @@
if hex_common.need_ea(tag): gen_decl_ea(f)
## Declare the return variable
i=0
- for regtype,regid,toss,numregs in regs:
- if (hex_common.is_writeonly(regid)):
- gen_helper_dest_decl_opn(f,regtype,regid,i)
- i += 1
+ if 'A_CONDEXEC' not in hex_common.attribdict[tag]:
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_writeonly(regid)):
+ gen_helper_dest_decl_opn(f,regtype,regid,i)
+ i += 1
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py
index 674bf37..ddddc9e 100755
--- a/target/hexagon/gen_helper_protos.py
+++ b/target/hexagon/gen_helper_protos.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
##
-## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
@@ -87,6 +87,7 @@
if hex_common.need_slot(tag): def_helper_size += 1
if hex_common.need_PC(tag): def_helper_size += 1
if hex_common.helper_needs_next_PC(tag): def_helper_size += 1
+ if hex_common.need_condexec_reg(tag, regs): def_helper_size += 1
f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))
## The return type is void
f.write(', void' )
@@ -96,6 +97,7 @@
if hex_common.need_part1(tag): def_helper_size += 1
if hex_common.need_slot(tag): def_helper_size += 1
if hex_common.need_PC(tag): def_helper_size += 1
+ if hex_common.need_condexec_reg(tag, regs): def_helper_size += 1
if hex_common.helper_needs_next_PC(tag): def_helper_size += 1
f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))
@@ -121,6 +123,14 @@
gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
+ ## For conditional instructions, we pass in the destination register
+ if 'A_CONDEXEC' in hex_common.attribdict[tag]:
+ for regtype, regid, toss, numregs in regs:
+ if (hex_common.is_writeonly(regid) and
+ not hex_common.is_hvx_reg(regtype)):
+ gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+
## Generate the qemu type for each input operand (regs and immediates)
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h
index b2e7880..bcf0cf4 100644
--- a/target/hexagon/gen_tcg.h
+++ b/target/hexagon/gen_tcg.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -332,8 +332,6 @@
tcg_gen_movi_tl(EA, 0); \
PRED; \
CHECK_NOSHUF_PRED(GET_EA, SIZE, LSB); \
- PRED_LOAD_CANCEL(LSB, EA); \
- tcg_gen_movi_tl(RdV, 0); \
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
fLOAD(1, SIZE, SIGN, EA, RdV); \
gen_set_label(label); \
@@ -391,8 +389,6 @@
tcg_gen_movi_tl(EA, 0); \
PRED; \
CHECK_NOSHUF_PRED(GET_EA, 8, LSB); \
- PRED_LOAD_CANCEL(LSB, EA); \
- tcg_gen_movi_i64(RddV, 0); \
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
fLOAD(1, 8, u, EA, RddV); \
gen_set_label(label); \
@@ -419,16 +415,16 @@
#define fGEN_TCG_STORE(SHORTCODE) \
do { \
- TCGv HALF = tcg_temp_new(); \
- TCGv BYTE = tcg_temp_new(); \
+ TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \
+ TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \
SHORTCODE; \
} while (0)
#define fGEN_TCG_STORE_pcr(SHIFT, STORE) \
do { \
TCGv ireg = tcg_temp_new(); \
- TCGv HALF = tcg_temp_new(); \
- TCGv BYTE = tcg_temp_new(); \
+ TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \
+ TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \
tcg_gen_mov_tl(EA, RxV); \
gen_read_ireg(ireg, MuV, SHIFT); \
gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
@@ -492,6 +488,59 @@
fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, NtN))
/*
+ * dealloc_return
+ * Assembler mapped to
+ * r31:30 = dealloc_return(r30):raw
+ */
+#define fGEN_TCG_L4_return(SHORTCODE) \
+ gen_return(ctx, RddV, RsV)
+
+/*
+ * sub-instruction version (no RddV, so handle it manually)
+ */
+#define fGEN_TCG_SL2_return(SHORTCODE) \
+ do { \
+ TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP); \
+ gen_return(ctx, RddV, hex_gpr[HEX_REG_FP]); \
+ gen_log_reg_write_pair(HEX_REG_FP, RddV); \
+ } while (0)
+
+/*
+ * Conditional returns follow this naming convention
+ * _t predicate true
+ * _f predicate false
+ * _tnew_pt predicate.new true predict taken
+ * _fnew_pt predicate.new false predict taken
+ * _tnew_pnt predicate.new true predict not taken
+ * _fnew_pnt predicate.new false predict not taken
+ * Predictions are not modelled in QEMU
+ *
+ * Example:
+ * if (p1) r31:30 = dealloc_return(r30):raw
+ */
+#define fGEN_TCG_L4_return_t(SHORTCODE) \
+ gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_EQ);
+#define fGEN_TCG_L4_return_f(SHORTCODE) \
+ gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_NE)
+#define fGEN_TCG_L4_return_tnew_pt(SHORTCODE) \
+ gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ)
+#define fGEN_TCG_L4_return_fnew_pt(SHORTCODE) \
+ gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE)
+#define fGEN_TCG_L4_return_tnew_pnt(SHORTCODE) \
+ gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ)
+#define fGEN_TCG_L4_return_fnew_pnt(SHORTCODE) \
+ gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE)
+
+#define fGEN_TCG_SL2_return_t(SHORTCODE) \
+ gen_cond_return_subinsn(ctx, TCG_COND_EQ, hex_pred[0])
+#define fGEN_TCG_SL2_return_f(SHORTCODE) \
+ gen_cond_return_subinsn(ctx, TCG_COND_NE, hex_pred[0])
+#define fGEN_TCG_SL2_return_tnew(SHORTCODE) \
+ gen_cond_return_subinsn(ctx, TCG_COND_EQ, hex_new_pred_value[0])
+#define fGEN_TCG_SL2_return_fnew(SHORTCODE) \
+ gen_cond_return_subinsn(ctx, TCG_COND_NE, hex_new_pred_value[0])
+
+/*
* Mathematical operations with more than one definition require
* special handling
*/
@@ -589,14 +638,24 @@
#define fGEN_TCG_J2_call(SHORTCODE) \
gen_call(ctx, riV)
+#define fGEN_TCG_J2_callr(SHORTCODE) \
+ gen_callr(ctx, RsV)
#define fGEN_TCG_J2_callt(SHORTCODE) \
gen_cond_call(ctx, PuV, TCG_COND_EQ, riV)
#define fGEN_TCG_J2_callf(SHORTCODE) \
gen_cond_call(ctx, PuV, TCG_COND_NE, riV)
+#define fGEN_TCG_J2_callrt(SHORTCODE) \
+ gen_cond_callr(ctx, TCG_COND_EQ, PuV, RsV)
+#define fGEN_TCG_J2_callrf(SHORTCODE) \
+ gen_cond_callr(ctx, TCG_COND_NE, PuV, RsV)
#define fGEN_TCG_J2_endloop0(SHORTCODE) \
gen_endloop0(ctx)
+#define fGEN_TCG_J2_endloop1(SHORTCODE) \
+ gen_endloop1(ctx)
+#define fGEN_TCG_J2_endloop01(SHORTCODE) \
+ gen_endloop01(ctx)
/*
* Compound compare and jump instructions
@@ -986,6 +1045,19 @@
#define fGEN_TCG_S2_asl_r_r_sat(SHORTCODE) \
gen_asl_r_r_sat(RdV, RsV, RtV)
+#define fGEN_TCG_SL2_jumpr31(SHORTCODE) \
+ gen_jumpr(ctx, hex_gpr[HEX_REG_LR])
+
+#define fGEN_TCG_SL2_jumpr31_t(SHORTCODE) \
+ gen_cond_jumpr31(ctx, TCG_COND_EQ, hex_pred[0])
+#define fGEN_TCG_SL2_jumpr31_f(SHORTCODE) \
+ gen_cond_jumpr31(ctx, TCG_COND_NE, hex_pred[0])
+
+#define fGEN_TCG_SL2_jumpr31_tnew(SHORTCODE) \
+ gen_cond_jumpr31(ctx, TCG_COND_EQ, hex_new_pred_value[0])
+#define fGEN_TCG_SL2_jumpr31_fnew(SHORTCODE) \
+ gen_cond_jumpr31(ctx, TCG_COND_NE, hex_new_pred_value[0])
+
/* Floating point */
#define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \
gen_helper_conv_sf2df(RddV, cpu_env, RsV)
diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py
index 02cb52c..fa93e18 100755
--- a/target/hexagon/gen_tcg_funcs.py
+++ b/target/hexagon/gen_tcg_funcs.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
##
-## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
@@ -30,37 +30,33 @@
def genptr_decl_pair_writable(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
- f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \
- (regtype, regid))
- if (regtype == "C"):
+ if (regtype == "R"):
+ f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
+ elif (regtype == "C"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
else:
- f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
- if ('A_CONDEXEC' in hex_common.attribdict[tag]):
- f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN)
- f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \
- (regN, regN))
- f.write(" }\n")
- f.write(" if (!is_preloaded(ctx, %s + 1)) {\n" % regN)
- f.write(" tcg_gen_mov_tl(hex_new_value[%s + 1], hex_gpr[%s + 1]);\n" % \
- (regN, regN))
- f.write(" }\n")
+ print("Bad register parse: ", regtype, regid)
+ f.write(" TCGv_i64 %s%sV = get_result_gpr_pair(ctx, %s);\n" % \
+ (regtype, regid, regN))
def genptr_decl_writable(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
- f.write(" TCGv %s%sV = tcg_temp_new();\n" % \
- (regtype, regid))
- if (regtype == "C"):
+ if (regtype == "R"):
+ f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
+ f.write(" TCGv %s%sV = get_result_gpr(ctx, %s);\n" % \
+ (regtype, regid, regN))
+ elif (regtype == "C"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
- else:
+ f.write(" TCGv %s%sV = get_result_gpr(ctx, %s);\n" % \
+ (regtype, regid, regN))
+ elif (regtype == "P"):
f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
- if ('A_CONDEXEC' in hex_common.attribdict[tag]):
- f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN)
- f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \
- (regN, regN))
- f.write(" }\n")
+ f.write(" TCGv %s%sV = tcg_temp_new();\n" % \
+ (regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
def genptr_decl(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
@@ -166,17 +162,6 @@
f.write(" ctx_future_vreg_off(ctx, %s%sN," % \
(regtype, regid))
f.write(" 1, true);\n");
- if 'A_CONDEXEC' in hex_common.attribdict[tag]:
- f.write(" if (!is_vreg_preloaded(ctx, %s)) {\n" % (regN))
- f.write(" intptr_t src_off =")
- f.write(" offsetof(CPUHexagonState, VRegs[%s%sN]);\n"% \
- (regtype, regid))
- f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \
- (regtype, regid))
- f.write(" src_off,\n")
- f.write(" sizeof(MMVector),\n")
- f.write(" sizeof(MMVector));\n")
- f.write(" }\n")
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
@@ -191,8 +176,7 @@
(regtype, regid, regno))
f.write(" const intptr_t %s%sV_off =\n" % \
(regtype, regid))
- f.write(" offsetof(CPUHexagonState,\n")
- f.write(" future_QRegs[%s%sN]);\n" % \
+ f.write(" get_result_qreg(ctx, %s%sN);\n" % \
(regtype, regid))
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
@@ -274,8 +258,12 @@
f.write(" hex_gpr[%s%sN + 1]);\n" % \
(regtype, regid))
elif (regid in {"x", "y"}):
- f.write(" tcg_gen_mov_tl(%s%sV, hex_gpr[%s%sN]);\n" % \
- (regtype,regid,regtype,regid))
+ ## For read/write registers, we need to get the original value into
+ ## the result TCGv. For conditional instructions, this is done in
+ ## gen_start_packet. For unconditional instructions, we do it here.
+ if ('A_CONDEXEC' not in hex_common.attribdict[tag]):
+ f.write(" tcg_gen_mov_tl(%s%sV, hex_gpr[%s%sN]);\n" % \
+ (regtype, regid, regtype, regid))
elif (regid not in {"s", "t", "u", "v"}):
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
@@ -385,37 +373,22 @@
f.write(", tcgv_%s" % hex_common.imm_name(immlett))
def genptr_dst_write_pair(f, tag, regtype, regid):
- if ('A_CONDEXEC' in hex_common.attribdict[tag]):
- f.write(" gen_log_predicated_reg_write_pair(%s%sN, %s%sV, insn->slot);\n" % \
- (regtype, regid, regtype, regid))
- else:
- f.write(" gen_log_reg_write_pair(%s%sN, %s%sV);\n" % \
- (regtype, regid, regtype, regid))
- f.write(" ctx_log_reg_write_pair(ctx, %s%sN);\n" % \
- (regtype, regid))
+ f.write(" gen_log_reg_write_pair(%s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
def genptr_dst_write(f, tag, regtype, regid):
if (regtype == "R"):
if (regid in {"dd", "xx", "yy"}):
genptr_dst_write_pair(f, tag, regtype, regid)
elif (regid in {"d", "e", "x", "y"}):
- if ('A_CONDEXEC' in hex_common.attribdict[tag]):
- f.write(" gen_log_predicated_reg_write(%s%sN, %s%sV,\n" % \
- (regtype, regid, regtype, regid))
- f.write(" insn->slot);\n")
- else:
- f.write(" gen_log_reg_write(%s%sN, %s%sV);\n" % \
- (regtype, regid, regtype, regid))
- f.write(" ctx_log_reg_write(ctx, %s%sN);\n" % \
- (regtype, regid))
+ f.write(" gen_log_reg_write(%s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid in {"d", "e", "x"}):
f.write(" gen_log_pred_write(ctx, %s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
- f.write(" ctx_log_pred_write(ctx, %s%sN);\n" % \
- (regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "C"):
@@ -432,43 +405,18 @@
def genptr_dst_write_ext(f, tag, regtype, regid, newv="EXT_DFL"):
if (regtype == "V"):
- if (regid in {"dd", "xx", "yy"}):
- if ('A_CONDEXEC' in hex_common.attribdict[tag]):
- is_predicated = "true"
- else:
- is_predicated = "false"
+ if (regid in {"xx"}):
f.write(" gen_log_vreg_write_pair(ctx, %s%sV_off, %s%sN, " % \
(regtype, regid, regtype, regid))
- f.write("%s, insn->slot, %s);\n" % \
- (newv, is_predicated))
- f.write(" ctx_log_vreg_write_pair(ctx, %s%sN, %s,\n" % \
- (regtype, regid, newv))
- f.write(" %s);\n" % (is_predicated))
- elif (regid in {"d", "x", "y"}):
- if ('A_CONDEXEC' in hex_common.attribdict[tag]):
- is_predicated = "true"
- else:
- is_predicated = "false"
- f.write(" gen_log_vreg_write(ctx, %s%sV_off, %s%sN, %s, " % \
+ f.write("%s);\n" % \
+ (newv))
+ elif (regid in {"y"}):
+ f.write(" gen_log_vreg_write(ctx, %s%sV_off, %s%sN, %s);\n" % \
(regtype, regid, regtype, regid, newv))
- f.write("insn->slot, %s);\n" % \
- (is_predicated))
- f.write(" ctx_log_vreg_write(ctx, %s%sN, %s, %s);\n" % \
- (regtype, regid, newv, is_predicated))
- else:
+ elif (regid not in {"dd", "d", "x"}):
print("Bad register parse: ", regtype, regid)
elif (regtype == "Q"):
- if (regid in {"d", "e", "x"}):
- if ('A_CONDEXEC' in hex_common.attribdict[tag]):
- is_predicated = "true"
- else:
- is_predicated = "false"
- f.write(" gen_log_qreg_write(%s%sV_off, %s%sN, %s, " % \
- (regtype, regid, regtype, regid, newv))
- f.write("insn->slot, %s);\n" % (is_predicated))
- f.write(" ctx_log_qreg_write(ctx, %s%sN, %s);\n" % \
- (regtype, regid, is_predicated))
- else:
+ if (regid not in {"d", "e", "x"}):
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
@@ -500,15 +448,15 @@
## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
## We produce:
## static void generate_A2_add(DisasContext *ctx)
-## {
-## TCGv RdV = tcg_temp_new();
-## const int RdN = insn->regno[0];
-## TCGv RsV = hex_gpr[insn->regno[1]];
-## TCGv RtV = hex_gpr[insn->regno[2]];
-## <GEN>
-## gen_log_reg_write(RdN, RdV);
-## ctx_log_reg_write(ctx, RdN);
-## }
+## {
+## Insn *insn __attribute__((unused)) = ctx->insn;
+## const int RdN = insn->regno[0];
+## TCGv RdV = get_result_gpr(ctx, RdN);
+## TCGv RsV = hex_gpr[insn->regno[1]];
+## TCGv RtV = hex_gpr[insn->regno[2]];
+## <GEN>
+## gen_log_reg_write(RdN, RdV);
+## }
##
## where <GEN> depends on hex_common.skip_qemu_helper(tag)
## if hex_common.skip_qemu_helper(tag) is True
@@ -592,6 +540,14 @@
if (i > 0): f.write(", ")
f.write("cpu_env")
i=1
+ ## For conditional instructions, we pass in the destination register
+ if 'A_CONDEXEC' in hex_common.attribdict[tag]:
+ for regtype, regid, toss, numregs in regs:
+ if (hex_common.is_writeonly(regid) and
+ not hex_common.is_hvx_reg(regtype)):
+ gen_helper_call_opn(f, tag, regtype, regid, toss, \
+ numregs, i)
+ i += 1
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
if (not hex_common.is_hvx_reg(regtype)):
diff --git a/target/hexagon/gen_tcg_hvx.h b/target/hexagon/gen_tcg_hvx.h
index 94f272e..d4aefe8 100644
--- a/target/hexagon/gen_tcg_hvx.h
+++ b/target/hexagon/gen_tcg_hvx.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -133,16 +133,11 @@
do { \
TCGv lsb = tcg_temp_new(); \
TCGLabel *false_label = gen_new_label(); \
- TCGLabel *end_label = gen_new_label(); \
tcg_gen_andi_tl(lsb, PsV, 1); \
tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \
tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
sizeof(MMVector), sizeof(MMVector)); \
- tcg_gen_br(end_label); \
gen_set_label(false_label); \
- tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
- 1 << insn->slot); \
- gen_set_label(end_label); \
} while (0)
@@ -547,17 +542,12 @@
do { \
TCGv LSB = tcg_temp_new(); \
TCGLabel *false_label = gen_new_label(); \
- TCGLabel *end_label = gen_new_label(); \
GET_EA; \
PRED; \
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
gen_vreg_load(ctx, DSTOFF, EA, true); \
INC; \
- tcg_gen_br(end_label); \
gen_set_label(false_label); \
- tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
- 1 << insn->slot); \
- gen_set_label(end_label); \
} while (0)
#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \
@@ -717,17 +707,12 @@
do { \
TCGv LSB = tcg_temp_new(); \
TCGLabel *false_label = gen_new_label(); \
- TCGLabel *end_label = gen_new_label(); \
GET_EA; \
PRED; \
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \
INC; \
- tcg_gen_br(end_label); \
gen_set_label(false_label); \
- tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
- 1 << insn->slot); \
- gen_set_label(end_label); \
} while (0)
#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
index 86bd093..bb274d4 100644
--- a/target/hexagon/genptr.c
+++ b/target/hexagon/genptr.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,26 +68,17 @@
}
}
-static inline void gen_log_predicated_reg_write(int rnum, TCGv val,
- uint32_t slot)
+static TCGv get_result_gpr(DisasContext *ctx, int rnum)
{
- TCGv zero = tcg_constant_tl(0);
- TCGv slot_mask = tcg_temp_new();
+ return hex_new_value[rnum];
+}
- tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
- tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero,
- val, hex_new_value[rnum]);
- if (HEX_DEBUG) {
- /*
- * Do this so HELPER(debug_commit_end) will know
- *
- * Note that slot_mask indicates the value is not written
- * (i.e., slot was cancelled), so we create a true/false value before
- * or'ing with hex_reg_written[rnum].
- */
- tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
- tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
- }
+static TCGv_i64 get_result_gpr_pair(DisasContext *ctx, int rnum)
+{
+ TCGv_i64 result = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(result, hex_new_value[rnum],
+ hex_new_value[rnum + 1]);
+ return result;
}
void gen_log_reg_write(int rnum, TCGv val)
@@ -102,39 +93,6 @@
}
}
-static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val,
- uint32_t slot)
-{
- TCGv val32 = tcg_temp_new();
- TCGv zero = tcg_constant_tl(0);
- TCGv slot_mask = tcg_temp_new();
-
- tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
- /* Low word */
- tcg_gen_extrl_i64_i32(val32, val);
- tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum],
- slot_mask, zero,
- val32, hex_new_value[rnum]);
- /* High word */
- tcg_gen_extrh_i64_i32(val32, val);
- tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1],
- slot_mask, zero,
- val32, hex_new_value[rnum + 1]);
- if (HEX_DEBUG) {
- /*
- * Do this so HELPER(debug_commit_end) will know
- *
- * Note that slot_mask indicates the value is not written
- * (i.e., slot was cancelled), so we create a true/false value before
- * or'ing with hex_reg_written[rnum].
- */
- tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
- tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
- tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1],
- slot_mask);
- }
-}
-
static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
{
const target_ulong reg_mask_low = reg_immut_masks[rnum];
@@ -180,6 +138,7 @@
hex_new_pred_value[pnum], base_val);
}
tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);
+ set_bit(pnum, ctx->pregs_written);
}
static inline void gen_read_p3_0(TCGv control_reg)
@@ -256,7 +215,6 @@
for (int i = 0; i < NUM_PREGS; i++) {
tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8);
gen_log_pred_write(ctx, i, hex_p8);
- ctx_log_pred_write(ctx, i);
}
}
@@ -274,7 +232,6 @@
gen_write_p3_0(ctx, val);
} else {
gen_log_reg_write(reg_num, val);
- ctx_log_reg_write(ctx, reg_num);
if (reg_num == HEX_REG_QEMU_PKT_CNT) {
ctx->num_packets = 0;
}
@@ -291,15 +248,14 @@
TCGv_i64 val)
{
if (reg_num == HEX_REG_P3_0_ALIASED) {
+ TCGv result = get_result_gpr(ctx, reg_num + 1);
TCGv val32 = tcg_temp_new();
tcg_gen_extrl_i64_i32(val32, val);
gen_write_p3_0(ctx, val32);
tcg_gen_extrh_i64_i32(val32, val);
- gen_log_reg_write(reg_num + 1, val32);
- ctx_log_reg_write(ctx, reg_num + 1);
+ tcg_gen_mov_tl(result, val32);
} else {
gen_log_reg_write_pair(reg_num, val);
- ctx_log_reg_write_pair(ctx, reg_num);
if (reg_num == HEX_REG_QEMU_PKT_CNT) {
ctx->num_packets = 0;
ctx->num_insns = 0;
@@ -571,6 +527,13 @@
gen_write_new_pc_addr(ctx, dst_pc, cond, pred);
}
+static void gen_cond_jumpr31(DisasContext *ctx, TCGCond cond, TCGv pred)
+{
+ TCGv LSB = tcg_temp_new();
+ tcg_gen_andi_tl(LSB, pred, 1);
+ gen_cond_jumpr(ctx, hex_gpr[HEX_REG_LR], cond, LSB);
+}
+
static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred,
int pc_off)
{
@@ -669,27 +632,99 @@
static void gen_call(DisasContext *ctx, int pc_off)
{
- TCGv next_PC =
- tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes);
- gen_log_reg_write(HEX_REG_LR, next_PC);
+ TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
+ tcg_gen_movi_tl(lr, ctx->next_PC);
gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}
+static void gen_callr(DisasContext *ctx, TCGv new_pc)
+{
+ TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
+ tcg_gen_movi_tl(lr, ctx->next_PC);
+ gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
+}
+
static void gen_cond_call(DisasContext *ctx, TCGv pred,
TCGCond cond, int pc_off)
{
- TCGv next_PC;
+ TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
TCGv lsb = tcg_temp_new();
TCGLabel *skip = gen_new_label();
tcg_gen_andi_tl(lsb, pred, 1);
gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb);
tcg_gen_brcondi_tl(cond, lsb, 0, skip);
- next_PC =
- tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes);
- gen_log_reg_write(HEX_REG_LR, next_PC);
+ tcg_gen_movi_tl(lr, ctx->next_PC);
gen_set_label(skip);
}
+static void gen_cond_callr(DisasContext *ctx,
+ TCGCond cond, TCGv pred, TCGv new_pc)
+{
+ TCGv lsb = tcg_temp_new();
+ TCGLabel *skip = gen_new_label();
+ tcg_gen_andi_tl(lsb, pred, 1);
+ tcg_gen_brcondi_tl(cond, lsb, 0, skip);
+ gen_callr(ctx, new_pc);
+ gen_set_label(skip);
+}
+
+/* frame ^= (int64_t)FRAMEKEY << 32 */
+static void gen_frame_unscramble(TCGv_i64 frame)
+{
+ TCGv_i64 framekey = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(framekey, hex_gpr[HEX_REG_FRAMEKEY]);
+ tcg_gen_shli_i64(framekey, framekey, 32);
+ tcg_gen_xor_i64(frame, frame, framekey);
+}
+
+static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
+{
+ Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */
+ CHECK_NOSHUF(EA, 8);
+ tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx);
+}
+
+static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
+{
+ /*
+ * frame = *src
+ * dst = frame_unscramble(frame)
+ * SP = src + 8
+ * PC = dst.w[1]
+ */
+ TCGv_i64 frame = tcg_temp_new_i64();
+ TCGv r31 = tcg_temp_new();
+ TCGv r29 = get_result_gpr(ctx, HEX_REG_SP);
+
+ gen_load_frame(ctx, frame, src);
+ gen_frame_unscramble(frame);
+ tcg_gen_mov_i64(dst, frame);
+ tcg_gen_addi_tl(r29, src, 8);
+ tcg_gen_extrh_i64_i32(r31, dst);
+ gen_jumpr(ctx, r31);
+}
+
+/* if (pred) dst = dealloc_return(src):raw */
+static void gen_cond_return(DisasContext *ctx, TCGv_i64 dst, TCGv src,
+ TCGv pred, TCGCond cond)
+{
+ TCGv LSB = tcg_temp_new();
+ TCGLabel *skip = gen_new_label();
+ tcg_gen_andi_tl(LSB, pred, 1);
+
+ tcg_gen_brcondi_tl(cond, LSB, 0, skip);
+ gen_return(ctx, dst, src);
+ gen_set_label(skip);
+}
+
+/* sub-instruction version (no RddV, so handle it manually) */
+static void gen_cond_return_subinsn(DisasContext *ctx, TCGCond cond, TCGv pred)
+{
+ TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP);
+ gen_cond_return(ctx, RddV, hex_gpr[HEX_REG_FP], pred, cond);
+ gen_log_reg_write_pair(HEX_REG_FP, RddV);
+}
+
static void gen_endloop0(DisasContext *ctx)
{
TCGv lpcfg = tcg_temp_new();
@@ -737,14 +772,95 @@
TCGLabel *label3 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
{
+ TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
- tcg_gen_subi_tl(hex_new_value[HEX_REG_LC0],
- hex_gpr[HEX_REG_LC0], 1);
+ tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
}
gen_set_label(label3);
}
}
+static void gen_endloop1(DisasContext *ctx)
+{
+ /*
+ * if (hex_gpr[HEX_REG_LC1] > 1) {
+ * PC = hex_gpr[HEX_REG_SA1];
+ * hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1;
+ * }
+ */
+ TCGLabel *label = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, label);
+ {
+ TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
+ gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
+ tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
+ }
+ gen_set_label(label);
+}
+
+static void gen_endloop01(DisasContext *ctx)
+{
+ TCGv lpcfg = tcg_temp_new();
+ TCGLabel *label1 = gen_new_label();
+ TCGLabel *label2 = gen_new_label();
+ TCGLabel *label3 = gen_new_label();
+ TCGLabel *done = gen_new_label();
+
+ GET_USR_FIELD(USR_LPCFG, lpcfg);
+
+ /*
+ * if (lpcfg == 1) {
+ * hex_new_pred_value[3] = 0xff;
+ * hex_pred_written |= 1 << 3;
+ * }
+ */
+ tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
+ {
+ tcg_gen_movi_tl(hex_new_pred_value[3], 0xff);
+ tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3);
+ }
+ gen_set_label(label1);
+
+ /*
+ * if (lpcfg) {
+ * SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
+ * }
+ */
+ tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
+ {
+ tcg_gen_subi_tl(lpcfg, lpcfg, 1);
+ SET_USR_FIELD(USR_LPCFG, lpcfg);
+ }
+ gen_set_label(label2);
+
+ /*
+ * if (hex_gpr[HEX_REG_LC0] > 1) {
+ * PC = hex_gpr[HEX_REG_SA0];
+ * hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
+ * } else {
+ * if (hex_gpr[HEX_REG_LC1] > 1) {
+ * hex_next_pc = hex_gpr[HEX_REG_SA1];
+ * hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1;
+ * }
+ * }
+ */
+ tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
+ {
+ TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
+ gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
+ tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
+ tcg_gen_br(done);
+ }
+ gen_set_label(label3);
+ tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, done);
+ {
+ TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
+ gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
+ tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
+ }
+ gen_set_label(done);
+}
+
static void gen_cmp_jumpnv(DisasContext *ctx,
TCGCond cond, TCGv val, TCGv src, int pc_off)
{
@@ -869,68 +985,32 @@
}
static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
- VRegWriteType type, int slot_num,
- bool is_predicated)
+ VRegWriteType type)
{
- TCGLabel *label_end = NULL;
intptr_t dstoff;
- if (is_predicated) {
- TCGv cancelled = tcg_temp_new();
- label_end = gen_new_label();
-
- /* Don't do anything if the slot was cancelled */
- tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
- tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
- }
-
if (type != EXT_TMP) {
dstoff = ctx_future_vreg_off(ctx, num, 1, true);
tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
sizeof(MMVector), sizeof(MMVector));
- tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num);
} else {
dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
sizeof(MMVector), sizeof(MMVector));
}
-
- if (is_predicated) {
- gen_set_label(label_end);
- }
}
static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
- VRegWriteType type, int slot_num,
- bool is_predicated)
+ VRegWriteType type)
{
- gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated);
+ gen_log_vreg_write(ctx, srcoff, num ^ 0, type);
srcoff += sizeof(MMVector);
- gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated);
+ gen_log_vreg_write(ctx, srcoff, num ^ 1, type);
}
-static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew,
- int slot_num, bool is_predicated)
+static intptr_t get_result_qreg(DisasContext *ctx, int qnum)
{
- TCGLabel *label_end = NULL;
- intptr_t dstoff;
-
- if (is_predicated) {
- TCGv cancelled = tcg_temp_new();
- label_end = gen_new_label();
-
- /* Don't do anything if the slot was cancelled */
- tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
- tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
- }
-
- dstoff = offsetof(CPUHexagonState, future_QRegs[num]);
- tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg));
-
- if (is_predicated) {
- tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num);
- gen_set_label(label_end);
- }
+ return offsetof(CPUHexagonState, future_QRegs[qnum]);
}
static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py
index a29f61b..0200a66 100755
--- a/target/hexagon/hex_common.py
+++ b/target/hexagon/hex_common.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
##
-## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
@@ -89,6 +89,7 @@
add_qemu_macro_attrib('fWRITE_P3', 'A_WRITES_PRED_REG')
add_qemu_macro_attrib('fSET_OVERFLOW', 'A_IMPLICIT_WRITES_USR')
add_qemu_macro_attrib('fSET_LPCFG', 'A_IMPLICIT_WRITES_USR')
+ add_qemu_macro_attrib('fLOAD', 'A_SCALAR_LOAD')
add_qemu_macro_attrib('fSTORE', 'A_SCALAR_STORE')
# Recurse down macros, find attributes from sub-macros
@@ -236,6 +237,13 @@
def need_pkt_has_multi_cof(tag):
return 'A_COF' in attribdict[tag]
+def need_condexec_reg(tag, regs):
+ if 'A_CONDEXEC' in attribdict[tag]:
+ for regtype, regid, toss, numregs in regs:
+ if is_writeonly(regid) and not is_hvx_reg(regtype):
+ return True
+ return False
+
def skip_qemu_helper(tag):
return tag in overrides.keys()
diff --git a/target/hexagon/idef-parser/idef-parser.h b/target/hexagon/idef-parser/idef-parser.h
index 17d2ebf..d23e71f 100644
--- a/target/hexagon/idef-parser/idef-parser.h
+++ b/target/hexagon/idef-parser/idef-parser.h
@@ -82,7 +82,6 @@
VALUE,
QEMU_TMP,
IMM_PC,
- IMM_NPC,
IMM_CONSTEXT,
};
diff --git a/target/hexagon/idef-parser/idef-parser.lex b/target/hexagon/idef-parser/idef-parser.lex
index ff87a02..5eb8ac5 100644
--- a/target/hexagon/idef-parser/idef-parser.lex
+++ b/target/hexagon/idef-parser/idef-parser.lex
@@ -5,7 +5,7 @@
%{
/*
- * Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved.
+ * Copyright(c) 2019-2023 rev.ng Labs Srl. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -140,8 +140,6 @@
yylval->rvalue.is_dotnew = true;
yylval->rvalue.signedness = SIGNED;
return PRED; }
-"IV1DEAD()" |
-"fPAUSE(uiV);" { return ';'; }
"+=" { return INC; }
"-=" { return DEC; }
"++" { return PLUSPLUS; }
@@ -159,9 +157,8 @@
"else" { return ELSE; }
"for" { return FOR; }
"fREAD_IREG" { return ICIRC; }
-"fPART1" { return PART1; }
"if" { return IF; }
-"fFRAME_SCRAMBLE" { return FSCR; }
+"fFRAME_SCRAMBLE" |
"fFRAME_UNSCRAMBLE" { return FSCR; }
"fFRAMECHECK" { return FCHK; }
"Constant_extended" { return CONSTEXT; }
@@ -312,14 +309,10 @@
"(unsigned int)" { yylval->cast.bit_width = 32;
yylval->cast.signedness = UNSIGNED;
return CAST; }
-"fREAD_PC()" |
-"PC" { return PC; }
-"fREAD_NPC()" |
-"NPC" { return NPC; }
-"fGET_LPCFG" |
+"fREAD_PC()" { return PC; }
"USR.LPCFG" { return LPCFG; }
"LOAD_CANCEL(EA)" { return LOAD_CANCEL; }
-"STORE_CANCEL(EA)" |
+"STORE_CANCEL(EA)" { return STORE_CANCEL; }
"CANCEL" { return CANCEL; }
"N"{LOWER_ID}"N" { yylval->rvalue.type = REGISTER_ARG;
yylval->rvalue.reg.type = DOTNEW;
@@ -360,14 +353,6 @@
yylval->rvalue.bit_width = 32;
yylval->rvalue.signedness = UNSIGNED;
return REG; }
-"fREAD_LC"[01] { yylval->rvalue.type = REGISTER;
- yylval->rvalue.reg.type = CONTROL;
- yylval->rvalue.reg.id = HEX_REG_LC0
- + (yytext[8] - '0') * 2;
- yylval->rvalue.reg.bit_width = 32;
- yylval->rvalue.bit_width = 32;
- yylval->rvalue.signedness = UNSIGNED;
- return REG; }
"LC"[01] { yylval->rvalue.type = REGISTER;
yylval->rvalue.reg.type = CONTROL;
yylval->rvalue.reg.id = HEX_REG_LC0
@@ -376,14 +361,6 @@
yylval->rvalue.bit_width = 32;
yylval->rvalue.signedness = UNSIGNED;
return REG; }
-"fREAD_SA"[01] { yylval->rvalue.type = REGISTER;
- yylval->rvalue.reg.type = CONTROL;
- yylval->rvalue.reg.id = HEX_REG_SA0
- + (yytext[8] - '0') * 2;
- yylval->rvalue.reg.bit_width = 32;
- yylval->rvalue.bit_width = 32;
- yylval->rvalue.signedness = UNSIGNED;
- return REG; }
"SA"[01] { yylval->rvalue.type = REGISTER;
yylval->rvalue.reg.type = CONTROL;
yylval->rvalue.reg.id = HEX_REG_SA0
diff --git a/target/hexagon/idef-parser/idef-parser.y b/target/hexagon/idef-parser/idef-parser.y
index c784726..7d05773 100644
--- a/target/hexagon/idef-parser/idef-parser.y
+++ b/target/hexagon/idef-parser/idef-parser.y
@@ -1,6 +1,6 @@
%{
/*
- * Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved.
+ * Copyright(c) 2019-2023 rev.ng Labs Srl. All Rights Reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -52,8 +52,8 @@
%token IN INAME VAR
%token ABS CROUND ROUND CIRCADD COUNTONES INC DEC ANDA ORA XORA PLUSPLUS ASL
%token ASR LSR EQ NEQ LTE GTE MIN MAX ANDL FOR ICIRC IF MUN FSCR FCHK SXT
-%token ZXT CONSTEXT LOCNT BREV SIGN LOAD STORE PC NPC LPCFG
-%token LOAD_CANCEL CANCEL IDENTITY PART1 ROTL INSBITS SETBITS EXTRANGE
+%token ZXT CONSTEXT LOCNT BREV SIGN LOAD STORE PC LPCFG
+%token LOAD_CANCEL STORE_CANCEL CANCEL IDENTITY ROTL INSBITS SETBITS EXTRANGE
%token CAST4_8U FAIL CARRY_FROM_ADD ADDSAT64 LSBNEW
%token TYPE_SIZE_T TYPE_INT TYPE_SIGNED TYPE_UNSIGNED TYPE_LONG
@@ -336,15 +336,6 @@
OUT(c, &@1, &$1, " = ", &$3, ";\n");
$$ = $1;
}
- | PC '=' rvalue
- {
- @1.last_column = @3.last_column;
- yyassert(c, &@1, !is_inside_ternary(c),
- "Assignment side-effect not modeled!");
- $3 = gen_rvalue_truncate(c, &@1, &$3);
- $3 = rvalue_materialize(c, &@1, &$3);
- OUT(c, &@1, "gen_write_new_pc(", &$3, ");\n");
- }
| LOAD '(' IMM ',' IMM ',' SIGN ',' var ',' lvalue ')'
{
@1.last_column = @12.last_column;
@@ -412,7 +403,6 @@
| cancel_statement
| if_statement
| for_statement
- | fpart1_statement
;
frame_check : FCHK '(' rvalue ',' rvalue ')' ';'
@@ -422,10 +412,11 @@
{
gen_load_cancel(c, &@1);
}
- | CANCEL
+ | STORE_CANCEL
{
gen_cancel(c, &@1);
}
+ | CANCEL
;
if_statement : if_stmt
@@ -462,17 +453,6 @@
}
;
-fpart1_statement : PART1
- {
- OUT(c, &@1, "if (insn->part1) {\n");
- }
- '(' statements ')'
- {
- @1.last_column = @3.last_column;
- OUT(c, &@1, "return; }\n");
- }
- ;
-
if_stmt : IF '(' rvalue ')'
{
@1.last_column = @3.last_column;
@@ -512,20 +492,6 @@
rvalue.signedness = UNSIGNED;
$$ = rvalue;
}
- | NPC
- {
- /*
- * NPC is only read from CALLs, so we can hardcode it
- * at translation time
- */
- HexValue rvalue;
- memset(&rvalue, 0, sizeof(HexValue));
- rvalue.type = IMMEDIATE;
- rvalue.imm.type = IMM_NPC;
- rvalue.bit_width = 32;
- rvalue.signedness = UNSIGNED;
- $$ = rvalue;
- }
| CONSTEXT
{
HexValue rvalue;
@@ -781,11 +747,6 @@
/* Ones count */
$$ = gen_ctpop_op(c, &@1, &$3);
}
- | LPCFG
- {
- $$ = gen_tmp(c, &@1, 32, UNSIGNED);
- OUT(c, &@1, "GET_USR_FIELD(USR_LPCFG, ", &$$, ");\n");
- }
| EXTRACT '(' rvalue ',' rvalue ')'
{
@1.last_column = @6.last_column;
diff --git a/target/hexagon/idef-parser/macros.inc b/target/hexagon/idef-parser/macros.inc
index 6b697da..7478d4d 100644
--- a/target/hexagon/idef-parser/macros.inc
+++ b/target/hexagon/idef-parser/macros.inc
@@ -97,16 +97,8 @@
#define fWRITE_LR(A) (LR = A)
#define fWRITE_FP(A) (FP = A)
#define fWRITE_SP(A) (SP = A)
-/*
- * Note: There is a rule in the parser that matches `PC = ...` and emits
- * a call to `gen_write_new_pc`. We need to call `gen_write_new_pc` to
- * get the correct semantics when there are multiple stores in a packet.
- */
-#define fBRANCH(LOC, TYPE) (PC = LOC)
-#define fJUMPR(REGNO, TARGET, TYPE) (PC = TARGET)
#define fWRITE_LOOP_REGS0(START, COUNT) SA0 = START; (LC0 = COUNT)
#define fWRITE_LOOP_REGS1(START, COUNT) SA1 = START; (LC1 = COUNT)
-#define fWRITE_LC0(VAL) (LC0 = VAL)
#define fWRITE_LC1(VAL) (LC1 = VAL)
#define fSET_LPCFG(VAL) (USR.LPCFG = VAL)
#define fWRITE_P0(VAL) P0 = VAL;
@@ -121,7 +113,6 @@
#define fEA_GPI(IMM) (EA = fREAD_GP() + IMM)
#define fPM_I(REG, IMM) (REG = REG + IMM)
#define fPM_M(REG, MVAL) (REG = REG + MVAL)
-#define fWRITE_NPC(VAL) (PC = VAL)
/* Unary operators */
#define fROUND(A) (A + 0x8000)
diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c
index e1a5541..18cde6a 100644
--- a/target/hexagon/idef-parser/parser-helpers.c
+++ b/target/hexagon/idef-parser/parser-helpers.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved.
+ * Copyright(c) 2019-2023 rev.ng Labs Srl. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -185,9 +185,6 @@
case IMM_PC:
EMIT(c, "ctx->base.pc_next");
break;
- case IMM_NPC:
- EMIT(c, "ctx->npc");
- break;
case IMM_CONSTEXT:
EMIT(c, "insn->extension_valid");
break;
@@ -1323,10 +1320,6 @@
locp,
"gen_log_reg_write(", ®->reg.id, ", ",
&value_m, ");\n");
- OUT(c,
- locp,
- "ctx_log_reg_write(ctx, ", ®->reg.id,
- ");\n");
}
void gen_assign(Context *c,
@@ -1675,9 +1668,7 @@
for (unsigned i = 0; i < c->inst.init_list->len; i++) {
HexValue *val = &g_array_index(c->inst.init_list, HexValue, i);
if (val->type == REGISTER_ARG) {
- char reg_id[5];
- reg_compose(c, locp, &val->reg, reg_id);
- EMIT_HEAD(c, "tcg_gen_movi_i%u(%s, 0);\n", val->bit_width, reg_id);
+ /* Nothing to do here */
} else if (val->type == PREDICATE) {
char suffix = val->is_dotnew ? 'N' : 'V';
EMIT_HEAD(c, "tcg_gen_movi_i%u(P%c%c, 0);\n", val->bit_width,
@@ -1722,13 +1713,10 @@
*left_pred = gen_tmp(c, locp, 32, UNSIGNED);
}
/* Extract first 8 bits, and store new predicate value */
- OUT(c, locp, "tcg_gen_mov_i32(", left_pred, ", ", &r, ");\n");
- OUT(c, locp, "tcg_gen_andi_i32(", left_pred, ", ", left_pred,
- ", 0xff);\n");
+ OUT(c, locp, "tcg_gen_andi_i32(", left_pred, ", ", &r, ", 0xff);\n");
if (is_direct) {
OUT(c, locp, "gen_log_pred_write(ctx, ", pred_id, ", ", left_pred,
");\n");
- OUT(c, locp, "ctx_log_pred_write(ctx, ", pred_id, ");\n");
}
}
@@ -1739,7 +1727,6 @@
void gen_load_cancel(Context *c, YYLTYPE *locp)
{
- gen_cancel(c, locp);
OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
OUT(c, locp, "ctx->s1_store_processed = false;\n");
OUT(c, locp, "process_store(ctx, 1);\n");
diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index 17facad..482a9c7 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -205,26 +205,11 @@
#define CANCEL gen_cancel(slot);
#else
-#define CANCEL cancel_slot(env, slot)
+#define CANCEL do { } while (0)
#endif
#define LOAD_CANCEL(EA) do { CANCEL; } while (0)
-#ifdef QEMU_GENERATE
-static inline void gen_pred_cancel(TCGv pred, uint32_t slot_num)
- {
- TCGv slot_mask = tcg_temp_new();
- TCGv tmp = tcg_temp_new();
- TCGv zero = tcg_constant_tl(0);
- tcg_gen_ori_tl(slot_mask, hex_slot_cancelled, 1 << slot_num);
- tcg_gen_andi_tl(tmp, pred, 1);
- tcg_gen_movcond_tl(TCG_COND_EQ, hex_slot_cancelled, tmp, zero,
- slot_mask, hex_slot_cancelled);
-}
-#define PRED_LOAD_CANCEL(PRED, EA) \
- gen_pred_cancel(PRED, insn->is_endloop ? 4 : insn->slot)
-#endif
-
#define STORE_CANCEL(EA) { env->slot_cancelled |= (1 << slot); }
#define fMAX(A, B) (((A) > (B)) ? (A) : (B))
@@ -415,16 +400,6 @@
#define fBRANCH(LOC, TYPE) fWRITE_NPC(LOC)
#define fJUMPR(REGNO, TARGET, TYPE) fBRANCH(TARGET, COF_TYPE_JUMPR)
#define fHINTJR(TARGET) { /* Not modelled in qemu */}
-#define fCALL(A) \
- do { \
- fWRITE_LR(fREAD_NPC()); \
- fBRANCH(A, COF_TYPE_CALL); \
- } while (0)
-#define fCALLR(A) \
- do { \
- fWRITE_LR(fREAD_NPC()); \
- fBRANCH(A, COF_TYPE_CALLR); \
- } while (0)
#define fWRITE_LOOP_REGS0(START, COUNT) \
do { \
WRITE_RREG(HEX_REG_LC0, COUNT); \
diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build
index c9d31d0..da8e608 100644
--- a/target/hexagon/meson.build
+++ b/target/hexagon/meson.build
@@ -1,5 +1,5 @@
##
-## Copyright(c) 2020-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+## Copyright(c) 2020-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
@@ -183,7 +183,7 @@
)
bison = generator(
- find_program('bison'),
+ find_program('bison', version: '>=3.0'),
output: ['@BASENAME@.tab.c', '@BASENAME@.tab.h'],
arguments: ['@INPUT@', '--defines=@OUTPUT1@', '--output=@OUTPUT0@']
)
@@ -276,4 +276,13 @@
)
hexagon_ss.add(tcg_funcs_generated)
+analyze_funcs_generated = custom_target(
+ 'analyze_funcs_generated.c.inc',
+ output: 'analyze_funcs_generated.c.inc',
+ depends: helper_dep,
+ depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h],
+ command: [python, files('gen_analyze_funcs.py'), helper_in, '@OUTPUT@'],
+)
+hexagon_ss.add(analyze_funcs_generated)
+
target_arch += {'hexagon': hexagon_ss}
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index 35449ef..c9a1560 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,6 +30,7 @@
#include "mmvec/mmvec.h"
#include "mmvec/macros.h"
#include "op_helper.h"
+#include "translate.h"
#define SF_BIAS 127
#define SF_MANTBITS 23
@@ -105,30 +106,6 @@
env->mem_log_stores[slot].data64 = val;
}
-void write_new_pc(CPUHexagonState *env, bool pkt_has_multi_cof,
- target_ulong addr)
-{
- HEX_DEBUG_LOG("write_new_pc(0x" TARGET_FMT_lx ")\n", addr);
-
- if (pkt_has_multi_cof) {
- /*
- * If more than one branch is taken in a packet, only the first one
- * is actually done.
- */
- if (env->branch_taken) {
- HEX_DEBUG_LOG("INFO: multiple branches taken in same packet, "
- "ignoring the second one\n");
- } else {
- fCHECK_PCALIGN(addr);
- env->gpr[HEX_REG_PC] = addr;
- env->branch_taken = 1;
- }
- } else {
- fCHECK_PCALIGN(addr);
- env->gpr[HEX_REG_PC] = addr;
- }
-}
-
/* Handy place to set a breakpoint */
void HELPER(debug_start_packet)(CPUHexagonState *env)
{
@@ -439,9 +416,10 @@
return PeV;
}
-static void probe_store(CPUHexagonState *env, int slot, int mmu_idx)
+static void probe_store(CPUHexagonState *env, int slot, int mmu_idx,
+ bool is_predicated)
{
- if (!(env->slot_cancelled & (1 << slot))) {
+ if (!is_predicated || !(env->slot_cancelled & (1 << slot))) {
size1u_t width = env->mem_log_stores[slot].width;
target_ulong va = env->mem_log_stores[slot].va;
uintptr_t ra = GETPC();
@@ -461,9 +439,12 @@
}
/* Called during packet commit when there are two scalar stores */
-void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int mmu_idx)
+void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int args)
{
- probe_store(env, 0, mmu_idx);
+ int mmu_idx = FIELD_EX32(args, PROBE_PKT_SCALAR_STORE_S0, MMU_IDX);
+ bool is_predicated =
+ FIELD_EX32(args, PROBE_PKT_SCALAR_STORE_S0, IS_PREDICATED);
+ probe_store(env, 0, mmu_idx, is_predicated);
}
void HELPER(probe_hvx_stores)(CPUHexagonState *env, int mmu_idx)
@@ -510,15 +491,18 @@
void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask,
int mmu_idx)
{
- bool has_st0 = (mask >> 0) & 1;
- bool has_st1 = (mask >> 1) & 1;
- bool has_hvx_stores = (mask >> 2) & 1;
+ bool has_st0 = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST0);
+ bool has_st1 = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST1);
+ bool has_hvx_stores =
+ FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_HVX_STORES);
+ bool s0_is_pred = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, S0_IS_PRED);
+ bool s1_is_pred = FIELD_EX32(mask, PROBE_PKT_SCALAR_HVX_STORES, S1_IS_PRED);
if (has_st0) {
- probe_store(env, 0, mmu_idx);
+ probe_store(env, 0, mmu_idx, s0_is_pred);
}
if (has_st1) {
- probe_store(env, 1, mmu_idx);
+ probe_store(env, 1, mmu_idx, s1_is_pred);
}
if (has_hvx_stores) {
HELPER(probe_hvx_stores)(env, mmu_idx);
@@ -1193,7 +1177,7 @@
{
float32 neg_RsV;
arch_fpop_start(env);
- neg_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
+ neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
arch_fpop_end(env);
return RxV;
@@ -1468,12 +1452,6 @@
}
}
-void cancel_slot(CPUHexagonState *env, uint32_t slot)
-{
- HEX_DEBUG_LOG("Slot %d cancelled\n", slot);
- env->slot_cancelled |= (1 << slot);
-}
-
/* These macros can be referenced in the generated helper functions */
#define warn(...) /* Nothing */
#define fatal(...) g_assert_not_reached();
diff --git a/target/hexagon/op_helper.h b/target/hexagon/op_helper.h
index 02347ed..34b3a53 100644
--- a/target/hexagon/op_helper.h
+++ b/target/hexagon/op_helper.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,7 +19,6 @@
#define HEXAGON_OP_HELPER_H
/* Misc functions */
-void cancel_slot(CPUHexagonState *env, uint32_t slot);
void write_new_pc(CPUHexagonState *env, bool pkt_has_multi_cof, target_ulong addr);
uint8_t mem_load1(CPUHexagonState *env, uint32_t slot, target_ulong vaddr);
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index 93fd1b5..665476a 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,6 +29,15 @@
#include "translate.h"
#include "printinsn.h"
+#include "analyze_funcs_generated.c.inc"
+
+typedef void (*AnalyzeInsn)(DisasContext *ctx);
+static const AnalyzeInsn opcode_analyze[XX_LAST_OPCODE] = {
+#define OPCODE(X) [X] = analyze_##X
+#include "opcodes_def_generated.h.inc"
+#undef OPCODE
+};
+
TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
TCGv hex_pred[NUM_PREGS];
TCGv hex_this_PC;
@@ -47,8 +56,6 @@
TCGv hex_llsc_addr;
TCGv hex_llsc_val;
TCGv_i64 hex_llsc_val_i64;
-TCGv hex_VRegs_updated;
-TCGv hex_QRegs_updated;
TCGv hex_vstore_addr[VSTORES_MAX];
TCGv hex_vstore_size[VSTORES_MAX];
TCGv hex_vstore_pending[VSTORES_MAX];
@@ -239,7 +246,15 @@
static bool need_slot_cancelled(Packet *pkt)
{
- return check_for_attrib(pkt, A_CONDEXEC);
+ /* We only need slot_cancelled for conditional store instructions */
+ for (int i = 0; i < pkt->num_insns; i++) {
+ uint16_t opcode = pkt->insn[i].opcode;
+ if (GET_ATTRIB(opcode, A_CONDEXEC) &&
+ GET_ATTRIB(opcode, A_SCALAR_STORE)) {
+ return true;
+ }
+ }
+ return false;
}
static bool need_pred_written(Packet *pkt)
@@ -265,6 +280,77 @@
return false;
}
+/*
+ * The opcode_analyze functions mark most of the writes in a packet
+ * However, there are some implicit writes marked as attributes
+ * of the applicable instructions.
+ */
+static void mark_implicit_reg_write(DisasContext *ctx, int attrib, int rnum)
+{
+ uint16_t opcode = ctx->insn->opcode;
+ if (GET_ATTRIB(opcode, attrib)) {
+ /*
+ * USR is used to set overflow and FP exceptions,
+ * so treat it as conditional
+ */
+ bool is_predicated = GET_ATTRIB(opcode, A_CONDEXEC) ||
+ rnum == HEX_REG_USR;
+
+ /* LC0/LC1 is conditionally written by endloop instructions */
+ if ((rnum == HEX_REG_LC0 || rnum == HEX_REG_LC1) &&
+ (opcode == J2_endloop0 ||
+ opcode == J2_endloop1 ||
+ opcode == J2_endloop01)) {
+ is_predicated = true;
+ }
+
+ ctx_log_reg_write(ctx, rnum, is_predicated);
+ }
+}
+
+static void mark_implicit_reg_writes(DisasContext *ctx)
+{
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_FP, HEX_REG_FP);
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_SP, HEX_REG_SP);
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_LR, HEX_REG_LR);
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_LC0, HEX_REG_LC0);
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0);
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1);
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1);
+ mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_USR, HEX_REG_USR);
+ mark_implicit_reg_write(ctx, A_FPOP, HEX_REG_USR);
+}
+
+static void mark_implicit_pred_write(DisasContext *ctx, int attrib, int pnum)
+{
+ if (GET_ATTRIB(ctx->insn->opcode, attrib)) {
+ ctx_log_pred_write(ctx, pnum);
+ }
+}
+
+static void mark_implicit_pred_writes(DisasContext *ctx)
+{
+ mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P0, 0);
+ mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P1, 1);
+ mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P2, 2);
+ mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P3, 3);
+}
+
+static void analyze_packet(DisasContext *ctx)
+{
+ Packet *pkt = ctx->pkt;
+ ctx->need_pkt_has_store_s1 = false;
+ for (int i = 0; i < pkt->num_insns; i++) {
+ Insn *insn = &pkt->insn[i];
+ ctx->insn = insn;
+ if (opcode_analyze[insn->opcode]) {
+ opcode_analyze[insn->opcode](ctx);
+ }
+ mark_implicit_reg_writes(ctx);
+ mark_implicit_pred_writes(ctx);
+ }
+}
+
static void gen_start_packet(DisasContext *ctx)
{
Packet *pkt = ctx->pkt;
@@ -275,6 +361,7 @@
ctx->next_PC = next_PC;
ctx->reg_log_idx = 0;
bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS);
+ bitmap_zero(ctx->predicated_regs, TOTAL_PER_THREAD_REGS);
ctx->preg_log_idx = 0;
bitmap_zero(ctx->pregs_written, NUM_PREGS);
ctx->future_vregs_idx = 0;
@@ -283,14 +370,27 @@
bitmap_zero(ctx->vregs_updated_tmp, NUM_VREGS);
bitmap_zero(ctx->vregs_updated, NUM_VREGS);
bitmap_zero(ctx->vregs_select, NUM_VREGS);
+ bitmap_zero(ctx->predicated_future_vregs, NUM_VREGS);
+ bitmap_zero(ctx->predicated_tmp_vregs, NUM_VREGS);
ctx->qreg_log_idx = 0;
for (i = 0; i < STORES_MAX; i++) {
ctx->store_width[i] = 0;
}
- tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1);
ctx->s1_store_processed = false;
ctx->pre_commit = true;
+ analyze_packet(ctx);
+
+ if (ctx->need_pkt_has_store_s1) {
+ tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1);
+ }
+
+ /*
+ * pregs_written is used both in the analyze phase as well as the code
+ * gen phase, so clear it again.
+ */
+ bitmap_zero(ctx->pregs_written, NUM_PREGS);
+
if (HEX_DEBUG) {
/* Handy place to set a breakpoint before the packet executes */
gen_helper_debug_start_packet(cpu_env);
@@ -313,9 +413,42 @@
tcg_gen_movi_tl(hex_pred_written, 0);
}
- if (pkt->pkt_has_hvx) {
- tcg_gen_movi_tl(hex_VRegs_updated, 0);
- tcg_gen_movi_tl(hex_QRegs_updated, 0);
+ /* Preload the predicated registers into hex_new_value[i] */
+ if (!bitmap_empty(ctx->predicated_regs, TOTAL_PER_THREAD_REGS)) {
+ int i = find_first_bit(ctx->predicated_regs, TOTAL_PER_THREAD_REGS);
+ while (i < TOTAL_PER_THREAD_REGS) {
+ tcg_gen_mov_tl(hex_new_value[i], hex_gpr[i]);
+ i = find_next_bit(ctx->predicated_regs, TOTAL_PER_THREAD_REGS,
+ i + 1);
+ }
+ }
+
+ /* Preload the predicated HVX registers into future_VRegs and tmp_VRegs */
+ if (!bitmap_empty(ctx->predicated_future_vregs, NUM_VREGS)) {
+ int i = find_first_bit(ctx->predicated_future_vregs, NUM_VREGS);
+ while (i < NUM_VREGS) {
+ const intptr_t VdV_off =
+ ctx_future_vreg_off(ctx, i, 1, true);
+ intptr_t src_off = offsetof(CPUHexagonState, VRegs[i]);
+ tcg_gen_gvec_mov(MO_64, VdV_off,
+ src_off,
+ sizeof(MMVector),
+ sizeof(MMVector));
+ i = find_next_bit(ctx->predicated_future_vregs, NUM_VREGS, i + 1);
+ }
+ }
+ if (!bitmap_empty(ctx->predicated_tmp_vregs, NUM_VREGS)) {
+ int i = find_first_bit(ctx->predicated_tmp_vregs, NUM_VREGS);
+ while (i < NUM_VREGS) {
+ const intptr_t VdV_off =
+ ctx_tmp_vreg_off(ctx, i, 1, true);
+ intptr_t src_off = offsetof(CPUHexagonState, VRegs[i]);
+ tcg_gen_gvec_mov(MO_64, VdV_off,
+ src_off,
+ sizeof(MMVector),
+ sizeof(MMVector));
+ i = find_next_bit(ctx->predicated_tmp_vregs, NUM_VREGS, i + 1);
+ }
}
}
@@ -336,66 +469,6 @@
return false;
}
-/*
- * The LOG_*_WRITE macros mark most of the writes in a packet
- * However, there are some implicit writes marked as attributes
- * of the applicable instructions.
- */
-static void mark_implicit_reg_write(DisasContext *ctx, int attrib, int rnum)
-{
- uint16_t opcode = ctx->insn->opcode;
- if (GET_ATTRIB(opcode, attrib)) {
- /*
- * USR is used to set overflow and FP exceptions,
- * so treat it as conditional
- */
- bool is_predicated = GET_ATTRIB(opcode, A_CONDEXEC) ||
- rnum == HEX_REG_USR;
-
- /* LC0/LC1 is conditionally written by endloop instructions */
- if ((rnum == HEX_REG_LC0 || rnum == HEX_REG_LC1) &&
- (opcode == J2_endloop0 ||
- opcode == J2_endloop1 ||
- opcode == J2_endloop01)) {
- is_predicated = true;
- }
-
- if (is_predicated && !is_preloaded(ctx, rnum)) {
- tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]);
- }
-
- ctx_log_reg_write(ctx, rnum);
- }
-}
-
-static void mark_implicit_pred_write(DisasContext *ctx, int attrib, int pnum)
-{
- if (GET_ATTRIB(ctx->insn->opcode, attrib)) {
- ctx_log_pred_write(ctx, pnum);
- }
-}
-
-static void mark_implicit_reg_writes(DisasContext *ctx)
-{
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_FP, HEX_REG_FP);
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_SP, HEX_REG_SP);
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_LR, HEX_REG_LR);
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_LC0, HEX_REG_LC0);
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0);
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1);
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1);
- mark_implicit_reg_write(ctx, A_IMPLICIT_WRITES_USR, HEX_REG_USR);
- mark_implicit_reg_write(ctx, A_FPOP, HEX_REG_USR);
-}
-
-static void mark_implicit_pred_writes(DisasContext *ctx)
-{
- mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P0, 0);
- mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P1, 1);
- mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P2, 2);
- mark_implicit_pred_write(ctx, A_IMPLICIT_WRITES_P3, 3);
-}
-
static void mark_store_width(DisasContext *ctx)
{
uint16_t opcode = ctx->insn->opcode;
@@ -423,9 +496,7 @@
static void gen_insn(DisasContext *ctx)
{
if (ctx->insn->generate) {
- mark_implicit_reg_writes(ctx);
ctx->insn->generate(ctx);
- mark_implicit_pred_writes(ctx);
mark_store_width(ctx);
} else {
gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE);
@@ -646,65 +717,31 @@
/*
* for (i = 0; i < ctx->vreg_log_idx; i++) {
* int rnum = ctx->vreg_log[i];
- * if (ctx->vreg_is_predicated[i]) {
- * if (env->VRegs_updated & (1 << rnum)) {
- * env->VRegs[rnum] = env->future_VRegs[rnum];
- * }
- * } else {
- * env->VRegs[rnum] = env->future_VRegs[rnum];
- * }
+ * env->VRegs[rnum] = env->future_VRegs[rnum];
* }
*/
for (i = 0; i < ctx->vreg_log_idx; i++) {
int rnum = ctx->vreg_log[i];
- bool is_predicated = ctx->vreg_is_predicated[i];
intptr_t dstoff = offsetof(CPUHexagonState, VRegs[rnum]);
intptr_t srcoff = ctx_future_vreg_off(ctx, rnum, 1, false);
size_t size = sizeof(MMVector);
- if (is_predicated) {
- TCGv cmp = tcg_temp_new();
- TCGLabel *label_skip = gen_new_label();
-
- tcg_gen_andi_tl(cmp, hex_VRegs_updated, 1 << rnum);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip);
- tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
- gen_set_label(label_skip);
- } else {
- tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
- }
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
}
/*
* for (i = 0; i < ctx->qreg_log_idx; i++) {
* int rnum = ctx->qreg_log[i];
- * if (ctx->qreg_is_predicated[i]) {
- * if (env->QRegs_updated) & (1 << rnum)) {
- * env->QRegs[rnum] = env->future_QRegs[rnum];
- * }
- * } else {
- * env->QRegs[rnum] = env->future_QRegs[rnum];
- * }
+ * env->QRegs[rnum] = env->future_QRegs[rnum];
* }
*/
for (i = 0; i < ctx->qreg_log_idx; i++) {
int rnum = ctx->qreg_log[i];
- bool is_predicated = ctx->qreg_is_predicated[i];
intptr_t dstoff = offsetof(CPUHexagonState, QRegs[rnum]);
intptr_t srcoff = offsetof(CPUHexagonState, future_QRegs[rnum]);
size_t size = sizeof(MMQReg);
- if (is_predicated) {
- TCGv cmp = tcg_temp_new();
- TCGLabel *label_skip = gen_new_label();
-
- tcg_gen_andi_tl(cmp, hex_QRegs_updated, 1 << rnum);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip);
- tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
- gen_set_label(label_skip);
- } else {
- tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
- }
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
}
if (pkt_has_hvx_store(ctx->pkt)) {
@@ -775,13 +812,27 @@
TCGv mask_tcgv;
if (has_store_s0) {
- mask |= (1 << 0);
+ mask =
+ FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST0, 1);
}
if (has_store_s1) {
- mask |= (1 << 1);
+ mask =
+ FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, HAS_ST1, 1);
}
if (has_hvx_store) {
- mask |= (1 << 2);
+ mask =
+ FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES,
+ HAS_HVX_STORES, 1);
+ }
+ if (has_store_s0 && slot_is_predicated(pkt, 0)) {
+ mask =
+ FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES,
+ S0_IS_PRED, 1);
+ }
+ if (has_store_s1 && slot_is_predicated(pkt, 1)) {
+ mask =
+ FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES,
+ S1_IS_PRED, 1);
}
mask_tcgv = tcg_constant_tl(mask);
gen_helper_probe_pkt_scalar_hvx_stores(cpu_env, mask_tcgv, mem_idx);
@@ -791,8 +842,15 @@
* process_store_log will execute the slot 1 store first,
* so we only have to probe the store in slot 0
*/
- TCGv mem_idx = tcg_constant_tl(ctx->mem_idx);
- gen_helper_probe_pkt_scalar_store_s0(cpu_env, mem_idx);
+ int args = 0;
+ args =
+ FIELD_DP32(args, PROBE_PKT_SCALAR_STORE_S0, MMU_IDX, ctx->mem_idx);
+ if (slot_is_predicated(pkt, 0)) {
+ args =
+ FIELD_DP32(args, PROBE_PKT_SCALAR_STORE_S0, IS_PREDICATED, 1);
+ }
+ TCGv args_tcgv = tcg_constant_tl(args);
+ gen_helper_probe_pkt_scalar_store_s0(cpu_env, args_tcgv);
}
process_store_log(ctx);
@@ -1029,10 +1087,6 @@
offsetof(CPUHexagonState, llsc_val), "llsc_val");
hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64");
- hex_VRegs_updated = tcg_global_mem_new(cpu_env,
- offsetof(CPUHexagonState, VRegs_updated), "VRegs_updated");
- hex_QRegs_updated = tcg_global_mem_new(cpu_env,
- offsetof(CPUHexagonState, QRegs_updated), "QRegs_updated");
for (i = 0; i < STORES_MAX; i++) {
snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i);
hex_store_addr[i] = tcg_global_mem_new(cpu_env,
diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h
index d971f4f..db832b0 100644
--- a/target/hexagon/translate.h
+++ b/target/hexagon/translate.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,6 +38,7 @@
int reg_log[REG_WRITES_MAX];
int reg_log_idx;
DECLARE_BITMAP(regs_written, TOTAL_PER_THREAD_REGS);
+ DECLARE_BITMAP(predicated_regs, TOTAL_PER_THREAD_REGS);
int preg_log[PRED_WRITES_MAX];
int preg_log_idx;
DECLARE_BITMAP(pregs_written, NUM_PREGS);
@@ -48,52 +49,54 @@
int tmp_vregs_idx;
int tmp_vregs_num[VECTOR_TEMPS_MAX];
int vreg_log[NUM_VREGS];
- bool vreg_is_predicated[NUM_VREGS];
int vreg_log_idx;
DECLARE_BITMAP(vregs_updated_tmp, NUM_VREGS);
DECLARE_BITMAP(vregs_updated, NUM_VREGS);
DECLARE_BITMAP(vregs_select, NUM_VREGS);
+ DECLARE_BITMAP(predicated_future_vregs, NUM_VREGS);
+ DECLARE_BITMAP(predicated_tmp_vregs, NUM_VREGS);
int qreg_log[NUM_QREGS];
- bool qreg_is_predicated[NUM_QREGS];
int qreg_log_idx;
bool pre_commit;
TCGCond branch_cond;
target_ulong branch_dest;
bool is_tight_loop;
+ bool need_pkt_has_store_s1;
} DisasContext;
-static inline void ctx_log_reg_write(DisasContext *ctx, int rnum)
-{
- if (test_bit(rnum, ctx->regs_written)) {
- HEX_DEBUG_LOG("WARNING: Multiple writes to r%d\n", rnum);
- }
- ctx->reg_log[ctx->reg_log_idx] = rnum;
- ctx->reg_log_idx++;
- set_bit(rnum, ctx->regs_written);
-}
-
-static inline void ctx_log_reg_write_pair(DisasContext *ctx, int rnum)
-{
- ctx_log_reg_write(ctx, rnum);
- ctx_log_reg_write(ctx, rnum + 1);
-}
-
static inline void ctx_log_pred_write(DisasContext *ctx, int pnum)
{
- ctx->preg_log[ctx->preg_log_idx] = pnum;
- ctx->preg_log_idx++;
- set_bit(pnum, ctx->pregs_written);
+ if (!test_bit(pnum, ctx->pregs_written)) {
+ ctx->preg_log[ctx->preg_log_idx] = pnum;
+ ctx->preg_log_idx++;
+ set_bit(pnum, ctx->pregs_written);
+ }
}
-static inline bool is_preloaded(DisasContext *ctx, int num)
+static inline void ctx_log_reg_write(DisasContext *ctx, int rnum,
+ bool is_predicated)
{
- return test_bit(num, ctx->regs_written);
+ if (rnum == HEX_REG_P3_0_ALIASED) {
+ for (int i = 0; i < NUM_PREGS; i++) {
+ ctx_log_pred_write(ctx, i);
+ }
+ } else {
+ if (!test_bit(rnum, ctx->regs_written)) {
+ ctx->reg_log[ctx->reg_log_idx] = rnum;
+ ctx->reg_log_idx++;
+ set_bit(rnum, ctx->regs_written);
+ }
+ if (is_predicated) {
+ set_bit(rnum, ctx->predicated_regs);
+ }
+ }
}
-static inline bool is_vreg_preloaded(DisasContext *ctx, int num)
+static inline void ctx_log_reg_write_pair(DisasContext *ctx, int rnum,
+ bool is_predicated)
{
- return test_bit(num, ctx->vregs_updated) ||
- test_bit(num, ctx->vregs_updated_tmp);
+ ctx_log_reg_write(ctx, rnum, is_predicated);
+ ctx_log_reg_write(ctx, rnum + 1, is_predicated);
}
intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum,
@@ -106,17 +109,25 @@
bool is_predicated)
{
if (type != EXT_TMP) {
- ctx->vreg_log[ctx->vreg_log_idx] = rnum;
- ctx->vreg_is_predicated[ctx->vreg_log_idx] = is_predicated;
- ctx->vreg_log_idx++;
+ if (!test_bit(rnum, ctx->vregs_updated)) {
+ ctx->vreg_log[ctx->vreg_log_idx] = rnum;
+ ctx->vreg_log_idx++;
+ set_bit(rnum, ctx->vregs_updated);
+ }
set_bit(rnum, ctx->vregs_updated);
+ if (is_predicated) {
+ set_bit(rnum, ctx->predicated_future_vregs);
+ }
}
if (type == EXT_NEW) {
set_bit(rnum, ctx->vregs_select);
}
if (type == EXT_TMP) {
set_bit(rnum, ctx->vregs_updated_tmp);
+ if (is_predicated) {
+ set_bit(rnum, ctx->predicated_tmp_vregs);
+ }
}
}
@@ -129,10 +140,9 @@
}
static inline void ctx_log_qreg_write(DisasContext *ctx,
- int rnum, bool is_predicated)
+ int rnum)
{
ctx->qreg_log[ctx->qreg_log_idx] = rnum;
- ctx->qreg_is_predicated[ctx->qreg_log_idx] = is_predicated;
ctx->qreg_log_idx++;
}
@@ -153,12 +163,20 @@
extern TCGv hex_llsc_addr;
extern TCGv hex_llsc_val;
extern TCGv_i64 hex_llsc_val_i64;
-extern TCGv hex_VRegs_updated;
-extern TCGv hex_QRegs_updated;
extern TCGv hex_vstore_addr[VSTORES_MAX];
extern TCGv hex_vstore_size[VSTORES_MAX];
extern TCGv hex_vstore_pending[VSTORES_MAX];
bool is_gather_store_insn(DisasContext *ctx);
void process_store(DisasContext *ctx, int slot_num);
+
+FIELD(PROBE_PKT_SCALAR_STORE_S0, MMU_IDX, 0, 2)
+FIELD(PROBE_PKT_SCALAR_STORE_S0, IS_PREDICATED, 2, 1)
+
+FIELD(PROBE_PKT_SCALAR_HVX_STORES, HAS_ST0, 0, 1)
+FIELD(PROBE_PKT_SCALAR_HVX_STORES, HAS_ST1, 1, 1)
+FIELD(PROBE_PKT_SCALAR_HVX_STORES, HAS_HVX_STORES, 2, 1)
+FIELD(PROBE_PKT_SCALAR_HVX_STORES, S0_IS_PRED, 3, 1)
+FIELD(PROBE_PKT_SCALAR_HVX_STORES, S1_IS_PRED, 4, 1)
+
#endif
diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h
index a48a270..c2791ae 100644
--- a/target/hppa/cpu-param.h
+++ b/target/hppa/cpu-param.h
@@ -29,6 +29,5 @@
# define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 5
#endif
diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c
index 729c37b..48a5143 100644
--- a/target/hppa/gdbstub.c
+++ b/target/hppa/gdbstub.c
@@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int hppa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index cb4fd1f..6a3154e 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -135,8 +135,6 @@
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
-#define tcg_const_reg tcg_const_i64
-#define tcg_const_local_reg tcg_const_local_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
@@ -228,8 +226,6 @@
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
-#define tcg_const_reg tcg_const_i32
-#define tcg_const_local_reg tcg_const_local_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
@@ -574,7 +570,9 @@
static TCGv_i32 load_frw0_i32(unsigned rt)
{
if (rt == 0) {
- return tcg_const_i32(0);
+ TCGv_i32 ret = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ret, 0);
+ return ret;
} else {
return load_frw_i32(rt);
}
@@ -582,15 +580,15 @@
static TCGv_i64 load_frw0_i64(unsigned rt)
{
+ TCGv_i64 ret = tcg_temp_new_i64();
if (rt == 0) {
- return tcg_const_i64(0);
+ tcg_gen_movi_i64(ret, 0);
} else {
- TCGv_i64 ret = tcg_temp_new_i64();
tcg_gen_ld32u_i64(ret, cpu_env,
offsetof(CPUHPPAState, fr[rt & 31])
+ (rt & 32 ? LO_OFS : HI_OFS));
- return ret;
}
+ return ret;
}
static void save_frw_i32(unsigned rt, TCGv_i32 val)
@@ -613,7 +611,9 @@
static TCGv_i64 load_frd0(unsigned rt)
{
if (rt == 0) {
- return tcg_const_i64(0);
+ TCGv_i64 ret = tcg_temp_new_i64();
+ tcg_gen_movi_i64(ret, 0);
+ return ret;
} else {
return load_frd(rt);
}
@@ -3330,7 +3330,8 @@
/* Convert big-endian bit numbering in SAR to left-shift. */
tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
- mask = tcg_const_reg(msb + (msb - 1));
+ mask = tcg_temp_new();
+ tcg_gen_movi_reg(mask, msb + (msb - 1));
tcg_gen_and_reg(tmp, val, mask);
if (rs) {
tcg_gen_shl_reg(mask, mask, shift);
@@ -3547,12 +3548,16 @@
static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
+ uint64_t ret;
+
+ if (TARGET_REGISTER_BITS == 64) {
+ ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
+ } else {
+ ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
+ }
+
nullify_over(ctx);
-#if TARGET_REGISTER_BITS == 64
- save_frd(0, tcg_const_i64(0x13080000000000ULL)); /* PA8700 (PCX-W2) */
-#else
- save_frd(0, tcg_const_i64(0x0f080000000000ULL)); /* PA7300LC (PCX-L2) */
-#endif
+ save_frd(0, tcg_constant_i64(ret));
return nullify_end(ctx);
}
diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h
index abad52a..911b4cd 100644
--- a/target/i386/cpu-param.h
+++ b/target/i386/cpu-param.h
@@ -23,6 +23,5 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 5
#endif
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index cab1e2a..6576287 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -29,6 +29,7 @@
#include "kvm/kvm_i386.h"
#include "sev.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qmp/qerror.h"
#include "standard-headers/asm-x86/kvm_para.h"
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
index 7869712..ebb000d 100644
--- a/target/i386/gdbstub.c
+++ b/target/i386/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "include/gdbstub/helpers.h"
#ifdef TARGET_X86_64
static const int gpr_map[16] = {
diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c
index 10f8aba..92ecb72 100644
--- a/target/i386/host-cpu.c
+++ b/target/i386/host-cpu.c
@@ -11,6 +11,7 @@
#include "cpu.h"
#include "host-cpu.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
/* Note: Only safe for use on x86(-64) hosts */
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 1aef54f..de53184 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -4991,6 +4991,7 @@
kvm_rate_limit_on_bus_lock();
}
+#ifdef CONFIG_XEN_EMU
/*
* If the callback is asserted as a GSI (or PCI INTx) then check if
* vcpu_info->evtchn_upcall_pending has been cleared, and deassert
@@ -5001,6 +5002,7 @@
if (x86_cpu->env.xen_callback_asserted) {
kvm_xen_maybe_deassert_callback(cpu);
}
+#endif
/* We need to protect the apic state against concurrent accesses from
* different threads in case the userspace irqchip is used. */
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index bad3131..d7c7eb8 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -12,6 +12,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm_int.h"
#include "sysemu/kvm_xen.h"
@@ -1406,6 +1407,11 @@
return err;
}
+ err = xen_gnttab_reset();
+ if (err) {
+ return err;
+ }
+
err = xen_xenstore_reset();
if (err) {
return err;
diff --git a/target/i386/sev.c b/target/i386/sev.c
index 0ec9704..859e06f 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -23,6 +23,7 @@
#include "qemu/base64.h"
#include "qemu/module.h"
#include "qemu/uuid.h"
+#include "qemu/error-report.h"
#include "crypto/hash.h"
#include "sysemu/kvm.h"
#include "sev.h"
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index fa422eb..9dfad2f 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -884,7 +884,7 @@
live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
if (dead) {
- zero = tcg_const_tl(0);
+ zero = tcg_constant_tl(0);
if (dead & USES_CC_DST) {
dst = zero;
}
@@ -1412,7 +1412,7 @@
/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
- TCGv_i32 tmp = tcg_const_i32(opreg);
+ TCGv_i32 tmp = tcg_constant_i32(opreg);
switch (op) {
case 0:
gen_helper_fadd_STN_ST0(cpu_env, tmp);
@@ -1439,7 +1439,7 @@
{
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
s->base.is_jmp = DISAS_NORETURN;
}
@@ -1633,7 +1633,7 @@
/* Store the results into the CC variables. If we know that the
variable must be dead, store unconditionally. Otherwise we'll
need to not disrupt the current contents. */
- z_tl = tcg_const_tl(0);
+ z_tl = tcg_constant_tl(0);
if (cc_op_live[s->cc_op] & USES_CC_DST) {
tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
result, cpu_cc_dst);
@@ -1657,7 +1657,7 @@
}
/* Conditionally store the CC_OP value. */
- z32 = tcg_const_i32(0);
+ z32 = tcg_constant_i32(0);
s32 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(s32, count);
tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
@@ -1813,7 +1813,7 @@
is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
exactly as we computed above. */
- t0 = tcg_const_i32(0);
+ t0 = tcg_constant_i32(0);
t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, s->T1);
tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
@@ -2497,7 +2497,7 @@
cc.reg = t0;
}
if (!cc.use_reg2) {
- cc.reg2 = tcg_const_tl(cc.imm);
+ cc.reg2 = tcg_constant_tl(cc.imm);
}
tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
@@ -2525,7 +2525,7 @@
{
if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32);
+ gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
stop as a special handling must be done to disable hardware
@@ -4344,7 +4344,7 @@
gen_op_mov_v_reg(s, ot, s->T1, reg);
if (shift) {
- TCGv imm = tcg_const_tl(x86_ldub_code(env, s));
+ TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
gen_shiftd_rm_T1(s, ot, opreg, op, imm);
} else {
gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
@@ -4503,7 +4503,7 @@
break;
case 0x0c: /* fldenv mem */
gen_helper_fldenv(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x0d: /* fldcw mem */
@@ -4514,7 +4514,7 @@
break;
case 0x0e: /* fnstenv mem */
gen_helper_fstenv(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x0f: /* fnstcw mem */
@@ -4532,12 +4532,12 @@
break;
case 0x2c: /* frstor mem */
gen_helper_frstor(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x2e: /* fnsave mem */
gen_helper_fsave(cpu_env, s->A0,
- tcg_const_i32(dflag - 1));
+ tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x2f: /* fnstsw mem */
@@ -4587,12 +4587,12 @@
case 0x08: /* fld sti */
gen_helper_fpush(cpu_env);
gen_helper_fmov_ST0_STN(cpu_env,
- tcg_const_i32((opreg + 1) & 7));
+ tcg_constant_i32((opreg + 1) & 7));
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
- gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
break;
case 0x0a: /* grp d9/2 */
switch (rm) {
@@ -4732,27 +4732,27 @@
}
} else {
gen_helper_fmov_FT0_STN(cpu_env,
- tcg_const_i32(opreg));
+ tcg_constant_i32(opreg));
gen_helper_fp_arith_ST0_FT0(op1);
}
}
break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x15: /* da/5 */
switch (rm) {
case 1: /* fucompp */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
@@ -4786,7 +4786,7 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
@@ -4795,36 +4795,36 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x28: /* ffree sti */
- gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
break;
case 0x2a: /* fst sti */
- gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
- gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* fucom st(i) */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
break;
case 0x2d: /* fucomp st(i) */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x33: /* de/3 */
switch (rm) {
case 1: /* fcompp */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
@@ -4834,7 +4834,7 @@
}
break;
case 0x38: /* ffreep sti, undocumented op */
- gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x3c: /* df/4 */
@@ -4853,7 +4853,7 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
@@ -4863,7 +4863,7 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
@@ -4886,7 +4886,8 @@
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
gen_jcc1_noeob(s, op1, l1);
- gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fmov_ST0_STN(cpu_env,
+ tcg_constant_i32(opreg));
gen_set_label(l1);
}
break;
@@ -5092,8 +5093,8 @@
if (PE(s) && !VM86(s)) {
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
- tcg_const_i32(val));
+ gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
+ tcg_constant_i32(val));
} else {
gen_stack_A0(s);
/* pop offset */
@@ -5120,7 +5121,7 @@
if (!check_vm86_iopl(s)) {
break;
}
- gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
+ gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
} else {
gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
eip_next_i32(s));
@@ -5509,7 +5510,7 @@
if (val == 0) {
gen_exception(s, EXCP00_DIVZ);
} else {
- gen_helper_aam(cpu_env, tcg_const_i32(val));
+ gen_helper_aam(cpu_env, tcg_constant_i32(val));
set_cc_op(s, CC_OP_LOGICB);
}
break;
@@ -5517,7 +5518,7 @@
if (CODE64(s))
goto illegal_op;
val = x86_ldub_code(env, s);
- gen_helper_aad(cpu_env, tcg_const_i32(val));
+ gen_helper_aad(cpu_env, tcg_constant_i32(val));
set_cc_op(s, CC_OP_LOGICB);
break;
/************************/
@@ -5698,7 +5699,7 @@
if (!PE(s)) {
gen_exception_gpf(s);
} else {
- gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
+ gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
s->base.is_jmp = DISAS_EOB_ONLY;
}
break;
@@ -5717,7 +5718,7 @@
if (!PE(s)) {
gen_exception_gpf(s);
} else {
- gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
+ gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (LMA(s)) {
set_cc_op(s, CC_OP_EFLAGS);
@@ -5923,7 +5924,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
+ gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
cur_insn_len_i32(s));
tcg_gen_exit_tb(NULL, 0);
s->base.is_jmp = DISAS_NORETURN;
@@ -5947,7 +5948,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
+ gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
break;
case 0xdb: /* VMSAVE */
@@ -5959,7 +5960,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
+ gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
break;
case 0xdc: /* STGI */
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 3d0c0b3..52af816 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -12,7 +12,7 @@
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/accel.h"
#include "sysemu/whpx.h"
#include "sysemu/cpus.h"
diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c
index c15df35..8710e37 100644
--- a/target/i386/whpx/whpx-apic.c
+++ b/target/i386/whpx/whpx-apic.c
@@ -11,6 +11,7 @@
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "cpu.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
diff --git a/target/loongarch/cpu-param.h b/target/loongarch/cpu-param.h
index 414d8ff..1265dc7 100644
--- a/target/loongarch/cpu-param.h
+++ b/target/loongarch/cpu-param.h
@@ -13,6 +13,5 @@
#define TARGET_VIRT_ADDR_SPACE_BITS 48
#define TARGET_PAGE_BITS 14
-#define NB_MMU_MODES 5
#endif
diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c
index a4d1e28..fa3e034 100644
--- a/target/loongarch/gdbstub.c
+++ b/target/loongarch/gdbstub.c
@@ -10,6 +10,7 @@
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
uint64_t read_fcc(CPULoongArchState *env)
{
diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h
index 44a8d19..39dcbce 100644
--- a/target/m68k/cpu-param.h
+++ b/target/m68k/cpu-param.h
@@ -17,6 +17,5 @@
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 2
#endif
diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c
index eb2d030..1e5f033 100644
--- a/target/m68k/gdbstub.c
+++ b/target/m68k/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int m68k_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 4621cf2..3b3a6ea 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -23,6 +23,7 @@
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
+#include "gdbstub/helpers.h"
#include "fpu/softfloat.h"
#include "qemu/qemu-print.h"
diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c
index 87b1314..88ad9ba 100644
--- a/target/m68k/m68k-semi.c
+++ b/target/m68k/m68k-semi.c
@@ -20,7 +20,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
+#include "gdbstub/helpers.h"
#include "semihosting/syscalls.h"
#include "semihosting/softmmu-uaccess.h"
#include "hw/boards.h"
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 3055d2d..422f465 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -264,10 +264,7 @@
static void gen_raise_exception(int nr)
{
- TCGv_i32 tmp;
-
- tmp = tcg_const_i32(nr);
- gen_helper_raise_exception(cpu_env, tmp);
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
}
static void gen_raise_exception_format2(DisasContext *s, int nr,
@@ -471,7 +468,7 @@
if ((ext & 0x80) == 0) {
/* base not suppressed */
if (IS_NULL_QREG(base)) {
- base = tcg_const_i32(offset + bd);
+ base = tcg_constant_i32(offset + bd);
bd = 0;
}
if (!IS_NULL_QREG(add)) {
@@ -487,7 +484,7 @@
add = tmp;
}
} else {
- add = tcg_const_i32(bd);
+ add = tcg_constant_i32(bd);
}
if ((ext & 3) != 0) {
/* memory indirect */
@@ -623,8 +620,7 @@
break;
default:
- t0 = tcg_const_i32(s->cc_op);
- gen_helper_flush_flags(cpu_env, t0);
+ gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
s->cc_op_synced = 1;
break;
}
@@ -785,14 +781,14 @@
switch (reg0) {
case 0: /* Absolute short. */
offset = (int16_t)read_im16(env, s);
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
case 1: /* Absolute long. */
offset = read_im32(env, s);
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
case 2: /* pc displacement */
offset = s->pc;
offset += (int16_t)read_im16(env, s);
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
case 3: /* pc index+displacement. */
return gen_lea_indexed(env, s, NULL_QREG);
case 4: /* Immediate. */
@@ -920,7 +916,7 @@
default:
g_assert_not_reached();
}
- return tcg_const_i32(offset);
+ return tcg_constant_i32(offset);
default:
return NULL_QREG;
}
@@ -1167,23 +1163,23 @@
}
switch (opsize) {
case OS_BYTE:
- tmp = tcg_const_i32((int8_t)read_im8(env, s));
+ tmp = tcg_constant_i32((int8_t)read_im8(env, s));
gen_helper_exts32(cpu_env, fp, tmp);
break;
case OS_WORD:
- tmp = tcg_const_i32((int16_t)read_im16(env, s));
+ tmp = tcg_constant_i32((int16_t)read_im16(env, s));
gen_helper_exts32(cpu_env, fp, tmp);
break;
case OS_LONG:
- tmp = tcg_const_i32(read_im32(env, s));
+ tmp = tcg_constant_i32(read_im32(env, s));
gen_helper_exts32(cpu_env, fp, tmp);
break;
case OS_SINGLE:
- tmp = tcg_const_i32(read_im32(env, s));
+ tmp = tcg_constant_i32(read_im32(env, s));
gen_helper_extf32(cpu_env, fp, tmp);
break;
case OS_DOUBLE:
- t64 = tcg_const_i64(read_im64(env, s));
+ t64 = tcg_constant_i64(read_im64(env, s));
gen_helper_extf64(cpu_env, fp, t64);
break;
case OS_EXTENDED:
@@ -1191,9 +1187,9 @@
gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
break;
}
- tmp = tcg_const_i32(read_im32(env, s) >> 16);
+ tmp = tcg_constant_i32(read_im32(env, s) >> 16);
tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
- t64 = tcg_const_i64(read_im64(env, s));
+ t64 = tcg_constant_i64(read_im64(env, s));
tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
break;
case OS_PACKED:
@@ -1253,7 +1249,7 @@
goto done;
case 10: /* PL */
case 11: /* MI */
- c->v2 = tcg_const_i32(0);
+ c->v2 = tcg_constant_i32(0);
c->v1 = tmp = tcg_temp_new();
tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
@@ -1269,7 +1265,7 @@
}
}
- c->v2 = tcg_const_i32(0);
+ c->v2 = tcg_constant_i32(0);
switch (cond) {
case 0: /* T */
@@ -1631,8 +1627,8 @@
* = result with some possible exceeding 0x6
*/
- t0 = tcg_const_i32(0x066);
- tcg_gen_add_i32(t0, t0, src);
+ t0 = tcg_temp_new();
+ tcg_gen_addi_i32(t0, src, 0x066);
t1 = tcg_temp_new();
tcg_gen_add_i32(t1, t0, dest);
@@ -1818,7 +1814,8 @@
SRC_EA(env, src, OS_BYTE, 0, &addr);
- dest = tcg_const_i32(0);
+ dest = tcg_temp_new();
+ tcg_gen_movi_i32(dest, 0);
bcd_sub(dest, src);
DEST_EA(env, insn, OS_BYTE, dest, &addr);
@@ -1896,8 +1893,8 @@
else
tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
- tmp = tcg_const_i32(1);
- tcg_gen_shl_i32(tmp, tmp, src2);
+ tmp = tcg_temp_new();
+ tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
@@ -1999,7 +1996,7 @@
addr = tcg_temp_new();
tcg_gen_mov_i32(addr, tmp);
- incr = tcg_const_i32(opsize_bytes(opsize));
+ incr = tcg_constant_i32(opsize_bytes(opsize));
if (is_load) {
/* memory to register */
@@ -2235,13 +2232,13 @@
opsize = insn_opsize(insn);
switch (opsize) {
case OS_BYTE:
- im = tcg_const_i32((int8_t)read_im8(env, s));
+ im = tcg_constant_i32((int8_t)read_im8(env, s));
break;
case OS_WORD:
- im = tcg_const_i32((int16_t)read_im16(env, s));
+ im = tcg_constant_i32((int16_t)read_im16(env, s));
break;
case OS_LONG:
- im = tcg_const_i32(read_im32(env, s));
+ im = tcg_constant_i32(read_im32(env, s));
break;
default:
g_assert_not_reached();
@@ -2393,7 +2390,6 @@
{
uint16_t ext1, ext2;
TCGv addr1, addr2;
- TCGv regs;
/* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
@@ -2425,13 +2421,13 @@
* Dc2 = (R2)
*/
- regs = tcg_const_i32(REG(ext2, 6) |
- (REG(ext1, 6) << 3) |
- (REG(ext2, 0) << 6) |
- (REG(ext1, 0) << 9));
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_exit_atomic(cpu_env);
} else {
+ TCGv regs = tcg_constant_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
gen_helper_cas2w(cpu_env, regs, addr1, addr2);
}
@@ -2475,10 +2471,10 @@
* Dc2 = (R2)
*/
- regs = tcg_const_i32(REG(ext2, 6) |
- (REG(ext1, 6) << 3) |
- (REG(ext2, 0) << 6) |
- (REG(ext1, 0) << 9));
+ regs = tcg_constant_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
} else {
@@ -2552,7 +2548,7 @@
* (X, N) = -(src + X);
*/
- z = tcg_const_i32(0);
+ z = tcg_constant_i32(0);
tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
@@ -2597,8 +2593,7 @@
int opsize;
TCGv zero;
- zero = tcg_const_i32(0);
-
+ zero = tcg_constant_i32(0);
opsize = insn_opsize(insn);
DEST_EA(env, insn, opsize, zero, NULL);
gen_logic_cc(s, zero, opsize);
@@ -2934,7 +2929,7 @@
}
if ((insn & 0x40) == 0) {
/* jsr */
- gen_push(s, tcg_const_i32(s->pc));
+ gen_push(s, tcg_constant_i32(s->pc));
}
gen_jmp(s, tmp);
}
@@ -2959,7 +2954,7 @@
if (imm == 0) {
imm = 8;
}
- val = tcg_const_i32(imm);
+ val = tcg_constant_i32(imm);
dest = tcg_temp_new();
tcg_gen_mov_i32(dest, src);
if ((insn & 0x38) == 0x08) {
@@ -3003,7 +2998,7 @@
}
if (op == 1) {
/* bsr */
- gen_push(s, tcg_const_i32(s->pc));
+ gen_push(s, tcg_constant_i32(s->pc));
}
if (op > 1) {
/* Bcc */
@@ -3076,7 +3071,7 @@
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
- TCGv tmp;
+ TCGv tmp, zero;
gen_flush_flags(s); /* compute old Z */
@@ -3085,14 +3080,15 @@
* (X, N) = dest - (src + X);
*/
- tmp = tcg_const_i32(0);
- tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
- tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
+ zero = tcg_constant_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
+ tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
/* Compute signed-overflow for subtract. */
+ tmp = tcg_temp_new();
tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
tcg_gen_xor_i32(tmp, dest, src);
tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
@@ -3151,9 +3147,10 @@
int val;
val = (insn >> 9) & 7;
- if (val == 0)
+ if (val == 0) {
val = -1;
- src = tcg_const_i32(val);
+ }
+ src = tcg_constant_i32(val);
gen_logic_cc(s, src, OS_LONG);
DEST_EA(env, insn, OS_LONG, src, NULL);
}
@@ -3279,7 +3276,7 @@
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
- TCGv tmp;
+ TCGv tmp, zero;
gen_flush_flags(s); /* compute old Z */
@@ -3288,13 +3285,14 @@
* (X, N) = src + dest + X;
*/
- tmp = tcg_const_i32(0);
- tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
- tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
+ zero = tcg_constant_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
/* Compute signed-overflow for addition. */
+ tmp = tcg_temp_new();
tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
tcg_gen_xor_i32(tmp, dest, src);
tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
@@ -3430,7 +3428,7 @@
tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
/* Note that C=0 if shift count is 0, and we get that for free. */
} else {
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
@@ -3452,7 +3450,7 @@
* V = ((s ^ t) & (-1 << (bits - 1))) != 0
*/
if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
- TCGv_i64 tt = tcg_const_i64(32);
+ TCGv_i64 tt = tcg_constant_i64(32);
/* if shift is greater than 32, use 32 */
tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
/* Sign extend the input to 64 bits; re-do the shift. */
@@ -3633,7 +3631,7 @@
{
TCGv X, shl, shr, shx, sz, zero;
- sz = tcg_const_i32(size);
+ sz = tcg_constant_i32(size);
shr = tcg_temp_new();
shl = tcg_temp_new();
@@ -3644,7 +3642,7 @@
tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */
/* shx = shx < 0 ? size : shx; */
- zero = tcg_const_i32(0);
+ zero = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
} else {
tcg_gen_mov_i32(shr, shift); /* shr = shift */
@@ -3723,7 +3721,7 @@
/* if shift == 0, register and X are not affected */
- zero = tcg_const_i32(0);
+ zero = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
@@ -3741,7 +3739,7 @@
tmp = 8;
}
- shift = tcg_const_i32(tmp);
+ shift = tcg_constant_i32(tmp);
if (insn & 8) {
rotate(DREG(insn, 0), shift, left, 32);
} else {
@@ -3766,7 +3764,7 @@
tmp = 8;
}
- shift = tcg_const_i32(tmp);
+ shift = tcg_constant_i32(tmp);
if (insn & 8) {
rotate(reg, shift, left, 8);
} else {
@@ -3790,7 +3788,7 @@
tmp = 8;
}
- shift = tcg_const_i32(tmp);
+ shift = tcg_constant_i32(tmp);
if (insn & 8) {
rotate(reg, shift, left, 16);
} else {
@@ -3905,7 +3903,7 @@
SRC_EA(env, src, OS_WORD, 0, &addr);
- shift = tcg_const_i32(1);
+ shift = tcg_constant_i32(1);
if (insn & 0x0200) {
rotate(src, shift, left, 16);
} else {
@@ -3999,12 +3997,12 @@
if (ext & 0x20) {
len = DREG(ext, 0);
} else {
- len = tcg_const_i32(extract32(ext, 0, 5));
+ len = tcg_constant_i32(extract32(ext, 0, 5));
}
if (ext & 0x800) {
ofs = DREG(ext, 6);
} else {
- ofs = tcg_const_i32(extract32(ext, 6, 5));
+ ofs = tcg_constant_i32(extract32(ext, 6, 5));
}
if (is_sign) {
@@ -4024,14 +4022,8 @@
TCGv src = DREG(insn, 0);
int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
int ofs = extract32(ext, 6, 5); /* big bit-endian */
- TCGv mask, tofs, tlen;
-
- tofs = NULL;
- tlen = NULL;
- if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
- tofs = tcg_temp_new();
- tlen = tcg_temp_new();
- }
+ TCGv mask, tofs = NULL, tlen = NULL;
+ bool is_bfffo = (insn & 0x0f00) == 0x0d00;
if ((ext & 0x820) == 0) {
/* Immediate width and offset. */
@@ -4042,45 +4034,49 @@
tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
}
tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
- mask = tcg_const_i32(ror32(maski, ofs));
- if (tofs) {
- tcg_gen_movi_i32(tofs, ofs);
- tcg_gen_movi_i32(tlen, len);
+
+ mask = tcg_constant_i32(ror32(maski, ofs));
+ if (is_bfffo) {
+ tofs = tcg_constant_i32(ofs);
+ tlen = tcg_constant_i32(len);
}
} else {
TCGv tmp = tcg_temp_new();
+
+ mask = tcg_temp_new();
if (ext & 0x20) {
/* Variable width */
tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
tcg_gen_andi_i32(tmp, tmp, 31);
- mask = tcg_const_i32(0x7fffffffu);
- tcg_gen_shr_i32(mask, mask, tmp);
- if (tlen) {
+ tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
+ if (is_bfffo) {
+ tlen = tcg_temp_new();
tcg_gen_addi_i32(tlen, tmp, 1);
}
} else {
/* Immediate width */
- mask = tcg_const_i32(0x7fffffffu >> (len - 1));
- if (tlen) {
- tcg_gen_movi_i32(tlen, len);
+ tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
+ if (is_bfffo) {
+ tlen = tcg_constant_i32(len);
}
}
+
if (ext & 0x800) {
/* Variable offset */
tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
tcg_gen_rotr_i32(mask, mask, tmp);
- if (tofs) {
- tcg_gen_mov_i32(tofs, tmp);
+ if (is_bfffo) {
+ tofs = tmp;
}
} else {
/* Immediate offset (and variable width) */
tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
tcg_gen_rotri_i32(mask, mask, ofs);
- if (tofs) {
- tcg_gen_movi_i32(tofs, ofs);
+ if (is_bfffo) {
+ tofs = tcg_constant_i32(ofs);
}
}
}
@@ -4122,12 +4118,12 @@
if (ext & 0x20) {
len = DREG(ext, 0);
} else {
- len = tcg_const_i32(extract32(ext, 0, 5));
+ len = tcg_constant_i32(extract32(ext, 0, 5));
}
if (ext & 0x800) {
ofs = DREG(ext, 6);
} else {
- ofs = tcg_const_i32(extract32(ext, 6, 5));
+ ofs = tcg_constant_i32(extract32(ext, 6, 5));
}
switch (insn & 0x0f00) {
@@ -4239,12 +4235,12 @@
if (ext & 0x20) {
len = DREG(ext, 0);
} else {
- len = tcg_const_i32(extract32(ext, 0, 5));
+ len = tcg_constant_i32(extract32(ext, 0, 5));
}
if (ext & 0x800) {
ofs = DREG(ext, 6);
} else {
- ofs = tcg_const_i32(extract32(ext, 6, 5));
+ ofs = tcg_constant_i32(extract32(ext, 6, 5));
}
gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
@@ -4377,7 +4373,7 @@
TCGv reg, addr;
reg = AREG(insn, 0);
- addr = tcg_const_i32(read_im32(env, s));
+ addr = tcg_constant_i32(read_im32(env, s));
if ((insn >> 3) & 1) {
/* MOVE16 (xxx).L, (Ay) */
@@ -4567,14 +4563,14 @@
} else {
reg = DREG(ext, 12);
}
- gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
gen_exit_tb(s);
}
DISAS_INSN(m68k_movec)
{
uint16_t ext;
- TCGv reg;
+ TCGv reg, creg;
if (IS_USER(s)) {
gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
@@ -4588,10 +4584,11 @@
} else {
reg = DREG(ext, 12);
}
+ creg = tcg_constant_i32(ext & 0xfff);
if (insn & 1) {
- gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_helper_m68k_movec_to(cpu_env, creg, reg);
} else {
- gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
+ gen_helper_m68k_movec_from(reg, cpu_env, creg);
}
gen_exit_tb(s);
}
@@ -4642,7 +4639,7 @@
return;
}
- opmode = tcg_const_i32((insn >> 3) & 3);
+ opmode = tcg_constant_i32((insn >> 3) & 3);
gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
}
@@ -4654,7 +4651,7 @@
gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
return;
}
- is_read = tcg_const_i32((insn >> 5) & 1);
+ is_read = tcg_constant_i32((insn >> 5) & 1);
gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
}
#endif
@@ -4824,7 +4821,7 @@
gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
return;
}
- tmp = tcg_const_i32(read_im32(env, s));
+ tmp = tcg_constant_i32(read_im32(env, s));
gen_store_fcr(s, tmp, mask);
return;
}
@@ -4961,7 +4958,7 @@
case 2:
if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
/* fmovecr */
- TCGv rom_offset = tcg_const_i32(opmode);
+ TCGv rom_offset = tcg_constant_i32(opmode);
cpu_dest = gen_fp_ptr(REG(ext, 7));
gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
return;
@@ -5185,7 +5182,7 @@
{
TCGv fpsr;
- c->v2 = tcg_const_i32(0);
+ c->v2 = tcg_constant_i32(0);
/* TODO: Raise BSUN exception. */
fpsr = tcg_temp_new();
gen_load_fcr(s, fpsr, M68K_FPSR);
@@ -5405,7 +5402,7 @@
if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
/* always write IDLE */
- TCGv idle = tcg_const_i32(0x41000000);
+ TCGv idle = tcg_constant_i32(0x41000000);
DEST_EA(env, insn, OS_LONG, idle, NULL);
} else {
disas_undef(env, s, insn);
@@ -5535,7 +5532,7 @@
/* Skip the accumulate if the value is already saturated. */
l1 = gen_new_label();
tmp = tcg_temp_new();
- gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
gen_op_jmp_nz32(tmp, l1);
}
#endif
@@ -5546,11 +5543,11 @@
tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
if (s->env->macsr & MACSR_FI)
- gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
else if (s->env->macsr & MACSR_SU)
- gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
else
- gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
/* Disabled because conditional branches clobber temporary vars. */
@@ -5569,7 +5566,7 @@
/* Skip the accumulate if the value is already saturated. */
l1 = gen_new_label();
tmp = tcg_temp_new();
- gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
gen_op_jmp_nz32(tmp, l1);
}
#endif
@@ -5578,18 +5575,18 @@
else
tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
if (s->env->macsr & MACSR_FI)
- gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
else if (s->env->macsr & MACSR_SU)
- gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
else
- gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+ gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
#if 0
/* Disabled because conditional branches clobber temporary vars. */
if (l1 != -1)
gen_set_label(l1);
#endif
}
- gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
+ gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));
if (insn & 0x30) {
TCGv rw;
@@ -5639,8 +5636,8 @@
int src;
TCGv dest;
src = insn & 3;
- dest = tcg_const_i32((insn >> 9) & 3);
- gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
+ dest = tcg_constant_i32((insn >> 9) & 3);
+ gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
gen_mac_clear_flags();
gen_helper_mac_set_flags(cpu_env, dest);
}
@@ -5665,7 +5662,7 @@
TCGv reg;
TCGv acc;
reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
- acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
if (s->env->macsr & MACSR_FI)
gen_helper_get_mac_extf(reg, cpu_env, acc);
else
@@ -5700,7 +5697,7 @@
}
tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
gen_mac_clear_flags();
- gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
+ gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
}
DISAS_INSN(to_macsr)
@@ -5723,7 +5720,7 @@
TCGv val;
TCGv acc;
SRC_EA(env, val, OS_LONG, 0, NULL);
- acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
if (s->env->macsr & MACSR_FI)
gen_helper_set_mac_extf(cpu_env, val, acc);
else if (s->env->macsr & MACSR_SU)
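The m68k hunks above convert tcg_const_*() calls to tcg_constant_*(): the old helper returned a freshly allocated, writable temporary pre-loaded with a value, while the new one returns an interned, read-only constant that must never be written and needs no explicit free. Wherever the old code relied on clobbering that temporary, the update either switches to an immediate-operand op or allocates a plain temporary first. A minimal sketch of the idiom, assuming the usual TCG frontend declarations; demo_add_bias() is a hypothetical helper, not code from the patch:

    #include "tcg/tcg-op.h"

    /* Illustration only: the tcg_const_i32 -> tcg_constant_i32 idiom. */
    static void demo_add_bias(TCGv_i32 dst, TCGv_i32 src)
    {
        /*
         * Old style (removed above):
         *     TCGv_i32 t = tcg_const_i32(0x066);
         *     tcg_gen_add_i32(t, t, src);      // writes into the temp
         *     tcg_temp_free_i32(t);
         * New style: use the immediate form when the constant is an operand...
         */
        tcg_gen_addi_i32(dst, src, 0x066);

        /* ...or an interned constant when a TCGv operand is required;
         * it is read-only and is never freed. */
        TCGv_i32 zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, dst, dst, zero, zero, dst);
    }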
diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h
index 5e54ea0..9770b0e 100644
--- a/target/microblaze/cpu-param.h
+++ b/target/microblaze/cpu-param.h
@@ -28,6 +28,5 @@
/* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 3
#endif
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index f66df02..88324d0 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -394,7 +394,7 @@
#define MMU_NOMMU_IDX 0
#define MMU_KERNEL_IDX 1
#define MMU_USER_IDX 2
-/* See NB_MMU_MODES further up the file. */
+/* See NB_MMU_MODES in cpu-defs.h. */
#include "exec/cpu-all.h"
diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c
index 8143fca..29ac6e9 100644
--- a/target/microblaze/gdbstub.c
+++ b/target/microblaze/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
/*
* GDB expects SREGs in the following order:
diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc
index 480e60a..d45f245 100644
--- a/target/mips/cpu-defs.c.inc
+++ b/target/mips/cpu-defs.c.inc
@@ -332,7 +332,11 @@
(0x1 << CP0C0_AR) | (MMU_TYPE_FMT << CP0C0_MT),
.CP0_Config1 = MIPS_CONFIG1,
.CP0_Config2 = MIPS_CONFIG2,
- .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt),
+ .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt) |
+ (1 << CP0C3_M),
+ .CP0_Config4 = MIPS_CONFIG4 | (1 << CP0C4_M),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_NFExists),
+ .CP0_Config7 = 1 << CP0C7_WII,
.CP0_LLAddr_rw_bitmask = 0,
.CP0_LLAddr_shift = 4,
.SYNCI_Step = 32,
@@ -353,7 +357,11 @@
(0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
(0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA),
.CP0_Config2 = MIPS_CONFIG2,
- .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt),
+ .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt) |
+ (1 << CP0C3_M),
+ .CP0_Config4 = MIPS_CONFIG4 | (1 << CP0C4_M),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_NFExists),
+ .CP0_Config7 = 1 << CP0C7_WII,
.CP0_LLAddr_rw_bitmask = 0,
.CP0_LLAddr_shift = 4,
.SYNCI_Step = 32,
@@ -392,6 +400,7 @@
.CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) |
(1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) |
(1 << CP0C5_FRE) | (1 << CP0C5_UFR),
+ .CP0_Config7 = 1 << CP0C7_WII,
.CP0_LLAddr_rw_bitmask = 0,
.CP0_LLAddr_shift = 0,
.SYNCI_Step = 32,
diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h
index f4c7699..594c91a 100644
--- a/target/mips/cpu-param.h
+++ b/target/mips/cpu-param.h
@@ -29,6 +29,5 @@
#define TARGET_PAGE_BITS_VARY
#define TARGET_PAGE_BITS_MIN 12
#endif
-#define NB_MMU_MODES 4
#endif
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index 05caf54..01e0fbe 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/qemu-print.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "cpu.h"
#include "internal.h"
@@ -143,11 +144,13 @@
/*
* Prior to MIPS Release 6 it is implementation dependent if non-enabled
* interrupts wake-up the CPU, however most of the implementations only
- * check for interrupts that can be taken.
+ * check for interrupts that can be taken. For pre-release 6 CPUs,
+ * check for CP0 Config7 'Wait IE ignore' bit.
*/
if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
cpu_mips_hw_interrupts_pending(env)) {
if (cpu_mips_hw_interrupts_enabled(env) ||
+ (env->CP0_Config7 & (1 << CP0C7_WII)) ||
(env->insn_flags & ISA_MIPS_R6)) {
has_work = true;
}
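The mips_cpu_has_work() change above lets a pre-Release 6 core leave the WAIT state on a pending hardware interrupt when its Config7.WII ("Wait IE Ignore") bit is set, even if interrupts are globally disabled; R6 cores already behaved this way. A hedged sketch of the resulting predicate, not the literal function body, using the CP0 fields from target/mips/cpu.h:

    /* Sketch: does a pending hardware interrupt wake the CPU from WAIT? */
    static bool wait_wakes_up(CPUMIPSState *env)
    {
        if (!cpu_mips_hw_interrupts_pending(env)) {
            return false;
        }
        return cpu_mips_hw_interrupts_enabled(env) ||
               (env->CP0_Config7 & (1 << CP0C7_WII)) ||   /* pre-R6 + WII */
               (env->insn_flags & ISA_MIPS_R6);           /* R6: always   */
    }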
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index caf2b06..142c55a 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -980,6 +980,7 @@
#define CP0C6_DATAPREF 0
int32_t CP0_Config7;
int64_t CP0_Config7_rw_bitmask;
+#define CP0C7_WII 31
#define CP0C7_NAPCGEN 2
#define CP0C7_UNIMUEN 1
#define CP0C7_VFPUCGEN 0
diff --git a/target/mips/gdbstub.c b/target/mips/gdbstub.c
index f1c2a2c..62d7b72 100644
--- a/target/mips/gdbstub.c
+++ b/target/mips/gdbstub.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "fpu_helper.h"
int mips_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
diff --git a/target/mips/sysemu/physaddr.c b/target/mips/sysemu/physaddr.c
index 2970df8..05990aa 100644
--- a/target/mips/sysemu/physaddr.c
+++ b/target/mips/sysemu/physaddr.c
@@ -70,8 +70,7 @@
/* is this AM mapped in current execution mode */
return ((adetlb_mask << am) < 0);
default:
- assert(0);
- return TLBRET_BADADDR;
+ g_assert_not_reached();
};
}
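Several files in this series replace assert(0) in unreachable switch arms with g_assert_not_reached(), QEMU's preferred marker for impossible paths: the macro is declared noreturn, so the dead return value the old code needed (TLBRET_BADADDR here) can simply be dropped. A small self-contained illustration, unrelated to the MIPS code itself:

    #include <glib.h>

    /* All two-bit values are handled, so the default arm is unreachable. */
    static int decode_two_bit(unsigned op)
    {
        switch (op & 3) {
        case 0: return 10;
        case 1: return 11;
        case 2: return 12;
        case 3: return 13;
        default:
            g_assert_not_reached();   /* noreturn: no dummy return needed */
        }
    }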
diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c
index d0bd026..c1a8380 100644
--- a/target/mips/tcg/ldst_helper.c
+++ b/target/mips/tcg/ldst_helper.c
@@ -248,14 +248,14 @@
target_ulong i;
for (i = 0; i < base_reglist; i++) {
- cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
+ cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
mem_idx, GETPC());
addr += 4;
}
}
if (do_r31) {
- cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
+ cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
}
}
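The load/store-multiple helper above writes one 32-bit GPR slot per iteration and advances the address by 4, so the previous 16-bit cpu_stw_mmuidx_ra() call only wrote half of each word; cpu_stl_mmuidx_ra() stores the full word. A sketch of the corrected loop body, under the same assumptions as the helper (multiple_regs[], mem_idx and GETPC() as in ldst_helper.c):

    for (i = 0; i < base_reglist; i++) {
        /* 32-bit store to match the 4-byte stride. */
        cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
                          mem_idx, GETPC());
        addr += 4;
    }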
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index 632895c..e8b193a 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -704,8 +704,8 @@
gen_base_offset_addr(ctx, t0, base, offset);
- t1 = tcg_const_tl(reglist);
- t2 = tcg_const_i32(ctx->mem_idx);
+ t1 = tcg_constant_tl(reglist);
+ t2 = tcg_constant_i32(ctx->mem_idx);
save_cpu_state(ctx, 1);
switch (opc) {
@@ -724,9 +724,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free_i32(t2);
}
@@ -1018,8 +1015,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
@@ -1067,7 +1062,6 @@
gen_load_gpr(t0, rt);
gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7);
- tcg_temp_free(t0);
}
break;
#endif
@@ -1276,7 +1270,6 @@
* mode.
*/
ctx->base.is_jmp = DISAS_STOP;
- tcg_temp_free(t0);
}
break;
case EI:
@@ -1293,7 +1286,6 @@
*/
gen_save_pc(ctx->base.pc_next + 4);
ctx->base.is_jmp = DISAS_EXIT;
- tcg_temp_free(t0);
}
break;
default:
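The microMIPS hunks above, and many later ones in this series, simply delete tcg_temp_free*() calls: translator temporaries are now released automatically by the TCG core when translation finishes, so explicit frees (and the cleanup paths that existed only to reach them) are redundant. A minimal sketch with a hypothetical helper, assuming the TCG frontend API:

    #include "tcg/tcg-op.h"

    /* Hypothetical example: swap the 16-bit halves of src into dst. */
    static void demo_swap_halves(TCGv_i32 dst, TCGv_i32 src)
    {
        TCGv_i32 lo = tcg_temp_new_i32();
        TCGv_i32 hi = tcg_temp_new_i32();

        tcg_gen_shli_i32(lo, src, 16);   /* low half -> high half */
        tcg_gen_shri_i32(hi, src, 16);   /* high half -> low half */
        tcg_gen_or_i32(dst, lo, hi);
        /* No tcg_temp_free_i32(): temporaries die with the translation. */
    }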
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index 736283e..29b31d7 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -5333,7 +5333,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
msa_move_v(pwd, pwx);
}
@@ -5368,7 +5368,7 @@
} \
break; \
default: \
- assert(0); \
+ g_assert_not_reached(); \
} \
}
@@ -5413,7 +5413,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
}
@@ -5461,7 +5461,7 @@
} \
break; \
default: \
- assert(0); \
+ g_assert_not_reached(); \
} \
}
@@ -5511,7 +5511,7 @@
} \
break; \
default: \
- assert(0); \
+ g_assert_not_reached(); \
} \
}
@@ -5557,7 +5557,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
}
@@ -5632,7 +5632,7 @@
pwd->d[1] = msa_ ## func ## _df(df, pws->d[1], pwt->d[1]); \
break; \
default: \
- assert(0); \
+ g_assert_not_reached(); \
} \
}
@@ -5771,7 +5771,7 @@
pwd->d[1] = msa_ ## func ## _df(df, pwd->d[1], pws->d[1], pwt->d[1]); \
break; \
default: \
- assert(0); \
+ g_assert_not_reached(); \
} \
}
@@ -5811,7 +5811,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
}
@@ -5869,7 +5869,7 @@
MSA_LOOP_D; \
break; \
default: \
- assert(0); \
+ g_assert_not_reached(); \
} \
msa_move_v(pwd, pwx); \
}
@@ -6090,7 +6090,7 @@
pwd->d[n] = (int64_t)pws->d[0];
break;
default:
- assert(0);
+ g_assert_not_reached();
}
}
@@ -6150,7 +6150,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
}
@@ -6565,7 +6565,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6596,7 +6596,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6625,7 +6625,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6654,7 +6654,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6683,7 +6683,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6712,7 +6712,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6741,7 +6741,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6770,7 +6770,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6799,7 +6799,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6828,7 +6828,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -6857,7 +6857,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, retaddr);
@@ -7107,7 +7107,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7137,7 +7137,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7167,7 +7167,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7198,7 +7198,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7245,7 +7245,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7280,7 +7280,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7317,7 +7317,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7371,7 +7371,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7417,7 +7417,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7526,7 +7526,7 @@
} else {
- assert(0);
+ g_assert_not_reached();
}
@@ -7555,7 +7555,7 @@
FMAXMIN_A(min, max, pwx->d[0], pws->d[0], pwt->d[0], 64, status);
FMAXMIN_A(min, max, pwx->d[1], pws->d[1], pwt->d[1], 64, status);
} else {
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7628,7 +7628,7 @@
} else {
- assert(0);
+ g_assert_not_reached();
}
@@ -7657,7 +7657,7 @@
FMAXMIN_A(max, min, pwx->d[0], pws->d[0], pwt->d[0], 64, status);
FMAXMIN_A(max, min, pwx->d[1], pws->d[1], pwt->d[1], 64, status);
} else {
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7681,7 +7681,7 @@
pwd->d[0] = float_class_d(pws->d[0], status);
pwd->d[1] = float_class_d(pws->d[1], status);
} else {
- assert(0);
+ g_assert_not_reached();
}
}
@@ -7723,7 +7723,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7753,7 +7753,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7783,7 +7783,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7832,7 +7832,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7862,7 +7862,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7892,7 +7892,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7946,7 +7946,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -7983,7 +7983,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -8019,7 +8019,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -8046,7 +8046,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
msa_move_v(pwd, pwx);
@@ -8072,7 +8072,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
msa_move_v(pwd, pwx);
@@ -8100,7 +8100,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -8130,7 +8130,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -8166,7 +8166,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
@@ -8196,7 +8196,7 @@
}
break;
default:
- assert(0);
+ g_assert_not_reached();
}
check_msacsr_cause(env, GETPC());
diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c
index 1bcdbb1..220cd3b 100644
--- a/target/mips/tcg/msa_translate.c
+++ b/target/mips/tcg/msa_translate.c
@@ -217,8 +217,6 @@
/* if some bit is non-zero then some element is zero */
tcg_gen_setcondi_i64(cond, t0, t0, 0);
tcg_gen_trunc_i64_tl(tresult, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond)
@@ -237,7 +235,6 @@
tcg_gen_or_i64(t0, msa_wr_d[wt << 1], msa_wr_d[(wt << 1) + 1]);
tcg_gen_setcondi_i64(cond, t0, t0, 0);
tcg_gen_trunc_i64_tl(bcond, t0);
- tcg_temp_free_i64(t0);
ctx->btarget = ctx->base.pc_next + (sa << 2) + 4;
@@ -545,8 +542,6 @@
gen_load_gpr(telm, a->ws);
gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd));
- tcg_temp_free(telm);
-
return true;
}
@@ -563,8 +558,6 @@
gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws));
gen_store_gpr(telm, a->wd);
- tcg_temp_free(telm);
-
return true;
}
@@ -782,8 +775,6 @@
gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df);
gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr);
- tcg_temp_free(taddr);
-
return true;
}
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
index f52244e..bdd2070 100644
--- a/target/mips/tcg/mxu_translate.c
+++ b/target/mips/tcg/mxu_translate.c
@@ -513,8 +513,6 @@
} else if (XRa == 16) {
gen_store_mxu_cr(t0);
}
-
- tcg_temp_free(t0);
}
/*
@@ -537,8 +535,6 @@
}
gen_store_gpr(t0, Rb);
-
- tcg_temp_free(t0);
}
/*
@@ -613,9 +609,6 @@
}
gen_store_mxu_gpr(t0, XRa);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/*
@@ -664,11 +657,6 @@
}
gen_store_mxu_gpr(t3, XRa);
gen_store_mxu_gpr(t2, XRd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- tcg_temp_free(t3);
}
/*
@@ -741,11 +729,6 @@
}
gen_store_mxu_gpr(t3, XRa);
gen_store_mxu_gpr(t2, XRd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- tcg_temp_free(t3);
}
/*
@@ -821,15 +804,6 @@
gen_store_mxu_gpr(t0, XRd);
gen_store_mxu_gpr(t1, XRa);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- tcg_temp_free(t3);
- tcg_temp_free(t4);
- tcg_temp_free(t5);
- tcg_temp_free(t6);
- tcg_temp_free(t7);
}
/*
@@ -860,9 +834,6 @@
tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP));
gen_store_mxu_gpr(t1, XRa);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -1101,7 +1072,7 @@
uint32_t XRx = XRb ? XRb : XRc;
/* ...and do half-word-wise max/min with one operand 0 */
TCGv_i32 t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 t1 = tcg_constant_i32(0);
/* the left half-word first */
tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFFFF0000);
@@ -1125,9 +1096,6 @@
tcg_gen_shri_i32(t0, t0, 16);
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
} else if (unlikely(XRb == XRc)) {
/* both operands same -> just set destination to one of them */
tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
@@ -1161,9 +1129,6 @@
tcg_gen_shri_i32(t0, t0, 16);
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
}
@@ -1198,7 +1163,7 @@
uint32_t XRx = XRb ? XRb : XRc;
/* ...and do byte-wise max/min with one operand 0 */
TCGv_i32 t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 t1 = tcg_constant_i32(0);
int32_t i;
/* the leftmost byte (byte 3) first */
@@ -1226,9 +1191,6 @@
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
}
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
} else if (unlikely(XRb == XRc)) {
/* both operands same -> just set destination to one of them */
tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
@@ -1266,9 +1228,6 @@
/* finally update the destination */
tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
}
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
}
@@ -1384,9 +1343,6 @@
tcg_gen_shri_i32(t1, t1, 24);
tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
break;
case MXU_OPTN3_PTN2:
@@ -1410,9 +1366,6 @@
tcg_gen_shri_i32(t1, t1, 16);
tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
break;
case MXU_OPTN3_PTN3:
@@ -1436,9 +1389,6 @@
tcg_gen_shri_i32(t1, t1, 8);
tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
break;
case MXU_OPTN3_PTN4:
@@ -1598,7 +1548,6 @@
}
gen_set_label(l_exit);
- tcg_temp_free(t_mxu_cr);
}
return true;
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index faf6d67..9398e28 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -1005,13 +1005,9 @@
tcg_gen_extr_i64_tl(tmp1, tmp2, tval);
}
gen_store_gpr(tmp1, reg1);
- tcg_temp_free(tmp1);
gen_store_gpr(tmp2, reg2);
- tcg_temp_free(tmp2);
tcg_gen_st_i64(tval, cpu_env, offsetof(CPUMIPSState, llval_wp));
- tcg_temp_free_i64(tval);
tcg_gen_st_tl(taddr, cpu_env, offsetof(CPUMIPSState, lladdr));
- tcg_temp_free(taddr);
}
static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
@@ -1084,9 +1080,6 @@
/* adjust stack pointer */
gen_adjust_sp(ctx, -u);
-
- tcg_temp_free(t0);
- tcg_temp_free(va);
}
static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
@@ -1110,9 +1103,6 @@
/* adjust stack pointer */
gen_adjust_sp(ctx, u);
-
- tcg_temp_free(t0);
- tcg_temp_free(va);
}
static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc,
@@ -1232,8 +1222,6 @@
if (insn_bytes == 2) {
ctx->hflags |= MIPS_HFLAG_B16;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_pool16c_nanomips_insn(DisasContext *ctx)
@@ -1358,7 +1346,6 @@
}
break;
}
- tcg_temp_free(t0);
#endif
} else {
gen_slt(ctx, OPC_SLTU, rd, rs, rt);
@@ -1381,10 +1368,6 @@
/* operands of same sign, result different sign */
tcg_gen_setcondi_tl(TCG_COND_LT, t0, t1, 0);
gen_store_gpr(t0, rd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
break;
case NM_MUL:
@@ -1427,7 +1410,6 @@
gen_load_gpr(t0, rt);
gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3));
- tcg_temp_free(t0);
}
break;
case NM_D_E_MT_VPE:
@@ -1467,8 +1449,6 @@
}
break;
}
-
- tcg_temp_free(t0);
}
break;
case NM_FORK:
@@ -1480,8 +1460,6 @@
gen_load_gpr(t0, rt);
gen_load_gpr(t1, rs);
gen_helper_fork(t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
case NM_MFTR:
@@ -1508,7 +1486,6 @@
gen_load_gpr(t0, rs);
gen_helper_yield(t0, cpu_env, t0);
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
}
break;
#endif
@@ -1557,11 +1534,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free_i32(t0);
-
- tcg_temp_free(v0_t);
- tcg_temp_free(v1_t);
}
@@ -1682,10 +1654,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(v0_t);
}
static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc,
@@ -1802,8 +1770,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free_i32(t0);
}
static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -1855,10 +1821,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_MULT:
@@ -1878,8 +1842,6 @@
tcg_gen_muls2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case NM_EXTRV_W:
@@ -1915,10 +1877,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_MULTU:
@@ -1938,8 +1898,6 @@
tcg_gen_mulu2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case NM_EXTRV_R_W:
@@ -1982,10 +1940,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_EXTRV_RS_W:
@@ -2027,10 +1983,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case NM_EXTRV_S_H:
@@ -2045,12 +1999,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
- tcg_temp_free(v0_t);
- tcg_temp_free(v1_t);
}
static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -2162,7 +2110,6 @@
gen_load_gpr(tv0, rt);
gen_helper_insv(v0_t, cpu_env, v0_t, tv0);
gen_store_gpr(v0_t, ret);
- tcg_temp_free(tv0);
}
break;
case NM_RADDU_W_QB:
@@ -2188,9 +2135,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(v0_t);
- tcg_temp_free(t0);
}
static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
@@ -2243,8 +2187,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(rs_t);
}
@@ -2304,7 +2246,6 @@
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
- tcg_temp_free(t0);
}
break;
case NM_EI:
@@ -2317,7 +2258,6 @@
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
- tcg_temp_free(t0);
}
break;
case NM_RDPGPR:
@@ -2374,7 +2314,7 @@
/* Unconditional branch */
} else if (rt == 0 && imm != 0) {
/* Treat as NOP */
- goto out;
+ return;
} else {
cond = TCG_COND_EQ;
}
@@ -2384,12 +2324,12 @@
check_nms(ctx);
if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) {
gen_reserved_instruction(ctx);
- goto out;
+ return;
} else if (rt == 0 && opc == NM_BBEQZC) {
/* Unconditional branch */
} else if (rt == 0 && opc == NM_BBNEZC) {
/* Treat as NOP */
- goto out;
+ return;
} else {
tcg_gen_shri_tl(t0, t0, imm);
tcg_gen_andi_tl(t0, t0, 1);
@@ -2404,7 +2344,7 @@
case NM_BNEIC:
if (rt == 0 && imm == 0) {
/* Treat as NOP */
- goto out;
+ return;
} else if (rt == 0 && imm != 0) {
/* Unconditional branch */
} else {
@@ -2434,7 +2374,7 @@
default:
MIPS_INVAL("Immediate Value Compact branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* branch completion */
@@ -2455,10 +2395,6 @@
gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */
@@ -2488,9 +2424,6 @@
/* unconditional branch to register */
tcg_gen_mov_tl(cpu_PC, btarget);
tcg_gen_lookup_and_goto_ptr();
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* nanoMIPS Branches */
@@ -2540,14 +2473,12 @@
gen_load_gpr(tbase, rt);
tcg_gen_movi_tl(toffset, offset);
gen_op_addr_add(ctx, btarget, tbase, toffset);
- tcg_temp_free(tbase);
- tcg_temp_free(toffset);
}
break;
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
if (bcond_compute == 0) {
@@ -2559,7 +2490,7 @@
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
} else {
/* Conditional compact branch */
@@ -2620,7 +2551,7 @@
default:
MIPS_INVAL("Compact conditional branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* branch completion */
@@ -2633,10 +2564,6 @@
gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -2664,15 +2591,12 @@
default:
MIPS_INVAL("cp1 cond branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
tcg_gen_trunc_i64_tl(bcond, t0);
ctx->btarget = btarget;
-
-out:
- tcg_temp_free_i64(t0);
}
@@ -2709,7 +2633,7 @@
break;
default:
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
}
gen_op_addr_add(ctx, t0, t0, t1);
@@ -2799,10 +2723,6 @@
gen_reserved_instruction(ctx);
break;
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_pool32f_nanomips_insn(DisasContext *ctx)
@@ -3439,21 +3359,19 @@
case 0:
/* PRECR_SRA_PH_W */
{
- TCGv_i32 sa_t = tcg_const_i32(rd);
+ TCGv_i32 sa_t = tcg_constant_i32(rd);
gen_helper_precr_sra_ph_w(v1_t, sa_t, v1_t,
cpu_gpr[rt]);
gen_store_gpr(v1_t, rt);
- tcg_temp_free_i32(sa_t);
}
break;
case 1:
/* PRECR_SRA_R_PH_W */
{
- TCGv_i32 sa_t = tcg_const_i32(rd);
+ TCGv_i32 sa_t = tcg_constant_i32(rd);
gen_helper_precr_sra_r_ph_w(v1_t, sa_t, v1_t,
cpu_gpr[rt]);
gen_store_gpr(v1_t, rt);
- tcg_temp_free_i32(sa_t);
}
break;
}
@@ -3536,8 +3454,6 @@
tcg_gen_movi_tl(tv0, rd >> 3);
tcg_gen_movi_tl(tv1, imm);
gen_helper_shilo(tv0, tv1, cpu_env);
- tcg_temp_free(tv1);
- tcg_temp_free(tv0);
}
break;
case NM_MULEQ_S_W_PHL:
@@ -3652,10 +3568,6 @@
gen_reserved_instruction(ctx);
break;
}
-
- tcg_temp_free(v2_t);
- tcg_temp_free(v1_t);
- tcg_temp_free(t0);
}
static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
@@ -3827,7 +3739,6 @@
tcg_gen_movi_tl(t0, addr);
tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL);
- tcg_temp_free(t0);
}
break;
case NM_SWPC48:
@@ -3844,9 +3755,6 @@
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
default:
@@ -3908,8 +3816,6 @@
gen_load_gpr(t0, rs);
tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, imm);
gen_store_gpr(t0, rt);
-
- tcg_temp_free(t0);
}
break;
case NM_ADDIUNEG:
@@ -3958,18 +3864,15 @@
check_nms(ctx);
if (rt != 0) {
TCGv t0 = tcg_temp_new();
- TCGv_i32 shift = tcg_const_i32(extract32(ctx->opcode, 0, 5));
- TCGv_i32 shiftx = tcg_const_i32(extract32(ctx->opcode, 7, 4)
- << 1);
- TCGv_i32 stripe = tcg_const_i32(extract32(ctx->opcode, 6, 1));
+ TCGv_i32 shift =
+ tcg_constant_i32(extract32(ctx->opcode, 0, 5));
+ TCGv_i32 shiftx =
+ tcg_constant_i32(extract32(ctx->opcode, 7, 4) << 1);
+ TCGv_i32 stripe =
+ tcg_constant_i32(extract32(ctx->opcode, 6, 1));
gen_load_gpr(t0, rs);
gen_helper_rotx(cpu_gpr[rt], t0, shift, shiftx, stripe);
- tcg_temp_free(t0);
-
- tcg_temp_free_i32(shift);
- tcg_temp_free_i32(shiftx);
- tcg_temp_free_i32(stripe);
}
break;
case NM_P_INS:
@@ -4239,8 +4142,6 @@
MO_UNALN);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
case NM_P_LL:
@@ -4432,8 +4333,6 @@
}
counter++;
}
- tcg_temp_free(va);
- tcg_temp_free(t1);
}
break;
default:
@@ -4454,7 +4353,6 @@
gen_load_gpr(t0, rt);
tcg_gen_mov_tl(cpu_gpr[rd], t0);
gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s);
- tcg_temp_free(t0);
}
break;
case NM_P_BAL:
@@ -4604,9 +4502,8 @@
/* make sure instructions are on a halfword boundary */
if (ctx->base.pc_next & 0x1) {
- TCGv tmp = tcg_const_tl(ctx->base.pc_next);
+ TCGv tmp = tcg_constant_tl(ctx->base.pc_next);
tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
- tcg_temp_free(tmp);
generate_exception_end(ctx, EXCP_AdEL);
return 2;
}
@@ -4941,8 +4838,6 @@
gen_load_gpr(t1, rt);
tcg_gen_mov_tl(cpu_gpr[rd], t0);
tcg_gen_mov_tl(cpu_gpr[re], t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
default:
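With the explicit frees gone, the nanoMIPS branch and decode helpers above no longer need a shared cleanup label, so "goto out" collapses into a plain early return. A hypothetical sketch of the pattern, reusing gen_reserved_instruction()/gen_store_gpr() as they appear elsewhere in this translator:

    static void demo_decode_imm(DisasContext *ctx, int rt, int imm)
    {
        TCGv t0 = tcg_temp_new();

        if (imm >= 32) {
            gen_reserved_instruction(ctx);
            return;                    /* previously: goto out */
        }
        tcg_gen_movi_tl(t0, imm);
        gen_store_gpr(t0, rt);
    }                                  /* previously: out: tcg_temp_free(t0); */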
diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c
index 6a207d2..103c304 100644
--- a/target/mips/tcg/octeon_translate.c
+++ b/target/mips/tcg/octeon_translate.c
@@ -40,8 +40,6 @@
ctx->hflags |= MIPS_HFLAG_BC;
ctx->btarget = ctx->base.pc_next + 4 + a->offset * 4;
ctx->hflags |= MIPS_HFLAG_BDS32;
-
- tcg_temp_free(t0);
return true;
}
@@ -61,10 +59,6 @@
tcg_gen_add_tl(t0, t0, t1);
tcg_gen_andi_i64(cpu_gpr[a->rd], t0, 0xff);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
@@ -83,10 +77,6 @@
gen_load_gpr(t1, a->rt);
tcg_gen_mul_i64(cpu_gpr[a->rd], t0, t1);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
@@ -103,8 +93,6 @@
gen_load_gpr(t0, a->rs);
tcg_gen_sextract_tl(t0, t0, a->p, a->lenm1 + 1);
gen_store_gpr(t0, a->rt);
- tcg_temp_free(t0);
-
return true;
}
@@ -121,8 +109,6 @@
gen_load_gpr(t0, a->rs);
tcg_gen_deposit_z_tl(t0, t0, a->p, a->lenm1 + 1);
gen_store_gpr(t0, a->rt);
- tcg_temp_free(t0);
-
return true;
}
@@ -142,8 +128,6 @@
}
tcg_gen_ctpop_tl(t0, t0);
gen_store_gpr(t0, a->rd);
- tcg_temp_free(t0);
-
return true;
}
@@ -167,10 +151,6 @@
} else {
tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr[a->rd], t1, t0);
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
@@ -194,8 +174,5 @@
} else {
tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], t0, imm);
}
-
- tcg_temp_free(t0);
-
return true;
}
diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c
index 85f0567..f3735df 100644
--- a/target/mips/tcg/sysemu/mips-semi.c
+++ b/target/mips/tcg/sysemu/mips-semi.c
@@ -20,7 +20,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
+#include "gdbstub/helpers.h"
#include "semihosting/softmmu-uaccess.h"
#include "semihosting/semihost.h"
#include "semihosting/console.h"
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 8cad3d1..1fb4ef7 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -1274,11 +1274,8 @@
tcg_gen_add_ptr(addr, cpu_env, addr);
tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from);
- tcg_temp_free_ptr(addr);
- tcg_temp_free_i32(t2);
}
gen_store_gpr(t0, to);
- tcg_temp_free(t0);
}
static inline void gen_store_srsgpr(int from, int to)
@@ -1297,9 +1294,6 @@
tcg_gen_add_ptr(addr, cpu_env, addr);
tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to);
- tcg_temp_free_ptr(addr);
- tcg_temp_free_i32(t2);
- tcg_temp_free(t0);
}
}
@@ -1396,7 +1390,6 @@
t64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t64, t);
tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32);
- tcg_temp_free_i64(t64);
}
static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
@@ -1414,7 +1407,6 @@
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t64, t);
tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
- tcg_temp_free_i64(t64);
} else {
gen_store_fpr32(ctx, t, reg | 1);
}
@@ -1439,7 +1431,6 @@
t0 = tcg_temp_new_i64();
tcg_gen_shri_i64(t0, t, 32);
tcg_gen_deposit_i64(fpu_f64[reg | 1], fpu_f64[reg | 1], t0, 0, 32);
- tcg_temp_free_i64(t0);
}
}
@@ -1852,8 +1843,6 @@
default: \
abort(); \
} \
- tcg_temp_free_i##bits(fp0); \
- tcg_temp_free_i##bits(fp1); \
}
FOP_CONDS(, 0, d, FMT_D, 64)
@@ -1946,8 +1935,6 @@
abort(); \
} \
STORE; \
- tcg_temp_free_i ## bits(fp0); \
- tcg_temp_free_i ## bits(fp1); \
}
FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd))
@@ -1967,7 +1954,6 @@
tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
- tcg_temp_free(t0); \
}
#else
#define OP_LD_ATOMIC(insn, fname) \
@@ -2009,11 +1995,65 @@
return pc;
}
+/* LWL or LDL, depending on MemOp. */
+static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr,
+ int mem_idx, MemOp mop)
+{
+ int sizem1 = memop_size(mop) - 1;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, addr, sizem1);
+ if (!cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, sizem1);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, addr, ~sizem1);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop);
+ tcg_gen_shl_tl(t0, t0, t1);
+ tcg_gen_shl_tl(t1, tcg_constant_tl(-1), t1);
+ tcg_gen_andc_tl(t1, reg, t1);
+ tcg_gen_or_tl(reg, t0, t1);
+}
+
+/* LWR or LDR, depending on MemOp. */
+static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr,
+ int mem_idx, MemOp mop)
+{
+ int size = memop_size(mop);
+ int sizem1 = size - 1;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, addr, sizem1);
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, sizem1);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, addr, ~sizem1);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mop);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, size * 8 - 1);
+ tcg_gen_shl_tl(t1, tcg_constant_tl(~1), t1);
+ tcg_gen_and_tl(t1, reg, t1);
+ tcg_gen_or_tl(reg, t0, t1);
+}
+
/* Load */
static void gen_ld(DisasContext *ctx, uint32_t opc,
int rt, int base, int offset)
{
- TCGv t0, t1, t2;
+ TCGv t0, t1;
int mem_idx = ctx->mem_idx;
if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F |
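The new gen_lxl()/gen_lxr() helpers factor out the unaligned left/right load sequence that gen_ld() previously open-coded four times for LWL/LWR/LDL/LDR; the access width is now selected purely by the MemOp argument, and the merge mask is built from tcg_constant_tl() instead of a writable constant temp. A usage sketch matching the converted cases below (t0 holds the effective address, rt the target GPR, mem_idx the MMU index):

    TCGv val = tcg_temp_new();

    gen_load_gpr(val, rt);                    /* merge with the old register value */
    gen_lxl(ctx, val, t0, mem_idx, MO_TEUL);  /* LWL; pass MO_TEUQ for LDL */
    tcg_gen_ext32s_tl(val, val);              /* 32-bit result is sign-extended */
    gen_store_gpr(val, rt);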
@@ -2048,65 +2088,26 @@
break;
case OPC_LDL:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_gpr(t0, rt);
+ gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ);
+ gen_store_gpr(t1, rt);
break;
case OPC_LDR:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 63);
- t2 = tcg_const_tl(0xfffffffffffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_gpr(t0, rt);
+ gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ);
+ gen_store_gpr(t1, rt);
break;
case OPC_LDPC:
- t1 = tcg_const_tl(pc_relative_pc(ctx));
+ t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_temp_free(t1);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
gen_store_gpr(t0, rt);
break;
#endif
case OPC_LWPC:
- t1 = tcg_const_tl(pc_relative_pc(ctx));
+ t1 = tcg_constant_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_temp_free(t1);
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL);
gen_store_gpr(t0, rt);
break;
@@ -2153,57 +2154,20 @@
/* fall through */
case OPC_LWL:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- tcg_gen_ext32s_tl(t0, t0);
- gen_store_gpr(t0, rt);
+ gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_gpr(t1, rt);
break;
case OPC_LWRE:
mem_idx = MIPS_HFLAG_UM;
/* fall through */
case OPC_LWR:
t1 = tcg_temp_new();
- /*
- * Do a byte access to possibly trigger a page
- * fault with the unaligned address.
- */
- tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 31);
- t2 = tcg_const_tl(0xfffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_gpr(t1, rt);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- tcg_gen_ext32s_tl(t0, t0);
- gen_store_gpr(t0, rt);
+ gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_gpr(t1, rt);
break;
case OPC_LLE:
mem_idx = MIPS_HFLAG_UM;
@@ -2214,7 +2178,6 @@
gen_store_gpr(t0, rt);
break;
}
- tcg_temp_free(t0);
}
/* Store */
@@ -2273,8 +2236,6 @@
gen_helper_0e2i(swr, t1, t0, mem_idx);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -2291,7 +2252,6 @@
/* compare the address against that of the preceding LL */
gen_base_offset_addr(ctx, addr, base, offset);
tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1);
- tcg_temp_free(addr);
tcg_gen_movi_tl(t0, 0);
gen_store_gpr(t0, rt);
tcg_gen_br(done);
@@ -2304,10 +2264,8 @@
eva ? MIPS_HFLAG_UM : ctx->mem_idx, tcg_mo);
tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval);
gen_store_gpr(t0, rt);
- tcg_temp_free(val);
gen_set_label(done);
- tcg_temp_free(t0);
}
/* Load and store */
@@ -2325,7 +2283,6 @@
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, ft);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SWC1:
@@ -2334,7 +2291,6 @@
gen_load_fpr32(ctx, fp0, ft);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
ctx->default_tcg_memop_mask);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_LDC1:
@@ -2343,7 +2299,6 @@
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, fp0, ft);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SDC1:
@@ -2352,7 +2307,6 @@
gen_load_fpr64(ctx, fp0, ft);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free_i64(fp0);
}
break;
default:
@@ -2381,7 +2335,6 @@
} else {
generate_exception_err(ctx, EXCP_CpU, 1);
}
- tcg_temp_free(t0);
}
/* Arithmetic with immediate operand */
@@ -2412,15 +2365,12 @@
tcg_gen_xori_tl(t1, t1, ~uimm);
tcg_gen_xori_tl(t2, t0, uimm);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
tcg_gen_ext32s_tl(t0, t0);
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
}
break;
case OPC_ADDIU:
@@ -2445,14 +2395,11 @@
tcg_gen_xori_tl(t1, t1, ~uimm);
tcg_gen_xori_tl(t2, t0, uimm);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
}
break;
case OPC_DADDIU:
@@ -2535,7 +2482,6 @@
tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm);
break;
}
- tcg_temp_free(t0);
}
/* Shifts with immediate operand */
@@ -2575,7 +2521,6 @@
tcg_gen_trunc_tl_i32(t1, t0);
tcg_gen_rotri_i32(t1, t1, uimm);
tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
- tcg_temp_free_i32(t1);
} else {
tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
}
@@ -2611,7 +2556,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
/* Arithmetic */
@@ -2642,14 +2586,11 @@
tcg_gen_xor_tl(t1, t1, t2);
tcg_gen_xor_tl(t2, t0, t2);
tcg_gen_andc_tl(t1, t2, t1);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_ADDU:
@@ -2678,9 +2619,7 @@
tcg_gen_xor_tl(t2, t1, t2);
tcg_gen_xor_tl(t1, t0, t1);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/*
* operands of different sign, first operand and the result
* of different sign
@@ -2688,7 +2627,6 @@
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_SUBU:
@@ -2718,14 +2656,11 @@
tcg_gen_xor_tl(t1, t1, t2);
tcg_gen_xor_tl(t2, t0, t2);
tcg_gen_andc_tl(t1, t2, t1);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/* operands of same sign, result different sign */
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_DADDU:
@@ -2752,9 +2687,7 @@
tcg_gen_xor_tl(t2, t1, t2);
tcg_gen_xor_tl(t1, t0, t1);
tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
- tcg_temp_free(t1);
/*
* Operands of different sign, first operand and result different
* sign.
@@ -2762,7 +2695,6 @@
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
case OPC_DSUBU:
@@ -2801,7 +2733,7 @@
t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
- t1 = tcg_const_tl(0);
+ t1 = tcg_constant_tl(0);
t2 = tcg_temp_new();
gen_load_gpr(t2, rs);
switch (opc) {
@@ -2818,9 +2750,6 @@
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1);
break;
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
/* Logic */
@@ -2899,8 +2828,6 @@
tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1);
break;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* Shifts */
@@ -2947,8 +2874,6 @@
tcg_gen_andi_i32(t2, t2, 0x1f);
tcg_gen_rotr_i32(t2, t3, t2);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
#if defined(TARGET_MIPS64)
@@ -2970,8 +2895,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* Arithmetic on HI/LO registers */
@@ -3041,10 +2964,9 @@
static inline void gen_r6_ld(target_long addr, int reg, int memidx,
MemOp memop)
{
- TCGv t0 = tcg_const_tl(addr);
- tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(t0, tcg_constant_tl(addr), memidx, memop);
gen_store_gpr(t0, reg);
- tcg_temp_free(t0);
}
static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
@@ -3141,8 +3063,6 @@
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_MOD:
@@ -3160,34 +3080,28 @@
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_MODU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_MUL:
@@ -3198,8 +3112,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_mul_i32(t2, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case R6_OPC_MUH:
@@ -3210,8 +3122,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_muls2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case R6_OPC_MULU:
@@ -3222,8 +3132,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_mul_i32(t2, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case R6_OPC_MUHU:
@@ -3234,8 +3142,6 @@
tcg_gen_trunc_tl_i32(t3, t1);
tcg_gen_mulu2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
#if defined(TARGET_MIPS64)
@@ -3251,8 +3157,6 @@
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMOD:
@@ -3267,28 +3171,22 @@
tcg_gen_movi_tl(t3, 0);
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DDIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMODU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMUL:
@@ -3298,7 +3196,6 @@
{
TCGv t2 = tcg_temp_new();
tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
}
break;
case R6_OPC_DMULU:
@@ -3308,18 +3205,14 @@
{
TCGv t2 = tcg_temp_new();
tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
}
break;
#endif
default:
MIPS_INVAL("r6 mul/div");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
#if defined(TARGET_MIPS64)
@@ -3351,14 +3244,12 @@
tcg_gen_rem_tl(cpu_HI[1], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case MMI_OPC_DIVU1:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
@@ -3366,18 +3257,13 @@
tcg_gen_remu_tl(cpu_HI[1], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
default:
MIPS_INVAL("div1 TX79");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
#endif
@@ -3414,14 +3300,12 @@
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_DIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
@@ -3429,8 +3313,6 @@
tcg_gen_remu_tl(cpu_HI[acc], t0, t1);
tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_MULT:
@@ -3442,8 +3324,6 @@
tcg_gen_muls2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case OPC_MULTU:
@@ -3455,8 +3335,6 @@
tcg_gen_mulu2_i32(t2, t3, t2, t3);
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
#if defined(TARGET_MIPS64)
@@ -3473,19 +3351,15 @@
tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_div_tl(cpu_LO[acc], t0, t1);
tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_DDIVU:
{
- TCGv t2 = tcg_const_tl(0);
- TCGv t3 = tcg_const_tl(1);
+ TCGv t2 = tcg_constant_tl(0);
+ TCGv t3 = tcg_constant_tl(1);
tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
tcg_gen_divu_i64(cpu_LO[acc], t0, t1);
tcg_gen_remu_i64(cpu_HI[acc], t0, t1);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
}
break;
case OPC_DMULT:
@@ -3505,10 +3379,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case OPC_MADDU:
@@ -3523,10 +3395,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case OPC_MSUB:
@@ -3539,10 +3409,8 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
case OPC_MSUBU:
@@ -3557,20 +3425,15 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_sub_i64(t2, t3, t2);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
- tcg_temp_free_i64(t2);
}
break;
default:
MIPS_INVAL("mul/div");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/*
@@ -3625,8 +3488,6 @@
}
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case MMI_OPC_MULTU1:
@@ -3644,8 +3505,6 @@
}
tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
}
break;
case MMI_OPC_MADD1:
@@ -3661,13 +3520,11 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
if (rd) {
gen_move_low32(cpu_gpr[rd], t2);
}
- tcg_temp_free_i64(t2);
}
break;
case MMI_OPC_MADDU1:
@@ -3685,24 +3542,18 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
tcg_gen_add_i64(t2, t2, t3);
- tcg_temp_free_i64(t3);
gen_move_low32(cpu_LO[acc], t2);
gen_move_high32(cpu_HI[acc], t2);
if (rd) {
gen_move_low32(cpu_gpr[rd], t2);
}
- tcg_temp_free_i64(t2);
}
break;
default:
MIPS_INVAL("mul/madd TXx9");
gen_reserved_instruction(ctx);
- goto out;
+ break;
}
-
- out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_cl(DisasContext *ctx, uint32_t opc,
@@ -3924,9 +3775,6 @@
break;
#endif
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* Loongson multimedia instructions */
@@ -4221,7 +4069,6 @@
tcg_gen_xor_i64(t1, t1, t2);
tcg_gen_xor_i64(t2, t2, t0);
tcg_gen_andc_i64(t1, t2, t1);
- tcg_temp_free_i64(t2);
tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(lab);
@@ -4242,7 +4089,6 @@
tcg_gen_xor_i64(t1, t1, t2);
tcg_gen_xor_i64(t2, t2, t0);
tcg_gen_and_i64(t1, t1, t2);
- tcg_temp_free_i64(t2);
tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(lab);
@@ -4284,12 +4130,8 @@
tcg_gen_extrl_i64_i32(t32, t64);
tcg_gen_deposit_i32(fpu_fcr31, fpu_fcr31, t32,
get_fp_bit(cc), 1);
-
- tcg_temp_free_i32(t32);
- tcg_temp_free_i64(t64);
}
- goto no_rd;
- break;
+ return;
default:
MIPS_INVAL("loongson_cp2");
gen_reserved_instruction(ctx);
@@ -4297,16 +4139,12 @@
}
gen_store_fpr64(ctx, t0, rd);
-
-no_rd:
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static void gen_loongson_lswc2(DisasContext *ctx, int rt,
int rs, int rd)
{
- TCGv t0, t1, t2;
+ TCGv t0, t1;
TCGv_i32 fp0;
#if defined(TARGET_MIPS64)
int lsq_rt1 = ctx->opcode & 0x1f;
@@ -4328,7 +4166,6 @@
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rt);
gen_store_gpr(t0, lsq_rt1);
- tcg_temp_free(t1);
break;
case OPC_GSLQC1:
check_cp1_enabled(ctx);
@@ -4341,7 +4178,6 @@
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t1, rt);
gen_store_fpr64(ctx, t0, lsq_rt1);
- tcg_temp_free(t1);
break;
case OPC_GSSQ:
t1 = tcg_temp_new();
@@ -4353,7 +4189,6 @@
gen_load_gpr(t1, lsq_rt1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
case OPC_GSSQC1:
check_cp1_enabled(ctx);
@@ -4366,7 +4201,6 @@
gen_load_fpr64(ctx, t1, lsq_rt1);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#endif
case OPC_GSSHFL:
@@ -4374,109 +4208,41 @@
case OPC_GSLWLC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
- t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, rt);
+ t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
-#if defined(TARGET_MIPS64)
- tcg_gen_extrl_i64_i32(fp0, t0);
-#else
- tcg_gen_ext32s_tl(fp0, t0);
-#endif
+ gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
- tcg_temp_free_i32(fp0);
break;
case OPC_GSLWRC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
- t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 3);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 3);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 31);
- t2 = tcg_const_tl(0xfffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, rt);
+ t1 = tcg_temp_new();
tcg_gen_ext_i32_tl(t1, fp0);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
-#if defined(TARGET_MIPS64)
- tcg_gen_extrl_i64_i32(fp0, t0);
-#else
- tcg_gen_ext32s_tl(fp0, t0);
-#endif
+ gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_trunc_tl_i32(fp0, t1);
gen_store_fpr32(ctx, fp0, rt);
- tcg_temp_free_i32(fp0);
break;
#if defined(TARGET_MIPS64)
case OPC_GSLDLC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (!cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_gen_shl_tl(t0, t0, t1);
- t2 = tcg_const_tl(-1);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_fpr64(ctx, t0, rt);
+ gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+ gen_store_fpr64(ctx, t1, rt);
break;
case OPC_GSLDRC1:
check_cp1_enabled(ctx);
gen_base_offset_addr(ctx, t0, rs, shf_offset);
t1 = tcg_temp_new();
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
- tcg_gen_andi_tl(t1, t0, 7);
- if (cpu_is_bigendian(ctx)) {
- tcg_gen_xori_tl(t1, t1, 7);
- }
- tcg_gen_shli_tl(t1, t1, 3);
- tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_gen_shr_tl(t0, t0, t1);
- tcg_gen_xori_tl(t1, t1, 63);
- t2 = tcg_const_tl(0xfffffffffffffffeull);
- tcg_gen_shl_tl(t2, t2, t1);
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_and_tl(t1, t1, t2);
- tcg_temp_free(t2);
- tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t1);
- gen_store_fpr64(ctx, t0, rt);
+ gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+ gen_store_fpr64(ctx, t1, rt);
break;
#endif
default:
@@ -4495,8 +4261,6 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t1, fp0);
gen_helper_0e2i(swl, t1, t0, ctx->mem_idx);
- tcg_temp_free_i32(fp0);
- tcg_temp_free(t1);
break;
case OPC_GSSWRC1:
check_cp1_enabled(ctx);
@@ -4506,8 +4270,6 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t1, fp0);
gen_helper_0e2i(swr, t1, t0, ctx->mem_idx);
- tcg_temp_free_i32(fp0);
- tcg_temp_free(t1);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDLC1:
@@ -4516,7 +4278,6 @@
gen_base_offset_addr(ctx, t0, rs, shf_offset);
gen_load_fpr64(ctx, t1, rt);
gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx);
- tcg_temp_free(t1);
break;
case OPC_GSSDRC1:
check_cp1_enabled(ctx);
@@ -4524,7 +4285,6 @@
gen_base_offset_addr(ctx, t0, rs, shf_offset);
gen_load_fpr64(ctx, t1, rt);
gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx);
- tcg_temp_free(t1);
break;
#endif
default:
@@ -4538,7 +4298,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
}
/* Loongson EXT LDC2/SDC2 */
@@ -4633,7 +4392,6 @@
tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
ctx->default_tcg_memop_mask);
gen_store_fpr32(ctx, fp0, rt);
- tcg_temp_free_i32(fp0);
break;
#if defined(TARGET_MIPS64)
case OPC_GSLDXC1:
@@ -4650,21 +4408,18 @@
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_SB);
- tcg_temp_free(t1);
break;
case OPC_GSSHX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
case OPC_GSSWX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDX:
@@ -4672,7 +4427,6 @@
gen_load_gpr(t1, rt);
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#endif
case OPC_GSSWXC1:
@@ -4680,7 +4434,6 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
ctx->default_tcg_memop_mask);
- tcg_temp_free_i32(fp0);
break;
#if defined(TARGET_MIPS64)
case OPC_GSSDXC1:
@@ -4688,14 +4441,11 @@
gen_load_fpr64(ctx, t1, rt);
tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
- tcg_temp_free(t1);
break;
#endif
default:
break;
}
-
- tcg_temp_free(t0);
}
/* Traps */
@@ -4805,8 +4555,6 @@
generate_exception(ctx, EXCP_TRAP);
gen_set_label(l1);
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
@@ -4887,6 +4635,14 @@
break;
case OPC_J:
case OPC_JAL:
+ {
+ /* Jump to immediate */
+ int jal_mask = ctx->hflags & MIPS_HFLAG_M16 ? 0xF8000000
+ : 0xF0000000;
+ btgt = ((ctx->base.pc_next + insn_bytes) & jal_mask)
+ | (uint32_t)offset;
+ break;
+ }
case OPC_JALX:
/* Jump to immediate */
btgt = ((ctx->base.pc_next + insn_bytes) & (int32_t)0xF0000000) |
@@ -5072,8 +4828,6 @@
if (insn_bytes == 2) {
ctx->hflags |= MIPS_HFLAG_B16;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
@@ -5142,13 +4896,9 @@
fail:
MIPS_INVAL("bitops");
gen_reserved_instruction(ctx);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return;
}
gen_store_gpr(t0, rt);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd)
@@ -5166,15 +4916,13 @@
case OPC_WSBH:
{
TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_const_tl(0x00FF00FF);
+ TCGv t2 = tcg_constant_tl(0x00FF00FF);
tcg_gen_shri_tl(t1, t0, 8);
tcg_gen_and_tl(t1, t1, t2);
tcg_gen_and_tl(t0, t0, t2);
tcg_gen_shli_tl(t0, t0, 8);
tcg_gen_or_tl(t0, t0, t1);
- tcg_temp_free(t2);
- tcg_temp_free(t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
}
break;
@@ -5188,21 +4936,19 @@
case OPC_DSBH:
{
TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL);
+ TCGv t2 = tcg_constant_tl(0x00FF00FF00FF00FFULL);
tcg_gen_shri_tl(t1, t0, 8);
tcg_gen_and_tl(t1, t1, t2);
tcg_gen_and_tl(t0, t0, t2);
tcg_gen_shli_tl(t0, t0, 8);
tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
- tcg_temp_free(t1);
}
break;
case OPC_DSHD:
{
TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL);
+ TCGv t2 = tcg_constant_tl(0x0000FFFF0000FFFFULL);
tcg_gen_shri_tl(t1, t0, 16);
tcg_gen_and_tl(t1, t1, t2);
@@ -5212,18 +4958,14 @@
tcg_gen_shri_tl(t1, t0, 32);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t2);
- tcg_temp_free(t1);
}
break;
#endif
default:
MIPS_INVAL("bsfhl");
gen_reserved_instruction(ctx);
- tcg_temp_free(t0);
return;
}
- tcg_temp_free(t0);
}
static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs,
@@ -5262,7 +5004,6 @@
tcg_gen_concat_tl_i64(t2, t1, t0);
tcg_gen_shri_i64(t2, t2, 32 - bits);
gen_move_low32(cpu_gpr[rd], t2);
- tcg_temp_free_i64(t2);
}
break;
#if defined(TARGET_MIPS64)
@@ -5273,10 +5014,7 @@
break;
#endif
}
- tcg_temp_free(t1);
}
-
- tcg_temp_free(t0);
}
void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp)
@@ -5303,7 +5041,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
#ifndef CONFIG_USER_ONLY
@@ -5321,8 +5058,6 @@
tcg_gen_concat32_i64(t1, t1, t0);
#endif
tcg_gen_st_i64(t1, cpu_env, off);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
}
static inline void gen_mthc0_store64(TCGv arg, target_ulong off)
@@ -5334,8 +5069,6 @@
tcg_gen_ld_i64(t1, cpu_env, off);
tcg_gen_concat32_i64(t1, t1, t0);
tcg_gen_st_i64(t1, cpu_env, off);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t0);
}
static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off)
@@ -5349,7 +5082,6 @@
tcg_gen_shri_i64(t0, t0, 32);
#endif
gen_move_low32(arg, t0);
- tcg_temp_free_i64(t0);
}
static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift)
@@ -5359,7 +5091,6 @@
tcg_gen_ld_i64(t0, cpu_env, off);
tcg_gen_shri_i64(t0, t0, 32 + shift);
gen_move_low32(arg, t0);
- tcg_temp_free_i64(t0);
}
static inline void gen_mfc0_load32(TCGv arg, target_ulong off)
@@ -5368,7 +5099,6 @@
tcg_gen_ld_i32(t0, cpu_env, off);
tcg_gen_ext_i32_tl(arg, t0);
- tcg_temp_free_i32(t0);
}
static inline void gen_mfc0_load64(TCGv arg, target_ulong off)
@@ -5383,7 +5113,6 @@
tcg_gen_trunc_tl_i32(t0, arg);
tcg_gen_st_i32(t0, cpu_env, off);
- tcg_temp_free_i32(t0);
}
#define CP0_CHECK(c) \
@@ -5705,7 +5434,6 @@
}
#endif
gen_move_low32(arg, tmp);
- tcg_temp_free_i64(tmp);
}
register_name = "EntryLo0";
break;
@@ -5763,7 +5491,6 @@
}
#endif
gen_move_low32(arg, tmp);
- tcg_temp_free_i64(tmp);
}
register_name = "EntryLo1";
break;
@@ -6292,7 +6019,6 @@
TCGv_i64 tmp = tcg_temp_new_i64();
tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo));
gen_move_low32(arg, tmp);
- tcg_temp_free_i64(tmp);
}
register_name = "TagLo";
break;
@@ -8733,7 +8459,7 @@
case 5:
case 6:
case 7:
- gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel));
+ gen_helper_mftc0_configx(t0, cpu_env, tcg_constant_tl(sel));
break;
default:
goto die;
@@ -8813,13 +8539,11 @@
gen_load_fpr32(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
} else {
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32h(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
}
break;
case 3:
@@ -8836,11 +8560,9 @@
}
trace_mips_translate_tr("mftr", rt, u, sel, h);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
return;
die:
- tcg_temp_free(t0);
LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h);
gen_reserved_instruction(ctx);
}
@@ -9017,13 +8739,11 @@
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, rd);
- tcg_temp_free_i32(fp0);
} else {
TCGv_i32 fp0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32h(ctx, fp0, rd);
- tcg_temp_free_i32(fp0);
}
break;
case 3:
@@ -9041,11 +8761,9 @@
}
}
trace_mips_translate_tr("mttr", rd, u, sel, h);
- tcg_temp_free(t0);
return;
die:
- tcg_temp_free(t0);
LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h);
gen_reserved_instruction(ctx);
}
@@ -9071,7 +8789,6 @@
gen_load_gpr(t0, rt);
gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7);
- tcg_temp_free(t0);
}
opn = "mtc0";
break;
@@ -9092,7 +8809,6 @@
gen_load_gpr(t0, rt);
gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7);
- tcg_temp_free(t0);
}
opn = "dmtc0";
break;
@@ -9112,7 +8828,6 @@
TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rt);
gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7);
- tcg_temp_free(t0);
}
opn = "mthc0";
break;
@@ -9246,7 +8961,7 @@
if ((ctx->insn_flags & ISA_MIPS_R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) {
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
if (cc != 0) {
@@ -9286,7 +9001,6 @@
tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
tcg_gen_nand_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9297,7 +9011,6 @@
tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
tcg_gen_or_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9312,7 +9025,6 @@
tcg_gen_and_i32(t0, t0, t1);
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3));
tcg_gen_nand_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9327,7 +9039,6 @@
tcg_gen_or_i32(t0, t0, t1);
tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3));
tcg_gen_or_i32(t0, t0, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_i32(t0, t0, 1);
tcg_gen_extu_i32_tl(bcond, t0);
}
@@ -9337,12 +9048,10 @@
default:
MIPS_INVAL("cp1 cond branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
ctx->btarget = btarget;
ctx->hflags |= MIPS_HFLAG_BDS32;
- out:
- tcg_temp_free_i32(t0);
}
/* R6 CP1 Branches */
@@ -9359,7 +9068,7 @@
"\n", ctx->base.pc_next);
#endif
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
gen_load_fpr64(ctx, t0, ft);
@@ -9379,7 +9088,7 @@
default:
MIPS_INVAL("cp1 cond branch");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
tcg_gen_trunc_i64_tl(bcond, t0);
@@ -9394,9 +9103,6 @@
ctx->hflags |= MIPS_HFLAG_BDS32;
break;
}
-
-out:
- tcg_temp_free_i64(t0);
}
/* Coprocessor 1 (FPU) */
@@ -9624,7 +9330,6 @@
gen_load_fpr32(ctx, fp0, fs);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
}
gen_store_gpr(t0, rt);
break;
@@ -9635,7 +9340,6 @@
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, fs);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CFC1:
@@ -9665,7 +9369,6 @@
gen_load_fpr32h(ctx, fp0, fs);
tcg_gen_ext_i32_tl(t0, fp0);
- tcg_temp_free_i32(fp0);
}
gen_store_gpr(t0, rt);
break;
@@ -9676,17 +9379,13 @@
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32h(ctx, fp0, fs);
- tcg_temp_free_i32(fp0);
}
break;
default:
MIPS_INVAL("cp1 move");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
-
- out:
- tcg_temp_free(t0);
}
static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf)
@@ -9710,7 +9409,6 @@
t0 = tcg_temp_new_i32();
tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
tcg_gen_brcondi_i32(cond, t0, 0, l1);
- tcg_temp_free_i32(t0);
gen_load_gpr(cpu_gpr[rd], rs);
gen_set_label(l1);
}
@@ -9733,7 +9431,6 @@
gen_load_fpr32(ctx, t0, fs);
gen_store_fpr32(ctx, t0, fd);
gen_set_label(l1);
- tcg_temp_free_i32(t0);
}
static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc,
@@ -9752,11 +9449,9 @@
tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
tcg_gen_brcondi_i32(cond, t0, 0, l1);
- tcg_temp_free_i32(t0);
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
@@ -9784,14 +9479,13 @@
tcg_gen_brcondi_i32(cond, t0, 0, l2);
gen_load_fpr32h(ctx, t0, fs);
gen_store_fpr32h(ctx, t0, fd);
- tcg_temp_free_i32(t0);
gen_set_label(l2);
}
static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft,
int fs)
{
- TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 t1 = tcg_constant_i32(0);
TCGv_i32 fp0 = tcg_temp_new_i32();
TCGv_i32 fp1 = tcg_temp_new_i32();
TCGv_i32 fp2 = tcg_temp_new_i32();
@@ -9819,16 +9513,12 @@
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(t1);
}
static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft,
int fs)
{
- TCGv_i64 t1 = tcg_const_i64(0);
+ TCGv_i64 t1 = tcg_constant_i64(0);
TCGv_i64 fp0 = tcg_temp_new_i64();
TCGv_i64 fp1 = tcg_temp_new_i64();
TCGv_i64 fp2 = tcg_temp_new_i64();
@@ -9856,10 +9546,6 @@
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp2);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(t1);
}
static void gen_farith(DisasContext *ctx, enum fopcode op1,
@@ -9875,9 +9561,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SUB_S:
@@ -9888,9 +9572,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MUL_S:
@@ -9901,9 +9583,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_DIV_S:
@@ -9914,9 +9594,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SQRT_S:
@@ -9926,7 +9604,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_ABS_S:
@@ -9940,7 +9617,6 @@
gen_helper_float_abs_s(fp0, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MOV_S:
@@ -9949,7 +9625,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_NEG_S:
@@ -9963,7 +9638,6 @@
gen_helper_float_chs_s(fp0, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_ROUND_L_S:
@@ -9978,9 +9652,7 @@
} else {
gen_helper_float_round_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_TRUNC_L_S:
@@ -9995,9 +9667,7 @@
} else {
gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CEIL_L_S:
@@ -10012,9 +9682,7 @@
} else {
gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_FLOOR_L_S:
@@ -10029,9 +9697,7 @@
} else {
gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_ROUND_W_S:
@@ -10045,7 +9711,6 @@
gen_helper_float_round_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_TRUNC_W_S:
@@ -10059,7 +9724,6 @@
gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CEIL_W_S:
@@ -10073,7 +9737,6 @@
gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_FLOOR_W_S:
@@ -10087,7 +9750,6 @@
gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SEL_S:
@@ -10118,7 +9780,6 @@
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
gen_set_label(l1);
}
break;
@@ -10133,7 +9794,6 @@
fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
gen_set_label(l1);
}
}
@@ -10145,7 +9805,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_recip_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_RSQRT_S:
@@ -10155,7 +9814,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MADDF_S:
@@ -10169,9 +9827,6 @@
gen_load_fpr32(ctx, fp2, fd);
gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MSUBF_S:
@@ -10185,9 +9840,6 @@
gen_load_fpr32(ctx, fp2, fd);
gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_RINT_S:
@@ -10197,7 +9849,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_rint_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CLASS_S:
@@ -10207,7 +9858,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_class_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_MIN_S: /* OPC_RECIP2_S */
@@ -10220,9 +9870,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RECIP2_S */
check_cp1_64bitmode(ctx);
@@ -10233,9 +9880,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10249,9 +9894,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RECIP1_S */
check_cp1_64bitmode(ctx);
@@ -10261,7 +9903,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_recip1_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10274,8 +9915,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp1, fd);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RSQRT1_S */
check_cp1_64bitmode(ctx);
@@ -10285,7 +9924,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10298,8 +9936,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
gen_store_fpr32(ctx, fp1, fd);
- tcg_temp_free_i32(fp1);
- tcg_temp_free_i32(fp0);
} else {
/* OPC_RSQRT2_S */
check_cp1_64bitmode(ctx);
@@ -10310,9 +9946,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
}
break;
@@ -10324,9 +9958,7 @@
gen_load_fpr32(ctx, fp32, fs);
gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CVT_W_S:
@@ -10340,7 +9972,6 @@
gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CVT_L_S:
@@ -10355,9 +9986,7 @@
} else {
gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
}
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CVT_PS_S:
@@ -10370,10 +9999,7 @@
gen_load_fpr32(ctx, fp32_0, fs);
gen_load_fpr32(ctx, fp32_1, ft);
tcg_gen_concat_i32_i64(fp64, fp32_1, fp32_0);
- tcg_temp_free_i32(fp32_1);
- tcg_temp_free_i32(fp32_0);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CMP_F_S:
@@ -10408,9 +10034,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SUB_D:
@@ -10422,9 +10046,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MUL_D:
@@ -10436,9 +10058,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_DIV_D:
@@ -10450,9 +10070,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SQRT_D:
@@ -10463,7 +10081,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ABS_D:
@@ -10478,7 +10095,6 @@
gen_helper_float_abs_d(fp0, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MOV_D:
@@ -10488,7 +10104,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_NEG_D:
@@ -10503,7 +10118,6 @@
gen_helper_float_chs_d(fp0, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ROUND_L_D:
@@ -10518,7 +10132,6 @@
gen_helper_float_round_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_TRUNC_L_D:
@@ -10533,7 +10146,6 @@
gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CEIL_L_D:
@@ -10548,7 +10160,6 @@
gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_FLOOR_L_D:
@@ -10563,7 +10174,6 @@
gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ROUND_W_D:
@@ -10578,9 +10188,7 @@
} else {
gen_helper_float_round_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_TRUNC_W_D:
@@ -10595,9 +10203,7 @@
} else {
gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CEIL_W_D:
@@ -10612,9 +10218,7 @@
} else {
gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_FLOOR_W_D:
@@ -10629,9 +10233,7 @@
} else {
gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_SEL_D:
@@ -10662,7 +10264,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
break;
@@ -10677,7 +10278,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
}
@@ -10690,7 +10290,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_recip_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RSQRT_D:
@@ -10701,7 +10300,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MADDF_D:
@@ -10715,9 +10313,6 @@
gen_load_fpr64(ctx, fp2, fd);
gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MSUBF_D:
@@ -10731,9 +10326,6 @@
gen_load_fpr64(ctx, fp2, fd);
gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RINT_D:
@@ -10743,7 +10335,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rint_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CLASS_D:
@@ -10753,7 +10344,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_class_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MIN_D: /* OPC_RECIP2_D */
@@ -10765,8 +10355,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RECIP2_D */
check_cp1_64bitmode(ctx);
@@ -10777,9 +10365,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10792,8 +10378,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RECIP1_D */
check_cp1_64bitmode(ctx);
@@ -10803,7 +10387,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_recip1_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10816,8 +10399,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RSQRT1_D */
check_cp1_64bitmode(ctx);
@@ -10827,7 +10408,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10840,8 +10420,6 @@
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
- tcg_temp_free_i64(fp1);
- tcg_temp_free_i64(fp0);
} else {
/* OPC_RSQRT2_D */
check_cp1_64bitmode(ctx);
@@ -10852,9 +10430,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
}
break;
@@ -10889,9 +10465,7 @@
gen_load_fpr64(ctx, fp64, fs);
gen_helper_float_cvts_d(fp32, cpu_env, fp64);
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CVT_W_D:
@@ -10906,9 +10480,7 @@
} else {
gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
}
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CVT_L_D:
@@ -10923,7 +10495,6 @@
gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_S_W:
@@ -10933,7 +10504,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_cvts_w(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CVT_D_W:
@@ -10944,9 +10514,7 @@
gen_load_fpr32(ctx, fp32, fs);
gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
- tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
- tcg_temp_free_i64(fp64);
}
break;
case OPC_CVT_S_L:
@@ -10957,9 +10525,7 @@
gen_load_fpr64(ctx, fp64, fs);
gen_helper_float_cvts_l(fp32, cpu_env, fp64);
- tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
- tcg_temp_free_i32(fp32);
}
break;
case OPC_CVT_D_L:
@@ -10970,7 +10536,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_PS_PW:
@@ -10981,7 +10546,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_cvtps_pw(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ADD_PS:
@@ -10993,9 +10557,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SUB_PS:
@@ -11007,9 +10569,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MUL_PS:
@@ -11021,9 +10581,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_ABS_PS:
@@ -11034,7 +10592,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_abs_ps(fp0, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MOV_PS:
@@ -11044,7 +10601,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_NEG_PS:
@@ -11055,7 +10611,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_chs_ps(fp0, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MOVCF_PS:
@@ -11074,7 +10629,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
break;
@@ -11089,7 +10643,6 @@
fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
gen_set_label(l1);
}
}
@@ -11103,9 +10656,7 @@
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_MULR_PS:
@@ -11117,9 +10668,7 @@
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RECIP2_PS:
@@ -11131,9 +10680,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RECIP1_PS:
@@ -11144,7 +10691,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_recip1_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RSQRT1_PS:
@@ -11155,7 +10701,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_RSQRT2_PS:
@@ -11167,9 +10712,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_S_PU:
@@ -11180,7 +10723,6 @@
gen_load_fpr32h(ctx, fp0, fs);
gen_helper_float_cvts_pu(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_CVT_PW_PS:
@@ -11191,7 +10733,6 @@
gen_load_fpr64(ctx, fp0, fs);
gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_CVT_S_PL:
@@ -11202,7 +10743,6 @@
gen_load_fpr32(ctx, fp0, fs);
gen_helper_float_cvts_pl(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_PLL_PS:
@@ -11215,8 +10755,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_store_fpr32h(ctx, fp0, fd);
gen_store_fpr32(ctx, fp1, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_PLU_PS:
@@ -11229,8 +10767,6 @@
gen_load_fpr32h(ctx, fp1, ft);
gen_store_fpr32(ctx, fp1, fd);
gen_store_fpr32h(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_PUL_PS:
@@ -11243,8 +10779,6 @@
gen_load_fpr32(ctx, fp1, ft);
gen_store_fpr32(ctx, fp1, fd);
gen_store_fpr32h(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_PUU_PS:
@@ -11257,8 +10791,6 @@
gen_load_fpr32h(ctx, fp1, ft);
gen_store_fpr32(ctx, fp1, fd);
gen_store_fpr32h(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
}
break;
case OPC_CMP_F_PS:
@@ -11316,7 +10848,6 @@
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(ctx, fp0, fd);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_LDXC1:
@@ -11326,7 +10857,6 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_LUXC1:
@@ -11337,7 +10867,6 @@
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SWXC1:
@@ -11346,7 +10875,6 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
- tcg_temp_free_i32(fp0);
}
break;
case OPC_SDXC1:
@@ -11356,7 +10884,6 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_temp_free_i64(fp0);
}
break;
case OPC_SUXC1:
@@ -11366,11 +10893,9 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
- tcg_temp_free_i64(fp0);
}
break;
}
- tcg_temp_free(t0);
}
static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
@@ -11397,7 +10922,6 @@
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
- tcg_temp_free(t0);
if (cpu_is_bigendian(ctx)) {
gen_load_fpr32(ctx, fp, fs);
gen_load_fpr32h(ctx, fph, ft);
@@ -11410,8 +10934,6 @@
gen_store_fpr32h(ctx, fp, fd);
}
gen_set_label(l2);
- tcg_temp_free_i32(fp);
- tcg_temp_free_i32(fph);
}
break;
case OPC_MADD_S:
@@ -11425,10 +10947,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_madd_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_MADD_D:
@@ -11443,10 +10962,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_madd_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_MADD_PS:
@@ -11460,10 +10976,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_madd_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_MSUB_S:
@@ -11477,10 +10990,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_msub_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_MSUB_D:
@@ -11495,10 +11005,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_msub_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_MSUB_PS:
@@ -11512,10 +11019,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_msub_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMADD_S:
@@ -11529,10 +11033,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_nmadd_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_NMADD_D:
@@ -11547,10 +11048,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmadd_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMADD_PS:
@@ -11564,10 +11062,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmadd_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMSUB_S:
@@ -11581,10 +11076,7 @@
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
gen_helper_float_nmsub_s(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i32(fp0);
- tcg_temp_free_i32(fp1);
gen_store_fpr32(ctx, fp2, fd);
- tcg_temp_free_i32(fp2);
}
break;
case OPC_NMSUB_D:
@@ -11599,10 +11091,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmsub_d(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
case OPC_NMSUB_PS:
@@ -11616,10 +11105,7 @@
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
gen_helper_float_nmsub_ps(fp2, cpu_env, fp0, fp1, fp2);
- tcg_temp_free_i64(fp0);
- tcg_temp_free_i64(fp1);
gen_store_fpr64(ctx, fp2, fd);
- tcg_temp_free_i64(fp2);
}
break;
default:
@@ -11708,7 +11194,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
}
static inline void clear_branch_hflags(DisasContext *ctx)
@@ -11767,11 +11252,9 @@
tcg_gen_andi_tl(t0, btarget, 0x1);
tcg_gen_trunc_tl_i32(t1, t0);
- tcg_temp_free(t0);
tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16);
tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT);
tcg_gen_or_i32(hflags, hflags, t1);
- tcg_temp_free_i32(t1);
tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
} else {
@@ -11801,7 +11284,7 @@
"\n", ctx->base.pc_next);
#endif
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* Load needed operands and calculate btarget */
@@ -11855,13 +11338,12 @@
gen_load_gpr(tbase, rt);
gen_op_addr_add(ctx, btarget, tbase, toffset);
- tcg_temp_free(tbase);
}
break;
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
if (bcond_compute == 0) {
@@ -11882,7 +11364,7 @@
default:
MIPS_INVAL("Compact branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* Generating branch here as compact branches don't have delay slot */
@@ -11972,10 +11454,6 @@
/* OPC_BNVC */
tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs);
}
- tcg_temp_free(input_overflow);
- tcg_temp_free(t4);
- tcg_temp_free(t3);
- tcg_temp_free(t2);
} else if (rs < rt && rs == 0) {
/* OPC_BEQZALC, OPC_BNEZALC */
if (opc == OPC_BEQZALC) {
@@ -12005,7 +11483,7 @@
default:
MIPS_INVAL("Compact conditional branch/jump");
gen_reserved_instruction(ctx);
- goto out;
+ return;
}
/* Generating branch here as compact branches don't have delay slot */
@@ -12014,10 +11492,6 @@
ctx->hflags |= MIPS_HFLAG_FBNSLOT;
}
-
-out:
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
void gen_addiupc(DisasContext *ctx, int rx, int imm,
@@ -12037,19 +11511,15 @@
if (!is_64_bit) {
tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
}
-
- tcg_temp_free(t0);
}
static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base,
int16_t offset)
{
- TCGv_i32 t0 = tcg_const_i32(op);
+ TCGv_i32 t0 = tcg_constant_i32(op);
TCGv t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t1, base, offset);
gen_helper_cache(cpu_env, t1, t0);
- tcg_temp_free(t1);
- tcg_temp_free_i32(t0);
}
static inline bool is_uhi(DisasContext *ctx, int sdbbp_code)
@@ -12077,9 +11547,6 @@
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t1, rd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_sync(int stype)
@@ -12183,7 +11650,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -12394,19 +11860,17 @@
case OPC_PRECR_SRA_PH_W:
check_dsp_r2(ctx);
{
- TCGv_i32 sa_t = tcg_const_i32(v2);
+ TCGv_i32 sa_t = tcg_constant_i32(v2);
gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t,
cpu_gpr[ret]);
- tcg_temp_free_i32(sa_t);
break;
}
case OPC_PRECR_SRA_R_PH_W:
check_dsp_r2(ctx);
{
- TCGv_i32 sa_t = tcg_const_i32(v2);
+ TCGv_i32 sa_t = tcg_constant_i32(v2);
gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t,
cpu_gpr[ret]);
- tcg_temp_free_i32(sa_t);
break;
}
case OPC_PRECRQ_PH_W:
@@ -12593,17 +12057,15 @@
case OPC_PRECR_SRA_QH_PW:
check_dsp_r2(ctx);
{
- TCGv_i32 ret_t = tcg_const_i32(ret);
+ TCGv_i32 ret_t = tcg_constant_i32(ret);
gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t);
- tcg_temp_free_i32(ret_t);
break;
}
case OPC_PRECR_SRA_R_QH_PW:
check_dsp_r2(ctx);
{
- TCGv_i32 sa_v = tcg_const_i32(ret);
+ TCGv_i32 sa_v = tcg_constant_i32(ret);
gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v);
- tcg_temp_free_i32(sa_v);
break;
}
case OPC_PRECRQ_OB_QH:
@@ -12630,9 +12092,6 @@
break;
#endif
}
-
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc,
@@ -12872,10 +12331,6 @@
break;
#endif
}
-
- tcg_temp_free(t0);
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -13182,10 +12637,6 @@
break;
#endif
}
-
- tcg_temp_free_i32(t0);
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -13322,8 +12773,6 @@
break;
#endif
}
- tcg_temp_free(t0);
- tcg_temp_free(val_t);
}
static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx,
@@ -13506,10 +12955,6 @@
break;
#endif
}
-
- tcg_temp_free(t1);
- tcg_temp_free(v1_t);
- tcg_temp_free(v2_t);
}
static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx,
@@ -13597,7 +13042,6 @@
break;
#endif
}
- tcg_temp_free(t0);
}
static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
@@ -13814,10 +13258,6 @@
break;
#endif
}
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(v1_t);
}
/* End MIPSDSP functions. */
@@ -14668,9 +14108,6 @@
gen_load_gpr(t1, rs);
gen_helper_insv(cpu_gpr[rt], cpu_env, t1, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
break;
}
default: /* Invalid */
@@ -14940,9 +14377,6 @@
gen_load_gpr(t1, rs);
gen_helper_dinsv(cpu_gpr[rt], cpu_env, t1, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
break;
}
default: /* Invalid */
@@ -15169,8 +14603,6 @@
gen_load_gpr(t0, rt);
gen_load_gpr(t1, rs);
gen_helper_fork(t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
case OPC_YIELD:
@@ -15181,7 +14613,6 @@
gen_load_gpr(t0, rs);
gen_helper_yield(t0, cpu_env, t0);
gen_store_gpr(t0, rd);
- tcg_temp_free(t0);
}
break;
default:
@@ -15424,7 +14855,6 @@
gen_reserved_instruction(ctx);
break;
}
- tcg_temp_free(t0);
}
#endif /* !CONFIG_USER_ONLY */
break;
@@ -15872,7 +15302,6 @@
TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rs);
tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16);
- tcg_temp_free(t0);
}
#else
gen_reserved_instruction(ctx);
diff --git a/target/mips/tcg/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c
index 96f4834..a510da4 100644
--- a/target/mips/tcg/translate_addr_const.c
+++ b/target/mips/tcg/translate_addr_const.c
@@ -30,10 +30,6 @@
tcg_gen_shli_tl(t0, t0, sa + 1);
tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
-
- tcg_temp_free(t1);
- tcg_temp_free(t0);
-
return true;
}
@@ -54,8 +50,5 @@
gen_load_gpr(t1, rt);
tcg_gen_shli_tl(t0, t0, sa + 1);
tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
-
return true;
}
diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c
index 4e479c2..3a45a1b 100644
--- a/target/mips/tcg/tx79_translate.c
+++ b/target/mips/tcg/tx79_translate.c
@@ -138,10 +138,6 @@
gen_load_gpr_hi(ax, a->rs);
gen_load_gpr_hi(bx, a->rt);
gen_logic_i64(cpu_gpr_hi[a->rd], ax, bx);
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -247,8 +243,8 @@
return true;
}
- c0 = tcg_const_tl(0);
- c1 = tcg_const_tl(0xffffffff);
+ c0 = tcg_constant_tl(0);
+ c1 = tcg_constant_tl(0xffffffff);
ax = tcg_temp_new_i64();
bx = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
@@ -273,15 +269,6 @@
tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0);
tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], t2, wlen * i, wlen);
}
-
- tcg_temp_free(t2);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
- tcg_temp_free(bx);
- tcg_temp_free(ax);
- tcg_temp_free(c1);
- tcg_temp_free(c0);
-
return true;
}
@@ -362,10 +349,6 @@
tcg_gen_addi_i64(addr, addr, 8);
tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
gen_store_gpr_hi(t0, a->rt);
-
- tcg_temp_free(t0);
- tcg_temp_free(addr);
-
return true;
}
@@ -389,10 +372,6 @@
tcg_gen_addi_i64(addr, addr, 8);
gen_load_gpr_hi(t0, a->rt);
tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
-
- tcg_temp_free(addr);
- tcg_temp_free(t0);
-
return true;
}
@@ -458,11 +437,6 @@
gen_load_gpr_hi(t0, a->rs); /* a1 */
tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], a0, t0, 32, 32);
-
- tcg_temp_free(t0);
- tcg_temp_free(b0);
- tcg_temp_free(a0);
-
return true;
}
@@ -506,10 +480,6 @@
tcg_gen_shri_i64(bx, bx, wlen);
tcg_gen_shri_i64(ax, ax, wlen);
}
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -541,10 +511,6 @@
gen_load_gpr(ax, a->rs);
gen_load_gpr(bx, a->rt);
gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx);
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -564,10 +530,6 @@
gen_load_gpr_hi(ax, a->rs);
gen_load_gpr_hi(bx, a->rt);
gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx);
-
- tcg_temp_free(bx);
- tcg_temp_free(ax);
-
return true;
}
@@ -678,8 +640,5 @@
tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rt], ax, 0, 32);
tcg_gen_rotri_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], 32);
-
- tcg_temp_free(ax);
-
return true;
}
diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c
index a7d241e..804672f 100644
--- a/target/mips/tcg/vr54xx_translate.c
+++ b/target/mips/tcg/vr54xx_translate.c
@@ -49,10 +49,6 @@
gen_helper_mult_acc(t0, cpu_env, t0, t1);
gen_store_gpr(t0, a->rd);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
-
return true;
}
diff --git a/target/nios2/cpu-param.h b/target/nios2/cpu-param.h
index 177d720..767bba4 100644
--- a/target/nios2/cpu-param.h
+++ b/target/nios2/cpu-param.h
@@ -16,6 +16,5 @@
#else
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-#define NB_MMU_MODES 2
#endif
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index cff3082..bc5cbf8 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -23,7 +23,7 @@
#include "qapi/error.h"
#include "cpu.h"
#include "exec/log.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "hw/qdev-properties.h"
static void nios2_cpu_set_pc(CPUState *cs, vaddr value)
diff --git a/target/nios2/nios2-semi.c b/target/nios2/nios2-semi.c
index f76e858..3738774 100644
--- a/target/nios2/nios2-semi.c
+++ b/target/nios2/nios2-semi.c
@@ -23,7 +23,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/syscalls.h"
+#include "gdbstub/helpers.h"
#include "semihosting/syscalls.h"
#include "semihosting/softmmu-uaccess.h"
#include "qemu/log.h"
diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h
index 73be699..3f08207 100644
--- a/target/openrisc/cpu-param.h
+++ b/target/openrisc/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 13
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 3
#endif
diff --git a/target/openrisc/gdbstub.c b/target/openrisc/gdbstub.c
index 095bf76..d1074a0 100644
--- a/target/openrisc/gdbstub.c
+++ b/target/openrisc/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int openrisc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
index c31c6f1..3887812 100644
--- a/target/openrisc/interrupt.c
+++ b/target/openrisc/interrupt.c
@@ -21,7 +21,7 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#ifndef CONFIG_USER_ONLY
#include "hw/loader.h"
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
index 0b8afdb..603c267 100644
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -22,7 +22,7 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "hw/loader.h"
diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h
index ea377b7..0a0416e 100644
--- a/target/ppc/cpu-param.h
+++ b/target/ppc/cpu-param.h
@@ -32,6 +32,5 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 10
#endif
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index d62ffe8..0ce2e3c 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -20,7 +20,7 @@
#include "qemu/osdep.h"
#include "disas/dis-asm.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "kvm_ppc.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c
index 1a0b9ca..d2bc1d7 100644
--- a/target/ppc/gdbstub.c
+++ b/target/ppc/gdbstub.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "internal.h"
static int ppc_gdb_register_len_apple(int n)
diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc
index 42f2cd0..d900e13 100644
--- a/target/ppc/power8-pmu-regs.c.inc
+++ b/target/ppc/power8-pmu-regs.c.inc
@@ -177,7 +177,7 @@
void spr_read_PMC(DisasContext *ctx, int gprn, int sprn)
{
- TCGv_i32 t_sprn = tcg_const_i32(sprn);
+ TCGv_i32 t_sprn = tcg_constant_i32(sprn);
gen_icount_io_start(ctx);
gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn);
@@ -210,7 +210,7 @@
void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t_sprn = tcg_const_i32(sprn);
+ TCGv_i32 t_sprn = tcg_constant_i32(sprn);
gen_icount_io_start(ctx);
gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]);
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index df324fc..9d05357 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -263,8 +263,8 @@
* faulting instruction
*/
gen_update_nip(ctx, ctx->cia);
- t0 = tcg_const_i32(excp);
- t1 = tcg_const_i32(error);
+ t0 = tcg_constant_i32(excp);
+ t1 = tcg_constant_i32(error);
gen_helper_raise_exception_err(cpu_env, t0, t1);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -278,7 +278,7 @@
* faulting instruction
*/
gen_update_nip(ctx, ctx->cia);
- t0 = tcg_const_i32(excp);
+ t0 = tcg_constant_i32(excp);
gen_helper_raise_exception(cpu_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -289,7 +289,7 @@
TCGv_i32 t0;
gen_update_nip(ctx, nip);
- t0 = tcg_const_i32(excp);
+ t0 = tcg_constant_i32(excp);
gen_helper_raise_exception(cpu_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -386,7 +386,7 @@
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
- TCGv_i32 t0 = tcg_const_i32(sprn);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
gen_helper_load_dump_spr(cpu_env, t0);
#endif
}
@@ -400,7 +400,7 @@
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
- TCGv_i32 t0 = tcg_const_i32(sprn);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
gen_helper_store_dump_spr(cpu_env, t0);
#endif
}
@@ -672,25 +672,25 @@
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}
@@ -712,25 +712,25 @@
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}
@@ -1040,13 +1040,15 @@
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(sprn);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
}
+
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
}
+
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
@@ -1080,9 +1082,9 @@
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
int bit, int sprn, int cause)
{
- TCGv_i32 t1 = tcg_const_i32(bit);
- TCGv_i32 t2 = tcg_const_i32(sprn);
- TCGv_i32 t3 = tcg_const_i32(cause);
+ TCGv_i32 t1 = tcg_constant_i32(bit);
+ TCGv_i32 t2 = tcg_constant_i32(sprn);
+ TCGv_i32 t3 = tcg_constant_i32(cause);
gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
}
@@ -1090,9 +1092,9 @@
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
int bit, int sprn, int cause)
{
- TCGv_i32 t1 = tcg_const_i32(bit);
- TCGv_i32 t2 = tcg_const_i32(sprn);
- TCGv_i32 t3 = tcg_const_i32(cause);
+ TCGv_i32 t1 = tcg_constant_i32(bit);
+ TCGv_i32 t2 = tcg_constant_i32(sprn);
+ TCGv_i32 t3 = tcg_constant_i32(cause);
gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
}
@@ -1388,7 +1390,7 @@
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
- TCGv t0 = tcg_const_tl(arg1);
+ TCGv t0 = tcg_constant_tl(arg1);
gen_op_cmp(arg0, t0, s, crf);
}
@@ -1409,7 +1411,7 @@
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
- TCGv t0 = tcg_const_tl(arg1);
+ TCGv t0 = tcg_constant_tl(arg1);
gen_op_cmp32(arg0, t0, s, crf);
}
@@ -1476,7 +1478,7 @@
tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
tcg_gen_andi_tl(t0, t0, mask);
- zr = tcg_const_tl(0);
+ zr = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
cpu_gpr[rB(ctx->opcode)]);
@@ -1568,7 +1570,7 @@
tcg_gen_mov_tl(ca32, ca);
}
} else {
- TCGv zero = tcg_const_tl(0);
+ TCGv zero = tcg_constant_tl(0);
if (add_ca) {
tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
@@ -1609,7 +1611,7 @@
add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
- TCGv t0 = tcg_const_tl(const_val); \
+ TCGv t0 = tcg_constant_tl(const_val); \
gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
cpu_gpr[rA(ctx->opcode)], t0, \
ca, glue(ca, 32), \
@@ -1636,7 +1638,7 @@
/* addic addic.*/
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
- TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
}
@@ -1709,7 +1711,7 @@
#define GEN_DIVE(name, hlpr, compute_ov) \
static void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i32 t0 = tcg_const_i32(compute_ov); \
+ TCGv_i32 t0 = tcg_constant_i32(compute_ov); \
gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
@@ -1802,8 +1804,8 @@
tcg_gen_rem_i32(t3, t0, t1);
tcg_gen_ext_i32_tl(ret, t3);
} else {
- TCGv_i32 t2 = tcg_const_i32(1);
- TCGv_i32 t3 = tcg_const_i32(0);
+ TCGv_i32 t2 = tcg_constant_i32(1);
+ TCGv_i32 t3 = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
tcg_gen_remu_i32(t3, t0, t1);
tcg_gen_extu_i32_tl(ret, t3);
@@ -1842,8 +1844,8 @@
tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_i64(ret, t0, t1);
} else {
- TCGv_i64 t2 = tcg_const_i64(1);
- TCGv_i64 t3 = tcg_const_i64(0);
+ TCGv_i64 t2 = tcg_constant_i64(1);
+ TCGv_i64 t3 = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
tcg_gen_remu_i64(ret, t0, t1);
}
@@ -2038,7 +2040,7 @@
} else if (add_ca) {
TCGv zero, inv1 = tcg_temp_new();
tcg_gen_not_tl(inv1, arg1);
- zero = tcg_const_tl(0);
+ zero = tcg_constant_tl(0);
tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
@@ -2083,7 +2085,7 @@
add_ca, compute_ca, compute_ov) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
- TCGv t0 = tcg_const_tl(const_val); \
+ TCGv t0 = tcg_constant_tl(const_val); \
gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
cpu_gpr[rA(ctx->opcode)], t0, \
add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
@@ -2107,7 +2109,7 @@
/* subfic */
static void gen_subfic(DisasContext *ctx)
{
- TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
c, 0, 1, 0, 0);
}
@@ -2115,7 +2117,7 @@
/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
- TCGv zero = tcg_const_tl(0);
+ TCGv zero = tcg_constant_tl(0);
gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
zero, 0, 0, compute_ov, Rc(ctx->opcode));
}
@@ -2214,7 +2216,7 @@
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(0);
+ TCGv_i32 t0 = tcg_constant_i32(0);
tcg_gen_st_i32(t0, cpu_env,
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
@@ -3256,7 +3258,7 @@
}
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
- t1 = tcg_const_i32(rD(ctx->opcode));
+ t1 = tcg_constant_i32(rD(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
gen_helper_lmw(cpu_env, t0, t1);
}
@@ -3273,7 +3275,7 @@
}
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
- t1 = tcg_const_i32(rS(ctx->opcode));
+ t1 = tcg_constant_i32(rS(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
gen_helper_stmw(cpu_env, t0, t1);
}
@@ -3311,8 +3313,8 @@
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_register(ctx, t0);
- t1 = tcg_const_i32(nb);
- t2 = tcg_const_i32(start);
+ t1 = tcg_constant_i32(nb);
+ t2 = tcg_constant_i32(start);
gen_helper_lsw(cpu_env, t0, t1, t2);
}
@@ -3329,9 +3331,9 @@
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- t1 = tcg_const_i32(rD(ctx->opcode));
- t2 = tcg_const_i32(rA(ctx->opcode));
- t3 = tcg_const_i32(rB(ctx->opcode));
+ t1 = tcg_constant_i32(rD(ctx->opcode));
+ t2 = tcg_constant_i32(rA(ctx->opcode));
+ t3 = tcg_constant_i32(rB(ctx->opcode));
gen_helper_lswx(cpu_env, t0, t1, t2, t3);
}
@@ -3352,8 +3354,8 @@
if (nb == 0) {
nb = 32;
}
- t1 = tcg_const_i32(nb);
- t2 = tcg_const_i32(rS(ctx->opcode));
+ t1 = tcg_constant_i32(nb);
+ t2 = tcg_constant_i32(rS(ctx->opcode));
gen_helper_stsw(cpu_env, t0, t1, t2);
}
@@ -3373,7 +3375,7 @@
t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, cpu_xer);
tcg_gen_andi_i32(t1, t1, 0x7F);
- t2 = tcg_const_i32(rS(ctx->opcode));
+ t2 = tcg_constant_i32(rS(ctx->opcode));
gen_helper_stsw(cpu_env, t0, t1, t2);
}
@@ -3943,7 +3945,7 @@
* to occur.
*/
if (wc == 0) {
- TCGv_i32 t0 = tcg_const_i32(1);
+ TCGv_i32 t0 = tcg_constant_i32(1);
tcg_gen_st_i32(t0, cpu_env,
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
/* Stop translation, as the CPU is supposed to sleep from now */
@@ -3989,7 +3991,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_DOZE);
+ t = tcg_constant_i32(PPC_PM_DOZE);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4004,7 +4006,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_NAP);
+ t = tcg_constant_i32(PPC_PM_NAP);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4019,7 +4021,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_STOP);
+ t = tcg_constant_i32(PPC_PM_STOP);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4034,7 +4036,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_SLEEP);
+ t = tcg_constant_i32(PPC_PM_SLEEP);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4049,7 +4051,7 @@
TCGv_i32 t;
CHK_HV(ctx);
- t = tcg_const_i32(PPC_PM_RVWINKLE);
+ t = tcg_constant_i32(PPC_PM_RVWINKLE);
gen_helper_pminsn(cpu_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4506,7 +4508,7 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
}
@@ -4520,8 +4522,8 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_tl(SIMM(ctx->opcode));
- t1 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_tl(SIMM(ctx->opcode));
+ t1 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
@@ -4534,7 +4536,7 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
}
@@ -4548,8 +4550,8 @@
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_tl(SIMM(ctx->opcode));
- t1 = tcg_const_i32(TO(ctx->opcode));
+ t0 = tcg_constant_tl(SIMM(ctx->opcode));
+ t1 = tcg_constant_i32(TO(ctx->opcode));
gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#endif
@@ -5026,7 +5028,7 @@
gen_set_access_type(ctx, ACCESS_CACHE);
tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
}
@@ -5039,7 +5041,7 @@
gen_set_access_type(ctx, ACCESS_CACHE);
tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
}
@@ -5114,7 +5116,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5143,7 +5145,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5175,7 +5177,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5204,7 +5206,7 @@
TCGv t0;
CHK_SV(ctx);
- t0 = tcg_const_tl(SR(ctx->opcode));
+ t0 = tcg_constant_tl(SR(ctx->opcode));
gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5558,7 +5560,7 @@
TCGv dcrn;
CHK_SV(ctx);
- dcrn = tcg_const_tl(SPR(ctx->opcode));
+ dcrn = tcg_constant_tl(SPR(ctx->opcode));
gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5572,7 +5574,7 @@
TCGv dcrn;
CHK_SV(ctx);
- dcrn = tcg_const_tl(SPR(ctx->opcode));
+ dcrn = tcg_constant_tl(SPR(ctx->opcode));
gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5793,7 +5795,7 @@
case 1:
case 2:
{
- TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
t0, cpu_gpr[rA(ctx->opcode)]);
}
@@ -5839,7 +5841,7 @@
case 1:
case 2:
{
- TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
}
@@ -5875,12 +5877,10 @@
CHK_SV(ctx);
if (rA(ctx->opcode)) {
t0 = tcg_temp_new();
- tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]);
+ tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
} else {
- t0 = tcg_const_tl(0);
+ t0 = cpu_gpr[rB(ctx->opcode)];
}
-
- tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]);
gen_helper_booke206_tlbsx(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5983,7 +5983,7 @@
/* dlmzb */
static void gen_dlmzb(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
+ TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
}
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
index 20ea484..02d86b7 100644
--- a/target/ppc/translate/fixedpoint-impl.c.inc
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -484,33 +484,35 @@
static bool trans_ADDG6S(DisasContext *ctx, arg_X *a)
{
- const uint64_t carry_bits = 0x1111111111111111ULL;
- TCGv t0, t1, carry, zero = tcg_constant_tl(0);
+ const target_ulong carry_bits = (target_ulong)-1 / 0xf;
+ TCGv in1, in2, carryl, carryh, tmp;
+ TCGv zero = tcg_constant_tl(0);
REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
- t0 = tcg_temp_new();
- t1 = tcg_const_tl(0);
- carry = tcg_const_tl(0);
+ in1 = cpu_gpr[a->ra];
+ in2 = cpu_gpr[a->rb];
+ tmp = tcg_temp_new();
+ carryl = tcg_temp_new();
+ carryh = tcg_temp_new();
- for (int i = 0; i < 16; i++) {
- tcg_gen_shri_tl(t0, cpu_gpr[a->ra], i * 4);
- tcg_gen_andi_tl(t0, t0, 0xf);
- tcg_gen_add_tl(t1, t1, t0);
+ /* Addition with carry. */
+ tcg_gen_add2_tl(carryl, carryh, in1, zero, in2, zero);
+ /* Addition without carry. */
+ tcg_gen_xor_tl(tmp, in1, in2);
+ /* Difference between the two is carry in to each bit. */
+ tcg_gen_xor_tl(carryl, carryl, tmp);
- tcg_gen_shri_tl(t0, cpu_gpr[a->rb], i * 4);
- tcg_gen_andi_tl(t0, t0, 0xf);
- tcg_gen_add_tl(t1, t1, t0);
+ /*
+ * The carry-out that we're looking for is the carry-in to
+ * the next nibble. Shift the double-word down one nibble,
+ * which puts all of the bits back into one word.
+ */
+ tcg_gen_extract2_tl(carryl, carryl, carryh, 4);
- tcg_gen_andi_tl(t1, t1, 0x10);
- tcg_gen_setcond_tl(TCG_COND_NE, t1, t1, zero);
-
- tcg_gen_shli_tl(t0, t1, i * 4);
- tcg_gen_or_tl(carry, carry, t0);
- }
-
- tcg_gen_xori_tl(carry, carry, (target_long)carry_bits);
- tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6);
+ /* Invert, isolate the carry bits, and produce 6's. */
+ tcg_gen_andc_tl(carryl, tcg_constant_tl(carry_bits), carryl);
+ tcg_gen_muli_tl(cpu_gpr[a->rt], carryl, 6);
return true;
}
diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc
index d5d88e7..57d8437 100644
--- a/target/ppc/translate/fp-impl.c.inc
+++ b/target/ppc/translate/fp-impl.c.inc
@@ -348,7 +348,7 @@
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
+ crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
gen_helper_fcmpo(cpu_env, t0, t1, crf);
@@ -368,7 +368,7 @@
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
+ crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
gen_helper_fcmpu(cpu_env, t0, t1, crf);
@@ -541,7 +541,7 @@
tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
~((0xF << shift) & FP_EX_CLEAR_BITS));
/* FEX and VX need to be updated, so don't set fpscr directly */
- tmask = tcg_const_i32(1 << nibble);
+ tmask = tcg_constant_i32(1 << nibble);
gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
}
@@ -681,9 +681,7 @@
crb = 31 - crbD(ctx->opcode);
gen_reset_fpstatus();
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
- TCGv_i32 t0;
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_clrbit(cpu_env, t0);
+ gen_helper_fpscr_clrbit(cpu_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
@@ -703,9 +701,7 @@
crb = 31 - crbD(ctx->opcode);
/* XXX: we pretend we can only do IEEE floating-point computations */
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
- TCGv_i32 t0;
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_setbit(cpu_env, t0);
+ gen_helper_fpscr_setbit(cpu_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
@@ -733,10 +729,12 @@
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
- if (l) {
- t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
+ if (!l) {
+ t0 = tcg_constant_i32(flm << (w * 8));
+ } else if (ctx->insns_flags2 & PPC2_ISA205) {
+ t0 = tcg_constant_i32(0xffff);
} else {
- t0 = tcg_const_i32(flm << (w * 8));
+ t0 = tcg_constant_i32(0xff);
}
t1 = tcg_temp_new_i64();
get_fpr(t1, rB(ctx->opcode));
@@ -767,8 +765,8 @@
return;
}
sh = (8 * w) + 7 - bf;
- t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
- t1 = tcg_const_i32(1 << sh);
+ t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
+ t1 = tcg_constant_i32(1 << sh);
gen_helper_store_fpscr(cpu_env, t0, t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 05ba9c9..112233b 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -171,53 +171,56 @@
gen_helper_mtvscr(cpu_env, val);
}
+static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ TCGv_i64 t2;
+ TCGv_i64 avr;
+ TCGv_i64 ten, z;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ avr = tcg_temp_new_i64();
+ ten = tcg_constant_i64(10);
+ z = tcg_constant_i64(0);
+
+ if (add_cin) {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ get_avr64(avr, rB(ctx->opcode), false);
+ tcg_gen_andi_i64(t2, avr, 0xF);
+ tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(avr, t2, avr, ten);
+ set_avr64(rD(ctx->opcode), avr, false);
+ }
+
+ if (ret_carry) {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ set_avr64(rD(ctx->opcode), z, true);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mul_i64(t0, avr, ten);
+ tcg_gen_add_i64(avr, t0, t2);
+ set_avr64(rD(ctx->opcode), avr, true);
+ }
+}
+
#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_i64 t0; \
- TCGv_i64 t1; \
- TCGv_i64 t2; \
- TCGv_i64 avr; \
- TCGv_i64 ten, z; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- \
- t0 = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- t2 = tcg_temp_new_i64(); \
- avr = tcg_temp_new_i64(); \
- ten = tcg_const_i64(10); \
- z = tcg_const_i64(0); \
- \
- if (add_cin) { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- get_avr64(avr, rB(ctx->opcode), false); \
- tcg_gen_andi_i64(t2, avr, 0xF); \
- tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(avr, t2, avr, ten); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } \
- \
- if (ret_carry) { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- set_avr64(rD(ctx->opcode), z, true); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mul_i64(t0, avr, ten); \
- tcg_gen_add_i64(avr, t0, t2); \
- set_avr64(rD(ctx->opcode), avr, true); \
- } \
-} \
+ static void glue(gen_, name)(DisasContext *ctx) \
+ { gen_vx_vmul10(ctx, add_cin, ret_carry); }
GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
@@ -903,7 +906,6 @@
hi = tcg_temp_new_i64();
lo = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- t1 = tcg_const_i64(0);
get_avr64(lo, a->vra, false);
get_avr64(hi, a->vra, true);
@@ -914,7 +916,10 @@
if (right) {
tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
if (alg) {
+ t1 = tcg_temp_new_i64();
tcg_gen_sari_i64(t1, lo, 63);
+ } else {
+ t1 = zero;
}
tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
} else {
@@ -1619,7 +1624,7 @@
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
- uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ uimm = tcg_constant_i32(UIMM5(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(cpu_env, rd, rb, uimm); \
@@ -1960,7 +1965,7 @@
ra = gen_avr_ptr(rA(ctx->opcode));
rb = gen_avr_ptr(rB(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
- sh = tcg_const_i32(VSH(ctx->opcode));
+ sh = tcg_constant_i32(VSH(ctx->opcode));
gen_helper_vsldoi(rd, ra, rb, sh);
}
@@ -2231,24 +2236,25 @@
static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
{
- TCGv_i64 rt, vrb, mask;
- rt = tcg_const_i64(0);
- vrb = tcg_temp_new_i64();
+ TCGv_i64 r[2], mask;
+
+ r[0] = tcg_temp_new_i64();
+ r[1] = tcg_temp_new_i64();
mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
for (int i = 0; i < 2; i++) {
- get_avr64(vrb, a->vrb, i);
+ get_avr64(r[i], a->vrb, i);
if (a->mp) {
- tcg_gen_and_i64(vrb, mask, vrb);
+ tcg_gen_and_i64(r[i], mask, r[i]);
} else {
- tcg_gen_andc_i64(vrb, mask, vrb);
+ tcg_gen_andc_i64(r[i], mask, r[i]);
}
- tcg_gen_ctpop_i64(vrb, vrb);
- tcg_gen_add_i64(rt, rt, vrb);
+ tcg_gen_ctpop_i64(r[i], r[i]);
}
- tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
- tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);
+ tcg_gen_add_i64(r[0], r[0], r[1]);
+ tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece);
+ tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]);
return true;
}
@@ -2569,7 +2575,7 @@
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
}
@@ -2588,7 +2594,7 @@
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, rb, ps); \
}
@@ -2720,7 +2726,7 @@
} \
ra = gen_avr_ptr(rA(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- st_six = tcg_const_i32(rB(ctx->opcode)); \
+ st_six = tcg_constant_i32(rB(ctx->opcode)); \
gen_helper_##op(rd, ra, st_six); \
}
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 6e63403..0f5b005 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -154,7 +154,7 @@
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
TCGv_i64 inh, TCGv_i64 inl)
{
- TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
+ TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF);
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
@@ -825,7 +825,7 @@
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
REQUIRE_VSX(ctx);
- ro = tcg_const_i32(a->rc);
+ ro = tcg_constant_i32(a->rc);
xt = gen_avr_ptr(a->rt);
xb = gen_avr_ptr(a->rb);
@@ -860,7 +860,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
gen_helper_##name(cpu_env, opc); \
}
@@ -900,7 +900,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
gen_helper_##name(cpu_env, opc, xa, xb); \
@@ -915,7 +915,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
gen_helper_##name(cpu_env, opc, xb); \
}
@@ -929,7 +929,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
@@ -945,7 +945,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
gen_helper_##name(cpu_env, opc, xt, xb); \
@@ -960,7 +960,7 @@
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
gen_helper_##name(cpu_env, opc, xa, xb); \
@@ -1994,8 +1994,8 @@
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(2047);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(2047);
get_cpu_vsr(t1, xB(ctx->opcode), true);
tcg_gen_extract_i64(exp, t1, 52, 11);
@@ -2026,8 +2026,8 @@
get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(32767);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(32767);
tcg_gen_extract_i64(exp, xbh, 48, 15);
tcg_gen_movi_i64(t0, 0x0001000000000000);
@@ -2193,8 +2193,8 @@
get_cpu_vsr(xbl, xB(ctx->opcode), false);
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(2047);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(2047);
tcg_gen_extract_i64(exp, xbh, 52, 11);
tcg_gen_movi_i64(t0, 0x0010000000000000);
@@ -2449,7 +2449,8 @@
TCGv_i64 conj, disj;
conj = tcg_temp_new_i64();
- disj = tcg_const_i64(0);
+ disj = tcg_temp_new_i64();
+ tcg_gen_movi_i64(disj, 0);
/* Iterate over set bits from the least to the most significant bit */
while (imm) {
@@ -2492,8 +2493,9 @@
int bit;
TCGv_vec disj, conj;
- disj = tcg_const_zeros_vec_matching(t);
conj = tcg_temp_new_vec_matching(t);
+ disj = tcg_temp_new_vec_matching(t);
+ tcg_gen_dupi_vec(vece, disj, 0);
/* Iterate over set bits from the least to the most significant bit */
while (imm) {
@@ -2546,7 +2548,7 @@
/* Equivalent functions that can be implemented with a single gen_gvec */
switch (a->imm) {
- case 0b00000000: /* true */
+ case 0b00000000: /* false */
set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
break;
diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h
index ebaf26d..b2a9396 100644
--- a/target/riscv/cpu-param.h
+++ b/target/riscv/cpu-param.h
@@ -27,6 +27,5 @@
* - S mode HLV/HLVX/HSV 0b101
* - M mode HLV/HLVX/HSV 0b111
*/
-#define NB_MMU_MODES 8
#endif
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index ab56663..d522efc 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -25,6 +25,7 @@
#include "time_helper.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
#include "sysemu/cpu-timers.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index 6048541..840d1ec 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -18,6 +18,7 @@
#include "qemu/osdep.h"
#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "cpu.h"
struct TypeSize {
diff --git a/target/rx/cpu-param.h b/target/rx/cpu-param.h
index b156ad1..521d669 100644
--- a/target/rx/cpu-param.h
+++ b/target/rx/cpu-param.h
@@ -25,6 +25,4 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 1
-
#endif
diff --git a/target/rx/gdbstub.c b/target/rx/gdbstub.c
index 7eb2059..d7e0e66 100644
--- a/target/rx/gdbstub.c
+++ b/target/rx/gdbstub.c
@@ -17,7 +17,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/rx/translate.c b/target/rx/translate.c
index 6624414..70fad98 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -456,7 +456,7 @@
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
TCGv imm, mem;
- imm = tcg_const_i32(a->imm);
+ imm = tcg_constant_i32(a->imm);
mem = tcg_temp_new();
tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
rx_gen_st(a->sz, imm, mem);
@@ -729,8 +729,8 @@
{
TCGv z;
TCGv _imm;
- z = tcg_const_i32(0);
- _imm = tcg_const_i32(imm);
+ z = tcg_constant_i32(0);
+ _imm = tcg_constant_i32(imm);
tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
_imm, cpu_regs[rd]);
}
@@ -815,7 +815,7 @@
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
- TCGv imm = tcg_const_i32(src2);
+ TCGv imm = tcg_constant_i32(src2);
opr(cpu_regs[dst], cpu_regs[src], imm);
}
@@ -967,14 +967,13 @@
/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
- TCGv z;
- z = tcg_const_i32(0);
+ TCGv z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
- tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
- tcg_gen_xor_i32(z, arg1, arg2);
- tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
+ tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
+ tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_mov_i32(ret, cpu_psw_s);
}
@@ -1006,13 +1005,12 @@
/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
- TCGv z;
- z = tcg_const_i32(0);
+ TCGv z = tcg_constant_i32(0);
tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
- tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
- tcg_gen_xor_i32(z, arg1, arg2);
- tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
+ tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
+ tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_mov_i32(ret, cpu_psw_s);
}
@@ -1042,23 +1040,23 @@
/* ret = arg1 - arg2 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
- TCGv temp;
tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
- tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
- temp = tcg_temp_new_i32();
- tcg_gen_xor_i32(temp, arg1, arg2);
- tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
+ tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
+ tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
/* CMP not required return */
if (ret) {
tcg_gen_mov_i32(ret, cpu_psw_s);
}
}
+
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
rx_sub(NULL, arg1, arg2);
}
+
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
@@ -1126,21 +1124,11 @@
return true;
}
-static void rx_abs(TCGv ret, TCGv arg1)
-{
- TCGv neg;
- TCGv zero;
- neg = tcg_temp_new();
- zero = tcg_const_i32(0);
- tcg_gen_neg_i32(neg, arg1);
- tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
-}
-
/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
- rx_gen_op_rr(rx_abs, a->rd, a->rs);
+ rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs);
return true;
}
@@ -1200,7 +1188,7 @@
/* emul #imm, rd */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
- TCGv imm = tcg_const_i32(a->imm);
+ TCGv imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1227,7 +1215,7 @@
/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
- TCGv imm = tcg_const_i32(a->imm);
+ TCGv imm = tcg_constant_i32(a->imm);
if (a->rd > 14) {
qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
}
@@ -1325,10 +1313,10 @@
done = gen_new_label();
/* if (cpu_regs[a->rs]) { */
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
- count = tcg_const_i32(32);
+ count = tcg_temp_new();
tmp = tcg_temp_new();
tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
- tcg_gen_sub_i32(count, count, tmp);
+ tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
@@ -1597,7 +1585,7 @@
static inline void rx_save_pc(DisasContext *ctx)
{
- TCGv pc = tcg_const_i32(ctx->base.pc_next);
+ TCGv pc = tcg_constant_i32(ctx->base.pc_next);
push(pc);
}
@@ -1680,7 +1668,7 @@
#define STRING(op) \
do { \
- TCGv size = tcg_const_i32(a->sz); \
+ TCGv size = tcg_constant_i32(a->sz); \
gen_helper_##op(cpu_env, size); \
} while (0)
@@ -1811,7 +1799,7 @@
/* racw #imm */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
- TCGv imm = tcg_const_i32(a->imm + 1);
+ TCGv imm = tcg_constant_i32(a->imm + 1);
gen_helper_racw(cpu_env, imm);
return true;
}
@@ -1821,7 +1809,7 @@
{
TCGv tmp, z;
tmp = tcg_temp_new();
- z = tcg_const_i32(0);
+ z = tcg_constant_i32(0);
/* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
/* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
@@ -1843,7 +1831,7 @@
static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
cat3(arg_, name, _ir) * a) \
{ \
- TCGv imm = tcg_const_i32(li(ctx, 0)); \
+ TCGv imm = tcg_constant_i32(li(ctx, 0)); \
gen_helper_##op(cpu_regs[a->rd], cpu_env, \
cpu_regs[a->rd], imm); \
return true; \
@@ -1877,7 +1865,7 @@
/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
- TCGv imm = tcg_const_i32(li(ctx, 0));
+ TCGv imm = tcg_constant_i32(li(ctx, 0));
gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
return true;
}
@@ -1974,7 +1962,7 @@
{ \
TCGv mask, mem, addr; \
mem = tcg_temp_new(); \
- mask = tcg_const_i32(1 << a->imm); \
+ mask = tcg_constant_i32(1 << a->imm); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
cat3(rx_, op, m)(addr, mask); \
return true; \
@@ -1983,7 +1971,7 @@
cat3(arg_, name, _ir) * a) \
{ \
TCGv mask; \
- mask = tcg_const_i32(1 << a->imm); \
+ mask = tcg_constant_i32(1 << a->imm); \
cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
return true; \
} \
@@ -1991,10 +1979,10 @@
cat3(arg_, name, _rr) * a) \
{ \
TCGv mask, b; \
- mask = tcg_const_i32(1); \
+ mask = tcg_temp_new(); \
b = tcg_temp_new(); \
tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
- tcg_gen_shl_i32(mask, mask, b); \
+ tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
return true; \
} \
@@ -2002,10 +1990,10 @@
cat3(arg_, name, _rm) * a) \
{ \
TCGv mask, mem, addr, b; \
- mask = tcg_const_i32(1); \
+ mask = tcg_temp_new(); \
b = tcg_temp_new(); \
tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
- tcg_gen_shl_i32(mask, mask, b); \
+ tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
mem = tcg_temp_new(); \
addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
cat3(rx_, op, m)(addr, mask); \
@@ -2128,7 +2116,7 @@
{
TCGv imm;
- imm = tcg_const_i32(a->imm);
+ imm = tcg_constant_i32(a->imm);
move_to_cr(ctx, imm, a->cr);
return true;
}
@@ -2190,7 +2178,7 @@
TCGv vec;
tcg_debug_assert(a->imm < 0x100);
- vec = tcg_const_i32(a->imm);
+ vec = tcg_constant_i32(a->imm);
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
gen_helper_rxint(cpu_env, vec);
ctx->base.is_jmp = DISAS_NORETURN;
diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h
index bf951a0..84ca086 100644
--- a/target/s390x/cpu-param.h
+++ b/target/s390x/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
-#define NB_MMU_MODES 4
#endif
diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c
index 948e4bd..97d6c76 100644
--- a/target/s390x/cpu-sysemu.c
+++ b/target/s390x/cpu-sysemu.c
@@ -21,6 +21,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "cpu.h"
#include "s390x-internal.h"
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index b10a854..40fdeaa 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -41,6 +41,26 @@
#define CR0_RESET 0xE0UL
#define CR14_RESET 0xC2000000UL;
+#ifndef CONFIG_USER_ONLY
+static bool is_early_exception_psw(uint64_t mask, uint64_t addr)
+{
+ if (mask & PSW_MASK_RESERVED) {
+ return true;
+ }
+
+ switch (mask & (PSW_MASK_32 | PSW_MASK_64)) {
+ case 0:
+ return addr & ~0xffffffULL;
+ case PSW_MASK_32:
+ return addr & ~0x7fffffffULL;
+ case PSW_MASK_32 | PSW_MASK_64:
+ return false;
+ default: /* PSW_MASK_64 */
+ return true;
+ }
+}
+#endif
+
void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
@@ -57,6 +77,12 @@
env->cc_op = (mask >> 44) & 3;
#ifndef CONFIG_USER_ONLY
+ if (is_early_exception_psw(mask, addr)) {
+ env->int_pgm_ilen = 0;
+ trigger_pgm_exception(env, PGM_SPECIFICATION);
+ return;
+ }
+
if ((old_mask ^ mask) & PSW_MASK_PER) {
s390_cpu_recompute_watchpoints(env_cpu(env));
}
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 7d6d013..c47e7ad 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -29,6 +29,7 @@
#include "cpu_models.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
+#include "tcg/tcg_s390x.h"
#define ELF_MACHINE_UNAME "S390X"
@@ -87,6 +88,7 @@
uint64_t cc_vr;
uint64_t ex_value;
+ uint64_t ex_target;
uint64_t __excp_addr;
uint64_t psa;
@@ -292,6 +294,7 @@
#define PSW_MASK_32 0x0000000080000000ULL
#define PSW_MASK_SHORT_ADDR 0x000000007fffffffULL
#define PSW_MASK_SHORT_CTRL 0xffffffff80000000ULL
+#define PSW_MASK_RESERVED 0xb80800fe7fffffffULL
#undef PSW_ASC_PRIMARY
#undef PSW_ASC_ACCREG
@@ -381,6 +384,14 @@
static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
+ if (env->psw.addr & 1) {
+ /*
+ * Instructions must be at even addresses.
+ * This needs to be checked before address translation.
+ */
+ env->int_pgm_ilen = 2; /* see s390_cpu_tlb_fill() */
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, 0);
+ }
*pc = env->psw.addr;
*cs_base = env->ex_value;
*flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW;
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 065ec6d..457b5cb 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -17,6 +17,7 @@
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qapi/visitor.h"
#include "qemu/module.h"
#include "qemu/hw-version.h"
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
index 76b01dc..e5f0df1 100644
--- a/target/s390x/diag.c
+++ b/target/s390x/diag.c
@@ -22,6 +22,8 @@
#include "hw/s390x/pv.h"
#include "sysemu/kvm.h"
#include "kvm/kvm_s390x.h"
+#include "qemu/error-report.h"
+
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c
index a5d69d0..0cb6939 100644
--- a/target/s390x/gdbstub.c
+++ b/target/s390x/gdbstub.c
@@ -23,6 +23,7 @@
#include "s390x-internal.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/bitops.h"
#include "sysemu/hw_accel.h"
#include "sysemu/tcg.h"
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 473c8e5..2b363aa 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -21,7 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/timer.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/pv.h"
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index bc767f0..228aa9f 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -85,8 +85,8 @@
/*
* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
- * this is only for the atomic operations, for which we want to raise a
- * specification exception.
+ * this is only for the atomic and relative long operations, for which we want
+ * to raise a specification exception.
*/
static G_NORETURN
void do_unaligned_access(CPUState *cs, uintptr_t retaddr)
@@ -212,7 +212,8 @@
LowCore *lowcore;
int ilen = env->int_pgm_ilen;
- assert(ilen == 2 || ilen == 4 || ilen == 6);
+ assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
+ ilen == 2 || ilen == 4 || ilen == 6);
switch (env->int_pgm_code) {
case PGM_PER:
diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc
index 9d2d35f..597d968 100644
--- a/target/s390x/tcg/insn-data.h.inc
+++ b/target/s390x/tcg/insn-data.h.inc
@@ -199,8 +199,8 @@
C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64)
C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64)
/* COMPARE HALFWORD RELATIVE LONG */
- C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32)
- C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64)
+ C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps32)
+ C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_16s, 0, 0, 0, cmps64)
/* COMPARE HIGH */
C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32)
C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32)
@@ -410,12 +410,12 @@
/* LOAD */
C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0)
- C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0)
- C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0)
+ D(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0, 0)
+ D(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0, 0)
C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0)
C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0)
- C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0)
- C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0)
+ D(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0, 0)
+ D(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0, 0)
F(0x2800, LDR, RR_a, Z, 0, f2, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2)
F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1)
F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1)
@@ -426,9 +426,9 @@
/* LOAD IMMEDIATE */
C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0)
/* LOAD RELATIVE LONG */
- C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0)
- C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0)
- C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0)
+ D(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0, MO_ALIGN)
+ D(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0, MO_ALIGN)
+ D(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0, MO_ALIGN)
/* LOAD ADDRESS */
C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0)
C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0)
@@ -456,9 +456,9 @@
C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32)
C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64)
C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64)
- C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64)
- C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64)
- C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64)
+ D(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64, 0)
+ D(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64, 0)
+ D(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64, 0)
F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP)
F(0xb312, LTDBR, RRE, Z, 0, f2, 0, f1, mov2, f64, IF_BFP)
F(0xb342, LTXBR, RRE, Z, x2h, x2l, 0, x1_P, movx, f128, IF_BFP)
@@ -502,16 +502,16 @@
C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0)
C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0)
/* LOAD HIGH */
- C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0)
+ D(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0, 0)
/* LOAG HIGH AND TRAP */
C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0)
/* LOAD LOGICAL */
C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0)
- C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0)
+ D(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0, 0)
/* LOAD LOGICAL AND TRAP */
C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0)
/* LOAD LOGICAL RELATIVE LONG */
- C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0)
+ D(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0, MO_ALIGN)
/* LOAD LOGICAL CHARACTER */
C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0)
C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0)
@@ -840,16 +840,16 @@
F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP)
/* STORE */
- C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0)
- C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0)
- C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0)
- F(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, IF_AFP1)
- F(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, IF_AFP1)
- F(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, IF_AFP1)
- F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1)
+ D(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0, 0)
+ D(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0, 0)
+ D(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0, 0)
+ E(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, 0, IF_AFP1)
+ E(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, 0, IF_AFP1)
+ E(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, 0, IF_AFP1)
+ E(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, 0, IF_AFP1)
/* STORE RELATIVE LONG */
- C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0)
- C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0)
+ D(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0, MO_ALIGN)
+ D(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0, MO_ALIGN)
/* STORE CHARACTER */
C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0)
C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0)
@@ -867,7 +867,7 @@
/* STORE HALFWORD RELATIVE LONG */
C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0)
/* STORE HIGH */
- C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0)
+ D(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0, 0)
/* STORE ON CONDITION */
D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0)
D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1)
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index 6835c26..b93dbd3 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -149,7 +149,6 @@
nonfault, phost, ra);
if (unlikely(flags & TLB_INVALID_MASK)) {
- assert(!nonfault);
#ifdef CONFIG_USER_ONLY
/* Address is in TEC in system mode; see s390_cpu_record_sigsegv. */
env->__excp_addr = addr & TARGET_PAGE_MASK;
@@ -2468,8 +2467,16 @@
*/
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
- uint64_t insn = cpu_lduw_code(env, addr);
- uint8_t opc = insn >> 8;
+ uint64_t insn;
+ uint8_t opc;
+
+ /* EXECUTE targets must be at even addresses. */
+ if (addr & 1) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+
+ insn = cpu_lduw_code(env, addr);
+ opc = insn >> 8;
/* Or in the contents of R1[56:63]. */
insn |= r1 & 0xff;
@@ -2530,6 +2537,7 @@
that ex_value is non-zero, which flags that we are in a state
that requires such execution. */
env->ex_value = insn | ilen;
+ env->ex_target = addr;
}
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 21a57d5..2d9b4bb 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -156,8 +156,6 @@
typedef struct {
TCGCond cond:8;
bool is_64;
- bool g1;
- bool g2;
union {
struct { TCGv_i64 a, b; } s64;
struct { TCGv_i32 a, b; } s32;
@@ -308,8 +306,6 @@
TCGv_i128 r = tcg_temp_new_i128();
tcg_gen_concat_i64_i128(r, l, h);
- tcg_temp_free_i64(h);
- tcg_temp_free_i64(l);
return r;
}
@@ -722,7 +718,6 @@
c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
c->u.s32.a = cc_op;
c->u.s32.b = cc_op;
- c->g1 = c->g2 = true;
c->is_64 = false;
return;
}
@@ -839,7 +834,6 @@
/* Load up the arguments of the comparison. */
c->is_64 = true;
- c->g1 = c->g2 = false;
switch (old_cc_op) {
case CC_OP_LTGT0_32:
c->is_64 = false;
@@ -861,13 +855,11 @@
case CC_OP_FLOGR:
c->u.s64.a = cc_dst;
c->u.s64.b = tcg_constant_i64(0);
- c->g1 = true;
break;
case CC_OP_LTGT_64:
case CC_OP_LTUGTU_64:
c->u.s64.a = cc_src;
c->u.s64.b = cc_dst;
- c->g1 = c->g2 = true;
break;
case CC_OP_TM_32:
@@ -882,7 +874,6 @@
case CC_OP_SUBU:
c->is_64 = true;
c->u.s64.b = tcg_constant_i64(0);
- c->g1 = true;
switch (mask) {
case 8 | 2:
case 4 | 1: /* result */
@@ -900,7 +891,6 @@
case CC_OP_STATIC:
c->is_64 = false;
c->u.s32.a = cc_op;
- c->g1 = true;
switch (mask) {
case 0x8 | 0x4 | 0x2: /* cc != 3 */
cond = TCG_COND_NE;
@@ -916,7 +906,6 @@
break;
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
cond = TCG_COND_EQ;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
@@ -935,7 +924,6 @@
break;
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
cond = TCG_COND_NE;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
@@ -959,7 +947,6 @@
default:
/* CC is masked by something else: (8 >> cc) & mask. */
cond = TCG_COND_NE;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
@@ -974,24 +961,6 @@
c->cond = cond;
}
-static void free_compare(DisasCompare *c)
-{
- if (!c->g1) {
- if (c->is_64) {
- tcg_temp_free_i64(c->u.s64.a);
- } else {
- tcg_temp_free_i32(c->u.s32.a);
- }
- }
- if (!c->g2) {
- if (c->is_64) {
- tcg_temp_free_i64(c->u.s64.b);
- } else {
- tcg_temp_free_i32(c->u.s32.b);
- }
- }
-}
-
/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N) FMT_##N,
@@ -1092,7 +1061,6 @@
them, and store them back. See the "in1", "in2", "prep", "wout" sets
of routines below for more details. */
typedef struct {
- bool g_out, g_out2, g_in1, g_in2;
TCGv_i64 out, out2, in1, in2;
TCGv_i64 addr1;
TCGv_i128 out_128, in1_128, in2_128;
@@ -1292,17 +1260,14 @@
TCGv_i64 z = tcg_constant_i64(0);
tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
tcg_gen_extu_i32_i64(t1, t0);
- tcg_temp_free_i32(t0);
tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
per_branch_cond(s, TCG_COND_NE, t1, z);
- tcg_temp_free_i64(t1);
}
ret = DISAS_PC_UPDATED;
}
egress:
- free_compare(c);
return ret;
}
@@ -1462,11 +1427,11 @@
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_ori_i64(o->in2, o->in2, ~mask);
- tcg_gen_and_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_ori_i64(t, t, ~mask);
+ tcg_gen_and_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -1555,7 +1520,6 @@
tcg_gen_extu_i32_i64(t, cc_op);
tcg_gen_shli_i64(t, t, 28);
tcg_gen_or_i64(o->out, o->out, t);
- tcg_temp_free_i64(t);
}
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
@@ -1612,8 +1576,6 @@
c.cond = TCG_COND_NE;
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_subi_i64(t, regs[r1], 1);
@@ -1621,7 +1583,6 @@
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1635,8 +1596,6 @@
c.cond = TCG_COND_NE;
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, regs[r1], 32);
@@ -1645,7 +1604,6 @@
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, 1, imm, o->in2);
}
@@ -1659,8 +1617,6 @@
c.cond = TCG_COND_NE;
c.is_64 = true;
- c.g1 = true;
- c.g2 = false;
tcg_gen_subi_i64(regs[r1], regs[r1], 1);
c.u.s64.a = regs[r1];
@@ -1680,8 +1636,6 @@
c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_add_i64(t, regs[r1], regs[r3]);
@@ -1690,7 +1644,6 @@
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
store_reg32_i64(r1, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1708,15 +1661,12 @@
if (r1 == (r3 | 1)) {
c.u.s64.b = load_reg(r3 | 1);
- c.g2 = false;
} else {
c.u.s64.b = regs[r3 | 1];
- c.g2 = true;
}
tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
c.u.s64.a = regs[r1];
- c.g1 = true;
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1731,7 +1681,7 @@
if (s->insn->data) {
c.cond = tcg_unsigned_cond(c.cond);
}
- c.is_64 = c.g1 = c.g2 = true;
+ c.is_64 = true;
c.u.s64.a = o->in1;
c.u.s64.b = o->in2;
@@ -2012,11 +1962,9 @@
gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
set_cc_static(s);
tcg_gen_extr_i128_i64(o->out, len, pair);
- tcg_temp_free_i128(pair);
tcg_gen_add_i64(regs[r2], regs[r2], len);
tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
- tcg_temp_free_i64(len);
return DISAS_NEXT;
}
@@ -2118,7 +2066,6 @@
tcg_gen_extrl_i64_i32(t1, o->in1);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
- tcg_temp_free_i32(t1);
return DISAS_NEXT;
}
@@ -2128,7 +2075,6 @@
gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
- tcg_temp_free_i128(pair);
set_cc_static(s);
return DISAS_NEXT;
@@ -2140,7 +2086,6 @@
tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
tcg_gen_or_i64(o->out, o->out, t);
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -2156,14 +2101,12 @@
addr = get_address(s, 0, b2, d2);
tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
get_mem_index(s), s->insn->data | MO_ALIGN);
- tcg_temp_free_i64(addr);
/* Are the memory and expected values (un)equal? Note that this setcond
produces the output CC value, thus the NE sense of the test. */
cc = tcg_temp_new_i64();
tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
tcg_gen_extrl_i64_i32(cc_op, cc);
- tcg_temp_free_i64(cc);
set_cc_static(s);
return DISAS_NEXT;
@@ -2223,7 +2166,6 @@
tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
get_mem_index(s), mop | MO_ALIGN);
- tcg_temp_free_i64(addr);
/* Are the memory and expected values (un)equal? */
cc = tcg_temp_new_i64();
@@ -2237,14 +2179,12 @@
} else {
tcg_gen_mov_i64(o->out, old);
}
- tcg_temp_free_i64(old);
/* If the comparison was equal, and the LSB of R2 was set,
then we need to flush the TLB (for all cpus). */
tcg_gen_xori_i64(cc, cc, 1);
tcg_gen_and_i64(cc, cc, o->in2);
tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
- tcg_temp_free_i64(cc);
gen_helper_purge(cpu_env);
gen_set_label(lab);
@@ -2259,9 +2199,7 @@
TCGv_i32 t2 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t2, o->in1);
gen_helper_cvd(t1, t2);
- tcg_temp_free_i32(t2);
tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -2363,7 +2301,6 @@
gen_helper_divs64(t, cpu_env, o->in1, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
- tcg_temp_free_i128(t);
return DISAS_NEXT;
}
@@ -2373,7 +2310,6 @@
gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
- tcg_temp_free_i128(t);
return DISAS_NEXT;
}
@@ -2428,8 +2364,6 @@
if (r2 != 0) {
store_reg32_i64(r2, psw_mask);
}
-
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -2569,7 +2503,6 @@
tcg_gen_movi_i64(tmp, ccm);
gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -2592,8 +2525,6 @@
tcg_gen_extu_i32_i64(t2, cc_op);
tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NEXT;
}
@@ -2839,19 +2770,22 @@
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
+ MO_TESL | s->insn->data);
return DISAS_NEXT;
}
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
+ MO_TEUL | s->insn->data);
return DISAS_NEXT;
}
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
+ MO_TEUQ | s->insn->data);
return DISAS_NEXT;
}
@@ -2925,21 +2859,17 @@
if (c.is_64) {
tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
o->in2, o->in1);
- free_compare(&c);
} else {
TCGv_i32 t32 = tcg_temp_new_i32();
TCGv_i64 t, z;
tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
- free_compare(&c);
t = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t, t32);
- tcg_temp_free_i32(t32);
z = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
- tcg_temp_free_i64(t);
}
return DISAS_NEXT;
@@ -2983,21 +2913,21 @@
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
- TCGv_i64 t1, t2;
+ TCGv_i64 mask, addr;
per_breaking_event(s);
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
- MO_TEUL | MO_ALIGN_8);
- tcg_gen_addi_i64(o->in2, o->in2, 4);
- tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
- /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
- tcg_gen_shli_i64(t1, t1, 32);
- gen_helper_load_psw(cpu_env, t1, t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
+ /*
+ * Convert the short PSW into the normal PSW, similar to what
+ * s390_cpu_load_normal() does.
+ */
+ mask = tcg_temp_new_i64();
+ addr = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
+ tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
+ tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
+ tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
+ gen_helper_load_psw(cpu_env, mask, addr);
return DISAS_NORETURN;
}
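op_lpsw now expands the 8-byte short-format PSW inline: the address comes from the low bits, the control bits of the mask are kept, and the short-PSW marker bit is flipped off, mirroring s390_cpu_load_normal(). A simplified model; the three masks are the PSW_MASK_SHORT_ADDR/_SHORT_CTRL/_SHORTPSW constants from the s390x headers and their values are deliberately not repeated here:

    #include <stdint.h>

    /* Model of the short -> normal PSW expansion done inline by op_lpsw. */
    static void expand_short_psw(uint64_t quad,
                                 uint64_t short_addr_mask,
                                 uint64_t short_ctrl_mask,
                                 uint64_t shortpsw_bit,
                                 uint64_t *mask, uint64_t *addr)
    {
        *addr = quad & short_addr_mask;   /* instruction address bits */
        *mask = quad & short_ctrl_mask;   /* control bits of the mask */
        *mask ^= shortpsw_bit;            /* turn off the short-PSW marker */
    }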
@@ -3014,8 +2944,6 @@
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
gen_helper_load_psw(cpu_env, t1, t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NORETURN;
}
#endif
@@ -3040,7 +2968,6 @@
if (unlikely(r1 == r3)) {
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32_i64(r1, t1);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3055,8 +2982,6 @@
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t2);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3069,9 +2994,6 @@
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32_i64(r1, t1);
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3086,7 +3008,6 @@
if (unlikely(r1 == r3)) {
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32h_i64(r1, t1);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3101,8 +3022,6 @@
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t2);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3115,9 +3034,6 @@
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32h_i64(r1, t1);
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3141,11 +3057,9 @@
tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
tcg_gen_mov_i64(regs[r1], t1);
- tcg_temp_free(t2);
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3157,8 +3071,6 @@
tcg_gen_add_i64(o->in2, o->in2, t1);
tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
}
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3180,8 +3092,6 @@
a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
- tcg_temp_free_i64(a1);
- tcg_temp_free_i64(a2);
/* ... and indicate that we performed them while interlocked. */
gen_op_movi_cc(s, 0);
@@ -3253,9 +3163,7 @@
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
o->out = o->in2;
- o->g_out = o->g_in2;
o->in2 = NULL;
- o->g_in2 = false;
return DISAS_NEXT;
}
@@ -3265,9 +3173,7 @@
TCGv ar1 = tcg_temp_new_i64();
o->out = o->in2;
- o->g_out = o->g_in2;
o->in2 = NULL;
- o->g_in2 = false;
switch (s->base.tb->flags & FLAG_MASK_ASC) {
case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
@@ -3289,8 +3195,6 @@
}
tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
- tcg_temp_free_i64(ar1);
-
return DISAS_NEXT;
}
@@ -3298,11 +3202,8 @@
{
o->out = o->in1;
o->out2 = o->in2;
- o->g_out = o->g_in1;
- o->g_out2 = o->g_in2;
o->in1 = NULL;
o->in2 = NULL;
- o->g_in1 = o->g_in2 = false;
return DISAS_NEXT;
}
@@ -3509,7 +3410,6 @@
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3517,7 +3417,6 @@
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3525,7 +3424,6 @@
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3533,7 +3431,6 @@
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3544,7 +3441,6 @@
tcg_gen_neg_i64(n, o->in2);
tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
- tcg_temp_free_i64(n);
return DISAS_NEXT;
}
@@ -3621,10 +3517,10 @@
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_or_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_or_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -3804,12 +3700,15 @@
int i3 = get_field(s, i3);
int i4 = get_field(s, i4);
int i5 = get_field(s, i5);
+ TCGv_i64 orig_out;
uint64_t mask;
/* If this is a test-only form, arrange to discard the result. */
if (i3 & 0x80) {
+ tcg_debug_assert(o->out != NULL);
+ orig_out = o->out;
o->out = tcg_temp_new_i64();
- o->g_out = false;
+ tcg_gen_mov_i64(o->out, orig_out);
}
i3 &= 63;
@@ -3879,9 +3778,6 @@
tcg_gen_extrl_i64_i32(t2, o->in2);
tcg_gen_rotl_i32(to, t1, t2);
tcg_gen_extu_i32_i64(o->out, to);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(to);
return DISAS_NEXT;
}
@@ -4022,7 +3918,6 @@
} else {
tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
}
- free_compare(&c);
r1 = get_field(s, r1);
a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
@@ -4037,12 +3932,10 @@
h = tcg_temp_new_i64();
tcg_gen_shri_i64(h, regs[r1], 32);
tcg_gen_qemu_st32(h, a, get_mem_index(s));
- tcg_temp_free_i64(h);
break;
default:
g_assert_not_reached();
}
- tcg_temp_free_i64(a);
gen_set_label(lab);
return DISAS_NEXT;
@@ -4059,9 +3952,6 @@
t = o->in1;
}
gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
- if (s->insn->data == 31) {
- tcg_temp_free_i64(t);
- }
tcg_gen_shl_i64(o->out, o->in1, o->in2);
/* The arithmetic left shift is curious in that it does not affect
the sign bit. Copy that over from the source unchanged. */
@@ -4128,8 +4018,6 @@
tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
-
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4170,8 +4058,6 @@
/* store second operand in GR1 */
tcg_gen_mov_i64(regs[1], o->in2);
-
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4189,9 +4075,23 @@
return DISAS_NEXT;
}
+static void gen_check_psw_mask(DisasContext *s)
+{
+ TCGv_i64 reserved = tcg_temp_new_i64();
+ TCGLabel *ok = gen_new_label();
+
+ tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
+ gen_program_exception(s, PGM_SPECIFICATION);
+ gen_set_label(ok);
+}
+
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
+
+ gen_check_psw_mask(s);
+
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
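gen_check_psw_mask() traps attempts to set reserved PSW-mask bits (via SSM here, and via STNSM/STOSM below) with a specification exception. The runtime effect, modelled in plain C; the actual reserved-bit mask comes from the s390x headers and is not reproduced:

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of gen_check_psw_mask(): any reserved bit set in the would-be
       PSW mask means a specification exception. */
    static bool psw_mask_valid(uint64_t psw_mask, uint64_t reserved_bits)
    {
        return (psw_mask & reserved_bits) == 0;
    }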
@@ -4231,9 +4131,6 @@
tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
- tcg_temp_free_i64(c1);
- tcg_temp_free_i64(c2);
- tcg_temp_free_i64(todpr);
/* ??? We don't implement clock states. */
gen_op_movi_cc(s, 0);
return DISAS_NEXT;
@@ -4447,7 +4344,6 @@
t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, psw_mask, 56);
tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
- tcg_temp_free_i64(t);
if (s->fields.op == 0xac) {
tcg_gen_andi_i64(psw_mask, psw_mask,
@@ -4456,6 +4352,8 @@
tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
}
+ gen_check_psw_mask(s);
+
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
@@ -4494,13 +4392,15 @@
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
+ MO_TEUL | s->insn->data);
return DISAS_NEXT;
}
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
+ MO_TEUQ | s->insn->data);
return DISAS_NEXT;
}
@@ -4558,7 +4458,6 @@
}
break;
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4602,8 +4501,6 @@
tcg_gen_add_i64(o->in2, o->in2, t4);
r1 = (r1 + 1) & 15;
}
-
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -4790,7 +4687,6 @@
gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, pair);
- tcg_temp_free_i128(pair);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4836,7 +4732,6 @@
}
gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
- tcg_temp_free_i32(tst);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4955,10 +4850,10 @@
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_xor_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_xor_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -4989,15 +4884,14 @@
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
- o->out = tcg_const_i64(0);
+ o->out = tcg_constant_i64(0);
return DISAS_NEXT;
}
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
- o->out = tcg_const_i64(0);
+ o->out = tcg_constant_i64(0);
o->out2 = o->out;
- o->g_out2 = true;
return DISAS_NEXT;
}
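The op_zero/op_zero2 hunk is one instance of a pattern that recurs through the rest of this pull: tcg_const_*() allocated a writable temporary that had to be freed, while tcg_constant_*() returns an interned, read-only value that must never be written or freed. A sketch of the two calling conventions with stub declarations standing in for the TCG API (not meant to link against anything):

    #include <stdint.h>

    typedef struct TCGv_i64_opaque *TCGv_i64;
    TCGv_i64 tcg_const_i64(int64_t val);      /* old: writable temp */
    TCGv_i64 tcg_constant_i64(int64_t val);   /* new: read-only, interned */
    void tcg_temp_free_i64(TCGv_i64 t);

    static TCGv_i64 zero_old_style(void)
    {
        TCGv_i64 t = tcg_const_i64(0);
        /* ...later ops could overwrite t; the caller had to remember to
           call tcg_temp_free_i64(t) eventually... */
        return t;
    }

    static TCGv_i64 zero_new_style(void)
    {
        /* Never written to and never freed; safe to hand out directly. */
        return tcg_constant_i64(0);
    }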
@@ -5265,7 +5159,6 @@
static void prep_r1(DisasContext *s, DisasOps *o)
{
o->out = regs[get_field(s, r1)];
- o->g_out = true;
}
#define SPEC_prep_r1 0
@@ -5274,7 +5167,6 @@
int r1 = get_field(s, r1);
o->out = regs[r1];
o->out2 = regs[r1 + 1];
- o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even
@@ -5343,7 +5235,6 @@
store_reg32_i64(r1 + 1, o->out);
tcg_gen_shri_i64(t, o->out, 32);
store_reg32_i64(r1, t);
- tcg_temp_free_i64(t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
@@ -5499,7 +5390,6 @@
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r1)];
- o->g_in1 = true;
}
#define SPEC_in1_r1_o 0
@@ -5533,7 +5423,6 @@
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r1) + 1];
- o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even
@@ -5588,7 +5477,6 @@
static void in1_r3_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r3)];
- o->g_in1 = true;
}
#define SPEC_in1_r3_o 0
@@ -5719,7 +5607,6 @@
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
o->in2 = regs[get_field(s, r1)];
- o->g_in2 = true;
}
#define SPEC_in2_r1_o 0
@@ -5754,7 +5641,6 @@
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
o->in2 = regs[get_field(s, r2)];
- o->g_in2 = true;
}
#define SPEC_in2_r2_o 0
@@ -5888,7 +5774,18 @@
static TCGv gen_ri2(DisasContext *s)
{
- return tcg_constant_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
+ int64_t delta = (int64_t)get_field(s, i2) * 2;
+ TCGv ri2;
+
+ if (unlikely(s->ex_value)) {
+ ri2 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(ri2, cpu_env, offsetof(CPUS390XState, ex_target));
+ tcg_gen_addi_i64(ri2, ri2, delta);
+ } else {
+ ri2 = tcg_constant_i64(s->base.pc_next + delta);
+ }
+
+ return ri2;
}
static void in2_ri2(DisasContext *s, DisasOps *o)
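gen_ri2() now distinguishes the EXECUTE case: a relative-immediate operand is relative to the address of the executed instruction (saved in ex_target by HELPER(ex) above), not to pc_next of the EXECUTE itself. The address computation, reduced to plain C:

    #include <stdint.h>

    /* Compute the address a relative-immediate (RI) operand refers to.
       'executing' and 'ex_target' model the EXECUTE case added above. */
    static uint64_t ri2_address(uint64_t pc_next, int32_t i2,
                                int executing, uint64_t ex_target)
    {
        int64_t delta = (int64_t)i2 * 2;            /* halfword displacement */
        uint64_t base = executing ? ex_target : pc_next;
        return base + delta;
    }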
@@ -5903,7 +5800,7 @@
int d2 = get_field(s, d2);
if (b2 == 0) {
- o->in2 = tcg_const_i64(d2 & 0x3f);
+ o->in2 = tcg_constant_i64(d2 & 0x3f);
} else {
o->in2 = get_address(s, 0, b2, d2);
tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
@@ -5979,6 +5876,13 @@
#define SPEC_in2_m2_64a 0
#endif
+static void in2_mri2_16s(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s));
+}
+#define SPEC_in2_mri2_16s 0
+
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
o->in2 = tcg_temp_new_i64();
@@ -5989,66 +5893,69 @@
static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
o->in2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld32s(o->in2, gen_ri2(s), get_mem_index(s));
+ tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
+ MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0
static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
o->in2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld32u(o->in2, gen_ri2(s), get_mem_index(s));
+ tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
+ MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0
static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
o->in2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld64(o->in2, gen_ri2(s), get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
+ MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0
static void in2_i2(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64(get_field(s, i2));
+ o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
uint64_t i2 = (uint16_t)get_field(s, i2);
- o->in2 = tcg_const_i64(i2 << s->insn->data);
+ o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
uint64_t i2 = (uint32_t)get_field(s, i2);
- o->in2 = tcg_const_i64(i2 << s->insn->data);
+ o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64(s->fields.raw_insn);
+ o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
@@ -6474,31 +6381,6 @@
}
}
- /* Free any temporaries created by the helpers. */
- if (o.out && !o.g_out) {
- tcg_temp_free_i64(o.out);
- }
- if (o.out2 && !o.g_out2) {
- tcg_temp_free_i64(o.out2);
- }
- if (o.in1 && !o.g_in1) {
- tcg_temp_free_i64(o.in1);
- }
- if (o.in2 && !o.g_in2) {
- tcg_temp_free_i64(o.in2);
- }
- if (o.addr1) {
- tcg_temp_free_i64(o.addr1);
- }
- if (o.out_128) {
- tcg_temp_free_i128(o.out_128);
- }
- if (o.in1_128) {
- tcg_temp_free_i128(o.in1_128);
- }
- if (o.in2_128) {
- tcg_temp_free_i128(o.in2_128);
- }
/* io should be the last instruction in tb when icount is enabled */
if (unlikely(icount && ret == DISAS_NEXT)) {
ret = DISAS_TOO_MANY;
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index 3fadc82..43dfbfd 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -183,8 +183,6 @@
/* generate the final ptr by adding cpu_env */
tcg_gen_trunc_i64_ptr(ptr, tmp);
tcg_gen_add_ptr(ptr, ptr, cpu_env);
-
- tcg_temp_free_i64(tmp);
}
#define gen_gvec_2(v1, v2, gen) \
@@ -272,13 +270,6 @@
fn(dl, dh, al, ah, bl, bh);
write_vec_element_i64(dh, d, 0, ES_64);
write_vec_element_i64(dl, d, 1, ES_64);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(dl);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(al);
- tcg_temp_free_i64(bh);
- tcg_temp_free_i64(bl);
}
typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
@@ -305,15 +296,6 @@
fn(dl, dh, al, ah, bl, bh, cl, ch);
write_vec_element_i64(dh, d, 0, ES_64);
write_vec_element_i64(dl, d, 1, ES_64);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(dl);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(al);
- tcg_temp_free_i64(bh);
- tcg_temp_free_i64(bl);
- tcg_temp_free_i64(ch);
- tcg_temp_free_i64(cl);
}
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
@@ -351,7 +333,6 @@
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -386,7 +367,6 @@
write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
tcg_gen_movi_i64(t, generate_byte_mask(i2));
write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(t);
}
return DISAS_NEXT;
}
@@ -427,8 +407,6 @@
tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -451,7 +429,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
gen_gvec_dup_i64(es, get_field(s, v1), tmp);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -469,7 +446,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -486,7 +462,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
gen_gvec_dup_i64(es, get_field(s, v1), tmp);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -518,7 +493,6 @@
write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
write_vec_element_i64(tcg_constant_i64(0), get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -572,9 +546,6 @@
write:
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -592,7 +563,6 @@
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -647,8 +617,6 @@
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -688,8 +656,6 @@
default:
g_assert_not_reached();
}
- tcg_temp_free_ptr(ptr);
-
return DISAS_NEXT;
}
@@ -730,7 +696,6 @@
tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
gen_gvec_dup_imm(es, get_field(s, v1), 0);
write_vec_element_i64(t, get_field(s, v1), enr, es);
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -768,9 +733,6 @@
/* Store the last element, loaded first */
write_vec_element_i64(t0, v1, 1, ES_64);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -794,8 +756,6 @@
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vll(cpu_env, a0, o->addr1, bytes);
- tcg_temp_free_i64(bytes);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -835,8 +795,6 @@
default:
g_assert_not_reached();
}
- tcg_temp_free_ptr(ptr);
-
return DISAS_NEXT;
}
@@ -856,7 +814,6 @@
tcg_gen_addi_i64(o->in2, o->in2, 1);
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -898,7 +855,6 @@
write_vec_element_i64(tmp, v1, dst_idx, es);
}
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -974,7 +930,6 @@
}
write_vec_element_i64(tmp, v1, dst_idx, dst_es);
}
- tcg_temp_free_i64(tmp);
} else {
gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
}
@@ -1004,8 +959,6 @@
read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -1057,7 +1010,6 @@
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1098,7 +1050,6 @@
write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1116,7 +1067,6 @@
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1134,7 +1084,6 @@
tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1189,9 +1138,6 @@
tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -1209,7 +1155,6 @@
tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1251,9 +1196,6 @@
tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -1284,7 +1226,6 @@
}
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1297,7 +1238,6 @@
tcg_gen_addi_i64(o->in2, o->in2, 1);
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -1335,7 +1275,6 @@
write_vec_element_i64(tmp, v1, dst_idx, dst_es);
}
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1377,10 +1316,6 @@
/* Isolate and shift the carry into position */
tcg_gen_and_i64(d, d, msb_mask);
tcg_gen_shri_i64(d, d, msb_bit_nr);
-
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
}
static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
@@ -1399,7 +1334,6 @@
tcg_gen_add_i32(t, a, b);
tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
- tcg_temp_free_i32(t);
}
static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
@@ -1408,7 +1342,6 @@
tcg_gen_add_i64(t, a, b);
tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
- tcg_temp_free_i64(t);
}
static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
@@ -1422,9 +1355,6 @@
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(th);
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
@@ -1460,8 +1390,6 @@
tcg_gen_extract_i64(tl, cl, 0, 1);
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
tcg_gen_add2_i64(dl, dh, dl, dh, tl, zero);
-
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
@@ -1490,9 +1418,6 @@
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
@@ -1533,9 +1458,6 @@
tcg_gen_addi_i64(t0, t0, 1);
tcg_gen_shri_i64(t0, t0, 1);
tcg_gen_extrl_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
@@ -1550,10 +1472,6 @@
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(bh);
}
static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
@@ -1586,9 +1504,6 @@
tcg_gen_addi_i64(t0, t0, 1);
tcg_gen_shri_i64(t0, t0, 1);
tcg_gen_extrl_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
@@ -1599,8 +1514,6 @@
tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
-
- tcg_temp_free_i64(dh);
}
static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
@@ -1635,9 +1548,6 @@
}
gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
-
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(sum);
return DISAS_NEXT;
}
@@ -1682,9 +1592,6 @@
read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
gen_op_update2_cc_i64(s, CC_OP_VC, low, high);
-
- tcg_temp_free_i64(low);
- tcg_temp_free_i64(high);
}
return DISAS_NEXT;
}
@@ -1853,8 +1760,6 @@
tcg_gen_mul_i32(t0, a, b);
tcg_gen_add_i32(d, t0, c);
-
- tcg_temp_free_i32(t0);
}
static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
@@ -1869,10 +1774,6 @@
tcg_gen_mul_i64(t0, t0, t1);
tcg_gen_add_i64(t0, t0, t2);
tcg_gen_extrh_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
@@ -1887,10 +1788,6 @@
tcg_gen_mul_i64(t0, t0, t1);
tcg_gen_add_i64(t0, t0, t2);
tcg_gen_extrh_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
@@ -1974,7 +1871,6 @@
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_muls2_i32(t, d, a, b);
- tcg_temp_free_i32(t);
}
static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
@@ -1982,7 +1878,6 @@
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_mulu2_i32(t, d, a, b);
- tcg_temp_free_i32(t);
}
static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
@@ -2099,11 +1994,6 @@
/* Store final result into v1. */
write_vec_element_i64(h1, get_field(s, v1), 0, ES_64);
write_vec_element_i64(l1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free_i64(l1);
- tcg_temp_free_i64(h1);
- tcg_temp_free_i64(l2);
- tcg_temp_free_i64(h2);
return DISAS_NEXT;
}
@@ -2169,8 +2059,6 @@
tcg_gen_and_i32(t, t, b);
tcg_gen_andc_i32(d, d, b);
tcg_gen_or_i32(d, d, t);
-
- tcg_temp_free_i32(t);
}
static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
@@ -2181,8 +2069,6 @@
tcg_gen_and_i64(t, t, b);
tcg_gen_andc_i64(d, d, b);
tcg_gen_or_i64(d, d, t);
-
- tcg_temp_free_i64(t);
}
static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
@@ -2291,7 +2177,6 @@
default:
g_assert_not_reached();
}
- tcg_temp_free_i32(shift);
}
return DISAS_NEXT;
}
@@ -2311,7 +2196,6 @@
read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
tcg_gen_andi_i64(shift, shift, byte ? 0x78 : 7);
gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen);
- tcg_temp_free_i64(shift);
}
return DISAS_NEXT;
}
@@ -2367,10 +2251,6 @@
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
return DISAS_NEXT;
}
@@ -2397,10 +2277,6 @@
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
return DISAS_NEXT;
}
@@ -2445,9 +2321,6 @@
/* "invert" the result: -1 -> 0; 0 -> 1 */
tcg_gen_addi_i64(dl, th, 1);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(th);
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
@@ -2482,8 +2355,6 @@
tcg_gen_not_i64(tl, bl);
tcg_gen_not_i64(th, bh);
gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
@@ -2508,9 +2379,6 @@
tcg_gen_not_i64(tl, bl);
tcg_gen_not_i64(th, bh);
gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);
-
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
@@ -2550,8 +2418,6 @@
}
write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
}
- tcg_temp_free_i64(sum);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -2580,10 +2446,6 @@
}
write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free_i64(sumh);
- tcg_temp_free_i64(suml);
- tcg_temp_free_i64(tmpl);
return DISAS_NEXT;
}
@@ -2611,8 +2473,6 @@
}
write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
}
- tcg_temp_free_i32(sum);
- tcg_temp_free_i32(tmp);
return DISAS_NEXT;
}
@@ -3399,9 +3259,6 @@
read_vec_element_i64(tmp, v2, 1, ES_64);
write_vec_element_i64(tmp, v1, 1, ES_64);
}
-
- tcg_temp_free_i64(tmp);
-
return DISAS_NEXT;
}
diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h
index 98a0250..a7cdb7e 100644
--- a/target/sh4/cpu-param.h
+++ b/target/sh4/cpu-param.h
@@ -16,6 +16,5 @@
#else
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-#define NB_MMU_MODES 2
#endif
diff --git a/target/sh4/gdbstub.c b/target/sh4/gdbstub.c
index 3488f68..d8e199f 100644
--- a/target/sh4/gdbstub.c
+++ b/target/sh4/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
/* Hint: Use "set architecture sh4" in GDB to see fpu registers */
/* FIXME: We should use XML for this. */
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index ad6de41..6e40d5d 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -171,16 +171,16 @@
qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
env->sgr, env->dbr, env->delayed_pc, env->fpul);
for (i = 0; i < 24; i += 4) {
- qemu_printf("r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
- i, env->gregs[i], i + 1, env->gregs[i + 1],
- i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
+ qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
+ i, env->gregs[i], i + 1, env->gregs[i + 1],
+ i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
}
if (env->flags & TB_FLAG_DELAY_SLOT) {
- qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
- env->delayed_pc);
+ qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
} else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
- qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
- env->delayed_pc);
+ qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
} else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
env->delayed_pc);
@@ -526,13 +526,13 @@
return;
case 0x9000: /* mov.w @(disp,PC),Rn */
{
- TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
+ TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
}
return;
case 0xd000: /* mov.l @(disp,PC),Rn */
{
- TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
+ TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
}
return;
@@ -694,7 +694,7 @@
case 0x300e: /* addc Rm,Rn */
{
TCGv t0, t1;
- t0 = tcg_const_tl(0);
+ t0 = tcg_constant_tl(0);
t1 = tcg_temp_new();
tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
@@ -754,7 +754,7 @@
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
- TCGv zero = tcg_const_i32(0);
+ TCGv zero = tcg_constant_i32(0);
/* shift left arg1, saving the bit being pushed out and inserting
T on the right */
@@ -849,7 +849,7 @@
return;
case 0x600a: /* negc Rm,Rn */
{
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
REG(B7_4), t0, cpu_sr_t, t0);
tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
@@ -913,7 +913,7 @@
case 0x300a: /* subc Rm,Rn */
{
TCGv t0, t1;
- t0 = tcg_const_tl(0);
+ t0 = tcg_constant_tl(0);
t1 = tcg_temp_new();
tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
@@ -1242,7 +1242,7 @@
TCGv imm;
CHECK_NOT_DELAY_SLOT
gen_save_cpu_state(ctx, true);
- imm = tcg_const_i32(B7_0);
+ imm = tcg_constant_i32(B7_0);
gen_helper_trapa(cpu_env, imm);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -1610,12 +1610,9 @@
tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
return;
case 0x401b: /* tas.b @Rn */
- {
- TCGv val = tcg_const_i32(0x80);
- tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
- ctx->memidx, MO_UB);
- tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
- }
+ tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
+ tcg_constant_i32(0x80), ctx->memidx, MO_UB);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
return;
case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
CHECK_FPU_ENABLED
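The tas.b rewrite folds the atomic fetch-or result straight into cpu_sr_t: T is set when the byte was zero before bit 7 was set. The semantics, modelled with C11 atomics rather than the TCG sequence:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* SH-4 TAS.B @Rn: atomically set bit 7 of the byte at 'p' and return
       the T flag, i.e. whether the byte was zero beforehand. */
    static bool tas_b(_Atomic uint8_t *p)
    {
        uint8_t old = atomic_fetch_or(p, 0x80);
        return old == 0;
    }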
@@ -1712,8 +1709,8 @@
CHECK_FPU_ENABLED
CHECK_FPSCR_PR_1
{
- TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
- TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
+ TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
gen_helper_fipr(cpu_env, m, n);
return;
}
@@ -1725,7 +1722,7 @@
if ((ctx->opcode & 0x0300) != 0x0100) {
goto do_illegal;
}
- TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
gen_helper_ftrv(cpu_env, n);
return;
}
@@ -1929,7 +1926,7 @@
}
op_dst = B11_8;
op_opc = INDEX_op_xor_i32;
- op_arg = tcg_const_i32(-1);
+ op_arg = tcg_constant_i32(-1);
break;
case 0x7000 ... 0x700f: /* add #imm,Rn */
@@ -1937,7 +1934,7 @@
goto fail;
}
op_opc = INDEX_op_add_i32;
- op_arg = tcg_const_i32(B7_0s);
+ op_arg = tcg_constant_i32(B7_0s);
break;
case 0x3000: /* cmp/eq Rm,Rn */
@@ -1983,7 +1980,7 @@
goto fail;
}
op_opc = INDEX_op_setcond_i32;
- op_arg = tcg_const_i32(0);
+ op_arg = tcg_constant_i32(0);
NEXT_INSN;
if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h
index 72ddc4a..cb11980 100644
--- a/target/sparc/cpu-param.h
+++ b/target/sparc/cpu-param.h
@@ -16,13 +16,11 @@
# else
# define TARGET_VIRT_ADDR_SPACE_BITS 44
# endif
-# define NB_MMU_MODES 6
#else
# define TARGET_LONG_BITS 32
# define TARGET_PAGE_BITS 12 /* 4k */
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
-# define NB_MMU_MODES 3
#endif
#endif
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index ed0069d..fb98843 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -197,8 +197,7 @@
#define FSR_FTT2 (1ULL << 16)
#define FSR_FTT1 (1ULL << 15)
#define FSR_FTT0 (1ULL << 14)
-//gcc warns about constant overflow for ~FSR_FTT_MASK
-//#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0)
+#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0)
#ifdef TARGET_SPARC64
#define FSR_FTT_NMASK 0xfffffffffffe3fffULL
#define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL
diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c
index 5d1e808..a1c8fdc 100644
--- a/target/sparc/gdbstub.c
+++ b/target/sparc/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#ifdef TARGET_ABI32
#define gdb_get_rega(buf, val) gdb_get_reg32(buf, val)
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 5ee2933..137bdc5 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -2838,7 +2838,7 @@
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
int width, bool cc, bool left)
{
- TCGv lo1, lo2, t1, t2;
+ TCGv lo1, lo2;
uint64_t amask, tabl, tabr;
int shift, imask, omask;
@@ -2905,10 +2905,8 @@
tcg_gen_shli_tl(lo1, lo1, shift);
tcg_gen_shli_tl(lo2, lo2, shift);
- t1 = tcg_const_tl(tabl);
- t2 = tcg_const_tl(tabr);
- tcg_gen_shr_tl(lo1, t1, lo1);
- tcg_gen_shr_tl(lo2, t2, lo2);
+ tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
+ tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
tcg_gen_andi_tl(dst, lo1, omask);
tcg_gen_andi_tl(lo2, lo2, omask);
@@ -2927,9 +2925,9 @@
lo2 |= -(s1 == s2)
dst &= lo2
*/
- tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
- tcg_gen_neg_tl(t1, t1);
- tcg_gen_or_tl(lo2, lo2, t1);
+ tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2);
+ tcg_gen_neg_tl(lo1, lo1);
+ tcg_gen_or_tl(lo2, lo2, lo1);
tcg_gen_and_tl(dst, dst, lo2);
}
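The gen_edge() cleanup reuses lo1 for the equality mask instead of allocating two extra temporaries; the underlying idiom is unchanged and is the branch-free "leave dst untouched by the right mask when s1 == s2" trick from the comment above. In plain C:

    #include <stdint.h>

    /* Branch-free select used by gen_edge(): when s1 == s2, widen lo2 to
       all-ones so the final AND does not clear anything in dst. */
    static uint64_t edge_apply_right_mask(uint64_t dst, uint64_t lo2,
                                          uint64_t s1, uint64_t s2)
    {
        lo2 |= -(uint64_t)(s1 == s2);   /* all-ones iff the operands match */
        return dst & lo2;
    }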
diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h
index 2727913..e29d551 100644
--- a/target/tricore/cpu-param.h
+++ b/target/tricore/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 14
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define NB_MMU_MODES 3
#endif
diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c
index 3a27a7e..e8f8e5e 100644
--- a/target/tricore/gdbstub.c
+++ b/target/tricore/gdbstub.c
@@ -18,7 +18,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#define LCX_REGNUM 32
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 127f9a9..2646cb3 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -124,7 +124,7 @@
/* Macros for generating helpers */
#define gen_helper_1arg(name, arg) do { \
- TCGv_i32 helper_tmp = tcg_const_i32(arg); \
+ TCGv_i32 helper_tmp = tcg_constant_i32(arg); \
gen_helper_##name(cpu_env, helper_tmp); \
} while (0)
@@ -513,7 +513,7 @@
static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_madd32_d(ret, r1, r2, temp);
}
@@ -579,7 +579,7 @@
gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -587,7 +587,7 @@
gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -595,21 +595,22 @@
gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
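The TriCore MADD/MSUB hunks in the rest of this file all follow the same recipe seen here: the immediate n becomes a read-only t_n = tcg_constant_i32(n) passed to the multiply helper, and a separate writable temp is allocated because the old `temp` was also reused as a destination of tcg_gen_extr_i64_i32(); a read-only constant cannot double as that scratch output.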
@@ -621,21 +622,22 @@
gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -647,22 +649,22 @@
gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
@@ -682,23 +684,24 @@
gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -718,23 +721,24 @@
gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -752,22 +756,22 @@
gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
@@ -785,22 +789,22 @@
gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -813,21 +817,21 @@
gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -839,20 +843,20 @@
gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);
@@ -872,21 +876,22 @@
static inline void
gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -899,20 +904,20 @@
gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
@@ -932,21 +937,22 @@
static inline void
gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -957,15 +963,15 @@
static inline void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
- gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp);
+ TCGv t_n = tcg_constant_i32(n);
+ gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, t_n);
}
static inline void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
- gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+ TCGv t_n = tcg_constant_i32(n);
+ gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, t_n);
}
static inline void
@@ -1176,10 +1182,10 @@
TCGv arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
- gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
@@ -1218,7 +1224,7 @@
static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msub32_d(ret, r1, r2, temp);
}
@@ -1254,7 +1260,7 @@
gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -1290,13 +1296,13 @@
gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
{
- TCGv temp = tcg_const_i32(r2);
+ TCGv temp = tcg_constant_i32(r2);
gen_add_d(ret, r1, temp);
}
@@ -1326,7 +1332,7 @@
static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_add_CC(ret, r1, temp);
}
@@ -1358,7 +1364,7 @@
static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_addc_CC(ret, r1, temp);
}
@@ -1369,7 +1375,7 @@
TCGv temp2 = tcg_temp_new();
TCGv result = tcg_temp_new();
TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
tcg_gen_setcond_tl(cond, mask, r4, t0);
@@ -1398,7 +1404,7 @@
static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
TCGv r3, TCGv r4)
{
- TCGv temp = tcg_const_i32(r2);
+ TCGv temp = tcg_constant_i32(r2);
gen_cond_add(cond, r1, temp, r3, r4);
}
@@ -1486,7 +1492,7 @@
TCGv temp2 = tcg_temp_new();
TCGv result = tcg_temp_new();
TCGv mask = tcg_temp_new();
- TCGv t0 = tcg_const_i32(0);
+ TCGv t0 = tcg_constant_i32(0);
/* create mask for sticky bits */
tcg_gen_setcond_tl(cond, mask, r4, t0);
@@ -1516,21 +1522,22 @@
gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -1542,23 +1549,24 @@
gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -1576,22 +1584,22 @@
gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -1604,21 +1612,21 @@
gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
@@ -1630,20 +1638,20 @@
gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
@@ -1664,20 +1672,20 @@
gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
@@ -1697,14 +1705,14 @@
static inline void
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv temp = tcg_constant_i32(n);
gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
}
static inline void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv temp = tcg_constant_i32(n);
gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
}
@@ -1912,10 +1920,10 @@
TCGv arg3, uint32_t n)
{
TCGv_i64 r1 = tcg_temp_new_i64();
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
- gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
@@ -1923,21 +1931,22 @@
gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -1949,22 +1958,22 @@
gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
TCGv_i64 temp64_3 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
@@ -1981,21 +1990,22 @@
static inline void
gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -2007,23 +2017,24 @@
gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv temp3 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_extr_i64_i32(temp, temp2, temp64);
@@ -2041,22 +2052,22 @@
gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
TCGv_i64 temp64 = tcg_temp_new_i64();
TCGv_i64 temp64_2 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
@@ -2072,21 +2083,22 @@
static inline void
gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
{
- TCGv temp = tcg_const_i32(n);
+ TCGv t_n = tcg_constant_i32(n);
+ TCGv temp = tcg_temp_new();
TCGv temp2 = tcg_temp_new();
TCGv_i64 temp64 = tcg_temp_new_i64();
switch (mode) {
case MODE_LL:
- GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_LU:
- GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UL:
- GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, t_n);
break;
case MODE_UU:
- GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
@@ -2137,13 +2149,13 @@
static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_absdif(ret, r1, temp);
}
static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
}
@@ -2169,7 +2181,7 @@
static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_mul_i32s(ret, r1, temp);
}
@@ -2190,7 +2202,7 @@
static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_mul_i64s(ret_low, ret_high, r1, temp);
}
@@ -2211,31 +2223,32 @@
static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_mul_i64u(ret_low, ret_high, r1, temp);
}
static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_mul_ssov(ret, cpu_env, r1, temp);
}
static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_mul_suov(ret, cpu_env, r1, temp);
}
+
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
}
static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
}
@@ -2358,7 +2371,7 @@
gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -2376,19 +2389,19 @@
gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
}
static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
}
@@ -2406,7 +2419,7 @@
gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
@@ -2424,27 +2437,19 @@
gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
}
static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
{
- TCGv sat_neg = tcg_const_i32(low);
- TCGv temp = tcg_const_i32(up);
-
- /* sat_neg = (arg < low ) ? low : arg; */
- tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
-
- /* ret = (sat_neg > up ) ? up : sat_neg; */
- tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
+ tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low));
+ tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up));
}
static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
{
- TCGv temp = tcg_const_i32(up);
- /* sat_neg = (arg > up ) ? up : arg; */
- tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
+ tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up));
}
static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
@@ -2495,8 +2500,8 @@
/* clear PSW.V */
tcg_gen_movi_tl(cpu_PSW_V, 0);
} else if (shift_count > 0) {
- TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
- TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
+ TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count);
+ TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count);
/* calc carry */
msk_start = 32 - shift_count;
@@ -2534,7 +2539,7 @@
static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_shas(ret, r1, temp);
}
@@ -2576,7 +2581,7 @@
static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_sh_cond(cond, ret, r1, temp);
}
@@ -2587,13 +2592,13 @@
static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_add_ssov(ret, cpu_env, r1, temp);
}
static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_helper_add_suov(ret, cpu_env, r1, temp);
}
@@ -2663,7 +2668,7 @@
gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
void(*op)(TCGv, TCGv, TCGv))
{
- TCGv temp = tcg_const_i32(con);
+ TCGv temp = tcg_constant_i32(con);
gen_accumulating_cond(cond, ret, r1, temp, op);
}
@@ -2830,8 +2835,8 @@
static void generate_trap(DisasContext *ctx, int class, int tin)
{
- TCGv_i32 classtemp = tcg_const_i32(class);
- TCGv_i32 tintemp = tcg_const_i32(tin);
+ TCGv_i32 classtemp = tcg_constant_i32(class);
+ TCGv_i32 tintemp = tcg_constant_i32(tin);
gen_save_pc(ctx->base.pc_next);
gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp);
@@ -2853,7 +2858,7 @@
static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
int r2, int16_t address)
{
- TCGv temp = tcg_const_i32(r2);
+ TCGv temp = tcg_constant_i32(r2);
gen_branch_cond(ctx, cond, r1, temp, address);
}
@@ -3182,14 +3187,14 @@
cpu_gpr_d[15]);
break;
case OPC1_16_SRC_CMOV:
- temp = tcg_const_tl(0);
- temp2 = tcg_const_tl(const4);
+ temp = tcg_constant_tl(0);
+ temp2 = tcg_constant_tl(const4);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
case OPC1_16_SRC_CMOVN:
- temp = tcg_const_tl(0);
- temp2 = tcg_const_tl(const4);
+ temp = tcg_constant_tl(0);
+ temp2 = tcg_constant_tl(const4);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
temp2, cpu_gpr_d[r1]);
break;
@@ -3255,12 +3260,12 @@
tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC1_16_SRR_CMOV:
- temp = tcg_const_tl(0);
+ temp = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
case OPC1_16_SRR_CMOVN:
- temp = tcg_const_tl(0);
+ temp = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
cpu_gpr_d[r2], cpu_gpr_d[r1]);
break;
@@ -3502,17 +3507,14 @@
{
uint32_t op2;
uint32_t r1;
- TCGv temp;
r1 = MASK_OP_SR_S1D(ctx->opcode);
op2 = MASK_OP_SR_OP2(ctx->opcode);
switch (op2) {
case OPC2_16_SR_RSUB:
- /* overflow only if r1 = -0x80000000 */
- temp = tcg_const_i32(-0x80000000);
- /* calc V bit */
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
+ /* calc V bit -- overflow only if r1 = -0x80000000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000);
tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
/* calc SV bit */
tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -3788,7 +3790,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_LD_A:
@@ -3821,7 +3823,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_LD_B:
@@ -3852,7 +3854,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_LDMST:
@@ -3903,7 +3905,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_ST_A:
@@ -3936,7 +3938,7 @@
address = MASK_OP_ABS_OFF18(ctx->opcode);
op2 = MASK_OP_ABS_OP2(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
switch (op2) {
case OPC2_32_ABS_ST_B:
@@ -4368,7 +4370,7 @@
uint32_t op2;
uint32_t off10;
int32_t r1, r2;
- TCGv temp, temp2, temp3;
+ TCGv temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4377,7 +4379,7 @@
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- temp3 = tcg_const_i32(off10);
+ t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
@@ -4391,7 +4393,7 @@
case OPC2_32_BO_CACHEA_WI_CIRC:
case OPC2_32_BO_CACHEA_W_CIRC:
case OPC2_32_BO_CACHEA_I_CIRC:
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_A_BR:
tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
@@ -4399,7 +4401,7 @@
break;
case OPC2_32_BO_ST_A_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_B_BR:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
@@ -4407,7 +4409,7 @@
break;
case OPC2_32_BO_ST_B_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_D_BR:
CHECK_REG_PAIR(r1);
@@ -4422,7 +4424,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_DA_BR:
CHECK_REG_PAIR(r1);
@@ -4437,7 +4439,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_H_BR:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
@@ -4445,7 +4447,7 @@
break;
case OPC2_32_BO_ST_H_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_Q_BR:
tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
@@ -4455,7 +4457,7 @@
case OPC2_32_BO_ST_Q_CIRC:
tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_ST_W_BR:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
@@ -4463,7 +4465,7 @@
break;
case OPC2_32_BO_ST_W_CIRC:
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4607,8 +4609,7 @@
uint32_t op2;
uint32_t off10;
int r1, r2;
-
- TCGv temp, temp2, temp3;
+ TCGv temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4617,7 +4618,7 @@
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- temp3 = tcg_const_i32(off10);
+ t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
@@ -4630,7 +4631,7 @@
break;
case OPC2_32_BO_LD_A_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_B_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
@@ -4638,7 +4639,7 @@
break;
case OPC2_32_BO_LD_B_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_BU_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
@@ -4646,7 +4647,7 @@
break;
case OPC2_32_BO_LD_BU_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_D_BR:
CHECK_REG_PAIR(r1);
@@ -4661,7 +4662,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_DA_BR:
CHECK_REG_PAIR(r1);
@@ -4676,7 +4677,7 @@
tcg_gen_rem_tl(temp, temp, temp2);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_H_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
@@ -4684,7 +4685,7 @@
break;
case OPC2_32_BO_LD_H_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_HU_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
@@ -4692,7 +4693,7 @@
break;
case OPC2_32_BO_LD_HU_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_Q_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
@@ -4702,7 +4703,7 @@
case OPC2_32_BO_LD_Q_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_LD_W_BR:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
@@ -4710,7 +4711,7 @@
break;
case OPC2_32_BO_LD_W_CIRC:
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -4811,8 +4812,7 @@
uint32_t op2;
uint32_t off10;
int r1, r2;
-
- TCGv temp, temp2, temp3;
+ TCGv temp, temp2, t_off10;
r1 = MASK_OP_BO_S1D(ctx->opcode);
r2 = MASK_OP_BO_S2(ctx->opcode);
@@ -4821,7 +4821,7 @@
temp = tcg_temp_new();
temp2 = tcg_temp_new();
- temp3 = tcg_const_i32(off10);
+ t_off10 = tcg_constant_i32(off10);
CHECK_REG_PAIR(r2);
tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
@@ -4833,7 +4833,7 @@
break;
case OPC2_32_BO_LDMST_CIRC:
gen_ldmst(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_SWAP_W_BR:
gen_swap(ctx, r1, temp2);
@@ -4841,7 +4841,7 @@
break;
case OPC2_32_BO_SWAP_W_CIRC:
gen_swap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_CMPSWAP_W_BR:
gen_cmpswap(ctx, r1, temp2);
@@ -4849,7 +4849,7 @@
break;
case OPC2_32_BO_CMPSWAP_W_CIRC:
gen_cmpswap(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
case OPC2_32_BO_SWAPMSK_W_BR:
gen_swapmsk(ctx, r1, temp2);
@@ -4857,7 +4857,7 @@
break;
case OPC2_32_BO_SWAPMSK_W_CIRC:
gen_swapmsk(ctx, r1, temp2);
- gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5296,7 +5296,7 @@
case OPC2_32_RCPW_INSERT:
/* if pos + width > 32 undefined result */
if (pos + width <= 32) {
- temp = tcg_const_i32(const4);
+ temp = tcg_constant_i32(const4);
tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
}
break;
@@ -5372,14 +5372,14 @@
cpu_gpr_d[r3]);
break;
case OPC2_32_RCR_SEL:
- temp = tcg_const_i32(0);
- temp2 = tcg_const_i32(const9);
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const9);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
case OPC2_32_RCR_SELN:
- temp = tcg_const_i32(0);
- temp2 = tcg_const_i32(const9);
+ temp = tcg_constant_i32(0);
+ temp2 = tcg_constant_i32(const9);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], temp2);
break;
@@ -6256,7 +6256,7 @@
r1 = MASK_OP_RR1_S1(ctx->opcode);
r2 = MASK_OP_RR1_S2(ctx->opcode);
r3 = MASK_OP_RR1_D(ctx->opcode);
- n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode));
+ n = tcg_constant_i32(MASK_OP_RR1_N(ctx->opcode));
op2 = MASK_OP_RR1_OP2(ctx->opcode);
switch (op2) {
@@ -6550,12 +6550,12 @@
cpu_gpr_d[r3]);
break;
case OPC2_32_RRR_SEL:
- temp = tcg_const_i32(0);
+ temp = tcg_constant_i32(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR_SELN:
- temp = tcg_const_i32(0);
+ temp = tcg_constant_i32(0);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
@@ -6907,7 +6907,7 @@
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_const_i32(n);
+ temp = tcg_temp_new();
temp2 = tcg_temp_new();
switch (op2) {
@@ -7389,7 +7389,7 @@
r4 = MASK_OP_RRR1_D(ctx->opcode);
n = MASK_OP_RRR1_N(ctx->opcode);
- temp = tcg_const_i32(n);
+ temp = tcg_temp_new();
temp2 = tcg_temp_new();
switch (op2) {
@@ -7957,7 +7957,7 @@
case OPC1_32_ABS_STOREQ:
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
temp2 = tcg_temp_new();
tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
@@ -7966,7 +7966,7 @@
case OPC1_32_ABS_LD_Q:
address = MASK_OP_ABS_OFF18(ctx->opcode);
r1 = MASK_OP_ABS_S1D(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
@@ -7982,7 +7982,7 @@
b = MASK_OP_ABSB_B(ctx->opcode);
bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
- temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp = tcg_constant_i32(EA_ABS_FORMAT(address));
temp2 = tcg_temp_new();
tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
@@ -8109,7 +8109,7 @@
r2 = MASK_OP_RCRR_S3(ctx->opcode);
r3 = MASK_OP_RCRR_D(ctx->opcode);
const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
- temp = tcg_const_i32(const16);
+ temp = tcg_constant_i32(const16);
temp2 = tcg_temp_new(); /* width*/
temp3 = tcg_temp_new(); /* pos */
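The gen_saturate()/gen_saturate_u() hunk earlier in this file replaces two hand-rolled movcond sequences with the min/max primitives they were spelling out: a signed clamp into [low, up] and an unsigned clamp to at most up. A standalone scalar sketch of those clamps, in plain C and purely for illustration (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Signed clamp of arg into [low, up]: smax followed by smin. */
    static int32_t saturate(int32_t arg, int32_t up, int32_t low)
    {
        int32_t t = arg < low ? low : arg;   /* tcg_gen_smax_tl(ret, arg, low) */
        return t > up ? up : t;              /* tcg_gen_smin_tl(ret, ret, up)  */
    }

    /* Unsigned clamp of arg to at most up: a single umin. */
    static uint32_t saturate_u(uint32_t arg, uint32_t up)
    {
        return arg > up ? up : arg;          /* tcg_gen_umin_tl(ret, arg, up) */
    }

    int main(void)
    {
        printf("%d %d %u\n", saturate(100, 50, -50), saturate(-100, 50, -50),
               saturate_u(300u, 255u));
        return 0;
    }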
diff --git a/target/xtensa/core-dc232b.c b/target/xtensa/core-dc232b.c
index c982d09..9aba266 100644
--- a/target/xtensa/core-dc232b.c
+++ b/target/xtensa/core-dc232b.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"
diff --git a/target/xtensa/core-dc233c.c b/target/xtensa/core-dc233c.c
index 595ab9a..9b0a625 100644
--- a/target/xtensa/core-dc233c.c
+++ b/target/xtensa/core-dc233c.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-dc233c/core-isa.h"
diff --git a/target/xtensa/core-de212.c b/target/xtensa/core-de212.c
index 50c995b..b08fe22 100644
--- a/target/xtensa/core-de212.c
+++ b/target/xtensa/core-de212.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-de212/core-isa.h"
diff --git a/target/xtensa/core-de233_fpu.c b/target/xtensa/core-de233_fpu.c
index 41af805..8845cdb 100644
--- a/target/xtensa/core-de233_fpu.c
+++ b/target/xtensa/core-de233_fpu.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-de233_fpu/core-isa.h"
diff --git a/target/xtensa/core-dsp3400.c b/target/xtensa/core-dsp3400.c
index 81e425c..c0f94b9 100644
--- a/target/xtensa/core-dsp3400.c
+++ b/target/xtensa/core-dsp3400.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-dsp3400/core-isa.h"
diff --git a/target/xtensa/core-fsf.c b/target/xtensa/core-fsf.c
index 3327c50..310be8d 100644
--- a/target/xtensa/core-fsf.c
+++ b/target/xtensa/core-fsf.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-fsf/core-isa.h"
diff --git a/target/xtensa/core-lx106.c b/target/xtensa/core-lx106.c
index 7a771d0..7f71d08 100644
--- a/target/xtensa/core-lx106.c
+++ b/target/xtensa/core-lx106.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-lx106/core-isa.h"
diff --git a/target/xtensa/core-sample_controller.c b/target/xtensa/core-sample_controller.c
index fd5de55..8867001 100644
--- a/target/xtensa/core-sample_controller.c
+++ b/target/xtensa/core-sample_controller.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-sample_controller/core-isa.h"
diff --git a/target/xtensa/core-test_kc705_be.c b/target/xtensa/core-test_kc705_be.c
index 294c16f..bd082f4 100644
--- a/target/xtensa/core-test_kc705_be.c
+++ b/target/xtensa/core-test_kc705_be.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-test_kc705_be/core-isa.h"
diff --git a/target/xtensa/core-test_mmuhifi_c3.c b/target/xtensa/core-test_mmuhifi_c3.c
index c0e5d32..3090dd0 100644
--- a/target/xtensa/core-test_mmuhifi_c3.c
+++ b/target/xtensa/core-test_mmuhifi_c3.c
@@ -27,7 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-test_mmuhifi_c3/core-isa.h"
diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h
index b53e9a3..b1da055 100644
--- a/target/xtensa/cpu-param.h
+++ b/target/xtensa/cpu-param.h
@@ -16,6 +16,5 @@
#else
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-#define NB_MMU_MODES 4
#endif
diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c
index b669606..4b3bfb7 100644
--- a/target/xtensa/gdbstub.c
+++ b/target/xtensa/gdbstub.c
@@ -19,7 +19,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/log.h"
enum {
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index 2aa9777..dbeb97a 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -29,7 +29,7 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
diff --git a/target/xtensa/import_core.sh b/target/xtensa/import_core.sh
index b4c1555..17dfec8 100755
--- a/target/xtensa/import_core.sh
+++ b/target/xtensa/import_core.sh
@@ -41,7 +41,7 @@
cat <<EOF > "${TARGET}.c"
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#include "core-$NAME/core-isa.h"
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 4060a35..4444eb9 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -3651,6 +3651,7 @@
break;
case MO_64:
+ t1 = tcg_temp_new_vec(type);
if (imm <= 32) {
/*
* We can emulate a small sign extend by performing an arithmetic
@@ -3659,24 +3660,22 @@
* does not, so we have to bound the smaller shift -- we get the
* same result in the high half either way.
*/
- t1 = tcg_temp_new_vec(type);
tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31));
tcg_gen_shri_vec(MO_64, v0, v1, imm);
vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
tcgv_vec_arg(v0), tcgv_vec_arg(v0),
tcgv_vec_arg(t1), 0xaa);
- tcg_temp_free_vec(t1);
} else {
/* Otherwise we will need to use a compare vs 0 to produce
* the sign-extend, shift and merge.
*/
- t1 = tcg_const_zeros_vec(type);
- tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, t1, v1);
+ tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1,
+ tcg_constant_vec(type, MO_64, 0), v1);
tcg_gen_shri_vec(MO_64, v0, v1, imm);
tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm);
tcg_gen_or_vec(MO_64, v0, v0, t1);
- tcg_temp_free_vec(t1);
}
+ tcg_temp_free_vec(t1);
break;
default:
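In the MO_64 branch above, the large-immediate path (imm > 32) described by the comment builds the sign bits with a compare against a zero constant, shifts the value logically, shifts that mask into the vacated high bits and ORs the two halves together. A scalar sketch of the same idea, plain C, for illustration only (the reference v >> imm assumes the compiler's arithmetic right shift, as on GCC/Clang):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Emulate a 64-bit arithmetic right shift by imm (33..63) the way the
     * vector path does it: compare-vs-zero for the sign mask, logical shift
     * of the value, shift the mask into the high bits, then OR. */
    static int64_t sar64_via_cmp_shift_or(int64_t v, unsigned imm)
    {
        uint64_t sign = (0 > v) ? UINT64_MAX : 0;  /* cmp_vec GT against constant 0 */
        uint64_t lo   = (uint64_t)v >> imm;        /* shri_vec */
        uint64_t hi   = sign << (64 - imm);        /* shli_vec of the sign mask */
        return (int64_t)(lo | hi);                 /* or_vec */
    }

    int main(void)
    {
        int64_t v = -0x123456789abcdef0LL;
        for (unsigned imm = 33; imm < 64; imm++) {
            /* v >> imm is an arithmetic shift on GCC/Clang */
            assert(sar64_via_cmp_shift_or(v, imm) == v >> imm);
        }
        printf("ok\n");
        return 0;
    }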
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 291a65c..047a832 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index 966d41d..aeeb243 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "tcg-internal.h"
@@ -228,32 +229,6 @@
}
}
-TCGv_vec tcg_const_zeros_vec(TCGType type)
-{
- TCGv_vec ret = tcg_temp_new_vec(type);
- tcg_gen_dupi_vec(MO_64, ret, 0);
- return ret;
-}
-
-TCGv_vec tcg_const_ones_vec(TCGType type)
-{
- TCGv_vec ret = tcg_temp_new_vec(type);
- tcg_gen_dupi_vec(MO_64, ret, -1);
- return ret;
-}
-
-TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m)
-{
- TCGTemp *t = tcgv_vec_temp(m);
- return tcg_const_zeros_vec(t->base_type);
-}
-
-TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
-{
- TCGTemp *t = tcgv_vec_temp(m);
- return tcg_const_ones_vec(t->base_type);
-}
-
void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
{
TCGTemp *rt = tcgv_vec_temp(r);
@@ -430,9 +405,7 @@
const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
- TCGv_vec t = tcg_const_ones_vec_matching(r);
- tcg_gen_xor_vec(0, r, a, t);
- tcg_temp_free_vec(t);
+ tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1));
}
tcg_swap_vecop_list(hold_list);
}
@@ -445,9 +418,7 @@
hold_list = tcg_swap_vecop_list(NULL);
if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) {
- TCGv_vec t = tcg_const_zeros_vec_matching(r);
- tcg_gen_sub_vec(vece, r, t, a);
- tcg_temp_free_vec(t);
+ tcg_gen_sub_vec(vece, r, tcg_constant_vec_matching(r, vece, 0), a);
}
tcg_swap_vecop_list(hold_list);
}
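The two fallback expansions touched above are what TCG emits when the backend lacks not_vec or neg_vec: NOT becomes an XOR with an all-ones constant and NEG a subtraction from an all-zeroes constant, with the constant now coming from tcg_constant_vec_matching() rather than a freshly allocated and freed temporary. A per-element scalar sketch, plain C, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* NOT via XOR with all-ones, NEG via subtraction from zero -- the
     * per-element identities behind the vector fallbacks above. */
    static uint64_t not_via_xor(uint64_t a) { return a ^ UINT64_MAX; }
    static uint64_t neg_via_sub(uint64_t a) { return 0 - a; }

    int main(void)
    {
        uint64_t a = 0x0123456789abcdefULL;
        printf("%d %d\n", not_via_xor(a) == ~a, neg_via_sub(a) == (uint64_t)-a);
        return 0;
    }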
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 77658a8..3136cef 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -25,6 +25,7 @@
#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "exec/plugin-gen.h"
@@ -1562,9 +1563,7 @@
} else if (is_power_of_2(arg2)) {
tcg_gen_shli_i64(ret, arg1, ctz64(arg2));
} else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_mul_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ tcg_gen_mul_i64(ret, arg1, tcg_constant_i64(arg2));
}
}
@@ -1961,9 +1960,7 @@
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
tcg_temp_free_i32(t);
} else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_clz_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ tcg_gen_clz_i64(ret, arg1, tcg_constant_i64(arg2));
}
}
@@ -2015,9 +2012,7 @@
tcg_gen_ctpop_i64(ret, t);
tcg_temp_free_i64(t);
} else {
- TCGv_i64 t0 = tcg_const_i64(arg2);
- tcg_gen_ctz_i64(ret, arg1, t0);
- tcg_temp_free_i64(t0);
+ tcg_gen_ctz_i64(ret, arg1, tcg_constant_i64(arg2));
}
}
@@ -2813,7 +2808,6 @@
tcg_debug_assert(idx == TB_EXIT_REQUESTED);
}
- plugin_gen_disable_mem_helpers();
tcg_gen_op1i(INDEX_op_exit_tb, val);
}
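The tcg_gen_muli_i64() hunk above keeps the existing strength reduction (multiply by zero, or by a power of two via a left shift) and only changes the general case to take a tcg_constant_i64() operand. A scalar sketch of that dispatch, plain C using a GCC/Clang builtin for count-trailing-zeros, for illustration only:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Multiply by a known constant: 0 and powers of two get strength-reduced,
     * everything else falls through to a real multiply (which in the hunk
     * above now uses a constant operand instead of a freed temporary). */
    static uint64_t mul_const(uint64_t x, uint64_t c)
    {
        if (c == 0) {
            return 0;
        } else if ((c & (c - 1)) == 0) {         /* power of two */
            return x << __builtin_ctzll(c);      /* shift instead of multiply */
        } else {
            return x * c;
        }
    }

    int main(void)
    {
        assert(mul_const(7, 0) == 0);
        assert(mul_const(7, 8) == 56);
        assert(mul_const(7, 10) == 70);
        printf("ok\n");
        return 0;
    }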
diff --git a/tcg/tcg.c b/tcg/tcg.c
index e4fccbd..bb52bc0 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -60,6 +60,7 @@
#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
+#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"
@@ -1444,22 +1445,6 @@
return tcg_constant_vec(t->base_type, vece, val);
}
-TCGv_i32 tcg_const_i32(int32_t val)
-{
- TCGv_i32 t0;
- t0 = tcg_temp_new_i32();
- tcg_gen_movi_i32(t0, val);
- return t0;
-}
-
-TCGv_i64 tcg_const_i64(int64_t val)
-{
- TCGv_i64 t0;
- t0 = tcg_temp_new_i64();
- tcg_gen_movi_i64(t0, val);
- return t0;
-}
-
/* Return true if OP may appear in the opcode stream.
Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
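The tcg_const_i32()/tcg_const_i64() helpers removed above allocated a fresh mutable temporary and emitted a movi into it, leaving the caller to free it; the conversions throughout this series replace them with tcg_constant_i32()/tcg_constant_i64(), which return a read-only value that needs no explicit free. A minimal before/after sketch of the calling pattern, assuming the QEMU TCG headers (it only builds inside the QEMU tree); the two function names are made up for illustration:

    #include "tcg/tcg.h"
    #include "tcg/tcg-op.h"

    /* Old shape: mutable temp, explicit movi, explicit free. */
    static void add_const_old_style(TCGv_i32 ret, TCGv_i32 arg)
    {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_movi_i32(t, 42);
        tcg_gen_add_i32(ret, arg, t);
        tcg_temp_free_i32(t);
    }

    /* New shape: a cached read-only constant, nothing to free. */
    static void add_const_new_style(TCGv_i32 ret, TCGv_i32 arg)
    {
        tcg_gen_add_i32(ret, arg, tcg_constant_i32(42));
    }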
diff --git a/tests/avocado/avocado_qemu/__init__.py b/tests/avocado/avocado_qemu/__init__.py
index a313e88..cb71f50 100644
--- a/tests/avocado/avocado_qemu/__init__.py
+++ b/tests/avocado/avocado_qemu/__init__.py
@@ -309,6 +309,16 @@
if netdevhelp.find('\n' + netdevname + '\n') < 0:
self.cancel('no support for user networking')
+ def require_multiprocess(self):
+ """
+ Test for the presence of the x-pci-proxy-dev which is required
+ to support multiprocess.
+ """
+ devhelp = run_cmd([self.qemu_bin,
+ '-M', 'none', '-device', 'help'])[0];
+ if devhelp.find('x-pci-proxy-dev') < 0:
+ self.cancel('no support for multiprocess device emulation')
+
def _new_vm(self, name, *args):
self._sd = tempfile.TemporaryDirectory(prefix="qemu_")
vm = QEMUMachine(self.qemu_bin, base_temp_dir=self.workdir,
diff --git a/tests/avocado/machine_aarch64_virt.py b/tests/avocado/machine_aarch64_virt.py
index 25dab8d..a90dc6f 100644
--- a/tests/avocado/machine_aarch64_virt.py
+++ b/tests/avocado/machine_aarch64_virt.py
@@ -38,11 +38,11 @@
:avocado: tags=accel:tcg
"""
iso_url = ('https://dl-cdn.alpinelinux.org/'
- 'alpine/v3.16/releases/aarch64/'
- 'alpine-virt-3.16.3-aarch64.iso')
+ 'alpine/v3.17/releases/aarch64/'
+ 'alpine-standard-3.17.2-aarch64.iso')
# Alpine use sha256 so I recalculated this myself
- iso_sha1 = '0683bc089486d55c91bf6607d5ecb93925769bc0'
+ iso_sha1 = '76284fcd7b41fe899b0c2375ceb8470803eea839'
iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1)
self.vm.set_console()
@@ -65,7 +65,7 @@
self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom')
self.vm.launch()
- self.wait_for_console_pattern('Welcome to Alpine Linux 3.16')
+ self.wait_for_console_pattern('Welcome to Alpine Linux 3.17')
def common_aarch64_virt(self, machine):
diff --git a/tests/avocado/multiprocess.py b/tests/avocado/multiprocess.py
index 80a3b8f..9112a4c 100644
--- a/tests/avocado/multiprocess.py
+++ b/tests/avocado/multiprocess.py
@@ -22,6 +22,7 @@
machine_type):
"""Main test method"""
self.require_accelerator('kvm')
+ self.require_multiprocess()
# Create socketpair to connect proxy and remote processes
proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX,
diff --git a/tests/avocado/netdev-ethtool.py b/tests/avocado/netdev-ethtool.py
new file mode 100644
index 0000000..f7e9464
--- /dev/null
+++ b/tests/avocado/netdev-ethtool.py
@@ -0,0 +1,116 @@
+# ethtool tests for emulated network devices
+#
+# This test leverages ethtool's --test sequence to validate network
+# device behaviour.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from avocado import skip
+from avocado_qemu import QemuSystemTest
+from avocado_qemu import exec_command, exec_command_and_wait_for_pattern
+from avocado_qemu import wait_for_console_pattern
+
+class NetDevEthtool(QemuSystemTest):
+ """
+ :avocado: tags=arch:x86_64
+ :avocado: tags=machine:q35
+ """
+
+ # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV
+ timeout = 45
+
+ # Fetch assets from the netdev-ethtool subdir of my shared test
+ # images directory on fileserver.linaro.org.
+ def get_asset(self, name, sha1):
+ base_url = ('https://fileserver.linaro.org/s/'
+ 'kE4nCFLdQcoBF9t/download?'
+ 'path=%2Fnetdev-ethtool&files=' )
+ url = base_url + name
+ # use explicit name rather than failing to neatly parse the
+ # URL into a unique one
+ return self.fetch_asset(name=name, locations=(url), asset_hash=sha1)
+
+ def common_test_code(self, netdev, extra_args=None, kvm=False):
+
+ # This custom kernel has drivers for all the supported network
+ # devices we can emulate in QEMU
+ kernel = self.get_asset("bzImage",
+ "33469d7802732d5815226166581442395cb289e2")
+
+ rootfs = self.get_asset("rootfs.squashfs",
+ "9793cea7021414ae844bda51f558bd6565b50cdc")
+
+ append = 'printk.time=0 console=ttyS0 '
+ append += 'root=/dev/sr0 rootfstype=squashfs '
+
+ # any additional kernel tweaks for the test
+ if extra_args:
+ append += extra_args
+
+ # finally invoke ethtool directly
+ append += ' init=/usr/sbin/ethtool -- -t eth1 offline'
+
+ # add the rootfs via a readonly cdrom image
+ drive = f"file={rootfs},if=ide,index=0,media=cdrom"
+
+ self.vm.add_args('-kernel', kernel,
+ '-append', append,
+ '-drive', drive,
+ '-device', netdev)
+
+ if kvm:
+ self.vm.add_args('-accel', 'kvm')
+
+ self.vm.set_console(console_index=0)
+ self.vm.launch()
+
+ wait_for_console_pattern(self,
+ "The test result is PASS",
+ "The test result is FAIL",
+ vm=None)
+ # no need to gracefully shutdown, just finish
+ self.vm.kill()
+
+ # Skip testing for MSI for now. Allegedly it was fixed by:
+ # 28e96556ba (igb: Allocate MSI-X vector when testing)
+ # but I'm seeing oops in the kernel
+ @skip("Kernel bug with MSI enabled")
+ def test_igb(self):
+ """
+ :avocado: tags=device:igb
+ """
+ self.common_test_code("igb")
+
+ def test_igb_nomsi(self):
+ """
+ :avocado: tags=device:igb
+ """
+ self.common_test_code("igb", "pci=nomsi")
+
+ def test_igb_nomsi_kvm(self):
+ """
+ :avocado: tags=device:igb
+ """
+ self.require_accelerator('kvm')
+ self.common_test_code("igb", "pci=nomsi", True)
+
+ # It seems the other popular cards we model in QEMU currently fail
+ # the pattern test with:
+ #
+ # pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A
+ #
+ # So for now we skip them.
+
+ @skip("Incomplete reg 0x00178 support")
+ def test_e1000(self):
+ """
+ :avocado: tags=device:e1000
+ """
+ self.common_test_code("e1000")
+
+ @skip("Incomplete reg 0x00178 support")
+ def test_i82550(self):
+ """
+ :avocado: tags=device:i82550
+ """
+ self.common_test_code("i82550")
diff --git a/tests/avocado/tuxrun_baselines.py b/tests/avocado/tuxrun_baselines.py
index 30aaefc..c3fb67f 100644
--- a/tests/avocado/tuxrun_baselines.py
+++ b/tests/avocado/tuxrun_baselines.py
@@ -67,9 +67,6 @@
# The name of the kernel Image file
self.image = self.get_tag('image', "Image")
- # The block device drive type
- self.drive = self.get_tag('drive', "virtio-blk-device")
-
self.root = self.get_tag('root', "vda")
# Occasionally we need extra devices to hook things up
@@ -99,7 +96,7 @@
return (kernel_image, self.workdir + "/rootfs.ext4", dtb)
- def prepare_run(self, kernel, disk, dtb=None, console_index=0):
+ def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
"""
Setup to run and add the common parameters to the system
"""
@@ -121,10 +118,8 @@
if self.extradev:
self.vm.add_args('-device', self.extradev)
- # Some machines already define a drive device
- if self.drive != "none":
- self.vm.add_args('-device',
- f"{self.drive},drive=hd0")
+ self.vm.add_args('-device',
+ f"{drive},drive=hd0")
# Some machines need an explicit DTB
if dtb:
@@ -154,7 +149,9 @@
else:
self.vm.wait()
- def common_tuxrun(self, dt=None, haltmsg="reboot: System halted",
+ def common_tuxrun(self, dt=None,
+ drive="virtio-blk-device",
+ haltmsg="reboot: System halted",
console_index=0):
"""
Common path for LKFT tests. Unless we need to do something
@@ -163,7 +160,7 @@
"""
(kernel, disk, dtb) = self.fetch_tuxrun_assets(dt)
- self.prepare_run(kernel, disk, dtb, console_index)
+ self.prepare_run(kernel, disk, drive, dtb, console_index)
self.vm.launch()
self.run_tuxtest_tests(haltmsg)
@@ -206,11 +203,11 @@
:avocado: tags=machine:versatilepb
:avocado: tags=tuxboot:armv5
:avocado: tags=image:zImage
- :avocado: tags=drive:virtio-blk-pci
:avocado: tags=console:ttyAMA0
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun(dt="versatile-pb.dtb")
+ self.common_tuxrun(drive="virtio-blk-pci",
+ dt="versatile-pb.dtb")
def test_armv7(self):
"""
@@ -244,10 +241,9 @@
:avocado: tags=machine:q35
:avocado: tags=tuxboot:i386
:avocado: tags=image:bzImage
- :avocado: tags=drive:virtio-blk-pci
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="virtio-blk-pci")
def test_mips32(self):
"""
@@ -257,11 +253,10 @@
:avocado: tags=endian:big
:avocado: tags=tuxboot:mips32
:avocado: tags=image:vmlinux
- :avocado: tags=drive:driver=ide-hd,bus=ide.0,unit=0
:avocado: tags=root:sda
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")
def test_mips32el(self):
"""
@@ -270,11 +265,10 @@
:avocado: tags=cpu:mips32r6-generic
:avocado: tags=tuxboot:mips32el
:avocado: tags=image:vmlinux
- :avocado: tags=drive:driver=ide-hd,bus=ide.0,unit=0
:avocado: tags=root:sda
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")
@skip("QEMU currently broken") # regression against stable QEMU
def test_mips64(self):
@@ -284,11 +278,10 @@
:avocado: tags=tuxboot:mips64
:avocado: tags=endian:big
:avocado: tags=image:vmlinux
- :avocado: tags=drive:driver=ide-hd,bus=ide.0,unit=0
:avocado: tags=root:sda
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")
def test_mips64el(self):
"""
@@ -296,11 +289,10 @@
:avocado: tags=machine:malta
:avocado: tags=tuxboot:mips64el
:avocado: tags=image:vmlinux
- :avocado: tags=drive:driver=ide-hd,bus=ide.0,unit=0
:avocado: tags=root:sda
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")
def test_ppc32(self):
"""
@@ -309,10 +301,9 @@
:avocado: tags=cpu:e500mc
:avocado: tags=tuxboot:ppc32
:avocado: tags=image:uImage
- :avocado: tags=drive:virtio-blk-pci
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="virtio-blk-pci")
def test_ppc64(self):
"""
@@ -324,10 +315,9 @@
:avocado: tags=tuxboot:ppc64
:avocado: tags=image:vmlinux
:avocado: tags=extradev:driver=spapr-vscsi
- :avocado: tags=drive:scsi-hd
:avocado: tags=root:sda
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="scsi-hd")
def test_ppc64le(self):
"""
@@ -338,10 +328,9 @@
:avocado: tags=tuxboot:ppc64le
:avocado: tags=image:vmlinux
:avocado: tags=extradev:driver=spapr-vscsi
- :avocado: tags=drive:scsi-hd
:avocado: tags=root:sda
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="scsi-hd")
def test_riscv32(self):
"""
@@ -365,10 +354,10 @@
:avocado: tags=endian:big
:avocado: tags=tuxboot:s390
:avocado: tags=image:bzImage
- :avocado: tags=drive:virtio-blk-ccw
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun(haltmsg="Requesting system halt")
+ self.common_tuxrun(drive="virtio-blk-ccw",
+ haltmsg="Requesting system halt")
# Note: some segfaults caused by unaligned userspace access
@skipIf(os.getenv('GITLAB_CI'), 'Skipping unstable test on GitLab')
@@ -380,7 +369,6 @@
:avocado: tags=tuxboot:sh4
:avocado: tags=image:zImage
:avocado: tags=root:sda
- :avocado: tags=drive:driver=ide-hd,bus=ide.0,unit=0
:avocado: tags=console:ttySC1
"""
# The test is currently too unstable to do much in userspace
@@ -388,7 +376,9 @@
(kernel, disk, dtb) = self.fetch_tuxrun_assets()
# the console comes on the second serial port
- self.prepare_run(kernel, disk, console_index=1)
+ self.prepare_run(kernel, disk,
+ "driver=ide-hd,bus=ide.0,unit=0",
+ console_index=1)
self.vm.launch()
self.wait_for_console_pattern("Welcome to TuxTest")
@@ -404,10 +394,9 @@
:avocado: tags=tuxboot:sparc64
:avocado: tags=image:vmlinux
:avocado: tags=root:sda
- :avocado: tags=drive:driver=ide-hd,bus=ide.0,unit=0
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")
def test_x86_64(self):
"""
@@ -417,7 +406,6 @@
:avocado: tags=tuxboot:x86_64
:avocado: tags=image:bzImage
:avocado: tags=root:sda
- :avocado: tags=drive:driver=ide-hd,bus=ide.0,unit=0
:avocado: tags=shutdown:nowait
"""
- self.common_tuxrun()
+ self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")
diff --git a/tests/check-block.sh b/tests/check-block.sh
deleted file mode 100755
index 5de2c1b..0000000
--- a/tests/check-block.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-if [ "$#" -eq 0 ]; then
- echo "Usage: $0 fmt..." >&2
- exit 99
-fi
-
-# Honor the SPEED environment variable, just like we do it for "meson test"
-format_list="$@"
-if [ "$SPEED" = "slow" ] || [ "$SPEED" = "thorough" ]; then
- group=
-else
- group="-g auto"
-fi
-
-skip() {
- echo "1..0 #SKIP $*"
- exit 0
-}
-
-if [ -z "$(find . -name 'qemu-system-*' -print)" ]; then
- skip "No qemu-system binary available ==> Not running the qemu-iotests."
-fi
-
-cd tests/qemu-iotests
-
-# QEMU_CHECK_BLOCK_AUTO is used to disable some unstable sub-tests
-export QEMU_CHECK_BLOCK_AUTO=1
-export PYTHONUTF8=1
-# If make was called with -jN we want to call ./check with -j N. Extract the
-# flag from MAKEFLAGS, so that if it absent (or MAKEFLAGS is not defined), JOBS
-# would be an empty line otherwise JOBS is prepared string of flag with value:
-# "-j N"
-# Note, that the following works even if make was called with "-j N" or even
-# "--jobs N", as all these variants becomes simply "-jN" in MAKEFLAGS variable.
-JOBS=$(echo "$MAKEFLAGS" | sed -n 's/\(^\|.* \)-j\([0-9]\+\)\( .*\|$\)/-j \2/p')
-
-ret=0
-for fmt in $format_list ; do
- ${PYTHON} ./check $JOBS -tap -$fmt $group || ret=1
-done
-
-exit $ret
diff --git a/tests/data/acpi/pc/DSDT b/tests/data/acpi/pc/DSDT
index 0b475fb..32d255c 100644
--- a/tests/data/acpi/pc/DSDT
+++ b/tests/data/acpi/pc/DSDT
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.acpierst b/tests/data/acpi/pc/DSDT.acpierst
index 17ef7ca..33e872b 100644
--- a/tests/data/acpi/pc/DSDT.acpierst
+++ b/tests/data/acpi/pc/DSDT.acpierst
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.acpihmat b/tests/data/acpi/pc/DSDT.acpihmat
index 675b674..cd84abc 100644
--- a/tests/data/acpi/pc/DSDT.acpihmat
+++ b/tests/data/acpi/pc/DSDT.acpihmat
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.bridge b/tests/data/acpi/pc/DSDT.bridge
index c1ce061..69a73ea 100644
--- a/tests/data/acpi/pc/DSDT.bridge
+++ b/tests/data/acpi/pc/DSDT.bridge
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.cphp b/tests/data/acpi/pc/DSDT.cphp
index 754ab85..2037905 100644
--- a/tests/data/acpi/pc/DSDT.cphp
+++ b/tests/data/acpi/pc/DSDT.cphp
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.dimmpxm b/tests/data/acpi/pc/DSDT.dimmpxm
index 1705033..435496e 100644
--- a/tests/data/acpi/pc/DSDT.dimmpxm
+++ b/tests/data/acpi/pc/DSDT.dimmpxm
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.hpbridge b/tests/data/acpi/pc/DSDT.hpbridge
index 834c270..b6eafab 100644
--- a/tests/data/acpi/pc/DSDT.hpbridge
+++ b/tests/data/acpi/pc/DSDT.hpbridge
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.hpbrroot b/tests/data/acpi/pc/DSDT.hpbrroot
index a71ed4f..a4073f3 100644
--- a/tests/data/acpi/pc/DSDT.hpbrroot
+++ b/tests/data/acpi/pc/DSDT.hpbrroot
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.ipmikcs b/tests/data/acpi/pc/DSDT.ipmikcs
index dd71356..06aa7bfd 100644
--- a/tests/data/acpi/pc/DSDT.ipmikcs
+++ b/tests/data/acpi/pc/DSDT.ipmikcs
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.memhp b/tests/data/acpi/pc/DSDT.memhp
index 2f895e9..10a0e44 100644
--- a/tests/data/acpi/pc/DSDT.memhp
+++ b/tests/data/acpi/pc/DSDT.memhp
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.nohpet b/tests/data/acpi/pc/DSDT.nohpet
index c012b63..6905312 100644
--- a/tests/data/acpi/pc/DSDT.nohpet
+++ b/tests/data/acpi/pc/DSDT.nohpet
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.numamem b/tests/data/acpi/pc/DSDT.numamem
index f2ef4b9..59e3133 100644
--- a/tests/data/acpi/pc/DSDT.numamem
+++ b/tests/data/acpi/pc/DSDT.numamem
Binary files differ
diff --git a/tests/data/acpi/pc/DSDT.roothp b/tests/data/acpi/pc/DSDT.roothp
index 657c826..448d596 100644
--- a/tests/data/acpi/pc/DSDT.roothp
+++ b/tests/data/acpi/pc/DSDT.roothp
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT b/tests/data/acpi/q35/DSDT
index d68c472..720e8cb 100644
--- a/tests/data/acpi/q35/DSDT
+++ b/tests/data/acpi/q35/DSDT
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.acpierst b/tests/data/acpi/q35/DSDT.acpierst
index de7ae27..f26b1f2 100644
--- a/tests/data/acpi/q35/DSDT.acpierst
+++ b/tests/data/acpi/q35/DSDT.acpierst
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.acpihmat b/tests/data/acpi/q35/DSDT.acpihmat
index 48e2862..86771f1 100644
--- a/tests/data/acpi/q35/DSDT.acpihmat
+++ b/tests/data/acpi/q35/DSDT.acpihmat
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.acpihmat-noinitiator b/tests/data/acpi/q35/DSDT.acpihmat-noinitiator
index 30a4aa2..a894a2d 100644
--- a/tests/data/acpi/q35/DSDT.acpihmat-noinitiator
+++ b/tests/data/acpi/q35/DSDT.acpihmat-noinitiator
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.applesmc b/tests/data/acpi/q35/DSDT.applesmc
index 84e2b5c..276ae1d 100644
--- a/tests/data/acpi/q35/DSDT.applesmc
+++ b/tests/data/acpi/q35/DSDT.applesmc
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.bridge b/tests/data/acpi/q35/DSDT.bridge
index e411d40..9f8a208 100644
--- a/tests/data/acpi/q35/DSDT.bridge
+++ b/tests/data/acpi/q35/DSDT.bridge
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.core-count2 b/tests/data/acpi/q35/DSDT.core-count2
index 0603db8..2ec11fe 100644
--- a/tests/data/acpi/q35/DSDT.core-count2
+++ b/tests/data/acpi/q35/DSDT.core-count2
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.cphp b/tests/data/acpi/q35/DSDT.cphp
index beeb83c..612c85b 100644
--- a/tests/data/acpi/q35/DSDT.cphp
+++ b/tests/data/acpi/q35/DSDT.cphp
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.cxl b/tests/data/acpi/q35/DSDT.cxl
index 4586b9a..f049f41 100644
--- a/tests/data/acpi/q35/DSDT.cxl
+++ b/tests/data/acpi/q35/DSDT.cxl
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.dimmpxm b/tests/data/acpi/q35/DSDT.dimmpxm
index 99a93e1..23dabea 100644
--- a/tests/data/acpi/q35/DSDT.dimmpxm
+++ b/tests/data/acpi/q35/DSDT.dimmpxm
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.ipmibt b/tests/data/acpi/q35/DSDT.ipmibt
index 7f7601d..541bb70 100644
--- a/tests/data/acpi/q35/DSDT.ipmibt
+++ b/tests/data/acpi/q35/DSDT.ipmibt
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.ipmismbus b/tests/data/acpi/q35/DSDT.ipmismbus
index 6c5d1af..e2d57a3 100644
--- a/tests/data/acpi/q35/DSDT.ipmismbus
+++ b/tests/data/acpi/q35/DSDT.ipmismbus
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.ivrs b/tests/data/acpi/q35/DSDT.ivrs
index de7ae27..f26b1f2 100644
--- a/tests/data/acpi/q35/DSDT.ivrs
+++ b/tests/data/acpi/q35/DSDT.ivrs
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.memhp b/tests/data/acpi/q35/DSDT.memhp
index 79bce5c..809d7e2 100644
--- a/tests/data/acpi/q35/DSDT.memhp
+++ b/tests/data/acpi/q35/DSDT.memhp
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.mmio64 b/tests/data/acpi/q35/DSDT.mmio64
index c249929..ab3fe3c 100644
--- a/tests/data/acpi/q35/DSDT.mmio64
+++ b/tests/data/acpi/q35/DSDT.mmio64
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.multi-bridge b/tests/data/acpi/q35/DSDT.multi-bridge
index 66b39be..9ae8ee0 100644
--- a/tests/data/acpi/q35/DSDT.multi-bridge
+++ b/tests/data/acpi/q35/DSDT.multi-bridge
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.noacpihp b/tests/data/acpi/q35/DSDT.noacpihp
new file mode 100644
index 0000000..6ab1f0e
--- /dev/null
+++ b/tests/data/acpi/q35/DSDT.noacpihp
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.nohpet b/tests/data/acpi/q35/DSDT.nohpet
index 9ff9983..becb5f7 100644
--- a/tests/data/acpi/q35/DSDT.nohpet
+++ b/tests/data/acpi/q35/DSDT.nohpet
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.numamem b/tests/data/acpi/q35/DSDT.numamem
index 1e7c45e..0cdec0b 100644
--- a/tests/data/acpi/q35/DSDT.numamem
+++ b/tests/data/acpi/q35/DSDT.numamem
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.pvpanic-isa b/tests/data/acpi/q35/DSDT.pvpanic-isa
index ed47451..6a9904e 100644
--- a/tests/data/acpi/q35/DSDT.pvpanic-isa
+++ b/tests/data/acpi/q35/DSDT.pvpanic-isa
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.tis.tpm12 b/tests/data/acpi/q35/DSDT.tis.tpm12
index efc2efc..628bf62 100644
--- a/tests/data/acpi/q35/DSDT.tis.tpm12
+++ b/tests/data/acpi/q35/DSDT.tis.tpm12
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.tis.tpm2 b/tests/data/acpi/q35/DSDT.tis.tpm2
index 6753397..35c6b08 100644
--- a/tests/data/acpi/q35/DSDT.tis.tpm2
+++ b/tests/data/acpi/q35/DSDT.tis.tpm2
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.viot b/tests/data/acpi/q35/DSDT.viot
index eeb40b3..3ad4d26 100644
--- a/tests/data/acpi/q35/DSDT.viot
+++ b/tests/data/acpi/q35/DSDT.viot
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.xapic b/tests/data/acpi/q35/DSDT.xapic
index 3aa86f0..d4a34e2 100644
--- a/tests/data/acpi/q35/DSDT.xapic
+++ b/tests/data/acpi/q35/DSDT.xapic
Binary files differ
diff --git a/tests/data/acpi/virt/SRAT.acpihmatvirt b/tests/data/acpi/virt/SRAT.acpihmatvirt
index 691ef56..6fe55dd 100644
--- a/tests/data/acpi/virt/SRAT.acpihmatvirt
+++ b/tests/data/acpi/virt/SRAT.acpihmatvirt
Binary files differ
diff --git a/tests/data/acpi/virt/SSDT.memhp b/tests/data/acpi/virt/SSDT.memhp
index 2fcfc5f..ef93c44 100644
--- a/tests/data/acpi/virt/SSDT.memhp
+++ b/tests/data/acpi/virt/SSDT.memhp
Binary files differ
diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include
index 54ed77f..9401525 100644
--- a/tests/docker/Makefile.include
+++ b/tests/docker/Makefile.include
@@ -39,7 +39,7 @@
# General rule for building docker images.
docker-image-%: $(DOCKER_FILES_DIR)/%.docker
$(call quiet-command, \
- $(RUNC) build \
+ DOCKER_BUILDKIT=1 $(RUNC) build \
$(if $V,,--quiet) \
$(if $(NOCACHE),--no-cache, \
$(if $(DOCKER_REGISTRY),--cache-from $(DOCKER_REGISTRY)/qemu/$*)) \
diff --git a/tests/docker/docker.py b/tests/docker/docker.py
index 3a1ed7c..688ef62 100755
--- a/tests/docker/docker.py
+++ b/tests/docker/docker.py
@@ -23,10 +23,10 @@
import tempfile
import re
import signal
+import getpass
from tarfile import TarFile, TarInfo
from io import StringIO, BytesIO
from shutil import copy, rmtree
-from pwd import getpwuid
from datetime import datetime, timedelta
@@ -316,7 +316,7 @@
if user:
uid = os.getuid()
- uname = getpwuid(uid).pw_name
+ uname = getpass.getuser()
tmp_df.write("\n")
tmp_df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" %
(uname, uid, uname))
@@ -570,7 +570,7 @@
if args.user:
uid = os.getuid()
- uname = getpwuid(uid).pw_name
+ uname = getpass.getuser()
df.write("\n")
df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" %
(uname, uid, uname))
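
Note: both hunks above replace the same pattern; getpass.getuser() only changes how
the user name is looked up (it avoids importing the POSIX-only pwd module), while the
uid still comes from os.getuid(). A minimal sketch of the new lookup, using the same
variable names as the script:

    import getpass
    import os

    uname = getpass.getuser()   # user name without the pwd module
    uid = os.getuid()           # unchanged, feeds the in-image useradd call
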
diff --git a/tests/qemu-iotests/308 b/tests/qemu-iotests/308
index 09275e9..de12b2b 100755
--- a/tests/qemu-iotests/308
+++ b/tests/qemu-iotests/308
@@ -370,6 +370,49 @@
echo '=== Compare copy with original ==='
$QEMU_IMG compare -f raw -F $IMGFMT "$COPIED_IMG" "$TEST_IMG"
+_cleanup_test_img
+
+echo
+echo '=== Writing zeroes while unmapping ==='
+# Regression test for https://gitlab.com/qemu-project/qemu/-/issues/1507
+_make_test_img 64M
+$QEMU_IO -c 'write -s /dev/urandom 0 64M' "$TEST_IMG" | _filter_qemu_io
+
+_launch_qemu
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'qmp_capabilities'}" \
+ 'return'
+
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'blockdev-add',
+ 'arguments': {
+ 'driver': '$IMGFMT',
+ 'node-name': 'node-format',
+ 'file': {
+ 'driver': 'file',
+ 'filename': '$TEST_IMG'
+ }
+ } }" \
+ 'return'
+
+fuse_export_add 'export' "'mountpoint': '$EXT_MP', 'writable': true"
+
+# Try writing zeroes by unmapping
+$QEMU_IO -f raw -c 'write -zu 0 64M' "$EXT_MP" | _filter_qemu_io
+
+# Check the result
+$QEMU_IO -f raw -c 'read -P 0 0 64M' "$EXT_MP" | _filter_qemu_io
+
+_send_qemu_cmd $QEMU_HANDLE \
+ "{'execute': 'quit'}" \
+ 'return'
+
+wait=yes _cleanup_qemu
+
+# Check the original image
+$QEMU_IO -c 'read -P 0 0 64M' "$TEST_IMG" | _filter_qemu_io
+
+_cleanup_test_img
# success, all done
echo "*** done"
diff --git a/tests/qemu-iotests/308.out b/tests/qemu-iotests/308.out
index e4467a1..d576713 100644
--- a/tests/qemu-iotests/308.out
+++ b/tests/qemu-iotests/308.out
@@ -171,4 +171,39 @@
=== Compare copy with original ===
Images are identical.
+
+=== Writing zeroes while unmapping ===
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+wrote 67108864/67108864 bytes at offset 0
+64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+{'execute': 'qmp_capabilities'}
+{"return": {}}
+{'execute': 'blockdev-add',
+ 'arguments': {
+ 'driver': 'IMGFMT',
+ 'node-name': 'node-format',
+ 'file': {
+ 'driver': 'file',
+ 'filename': 'TEST_DIR/t.IMGFMT'
+ }
+ } }
+{"return": {}}
+{'execute': 'block-export-add',
+ 'arguments': {
+ 'type': 'fuse',
+ 'id': 'export',
+ 'node-name': 'node-format',
+ 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true
+ } }
+{"return": {}}
+wrote 67108864/67108864 bytes at offset 0
+64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 67108864/67108864 bytes at offset 0
+64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+{'execute': 'quit'}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "export"}}
+read 67108864/67108864 bytes at offset 0
+64 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
*** done
diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check
index 9bdda13..f2e9d27 100755
--- a/tests/qemu-iotests/check
+++ b/tests/qemu-iotests/check
@@ -26,9 +26,23 @@
from testenv import TestEnv
from testrunner import TestRunner
+def get_default_path(follow_link=False):
+ """
+ Try to automagically figure out the path we are running from.
+ """
+ # called from the build tree?
+ if os.path.islink(sys.argv[0]):
+ if follow_link:
+ return os.path.dirname(os.readlink(sys.argv[0]))
+ else:
+ return os.path.dirname(os.path.abspath(sys.argv[0]))
+ else: # or source tree?
+ return os.getcwd()
def make_argparser() -> argparse.ArgumentParser:
- p = argparse.ArgumentParser(description="Test run options")
+ p = argparse.ArgumentParser(
+ description="Test run options",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('-n', '--dry-run', action='store_true',
help='show me, do not run tests')
@@ -113,6 +127,11 @@
'middle of the process.')
g_sel.add_argument('tests', metavar='TEST_FILES', nargs='*',
help='tests to run, or "--" followed by a command')
+ g_sel.add_argument('--build-dir', default=get_default_path(),
+ help='Path to iotests build directory')
+ g_sel.add_argument('--source-dir',
+ default=get_default_path(follow_link=True),
+ help='Path to iotests source directory')
return p
@@ -120,11 +139,14 @@
if __name__ == '__main__':
args = make_argparser().parse_args()
- env = TestEnv(imgfmt=args.imgfmt, imgproto=args.imgproto,
+ env = TestEnv(source_dir=args.source_dir,
+ build_dir=args.build_dir,
+ imgfmt=args.imgfmt, imgproto=args.imgproto,
aiomode=args.aiomode, cachemode=args.cachemode,
imgopts=args.imgopts, misalign=args.misalign,
debug=args.debug, valgrind=args.valgrind,
- gdb=args.gdb, qprint=args.print)
+ gdb=args.gdb, qprint=args.print,
+ dry_run=args.dry_run)
if len(sys.argv) > 1 and sys.argv[-len(args.tests)-1] == '--':
if not args.tests:
@@ -162,7 +184,7 @@
sys.exit(str(e))
if args.dry_run:
- print('\n'.join(tests))
+ print('\n'.join([os.path.basename(t) for t in tests]))
else:
with TestRunner(env, tap=args.tap,
color=args.color) as tr:
diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build
index 323a4ac..a162f68 100644
--- a/tests/qemu-iotests/meson.build
+++ b/tests/qemu-iotests/meson.build
@@ -32,16 +32,39 @@
endif
endforeach
+qemu_iotests_check_cmd = files('check')
+
foreach format, speed: qemu_iotests_formats
if speed == 'quick'
suites = 'block'
else
suites = ['block-' + speed, speed]
endif
- test('qemu-iotests ' + format, sh, args: [files('../check-block.sh'), format],
- depends: qemu_iotests_binaries, env: qemu_iotests_env,
- protocol: 'tap',
- suite: suites,
- timeout: 0,
- is_parallel: false)
+
+ args = ['-tap', '-' + format]
+ if speed == 'quick'
+ args += ['-g', 'auto']
+ endif
+
+ rc = run_command(
+ [qemu_iotests_check_cmd] + args + ['-n'],
+ check: true,
+ )
+
+ foreach item: rc.stdout().strip().split()
+ args = ['-tap', '-' + format, item,
+ '--source-dir', meson.current_source_dir(),
+ '--build-dir', meson.current_build_dir()]
+ # Some individual tests take as long as 45 seconds
+ # Bump the timeout to 3 minutes for some headroom
+ # on slow machines to minimize spurious failures
+ test('io-' + format + '-' + item,
+ qemu_iotests_check_cmd,
+ args: args,
+ depends: qemu_iotests_binaries,
+ env: qemu_iotests_env,
+ protocol: 'tap',
+ timeout: 180,
+ suite: suites)
+ endforeach
endforeach
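
Note: the meson change above replaces the single check-block.sh test with one meson
test per iotest. At configure time it runs ./check with -n to enumerate the tests for
each format, then registers every name as "io-<format>-<test>". A hedged Python sketch
of the same enumeration (format, flags and paths are illustrative only):

    import subprocess

    fmt = 'qcow2'   # illustrative image format
    listing = subprocess.run(['./check', '-tap', '-' + fmt, '-g', 'auto', '-n'],
                             capture_output=True, text=True, check=True)
    for name in listing.stdout.split():
        # meson's test() call registers each of these individually
        print('io-' + fmt + '-' + name)
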
diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py
index a864c74..9a37ad9 100644
--- a/tests/qemu-iotests/testenv.py
+++ b/tests/qemu-iotests/testenv.py
@@ -170,14 +170,16 @@
if not isxfile(b):
sys.exit('Not executable: ' + b)
- def __init__(self, imgfmt: str, imgproto: str, aiomode: str,
+ def __init__(self, source_dir: str, build_dir: str,
+ imgfmt: str, imgproto: str, aiomode: str,
cachemode: Optional[str] = None,
imgopts: Optional[str] = None,
misalign: bool = False,
debug: bool = False,
valgrind: bool = False,
gdb: bool = False,
- qprint: bool = False) -> None:
+ qprint: bool = False,
+ dry_run: bool = False) -> None:
self.imgfmt = imgfmt
self.imgproto = imgproto
self.aiomode = aiomode
@@ -211,18 +213,16 @@
# which are needed to initialize some environment variables. They are
# used by init_*() functions as well.
- if os.path.islink(sys.argv[0]):
- # called from the build tree
- self.source_iotests = os.path.dirname(os.readlink(sys.argv[0]))
- self.build_iotests = os.path.dirname(os.path.abspath(sys.argv[0]))
- else:
- # called from the source tree
- self.source_iotests = os.getcwd()
- self.build_iotests = self.source_iotests
+ self.source_iotests = source_dir
+ self.build_iotests = build_dir
self.build_root = os.path.join(self.build_iotests, '..', '..')
self.init_directories()
+
+ if dry_run:
+ return
+
self.init_binaries()
self.malloc_perturb_ = os.getenv('MALLOC_PERTURB_',
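
Note: with the constructor change above, the caller now injects both directories and
can request a dry run that returns right after init_directories(), skipping the QEMU
binary checks. A minimal usage sketch (paths are made up, keyword names as in the
diff):

    env = TestEnv(source_dir='/path/to/qemu/tests/qemu-iotests',   # hypothetical path
                  build_dir='/path/to/build/tests/qemu-iotests',   # hypothetical path
                  imgfmt='qcow2', imgproto='file', aiomode='threads',
                  dry_run=True)   # skips init_binaries(), no QEMU binaries required
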
diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py
index 5a771da..7b32227 100644
--- a/tests/qemu-iotests/testrunner.py
+++ b/tests/qemu-iotests/testrunner.py
@@ -24,12 +24,10 @@
import subprocess
import contextlib
import json
-import termios
import shutil
import sys
from multiprocessing import Pool
-from contextlib import contextmanager
-from typing import List, Optional, Iterator, Any, Sequence, Dict, \
+from typing import List, Optional, Any, Sequence, Dict, \
ContextManager
from testenv import TestEnv
@@ -56,22 +54,6 @@
return res
-# We want to save current tty settings during test run,
-# since an aborting qemu call may leave things screwed up.
-@contextmanager
-def savetty() -> Iterator[None]:
- isterm = sys.stdin.isatty()
- if isterm:
- fd = sys.stdin.fileno()
- attr = termios.tcgetattr(fd)
-
- try:
- yield
- finally:
- if isterm:
- termios.tcsetattr(fd, termios.TCSADRAIN, attr)
-
-
class LastElapsedTime(ContextManager['LastElapsedTime']):
""" Cache for elapsed time for tests, to show it during new test run
@@ -169,7 +151,6 @@
self._stack = contextlib.ExitStack()
self._stack.enter_context(self.env)
self._stack.enter_context(self.last_elapsed)
- self._stack.enter_context(savetty())
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
@@ -247,13 +228,11 @@
return f'{test}.out'
- def do_run_test(self, test: str, mp: bool) -> TestResult:
+ def do_run_test(self, test: str) -> TestResult:
"""
Run one test
:param test: test file path
- :param mp: if true, we are in a multiprocessing environment, use
- personal subdirectories for test run
Note: this method may be called from subprocess, so it does not
change ``self`` object in any way!
@@ -276,12 +255,14 @@
args = [str(f_test.resolve())]
env = self.env.prepare_subprocess(args)
- if mp:
- # Split test directories, so that tests running in parallel don't
- # break each other.
- for d in ['TEST_DIR', 'SOCK_DIR']:
- env[d] = os.path.join(env[d], f_test.name)
- Path(env[d]).mkdir(parents=True, exist_ok=True)
+
+ # Split test directories, so that tests running in parallel don't
+ # break each other.
+ for d in ['TEST_DIR', 'SOCK_DIR']:
+ env[d] = os.path.join(
+ env[d],
+ f"{self.env.imgfmt}-{self.env.imgproto}-{f_test.name}")
+ Path(env[d]).mkdir(parents=True, exist_ok=True)
test_dir = env['TEST_DIR']
f_bad = Path(test_dir, f_test.name + '.out.bad')
@@ -294,6 +275,7 @@
t0 = time.time()
with f_bad.open('w', encoding="utf-8") as f:
with subprocess.Popen(args, cwd=str(f_test.parent), env=env,
+ stdin=subprocess.DEVNULL,
stdout=f, stderr=subprocess.STDOUT) as proc:
try:
proc.wait()
@@ -365,7 +347,7 @@
testname = os.path.basename(test)
print(f'# running {self.env.imgfmt} {testname}')
- res = self.do_run_test(test, mp)
+ res = self.do_run_test(test)
end = datetime.datetime.now().strftime('%H:%M:%S')
self.test_print_one_line(test=test,
@@ -391,6 +373,7 @@
casenotrun = []
if self.tap:
+ print('TAP version 13')
self.env.print_env('# ')
print('1..%d' % len(tests))
else:
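
Note: with the added "TAP version 13" line the runner emits a stream that meson's
tap protocol can parse directly; schematically the output now starts like this
(counts and result lines are illustrative):

    TAP version 13
    # ... environment dump from self.env.print_env('# ') ...
    1..NN
    ok ...          (one result line per test, as before)
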
diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c
index d29a4e4..8733589 100644
--- a/tests/qtest/bios-tables-test.c
+++ b/tests/qtest/bios-tables-test.c
@@ -949,9 +949,14 @@
data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types);
test_acpi_one("-global PIIX4_PM.acpi-root-pci-hotplug=off "
"-global PIIX4_PM.acpi-pci-hotplug-with-bridge-support=off "
- "-device pci-bridge,chassis_nr=1 "
- "-device pci-testdev,bus=pci.0 "
- "-device pci-testdev,bus=pci.1", &data);
+ "-device pci-bridge,chassis_nr=1,addr=4.0 "
+ "-device pci-testdev,bus=pci.0,addr=5.0 "
+ "-device pci-testdev,bus=pci.0,addr=6.0,acpi-index=101 "
+ "-device pci-testdev,bus=pci.1,addr=1.0 "
+ "-device pci-testdev,bus=pci.1,addr=2.0,acpi-index=201 "
+ "-device pci-bridge,id=nhpbr,chassis_nr=2,shpc=off,addr=7.0 "
+ "-device pci-testdev,bus=nhpbr,addr=1.0,acpi-index=301 "
+ , &data);
free_test_data(&data);
}
@@ -1002,18 +1007,42 @@
free_test_data(&data);
}
+static void test_acpi_q35_tcg_no_acpi_hotplug(void)
+{
+ test_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.machine = MACHINE_Q35;
+ data.variant = ".noacpihp";
+ data.required_struct_types = base_required_struct_types;
+ data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types);
+ test_acpi_one("-global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=off"
+ " -device pci-testdev,bus=pcie.0,acpi-index=101,addr=3.0"
+ " -device pci-bridge,chassis_nr=1,id=shpcbr,addr=4.0"
+ " -device pci-testdev,bus=shpcbr,addr=1.0,acpi-index=201"
+ " -device pci-bridge,chassis_nr=2,shpc=off,id=noshpcbr,addr=5.0"
+ " -device pci-testdev,bus=noshpcbr,addr=1.0,acpi-index=301"
+ " -device pcie-root-port,id=hprp,port=0x0,chassis=1,addr=6.0"
+ " -device pci-testdev,bus=hprp,acpi-index=401"
+ " -device pcie-root-port,id=nohprp,port=0x0,chassis=2,hotplug=off,"
+ "addr=7.0"
+ " -device pci-testdev,bus=nohprp,acpi-index=501"
+ " -device pcie-root-port,id=nohprpint,port=0x0,chassis=3,hotplug=off,"
+ "multifunction=on,addr=8.0"
+ " -device pci-testdev,bus=nohprpint,acpi-index=601,addr=8.1"
+ " -device pcie-root-port,id=hprp2,port=0x0,chassis=4,bus=nohprpint,"
+ "addr=9.0"
+ " -device pci-testdev,bus=hprp2,acpi-index=602"
+ , &data);
+ free_test_data(&data);
+}
+
static void test_acpi_q35_multif_bridge(void)
{
test_data data = {
.machine = MACHINE_Q35,
.variant = ".multi-bridge",
};
-
- if (!qtest_has_device("pcie-root-port")) {
- g_test_skip("Device pcie-root-port is not available");
- goto out;
- }
-
test_vm_prepare("-S"
" -device virtio-balloon,id=balloon0,addr=0x4.0x2"
" -device pcie-root-port,id=rp0,multifunction=on,"
@@ -1025,9 +1054,14 @@
" -device pcie-root-port,id=rphptgt2,port=0x0,chassis=6,addr=2.2"
" -device pcie-root-port,id=rphptgt3,port=0x0,chassis=7,addr=2.3"
" -device pci-testdev,bus=pcie.0,addr=2.4"
+ " -device pci-testdev,bus=pcie.0,addr=2.5,acpi-index=102"
" -device pci-testdev,bus=pcie.0,addr=5.0"
+ " -device pci-testdev,bus=pcie.0,addr=0xf.0,acpi-index=101"
" -device pci-testdev,bus=rp0,addr=0.0"
- " -device pci-testdev,bus=br1", &data);
+ " -device pci-testdev,bus=br1"
+ " -device pcie-root-port,id=rpnohp,chassis=8,addr=0xA.0,hotplug=off"
+ " -device pcie-root-port,id=rp3,chassis=9,bus=rpnohp"
+ , &data);
/* hotplugged bridges section */
qtest_qmp_device_add(data.qts, "pci-bridge", "hpbr1",
@@ -1049,7 +1083,6 @@
/* check that reboot/reset doesn't change any ACPI tables */
qtest_qmp_send(data.qts, "{'execute':'system_reset' }");
process_acpi_tables(&data);
-out:
free_test_data(&data);
}
@@ -1403,11 +1436,6 @@
{
test_data data;
- if (!qtest_has_device("nvdimm")) {
- g_test_skip("Device nvdimm is not available");
- return;
- }
-
memset(&data, 0, sizeof(data));
data.machine = machine;
data.variant = ".dimmpxm";
@@ -1456,11 +1484,6 @@
.scan_len = 256ULL * 1024 * 1024,
};
- if (!qtest_has_device("nvdimm")) {
- g_test_skip("Device nvdimm is not available");
- goto out;
- }
-
data.variant = ".memhp";
test_acpi_one(" -machine nvdimm=on"
" -cpu cortex-a57"
@@ -1474,7 +1497,7 @@
" -device pc-dimm,id=dimm0,memdev=ram2,node=0"
" -device nvdimm,id=dimm1,memdev=nvm0,node=1",
&data);
-out:
+
free_test_data(&data);
}
@@ -1492,11 +1515,6 @@
{
test_data data;
- if (!qtest_has_device("virtio-blk-device")) {
- g_test_skip("Device virtio-blk-device is not available");
- return;
- }
-
test_acpi_microvm_prepare(&data);
test_acpi_one(" -machine microvm,acpi=on,ioapic2=off,rtc=off",
&data);
@@ -1507,11 +1525,6 @@
{
test_data data;
- if (!qtest_has_device("virtio-blk-device")) {
- g_test_skip("Device virtio-blk-device is not available");
- return;
- }
-
test_acpi_microvm_prepare(&data);
data.variant = ".usb";
test_acpi_one(" -machine microvm,acpi=on,ioapic2=off,usb=on,rtc=off",
@@ -1523,11 +1536,6 @@
{
test_data data;
- if (!qtest_has_device("virtio-blk-device")) {
- g_test_skip("Device virtio-blk-device is not available");
- return;
- }
-
test_acpi_microvm_prepare(&data);
data.variant = ".rtc";
test_acpi_one(" -machine microvm,acpi=on,ioapic2=off,rtc=on",
@@ -1539,11 +1547,6 @@
{
test_data data;
- if (!qtest_has_device("virtio-blk-device")) {
- g_test_skip("Device virtio-blk-device is not available");
- return;
- }
-
test_acpi_microvm_prepare(&data);
data.variant = ".pcie";
data.tcg_only = true; /* need constant host-phys-bits */
@@ -1556,11 +1559,6 @@
{
test_data data;
- if (!qtest_has_device("virtio-blk-device")) {
- g_test_skip("Device virtio-blk-device is not available");
- return;
- }
-
test_acpi_microvm_prepare(&data);
data.variant = ".ioapic2";
test_acpi_one(" -machine microvm,acpi=on,ioapic2=on,rtc=off",
@@ -1600,12 +1598,6 @@
.ram_start = 0x40000000ULL,
.scan_len = 128ULL * 1024 * 1024,
};
-
- if (!qtest_has_device("pcie-root-port")) {
- g_test_skip("Device pcie-root-port is not available");
- goto out;
- }
-
/*
* While using -cdrom, the cdrom would auto plugged into pxb-pcie,
* the reason is the bus of pxb-pcie is also root bus, it would lead
@@ -1624,7 +1616,7 @@
" -cpu cortex-a57"
" -device pxb-pcie,bus_nr=128",
&data);
-out:
+
free_test_data(&data);
}
@@ -1687,9 +1679,9 @@
test_acpi_one(" -machine hmat=on"
" -cpu cortex-a57"
" -smp 4,sockets=2"
- " -m 256M"
- " -object memory-backend-ram,size=64M,id=ram0"
- " -object memory-backend-ram,size=64M,id=ram1"
+ " -m 384M"
+ " -object memory-backend-ram,size=128M,id=ram0"
+ " -object memory-backend-ram,size=128M,id=ram1"
" -object memory-backend-ram,size=128M,id=ram2"
" -numa node,nodeid=0,memdev=ram0"
" -numa node,nodeid=1,memdev=ram1"
@@ -1812,12 +1804,6 @@
gchar *params;
test_data data;
- if (!qtest_has_device("virtio-blk-device")) {
- g_test_skip("Device virtio-blk-device is not available");
- g_free(tmp_path);
- return;
- }
-
test_acpi_microvm_prepare(&data);
data.variant = ".pcie";
data.tcg_only = true; /* need constant host-phys-bits */
@@ -1878,11 +1864,6 @@
.variant = ".viot",
};
- if (!qtest_has_device("virtio-iommu")) {
- g_test_skip("Device virtio-iommu is not available");
- goto out;
- }
-
/*
* To keep things interesting, two buses bypass the IOMMU.
* VIOT should only describes the other two buses.
@@ -1893,7 +1874,6 @@
"-device pxb-pcie,bus_nr=0x20,id=pcie.200,bus=pcie.0,bypass_iommu=on "
"-device pxb-pcie,bus_nr=0x30,id=pcie.300,bus=pcie.0",
&data);
-out:
free_test_data(&data);
}
@@ -1954,10 +1934,8 @@
.scan_len = 128ULL * 1024 * 1024,
};
- if (qtest_has_device("virtio-iommu")) {
- test_acpi_one("-cpu cortex-a57 "
- "-device virtio-iommu-pci", &data);
- }
+ test_acpi_one("-cpu cortex-a57 "
+ "-device virtio-iommu-pci", &data);
free_test_data(&data);
}
@@ -2066,11 +2044,6 @@
test_data data;
char *args;
- if (!qtest_has_device("virtio-blk-device")) {
- g_test_skip("Device virtio-blk-device is not available");
- return;
- }
-
test_acpi_microvm_prepare(&data);
args = test_acpi_create_args(&data,
@@ -2161,6 +2134,8 @@
test_acpi_q35_tcg_tpm12_tis);
}
qtest_add_func("acpi/q35/bridge", test_acpi_q35_tcg_bridge);
+ qtest_add_func("acpi/q35/no-acpi-hotplug",
+ test_acpi_q35_tcg_no_acpi_hotplug);
qtest_add_func("acpi/q35/multif-bridge",
test_acpi_q35_multif_bridge);
qtest_add_func("acpi/q35/mmio64", test_acpi_q35_tcg_mmio64);
diff --git a/tests/qtest/e1000e-test.c b/tests/qtest/e1000e-test.c
index b63a4d3..de9738f 100644
--- a/tests/qtest/e1000e-test.c
+++ b/tests/qtest/e1000e-test.c
@@ -27,6 +27,7 @@
#include "qemu/osdep.h"
#include "libqtest-single.h"
#include "libqos/pci-pc.h"
+#include "net/eth.h"
#include "qemu/sockets.h"
#include "qemu/iov.h"
#include "qemu/module.h"
@@ -35,9 +36,13 @@
#include "libqos/e1000e.h"
#include "hw/net/e1000_regs.h"
+static const struct eth_header packet = {
+ .h_dest = E1000E_ADDRESS,
+ .h_source = E1000E_ADDRESS,
+};
+
static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc)
{
- static const char test[] = "TEST";
struct e1000_tx_desc descr;
char buffer[64];
int ret;
@@ -45,7 +50,7 @@
/* Prepare test data buffer */
uint64_t data = guest_alloc(alloc, sizeof(buffer));
- memwrite(data, test, sizeof(test));
+ memwrite(data, &packet, sizeof(packet));
/* Prepare TX descriptor */
memset(&descr, 0, sizeof(descr));
@@ -71,7 +76,7 @@
g_assert_cmpint(ret, == , sizeof(recv_len));
ret = recv(test_sockets[0], buffer, sizeof(buffer), 0);
g_assert_cmpint(ret, ==, sizeof(buffer));
- g_assert_cmpstr(buffer, == , test);
+ g_assert_false(memcmp(buffer, &packet, sizeof(packet)));
/* Free test data buffer */
guest_free(alloc, data);
@@ -81,15 +86,15 @@
{
union e1000_rx_desc_extended descr;
- char test[] = "TEST";
- int len = htonl(sizeof(test));
+ struct eth_header test_iov = packet;
+ int len = htonl(sizeof(packet));
struct iovec iov[] = {
{
.iov_base = &len,
.iov_len = sizeof(len),
},{
- .iov_base = test,
- .iov_len = sizeof(test),
+ .iov_base = &test_iov,
+ .iov_len = sizeof(packet),
},
};
@@ -97,8 +102,8 @@
int ret;
/* Send a dummy packet to device's socket*/
- ret = iov_send(test_sockets[0], iov, 2, 0, sizeof(len) + sizeof(test));
- g_assert_cmpint(ret, == , sizeof(test) + sizeof(len));
+ ret = iov_send(test_sockets[0], iov, 2, 0, sizeof(len) + sizeof(packet));
+ g_assert_cmpint(ret, == , sizeof(packet) + sizeof(len));
/* Prepare test data buffer */
uint64_t data = guest_alloc(alloc, sizeof(buffer));
@@ -119,7 +124,7 @@
/* Check data sent to the backend */
memread(data, buffer, sizeof(buffer));
- g_assert_cmpstr(buffer, == , test);
+ g_assert_false(memcmp(buffer, &packet, sizeof(packet)));
/* Free test data buffer */
guest_free(alloc, data);
diff --git a/tests/qtest/fuzz/generic_fuzz_configs.h b/tests/qtest/fuzz/generic_fuzz_configs.h
index a825b78..50689da 100644
--- a/tests/qtest/fuzz/generic_fuzz_configs.h
+++ b/tests/qtest/fuzz/generic_fuzz_configs.h
@@ -91,6 +91,11 @@
"-device e1000e,netdev=net0 -netdev user,id=net0",
.objects = "e1000e",
},{
+ .name = "igb",
+ .args = "-M q35 -nodefaults "
+ "-device igb,netdev=net0 -netdev user,id=net0",
+ .objects = "igb",
+ },{
.name = "cirrus-vga",
.args = "-machine q35 -nodefaults -device cirrus-vga",
.objects = "cirrus*",
diff --git a/tests/qtest/igb-test.c b/tests/qtest/igb-test.c
new file mode 100644
index 0000000..3d397ea
--- /dev/null
+++ b/tests/qtest/igb-test.c
@@ -0,0 +1,256 @@
+/*
+ * QTest testcase for igb NIC
+ *
+ * Copyright (c) 2022-2023 Red Hat, Inc.
+ * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Akihiko Odaki <akihiko.odaki@daynix.com>
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Leonid Bloch <leonid@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "qemu/osdep.h"
+#include "libqtest-single.h"
+#include "libqos/pci-pc.h"
+#include "net/eth.h"
+#include "qemu/sockets.h"
+#include "qemu/iov.h"
+#include "qemu/module.h"
+#include "qemu/bitops.h"
+#include "libqos/libqos-malloc.h"
+#include "libqos/e1000e.h"
+#include "hw/net/igb_regs.h"
+
+#ifndef _WIN32
+
+static const struct eth_header packet = {
+ .h_dest = E1000E_ADDRESS,
+ .h_source = E1000E_ADDRESS,
+};
+
+static void igb_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc)
+{
+ union e1000_adv_tx_desc descr;
+ char buffer[64];
+ int ret;
+ uint32_t recv_len;
+
+ /* Prepare test data buffer */
+ uint64_t data = guest_alloc(alloc, sizeof(buffer));
+ memwrite(data, &packet, sizeof(packet));
+
+ /* Prepare TX descriptor */
+ memset(&descr, 0, sizeof(descr));
+ descr.read.buffer_addr = cpu_to_le64(data);
+ descr.read.cmd_type_len = cpu_to_le32(E1000_TXD_CMD_RS |
+ E1000_TXD_CMD_EOP |
+ E1000_TXD_DTYP_D |
+ sizeof(buffer));
+
+ /* Put descriptor to the ring */
+ e1000e_tx_ring_push(d, &descr);
+
+ /* Wait for TX WB interrupt */
+ e1000e_wait_isr(d, E1000E_TX0_MSG_ID);
+
+ /* Check DD bit */
+ g_assert_cmphex(le32_to_cpu(descr.wb.status) & E1000_TXD_STAT_DD, ==,
+ E1000_TXD_STAT_DD);
+
+ /* Check data sent to the backend */
+ ret = recv(test_sockets[0], &recv_len, sizeof(recv_len), 0);
+ g_assert_cmpint(ret, == , sizeof(recv_len));
+ ret = recv(test_sockets[0], buffer, sizeof(buffer), 0);
+ g_assert_cmpint(ret, ==, sizeof(buffer));
+ g_assert_false(memcmp(buffer, &packet, sizeof(packet)));
+
+ /* Free test data buffer */
+ guest_free(alloc, data);
+}
+
+static void igb_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc)
+{
+ union e1000_adv_rx_desc descr;
+
+ struct eth_header test_iov = packet;
+ int len = htonl(sizeof(packet));
+ struct iovec iov[] = {
+ {
+ .iov_base = &len,
+ .iov_len = sizeof(len),
+ },{
+ .iov_base = &test_iov,
+ .iov_len = sizeof(packet),
+ },
+ };
+
+ char buffer[64];
+ int ret;
+
+ /* Send a dummy packet to device's socket */
+ ret = iov_send(test_sockets[0], iov, 2, 0, sizeof(len) + sizeof(packet));
+ g_assert_cmpint(ret, == , sizeof(packet) + sizeof(len));
+
+ /* Prepare test data buffer */
+ uint64_t data = guest_alloc(alloc, sizeof(buffer));
+
+ /* Prepare RX descriptor */
+ memset(&descr, 0, sizeof(descr));
+ descr.read.pkt_addr = cpu_to_le64(data);
+
+ /* Put descriptor to the ring */
+ e1000e_rx_ring_push(d, &descr);
+
+ /* Wait for RX interrupt */
+ e1000e_wait_isr(d, E1000E_RX0_MSG_ID);
+
+ /* Check DD bit */
+ g_assert_cmphex(le32_to_cpu(descr.wb.upper.status_error) &
+ E1000_RXD_STAT_DD, ==, E1000_RXD_STAT_DD);
+
+ /* Check data sent to the backend */
+ memread(data, buffer, sizeof(buffer));
+ g_assert_false(memcmp(buffer, &packet, sizeof(packet)));
+
+ /* Free test data buffer */
+ guest_free(alloc, data);
+}
+
+static void test_e1000e_init(void *obj, void *data, QGuestAllocator * alloc)
+{
+ /* init does nothing */
+}
+
+static void test_igb_tx(void *obj, void *data, QGuestAllocator * alloc)
+{
+ QE1000E_PCI *e1000e = obj;
+ QE1000E *d = &e1000e->e1000e;
+ QOSGraphObject *e_object = obj;
+ QPCIDevice *dev = e_object->get_driver(e_object, "pci-device");
+
+ /* FIXME: add spapr support */
+ if (qpci_check_buggy_msi(dev)) {
+ return;
+ }
+
+ igb_send_verify(d, data, alloc);
+}
+
+static void test_igb_rx(void *obj, void *data, QGuestAllocator * alloc)
+{
+ QE1000E_PCI *e1000e = obj;
+ QE1000E *d = &e1000e->e1000e;
+ QOSGraphObject *e_object = obj;
+ QPCIDevice *dev = e_object->get_driver(e_object, "pci-device");
+
+ /* FIXME: add spapr support */
+ if (qpci_check_buggy_msi(dev)) {
+ return;
+ }
+
+ igb_receive_verify(d, data, alloc);
+}
+
+static void test_igb_multiple_transfers(void *obj, void *data,
+ QGuestAllocator *alloc)
+{
+ static const long iterations = 4 * 1024;
+ long i;
+
+ QE1000E_PCI *e1000e = obj;
+ QE1000E *d = &e1000e->e1000e;
+ QOSGraphObject *e_object = obj;
+ QPCIDevice *dev = e_object->get_driver(e_object, "pci-device");
+
+ /* FIXME: add spapr support */
+ if (qpci_check_buggy_msi(dev)) {
+ return;
+ }
+
+ for (i = 0; i < iterations; i++) {
+ igb_send_verify(d, data, alloc);
+ igb_receive_verify(d, data, alloc);
+ }
+
+}
+
+static void data_test_clear(void *sockets)
+{
+ int *test_sockets = sockets;
+
+ close(test_sockets[0]);
+ qos_invalidate_command_line();
+ close(test_sockets[1]);
+ g_free(test_sockets);
+}
+
+static void *data_test_init(GString *cmd_line, void *arg)
+{
+ int *test_sockets = g_new(int, 2);
+ int ret = socketpair(PF_UNIX, SOCK_STREAM, 0, test_sockets);
+ g_assert_cmpint(ret, != , -1);
+
+ g_string_append_printf(cmd_line, " -netdev socket,fd=%d,id=hs0 ",
+ test_sockets[1]);
+
+ g_test_queue_destroy(data_test_clear, test_sockets);
+ return test_sockets;
+}
+
+#endif
+
+static void *data_test_init_no_socket(GString *cmd_line, void *arg)
+{
+ g_string_append(cmd_line, " -netdev hubport,hubid=0,id=hs0 ");
+ return arg;
+}
+
+static void test_igb_hotplug(void *obj, void *data, QGuestAllocator * alloc)
+{
+ QTestState *qts = global_qtest; /* TODO: get rid of global_qtest here */
+ QE1000E_PCI *dev = obj;
+
+ if (dev->pci_dev.bus->not_hotpluggable) {
+ g_test_skip("pci bus does not support hotplug");
+ return;
+ }
+
+ qtest_qmp_device_add(qts, "igb", "igb_net", "{'addr': '0x06'}");
+ qpci_unplug_acpi_device_test(qts, "igb_net", 0x06);
+}
+
+static void register_igb_test(void)
+{
+ QOSGraphTestOptions opts = { 0 };
+
+#ifndef _WIN32
+ opts.before = data_test_init;
+ qos_add_test("init", "igb", test_e1000e_init, &opts);
+ qos_add_test("tx", "igb", test_igb_tx, &opts);
+ qos_add_test("rx", "igb", test_igb_rx, &opts);
+ qos_add_test("multiple_transfers", "igb",
+ test_igb_multiple_transfers, &opts);
+#endif
+
+ opts.before = data_test_init_no_socket;
+ qos_add_test("hotplug", "igb", test_igb_hotplug, &opts);
+}
+
+libqos_init(register_igb_test);
diff --git a/tests/qtest/libqos/e1000e.c b/tests/qtest/libqos/e1000e.c
index 28fb305..925654c 100644
--- a/tests/qtest/libqos/e1000e.c
+++ b/tests/qtest/libqos/e1000e.c
@@ -36,18 +36,6 @@
#define E1000E_RING_LEN (0x1000)
-static void e1000e_macreg_write(QE1000E *d, uint32_t reg, uint32_t val)
-{
- QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e);
- qpci_io_writel(&d_pci->pci_dev, d_pci->mac_regs, reg, val);
-}
-
-static uint32_t e1000e_macreg_read(QE1000E *d, uint32_t reg)
-{
- QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e);
- return qpci_io_readl(&d_pci->pci_dev, d_pci->mac_regs, reg);
-}
-
void e1000e_tx_ring_push(QE1000E *d, void *descr)
{
QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e);
diff --git a/tests/qtest/libqos/e1000e.h b/tests/qtest/libqos/e1000e.h
index 091ce13..30643c8 100644
--- a/tests/qtest/libqos/e1000e.h
+++ b/tests/qtest/libqos/e1000e.h
@@ -25,6 +25,8 @@
#define E1000E_RX0_MSG_ID (0)
#define E1000E_TX0_MSG_ID (1)
+#define E1000E_ADDRESS { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 }
+
typedef struct QE1000E QE1000E;
typedef struct QE1000E_PCI QE1000E_PCI;
@@ -40,6 +42,18 @@
QE1000E e1000e;
};
+static inline void e1000e_macreg_write(QE1000E *d, uint32_t reg, uint32_t val)
+{
+ QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e);
+ qpci_io_writel(&d_pci->pci_dev, d_pci->mac_regs, reg, val);
+}
+
+static inline uint32_t e1000e_macreg_read(QE1000E *d, uint32_t reg)
+{
+ QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e);
+ return qpci_io_readl(&d_pci->pci_dev, d_pci->mac_regs, reg);
+}
+
void e1000e_wait_isr(QE1000E *d, uint16_t msg_id);
void e1000e_tx_ring_push(QE1000E *d, void *descr);
void e1000e_rx_ring_push(QE1000E *d, void *descr);
diff --git a/tests/qtest/libqos/igb.c b/tests/qtest/libqos/igb.c
new file mode 100644
index 0000000..12fb531
--- /dev/null
+++ b/tests/qtest/libqos/igb.c
@@ -0,0 +1,185 @@
+/*
+ * libqos driver framework
+ *
+ * Copyright (c) 2022-2023 Red Hat, Inc.
+ * Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include "qemu/osdep.h"
+#include "hw/net/igb_regs.h"
+#include "hw/net/mii.h"
+#include "hw/pci/pci_ids.h"
+#include "../libqtest.h"
+#include "pci-pc.h"
+#include "qemu/sockets.h"
+#include "qemu/iov.h"
+#include "qemu/module.h"
+#include "qemu/bitops.h"
+#include "libqos-malloc.h"
+#include "qgraph.h"
+#include "e1000e.h"
+
+#define IGB_IVAR_TEST_CFG \
+ ((E1000E_RX0_MSG_ID | E1000_IVAR_VALID) << (igb_ivar_entry_rx(0) * 8) | \
+ ((E1000E_TX0_MSG_ID | E1000_IVAR_VALID) << (igb_ivar_entry_tx(0) * 8)))
+
+#define E1000E_RING_LEN (0x1000)
+
+static void e1000e_foreach_callback(QPCIDevice *dev, int devfn, void *data)
+{
+ QPCIDevice *res = data;
+ memcpy(res, dev, sizeof(QPCIDevice));
+ g_free(dev);
+}
+
+static void e1000e_pci_destructor(QOSGraphObject *obj)
+{
+ QE1000E_PCI *epci = (QE1000E_PCI *) obj;
+ qpci_iounmap(&epci->pci_dev, epci->mac_regs);
+ qpci_msix_disable(&epci->pci_dev);
+}
+
+static void igb_pci_start_hw(QOSGraphObject *obj)
+{
+ static const uint8_t address[] = E1000E_ADDRESS;
+ QE1000E_PCI *d = (QE1000E_PCI *) obj;
+ uint32_t val;
+
+ /* Enable the device */
+ qpci_device_enable(&d->pci_dev);
+
+ /* Reset the device */
+ val = e1000e_macreg_read(&d->e1000e, E1000_CTRL);
+ e1000e_macreg_write(&d->e1000e, E1000_CTRL, val | E1000_CTRL_RST | E1000_CTRL_SLU);
+
+ /* Setup link */
+ e1000e_macreg_write(&d->e1000e, E1000_MDIC,
+ MII_BMCR_AUTOEN | MII_BMCR_ANRESTART |
+ (MII_BMCR << E1000_MDIC_REG_SHIFT) |
+ (1 << E1000_MDIC_PHY_SHIFT) |
+ E1000_MDIC_OP_WRITE);
+
+ qtest_clock_step(d->pci_dev.bus->qts, 900000000);
+
+ /* Enable and configure MSI-X */
+ qpci_msix_enable(&d->pci_dev);
+ e1000e_macreg_write(&d->e1000e, E1000_IVAR0, IGB_IVAR_TEST_CFG);
+
+ /* Check the device link status */
+ val = e1000e_macreg_read(&d->e1000e, E1000_STATUS);
+ g_assert_cmphex(val & E1000_STATUS_LU, ==, E1000_STATUS_LU);
+
+ /* Initialize TX/RX logic */
+ e1000e_macreg_write(&d->e1000e, E1000_RCTL, 0);
+ e1000e_macreg_write(&d->e1000e, E1000_TCTL, 0);
+
+ e1000e_macreg_write(&d->e1000e, E1000_TDBAL(0),
+ (uint32_t) d->e1000e.tx_ring);
+ e1000e_macreg_write(&d->e1000e, E1000_TDBAH(0),
+ (uint32_t) (d->e1000e.tx_ring >> 32));
+ e1000e_macreg_write(&d->e1000e, E1000_TDLEN(0), E1000E_RING_LEN);
+ e1000e_macreg_write(&d->e1000e, E1000_TDT(0), 0);
+ e1000e_macreg_write(&d->e1000e, E1000_TDH(0), 0);
+
+ /* Enable transmit */
+ e1000e_macreg_write(&d->e1000e, E1000_TCTL, E1000_TCTL_EN);
+
+ e1000e_macreg_write(&d->e1000e, E1000_RDBAL(0),
+ (uint32_t)d->e1000e.rx_ring);
+ e1000e_macreg_write(&d->e1000e, E1000_RDBAH(0),
+ (uint32_t)(d->e1000e.rx_ring >> 32));
+ e1000e_macreg_write(&d->e1000e, E1000_RDLEN(0), E1000E_RING_LEN);
+ e1000e_macreg_write(&d->e1000e, E1000_RDT(0), 0);
+ e1000e_macreg_write(&d->e1000e, E1000_RDH(0), 0);
+ e1000e_macreg_write(&d->e1000e, E1000_RA,
+ le32_to_cpu(*(uint32_t *)address));
+ e1000e_macreg_write(&d->e1000e, E1000_RA + 4,
+ E1000_RAH_AV | E1000_RAH_POOL_1 |
+ le16_to_cpu(*(uint16_t *)(address + 4)));
+
+ /* Enable receive */
+ e1000e_macreg_write(&d->e1000e, E1000_RFCTL, E1000_RFCTL_EXTEN);
+ e1000e_macreg_write(&d->e1000e, E1000_RCTL, E1000_RCTL_EN);
+
+ /* Enable all interrupts */
+ e1000e_macreg_write(&d->e1000e, E1000_IMS, 0xFFFFFFFF);
+ e1000e_macreg_write(&d->e1000e, E1000_EIMS, 0xFFFFFFFF);
+
+}
+
+static void *igb_pci_get_driver(void *obj, const char *interface)
+{
+ QE1000E_PCI *epci = obj;
+ if (!g_strcmp0(interface, "igb-if")) {
+ return &epci->e1000e;
+ }
+
+ /* implicit contains */
+ if (!g_strcmp0(interface, "pci-device")) {
+ return &epci->pci_dev;
+ }
+
+ fprintf(stderr, "%s not present in igb\n", interface);
+ g_assert_not_reached();
+}
+
+static void *igb_pci_create(void *pci_bus, QGuestAllocator *alloc, void *addr)
+{
+ QE1000E_PCI *d = g_new0(QE1000E_PCI, 1);
+ QPCIBus *bus = pci_bus;
+ QPCIAddress *address = addr;
+
+ qpci_device_foreach(bus, address->vendor_id, address->device_id,
+ e1000e_foreach_callback, &d->pci_dev);
+
+ /* Map BAR0 (mac registers) */
+ d->mac_regs = qpci_iomap(&d->pci_dev, 0, NULL);
+
+ /* Allocate and setup TX ring */
+ d->e1000e.tx_ring = guest_alloc(alloc, E1000E_RING_LEN);
+ g_assert(d->e1000e.tx_ring != 0);
+
+ /* Allocate and setup RX ring */
+ d->e1000e.rx_ring = guest_alloc(alloc, E1000E_RING_LEN);
+ g_assert(d->e1000e.rx_ring != 0);
+
+ d->obj.get_driver = igb_pci_get_driver;
+ d->obj.start_hw = igb_pci_start_hw;
+ d->obj.destructor = e1000e_pci_destructor;
+
+ return &d->obj;
+}
+
+static void igb_register_nodes(void)
+{
+ QPCIAddress addr = {
+ .vendor_id = PCI_VENDOR_ID_INTEL,
+ .device_id = E1000_DEV_ID_82576,
+ };
+
+ /*
+ * FIXME: every test using this node needs to set up a -netdev socket,id=hs0,
+ * otherwise QEMU will not start
+ */
+ QOSGraphEdgeOptions opts = {
+ .extra_device_opts = "netdev=hs0",
+ };
+ add_qpci_address(&opts, &addr);
+
+ qos_node_create_driver("igb", igb_pci_create);
+ qos_node_consumes("igb", "pci-bus", &opts);
+}
+
+libqos_init(igb_register_nodes);
diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build
index 32f0288..cc209a8 100644
--- a/tests/qtest/libqos/meson.build
+++ b/tests/qtest/libqos/meson.build
@@ -30,6 +30,7 @@
'i2c.c',
'i2c-imx.c',
'i2c-omap.c',
+ 'igb.c',
'sdhci.c',
'tpci200.c',
'virtio.c',
diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index 2bfd460..c3a0ef5 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -124,7 +124,7 @@
(void *)&timeout, sizeof(timeout))) {
fprintf(stderr, "%s failed to set SO_RCVTIMEO: %s\n",
__func__, strerror(errno));
- closesocket(sock);
+ close(sock);
return -1;
}
@@ -135,7 +135,7 @@
if (ret == -1) {
fprintf(stderr, "%s failed: %s\n", __func__, strerror(errno));
}
- closesocket(sock);
+ close(sock);
return ret;
}
@@ -564,8 +564,8 @@
qtest_remove_abrt_handler(s);
qtest_kill_qemu(s);
- closesocket(s->fd);
- closesocket(s->qmp_fd);
+ close(s->fd);
+ close(s->qmp_fd);
g_string_free(s->rx, true);
for (GList *it = s->pending_events; it != NULL; it = it->next) {
@@ -1478,13 +1478,28 @@
qobject_unref(args);
}
-#ifndef _WIN32
void qtest_qmp_add_client(QTestState *qts, const char *protocol, int fd)
{
QDict *resp;
+#ifdef WIN32
+ WSAPROTOCOL_INFOW info;
+ g_autofree char *info64 = NULL;
+ SOCKET s;
+
+ assert(fd_is_socket(fd));
+ s = _get_osfhandle(fd);
+ if (WSADuplicateSocketW(s, GetProcessId((HANDLE)qts->qemu_pid), &info) == SOCKET_ERROR) {
+ g_autofree char *emsg = g_win32_error_message(WSAGetLastError());
+ g_error("WSADuplicateSocketW failed: %s", emsg);
+ }
+ info64 = g_base64_encode((guchar *)&info, sizeof(info));
+ resp = qtest_qmp(qts, "{'execute': 'get-win32-socket',"
+ "'arguments': {'fdname': 'fdname', 'info': %s}}", info64);
+#else
resp = qtest_qmp_fds(qts, &fd, 1, "{'execute': 'getfd',"
"'arguments': {'fdname': 'fdname'}}");
+#endif
g_assert(resp);
g_assert(!qdict_haskey(resp, "event")); /* We don't expect any events */
g_assert(!qdict_haskey(resp, "error"));
@@ -1498,7 +1513,6 @@
g_assert(!qdict_haskey(resp, "error"));
qobject_unref(resp);
}
-#endif
/*
* Generic hot-unplugging test via the device_del QMP command.
diff --git a/tests/qtest/libqtest.h b/tests/qtest/libqtest.h
index fcf1c3c..8d7d450 100644
--- a/tests/qtest/libqtest.h
+++ b/tests/qtest/libqtest.h
@@ -758,17 +758,16 @@
void qtest_qmp_device_add(QTestState *qts, const char *driver, const char *id,
const char *fmt, ...) G_GNUC_PRINTF(4, 5);
-#ifndef _WIN32
/**
* qtest_qmp_add_client:
* @qts: QTestState instance to operate on
* @protocol: the protocol to add to
* @fd: the client file-descriptor
*
- * Call QMP ``getfd`` followed by ``add_client`` with the given @fd.
+ * Call QMP ``getfd`` (on Windows ``get-win32-socket``) followed by
+ * ``add_client`` with the given @fd.
*/
void qtest_qmp_add_client(QTestState *qts, const char *protocol, int fd);
-#endif /* _WIN32 */
/**
* qtest_qmp_device_del_send:
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 29a4efb..85ea4e8 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -101,7 +101,7 @@
'numa-test'
]
-if dbus_display
+if dbus_display and targetos != 'windows'
qtests_i386 += ['dbus-display-test']
endif
@@ -259,6 +259,7 @@
'virtio-scsi-test.c',
'virtio-iommu-test.c',
'vmxnet3-test.c',
+ 'igb-test.c',
)
if config_all_devices.has_key('CONFIG_VIRTIO_SERIAL')
@@ -309,10 +310,12 @@
'netdev-socket': files('netdev-socket.c', '../unit/socket-helpers.c'),
}
-gvnc = dependency('gvnc-1.0', required: false)
-if gvnc.found()
- qtests += {'vnc-display-test': [gvnc]}
- qtests_generic += [ 'vnc-display-test' ]
+if vnc.found()
+ gvnc = dependency('gvnc-1.0', required: false)
+ if gvnc.found()
+ qtests += {'vnc-display-test': [gvnc]}
+ qtests_generic += [ 'vnc-display-test' ]
+ endif
endif
if dbus_display
diff --git a/tests/qtest/microbit-test.c b/tests/qtest/microbit-test.c
index 4bc2670..6022a92 100644
--- a/tests/qtest/microbit-test.c
+++ b/tests/qtest/microbit-test.c
@@ -107,7 +107,7 @@
g_assert_true(recv(sock_fd, s, 10, 0) == 5);
g_assert_true(memcmp(s, "world", 5) == 0);
- closesocket(sock_fd);
+ close(sock_fd);
qtest_quit(qts);
}
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index d4ab393..3b615b0 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -408,8 +408,8 @@
static void migrate_ensure_non_converge(QTestState *who)
{
- /* Can't converge with 1ms downtime + 30 mbs bandwidth limit */
- migrate_set_parameter_int(who, "max-bandwidth", 30 * 1000 * 1000);
+ /* Can't converge with 1 ms downtime + a 3 MB/s bandwidth limit */
+ migrate_set_parameter_int(who, "max-bandwidth", 3 * 1000 * 1000);
migrate_set_parameter_int(who, "downtime-limit", 1);
}
@@ -1808,7 +1808,7 @@
* E.g., with 1Gb/s bandwith migration may pass without throttling,
* so we need to decrease a bandwidth.
*/
- const int64_t init_pct = 5, inc_pct = 50, max_pct = 95;
+ const int64_t init_pct = 5, inc_pct = 25, max_pct = 95;
if (test_migrate_start(&from, &to, uri, &args)) {
return;
@@ -1835,13 +1835,16 @@
/* Wait for throttling begins */
percentage = 0;
- while (percentage == 0) {
+ do {
percentage = read_migrate_property_int(from, "cpu-throttle-percentage");
- usleep(100);
+ if (percentage != 0) {
+ break;
+ }
+ usleep(20);
g_assert_false(got_stop);
- }
- /* The first percentage of throttling should be equal to init_pct */
- g_assert_cmpint(percentage, ==, init_pct);
+ } while (true);
+ /* The first percentage of throttling should be at least init_pct */
+ g_assert_cmpint(percentage, >=, init_pct);
/* Now, when we tested that throttling works, let it converge */
migrate_ensure_converge(from);
@@ -2459,14 +2462,18 @@
int main(int argc, char **argv)
{
- const bool has_kvm = qtest_has_accel("kvm");
- const bool has_uffd = ufd_version_check();
- const char *arch = qtest_get_arch();
+ bool has_kvm;
+ bool has_uffd;
+ const char *arch;
g_autoptr(GError) err = NULL;
int ret;
g_test_init(&argc, &argv, NULL);
+ has_kvm = qtest_has_accel("kvm");
+ has_uffd = ufd_version_check();
+ arch = qtest_get_arch();
+
/*
* On ppc64, the test only works with kvm-hv, but not with kvm-pr and TCG
* is touchy due to race conditions on dirty bits (especially on PPC for
diff --git a/tests/qtest/netdev-socket.c b/tests/qtest/netdev-socket.c
index 270e424..9cf1b06 100644
--- a/tests/qtest/netdev-socket.c
+++ b/tests/qtest/netdev-socket.c
@@ -99,7 +99,7 @@
nb = i;
for (i = 0; i < nb; i++) {
- closesocket(sock[i]);
+ close(sock[i]);
}
return nb;
@@ -361,8 +361,8 @@
qtest_quit(qts1);
qtest_quit(qts0);
- closesocket(sock[0]);
- closesocket(sock[1]);
+ close(sock[0]);
+ close(sock[1]);
}
#endif
@@ -487,8 +487,8 @@
qtest_quit(qts1);
qtest_quit(qts0);
- closesocket(sv[0]);
- closesocket(sv[1]);
+ close(sv[0]);
+ close(sv[1]);
}
#endif
diff --git a/tests/qtest/readconfig-test.c b/tests/qtest/readconfig-test.c
index 9ef8706..2160603 100644
--- a/tests/qtest/readconfig-test.c
+++ b/tests/qtest/readconfig-test.c
@@ -124,13 +124,15 @@
}
#endif
-static void test_object_rng_resp(QObject *res)
+static void test_object_available(QObject *res, const char *name,
+ const char *type)
{
Visitor *v;
g_autoptr(ObjectPropertyInfoList) objs = NULL;
ObjectPropertyInfoList *tmp;
ObjectPropertyInfo *obj;
- bool seen_rng = false;
+ bool object_available = false;
+ g_autofree char *childtype = g_strdup_printf("child<%s>", type);
g_assert(res);
v = qobject_input_visitor_new(res);
@@ -142,16 +144,15 @@
g_assert(tmp->value);
obj = tmp->value;
- if (g_str_equal(obj->name, "rng0") &&
- g_str_equal(obj->type, "child<rng-builtin>")) {
- seen_rng = true;
+ if (g_str_equal(obj->name, name) && g_str_equal(obj->type, childtype)) {
+ object_available = true;
break;
}
tmp = tmp->next;
}
- g_assert(seen_rng);
+ g_assert(object_available);
visit_free(v);
}
@@ -170,7 +171,27 @@
resp = qtest_qmp(qts,
"{ 'execute': 'qom-list',"
" 'arguments': {'path': '/objects' }}");
- test_object_rng_resp(qdict_get(resp, "return"));
+ test_object_available(qdict_get(resp, "return"), "rng0", "rng-builtin");
+ qobject_unref(resp);
+
+ qtest_quit(qts);
+}
+
+static void test_docs_config_ich9(void)
+{
+ QTestState *qts;
+ QDict *resp;
+ QObject *qobj;
+
+ qts = qtest_initf("-nodefaults -readconfig docs/config/ich9-ehci-uhci.cfg");
+
+ resp = qtest_qmp(qts, "{ 'execute': 'qom-list',"
+ " 'arguments': {'path': '/machine/peripheral' }}");
+ qobj = qdict_get(resp, "return");
+ test_object_available(qobj, "ehci", "ich9-usb-ehci1");
+ test_object_available(qobj, "uhci-1", "ich9-usb-uhci1");
+ test_object_available(qobj, "uhci-2", "ich9-usb-uhci2");
+ test_object_available(qobj, "uhci-3", "ich9-usb-uhci3");
qobject_unref(resp);
qtest_quit(qts);
@@ -186,6 +207,7 @@
if (g_str_equal(arch, "i386") ||
g_str_equal(arch, "x86_64")) {
qtest_add_func("readconfig/x86/memdev", test_x86_memdev);
+ qtest_add_func("readconfig/x86/ich9-ehci-uhci", test_docs_config_ich9);
}
#ifdef CONFIG_SPICE
qtest_add_func("readconfig/spice", test_spice);
diff --git a/tests/qtest/vnc-display-test.c b/tests/qtest/vnc-display-test.c
index e52a432..f8933b0 100644
--- a/tests/qtest/vnc-display-test.c
+++ b/tests/qtest/vnc-display-test.c
@@ -19,7 +19,7 @@
GMainLoop *loop;
} Test;
-#if !defined(WIN32) && !defined(CONFIG_DARWIN)
+#if !defined(CONFIG_DARWIN)
static void on_vnc_error(VncConnection* self,
const char* msg)
@@ -38,10 +38,7 @@
static bool
test_setup(Test *test)
{
-#ifdef WIN32
- g_test_skip("Not supported on Windows yet");
- return false;
-#elif defined(CONFIG_DARWIN)
+#if defined(CONFIG_DARWIN)
g_test_skip("Broken on Darwin");
return false;
#else
@@ -59,7 +56,12 @@
g_signal_connect(test->conn, "vnc-auth-failure",
G_CALLBACK(on_vnc_auth_failure), NULL);
vnc_connection_set_auth_type(test->conn, VNC_CONNECTION_AUTH_NONE);
+
+#ifdef WIN32
+ vnc_connection_open_fd(test->conn, _get_osfhandle(pair[0]));
+#else
vnc_connection_open_fd(test->conn, pair[0]);
+#endif
test->loop = g_main_loop_new(NULL, FALSE);
return true;
diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target
index a3b0aaf..8318caf 100644
--- a/tests/tcg/Makefile.target
+++ b/tests/tcg/Makefile.target
@@ -201,3 +201,10 @@
distclean:
rm -f config-cc.mak config-target.mak ../config-$(TARGET).mak
+
+.PHONY: help
+help:
+ @echo "TCG tests help $(TARGET_NAME)"
+ @echo "Built with $(CC)"
+ @echo "Available tests:"
+ @$(foreach t,$(RUN_TESTS),echo " $t";)
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index db122ab..9e91a20 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -81,7 +81,7 @@
TESTS += sha512-vector
-ifneq ($(HAVE_GDB_BIN),)
+ifeq ($(HOST_GDB_SUPPORTS_ARCH),y)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py
run-gdbstub-sysregs: sysregs
diff --git a/tests/tcg/hexagon/Makefile.target b/tests/tcg/hexagon/Makefile.target
index 18e6a59..0d82dfa 100644
--- a/tests/tcg/hexagon/Makefile.target
+++ b/tests/tcg/hexagon/Makefile.target
@@ -1,5 +1,5 @@
##
-## Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
@@ -45,6 +45,10 @@
HEX_TESTS += overflow
HEX_TESTS += signal_context
HEX_TESTS += reg_mut
+HEX_TESTS += vector_add_int
+HEX_TESTS += scatter_gather
+HEX_TESTS += hvx_misc
+HEX_TESTS += hvx_histogram
HEX_TESTS += test_abs
HEX_TESTS += test_bitcnt
@@ -78,3 +82,10 @@
usr: usr.c
$(CC) $(CFLAGS) -mv67t -O2 -Wno-inline-asm -Wno-expansion-to-defined $< -o $@ $(LDFLAGS)
+scatter_gather: CFLAGS += -mhvx
+vector_add_int: CFLAGS += -mhvx -fvectorize
+hvx_misc: CFLAGS += -mhvx
+hvx_histogram: CFLAGS += -mhvx -Wno-gnu-folding-constant
+
+hvx_histogram: hvx_histogram.c hvx_histogram_row.S
+ $(CC) $(CFLAGS) $(CROSS_CC_GUEST_CFLAGS) $^ -o $@ $(LDFLAGS)
diff --git a/tests/tcg/hexagon/fpstuff.c b/tests/tcg/hexagon/fpstuff.c
index 56bf562..90ce9a6 100644
--- a/tests/tcg/hexagon/fpstuff.c
+++ b/tests/tcg/hexagon/fpstuff.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2020-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2020-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,6 +40,7 @@
const int SF_small_neg = 0xab98fba8;
const int SF_denorm = 0x00000001;
const int SF_random = 0x346001d6;
+const int SF_neg_zero = 0x80000000;
const long long DF_QNaN = 0x7ff8000000000000ULL;
const long long DF_SNaN = 0x7ff7000000000000ULL;
@@ -536,6 +537,33 @@
check32(result, 0x146001d6);
}
+static void check_sffms(void)
+{
+ int result;
+
+ /* Check that sffms properly deals with -0 */
+ result = SF_neg_zero;
+ asm ("%0 -= sfmpy(%1 , %2)\n\t"
+ : "+r"(result)
+ : "r"(SF_ZERO), "r"(SF_ZERO)
+ : "r12", "r8");
+ check32(result, SF_neg_zero);
+
+ result = SF_ZERO;
+ asm ("%0 -= sfmpy(%1 , %2)\n\t"
+ : "+r"(result)
+ : "r"(SF_neg_zero), "r"(SF_ZERO)
+ : "r12", "r8");
+ check32(result, SF_ZERO);
+
+ result = SF_ZERO;
+ asm ("%0 -= sfmpy(%1 , %2)\n\t"
+ : "+r"(result)
+ : "r"(SF_ZERO), "r"(SF_neg_zero)
+ : "r12", "r8");
+ check32(result, SF_ZERO);
+}
+
static void check_float2int_convs()
{
int res32;
@@ -688,6 +716,7 @@
check_invsqrta();
check_sffixupn();
check_sffixupd();
+ check_sffms();
check_float2int_convs();
puts(err ? "FAIL" : "PASS");
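The new check_sffms() cases pin down IEEE zero-sign behaviour for the fused multiply-subtract: -0 - (0 * 0) must stay -0, while 0 - (-0 * 0) and 0 - (0 * -0) must stay +0. A host-side reference for the same three cases, under the assumption that "r -= sfmpy(a, b)" behaves like a single-rounding r - a*b, i.e. fmaf(-a, b, r):

    #include <assert.h>
    #include <math.h>            /* link with -lm */

    /* Reference model (assumption): "r -= sfmpy(a, b)" == fmaf(-a, b, r). */
    static float fms_ref(float r, float a, float b)
    {
        return fmaf(-a, b, r);
    }

    int main(void)
    {
        assert(signbit(fms_ref(-0.0f, 0.0f, 0.0f)));    /* -0 - (0 * 0)  == -0 */
        assert(!signbit(fms_ref(0.0f, -0.0f, 0.0f)));   /*  0 - (-0 * 0) == +0 */
        assert(!signbit(fms_ref(0.0f, 0.0f, -0.0f)));   /*  0 - (0 * -0) == +0 */
        return 0;
    }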
diff --git a/tests/tcg/hexagon/preg_alias.c b/tests/tcg/hexagon/preg_alias.c
index b44a811..8798fbc 100644
--- a/tests/tcg/hexagon/preg_alias.c
+++ b/tests/tcg/hexagon/preg_alias.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -65,7 +65,7 @@
: "=r"(pregs->pregs.p0), "=r"(pregs->pregs.p1),
"=r"(pregs->pregs.p2), "=r"(pregs->pregs.p3)
: "r"(cval)
- : "p0", "p1", "p2", "p3");
+ : "c4", "p0", "p1", "p2", "p3");
}
int err;
@@ -92,7 +92,7 @@
: "=r"(pregs->pregs.p0), "=r"(pregs->pregs.p1),
"=r"(pregs->pregs.p2), "=r"(pregs->pregs.p3), "=r"(c5)
: "r"(cval_pair)
- : "p0", "p1", "p2", "p3");
+ : "c4", "c5", "p0", "p1", "p2", "p3");
check(c5, 0xdeadbeef);
}
@@ -117,7 +117,7 @@
"}\n\t"
: "+r"(result)
: "r"(0xffffffff), "r"(0xff00ffff), "r"(0x837ed653)
- : "p0", "p1", "p2", "p3");
+ : "c4", "p0", "p1", "p2", "p3");
check(result, old_val);
/* Test a predicated store */
@@ -129,7 +129,7 @@
"}\n\t"
:
: "r"(0), "r"(0xffffffff), "r"(&result)
- : "p0", "p1", "p2", "p3", "memory");
+ : "c4", "p0", "p1", "p2", "p3", "memory");
check(result, 0x0);
}
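The clobber-list additions above follow the general rule of GCC extended asm: every register the template writes, including control-register aliases such as c4 (and c5 in the register-pair case), must be declared, otherwise the compiler may assume its previous contents survive the asm. A generic illustration of the rule (x86-64 AT&T syntax, not Hexagon; purely illustrative):

    /* The asm body writes %rcx without exposing it as an output, so it must
     * appear in the clobber list -- the same reasoning as adding "c4"/"c5"
     * in the hunks above. */
    static inline unsigned long copy_via_rcx(unsigned long x)
    {
        unsigned long out;
        asm ("mov %1, %%rcx\n\t"
             "mov %%rcx, %0"
             : "=r"(out)
             : "r"(x)
             : "rcx");
        return out;
    }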
diff --git a/tests/tcg/hexagon/scatter_gather.c b/tests/tcg/hexagon/scatter_gather.c
index b93eb18..bf8b5e0 100644
--- a/tests/tcg/hexagon/scatter_gather.c
+++ b/tests/tcg/hexagon/scatter_gather.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,47 +40,6 @@
typedef long HVX_VectorPred __attribute__((__vector_size__(128)))
__attribute__((aligned(128)));
-#define VSCATTER_16(BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermh_128B((int)BASE, RGN, OFF, VALS)
-#define VSCATTER_16_MASKED(MASK, BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermhq_128B(MASK, (int)BASE, RGN, OFF, VALS)
-#define VSCATTER_32(BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermw_128B((int)BASE, RGN, OFF, VALS)
-#define VSCATTER_32_MASKED(MASK, BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermwq_128B(MASK, (int)BASE, RGN, OFF, VALS)
-#define VSCATTER_16_32(BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermhw_128B((int)BASE, RGN, OFF, VALS)
-#define VSCATTER_16_32_MASKED(MASK, BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermhwq_128B(MASK, (int)BASE, RGN, OFF, VALS)
-#define VSCATTER_16_ACC(BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermh_add_128B((int)BASE, RGN, OFF, VALS)
-#define VSCATTER_32_ACC(BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermw_add_128B((int)BASE, RGN, OFF, VALS)
-#define VSCATTER_16_32_ACC(BASE, RGN, OFF, VALS) \
- __builtin_HEXAGON_V6_vscattermhw_add_128B((int)BASE, RGN, OFF, VALS)
-
-#define VGATHER_16(DSTADDR, BASE, RGN, OFF) \
- __builtin_HEXAGON_V6_vgathermh_128B(DSTADDR, (int)BASE, RGN, OFF)
-#define VGATHER_16_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \
- __builtin_HEXAGON_V6_vgathermhq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF)
-#define VGATHER_32(DSTADDR, BASE, RGN, OFF) \
- __builtin_HEXAGON_V6_vgathermw_128B(DSTADDR, (int)BASE, RGN, OFF)
-#define VGATHER_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \
- __builtin_HEXAGON_V6_vgathermwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF)
-#define VGATHER_16_32(DSTADDR, BASE, RGN, OFF) \
- __builtin_HEXAGON_V6_vgathermhw_128B(DSTADDR, (int)BASE, RGN, OFF)
-#define VGATHER_16_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \
- __builtin_HEXAGON_V6_vgathermhwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF)
-
-#define VSHUFF_H(V) \
- __builtin_HEXAGON_V6_vshuffh_128B(V)
-#define VSPLAT_H(X) \
- __builtin_HEXAGON_V6_lvsplath_128B(X)
-#define VAND_VAL(PRED, VAL) \
- __builtin_HEXAGON_V6_vandvrt_128B(PRED, VAL)
-#define VDEAL_H(V) \
- __builtin_HEXAGON_V6_vdealh_128B(V)
-
int err;
/* define the number of rows/cols in a square matrix */
@@ -108,22 +67,22 @@
unsigned short vgather16_32_ref[MATRIX_SIZE];
/* declare the arrays of offsets */
-unsigned short half_offsets[MATRIX_SIZE];
-unsigned int word_offsets[MATRIX_SIZE];
+unsigned short half_offsets[MATRIX_SIZE] __attribute__((aligned(128)));
+unsigned int word_offsets[MATRIX_SIZE] __attribute__((aligned(128)));
/* declare the arrays of values */
-unsigned short half_values[MATRIX_SIZE];
-unsigned short half_values_acc[MATRIX_SIZE];
-unsigned short half_values_masked[MATRIX_SIZE];
-unsigned int word_values[MATRIX_SIZE];
-unsigned int word_values_acc[MATRIX_SIZE];
-unsigned int word_values_masked[MATRIX_SIZE];
+unsigned short half_values[MATRIX_SIZE] __attribute__((aligned(128)));
+unsigned short half_values_acc[MATRIX_SIZE] __attribute__((aligned(128)));
+unsigned short half_values_masked[MATRIX_SIZE] __attribute__((aligned(128)));
+unsigned int word_values[MATRIX_SIZE] __attribute__((aligned(128)));
+unsigned int word_values_acc[MATRIX_SIZE] __attribute__((aligned(128)));
+unsigned int word_values_masked[MATRIX_SIZE] __attribute__((aligned(128)));
/* declare the arrays of predicates */
-unsigned short half_predicates[MATRIX_SIZE];
-unsigned int word_predicates[MATRIX_SIZE];
+unsigned short half_predicates[MATRIX_SIZE] __attribute__((aligned(128)));
+unsigned int word_predicates[MATRIX_SIZE] __attribute__((aligned(128)));
-/* make this big enough for all the intrinsics */
+/* make this big enough for all the operations */
const size_t region_len = sizeof(vtcm);
/* optionally add sync instructions */
@@ -261,164 +220,201 @@
}
}
-/* scatter the 16 bit elements using intrinsics */
+/* scatter the 16 bit elements using HVX */
void vector_scatter_16(void)
{
- /* copy the offsets and values to vectors */
- HVX_Vector offsets = *(HVX_Vector *)half_offsets;
- HVX_Vector values = *(HVX_Vector *)half_values;
-
- VSCATTER_16(&vtcm.vscatter16, region_len, offsets, values);
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%3 + #0)\n\t"
+ "vscatter(%0, m0, v0.h).h = v1\n\t"
+ : : "r"(vtcm.vscatter16), "r"(region_len),
+ "r"(half_offsets), "r"(half_values)
+ : "m0", "v0", "v1", "memory");
sync_scatter(vtcm.vscatter16);
}
-/* scatter-accumulate the 16 bit elements using intrinsics */
+/* scatter-accumulate the 16 bit elements using HVX */
void vector_scatter_16_acc(void)
{
- /* copy the offsets and values to vectors */
- HVX_Vector offsets = *(HVX_Vector *)half_offsets;
- HVX_Vector values = *(HVX_Vector *)half_values_acc;
-
- VSCATTER_16_ACC(&vtcm.vscatter16, region_len, offsets, values);
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%3 + #0)\n\t"
+ "vscatter(%0, m0, v0.h).h += v1\n\t"
+ : : "r"(vtcm.vscatter16), "r"(region_len),
+ "r"(half_offsets), "r"(half_values_acc)
+ : "m0", "v0", "v1", "memory");
sync_scatter(vtcm.vscatter16);
}
-/* scatter the 16 bit elements using intrinsics */
+/* masked scatter the 16 bit elements using HVX */
void vector_scatter_16_masked(void)
{
- /* copy the offsets and values to vectors */
- HVX_Vector offsets = *(HVX_Vector *)half_offsets;
- HVX_Vector values = *(HVX_Vector *)half_values_masked;
- HVX_Vector pred_reg = *(HVX_Vector *)half_predicates;
- HVX_VectorPred preds = VAND_VAL(pred_reg, ~0);
-
- VSCATTER_16_MASKED(preds, &vtcm.vscatter16, region_len, offsets, values);
+ asm ("r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "v1 = vmem(%4 + #0)\n\t"
+ "if (q0) vscatter(%1, m0, v0.h).h = v1\n\t"
+ : : "r"(half_predicates), "r"(vtcm.vscatter16), "r"(region_len),
+ "r"(half_offsets), "r"(half_values_masked)
+ : "r1", "q0", "m0", "q0", "v0", "v1", "memory");
sync_scatter(vtcm.vscatter16);
}
-/* scatter the 32 bit elements using intrinsics */
+/* scatter the 32 bit elements using HVX */
void vector_scatter_32(void)
{
- /* copy the offsets and values to vectors */
- HVX_Vector offsetslo = *(HVX_Vector *)word_offsets;
- HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
- HVX_Vector valueslo = *(HVX_Vector *)word_values;
- HVX_Vector valueshi = *(HVX_Vector *)&word_values[MATRIX_SIZE / 2];
+ HVX_Vector *offsetslo = (HVX_Vector *)word_offsets;
+ HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
+ HVX_Vector *valueslo = (HVX_Vector *)word_values;
+ HVX_Vector *valueshi = (HVX_Vector *)&word_values[MATRIX_SIZE / 2];
- VSCATTER_32(&vtcm.vscatter32, region_len, offsetslo, valueslo);
- VSCATTER_32(&vtcm.vscatter32, region_len, offsetshi, valueshi);
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%3 + #0)\n\t"
+ "vscatter(%0, m0, v0.w).w = v1\n\t"
+ : : "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetslo), "r"(valueslo)
+ : "m0", "v0", "v1", "memory");
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%3 + #0)\n\t"
+ "vscatter(%0, m0, v0.w).w = v1\n\t"
+ : : "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetshi), "r"(valueshi)
+ : "m0", "v0", "v1", "memory");
sync_scatter(vtcm.vscatter32);
}
-/* scatter-acc the 32 bit elements using intrinsics */
+/* scatter-accumulate the 32 bit elements using HVX */
void vector_scatter_32_acc(void)
{
- /* copy the offsets and values to vectors */
- HVX_Vector offsetslo = *(HVX_Vector *)word_offsets;
- HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
- HVX_Vector valueslo = *(HVX_Vector *)word_values_acc;
- HVX_Vector valueshi = *(HVX_Vector *)&word_values_acc[MATRIX_SIZE / 2];
+ HVX_Vector *offsetslo = (HVX_Vector *)word_offsets;
+ HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
+ HVX_Vector *valueslo = (HVX_Vector *)word_values_acc;
+ HVX_Vector *valueshi = (HVX_Vector *)&word_values_acc[MATRIX_SIZE / 2];
- VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetslo, valueslo);
- VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetshi, valueshi);
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%3 + #0)\n\t"
+ "vscatter(%0, m0, v0.w).w += v1\n\t"
+ : : "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetslo), "r"(valueslo)
+ : "m0", "v0", "v1", "memory");
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%3 + #0)\n\t"
+ "vscatter(%0, m0, v0.w).w += v1\n\t"
+ : : "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetshi), "r"(valueshi)
+ : "m0", "v0", "v1", "memory");
sync_scatter(vtcm.vscatter32);
}
-/* scatter the 32 bit elements using intrinsics */
+/* masked scatter the 32 bit elements using HVX */
void vector_scatter_32_masked(void)
{
- /* copy the offsets and values to vectors */
- HVX_Vector offsetslo = *(HVX_Vector *)word_offsets;
- HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
- HVX_Vector valueslo = *(HVX_Vector *)word_values_masked;
- HVX_Vector valueshi = *(HVX_Vector *)&word_values_masked[MATRIX_SIZE / 2];
- HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates;
- HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2];
- HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0);
- HVX_VectorPred predshi = VAND_VAL(pred_reghi, ~0);
+ HVX_Vector *offsetslo = (HVX_Vector *)word_offsets;
+ HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
+ HVX_Vector *valueslo = (HVX_Vector *)word_values_masked;
+ HVX_Vector *valueshi = (HVX_Vector *)&word_values_masked[MATRIX_SIZE / 2];
+ HVX_Vector *predslo = (HVX_Vector *)word_predicates;
+ HVX_Vector *predshi = (HVX_Vector *)&word_predicates[MATRIX_SIZE / 2];
- VSCATTER_32_MASKED(predslo, &vtcm.vscatter32, region_len, offsetslo,
- valueslo);
- VSCATTER_32_MASKED(predshi, &vtcm.vscatter32, region_len, offsetshi,
- valueshi);
+ asm ("r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "v1 = vmem(%4 + #0)\n\t"
+ "if (q0) vscatter(%1, m0, v0.w).w = v1\n\t"
+ : : "r"(predslo), "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetslo), "r"(valueslo)
+ : "r1", "q0", "m0", "q0", "v0", "v1", "memory");
+ asm ("r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "v1 = vmem(%4 + #0)\n\t"
+ "if (q0) vscatter(%1, m0, v0.w).w = v1\n\t"
+ : : "r"(predshi), "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetshi), "r"(valueshi)
+ : "r1", "q0", "m0", "q0", "v0", "v1", "memory");
- sync_scatter(vtcm.vscatter16);
+ sync_scatter(vtcm.vscatter32);
}
-/* scatter the 16 bit elements with 32 bit offsets using intrinsics */
+/* scatter the 16 bit elements with 32 bit offsets using HVX */
void vector_scatter_16_32(void)
{
- HVX_VectorPair offsets;
- HVX_Vector values;
-
- /* get the word offsets in a vector pair */
- offsets = *(HVX_VectorPair *)word_offsets;
-
- /* these values need to be shuffled for the scatter */
- values = *(HVX_Vector *)half_values;
- values = VSHUFF_H(values);
-
- VSCATTER_16_32(&vtcm.vscatter16_32, region_len, offsets, values);
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%2 + #1)\n\t"
+ "v2 = vmem(%3 + #0)\n\t"
+ "v2.h = vshuff(v2.h)\n\t" /* shuffle the values for the scatter */
+ "vscatter(%0, m0, v1:0.w).h = v2\n\t"
+ : : "r"(vtcm.vscatter16_32), "r"(region_len),
+ "r"(word_offsets), "r"(half_values)
+ : "m0", "v0", "v1", "v2", "memory");
sync_scatter(vtcm.vscatter16_32);
}
-/* scatter-acc the 16 bit elements with 32 bit offsets using intrinsics */
+/* scatter-accumulate the 16 bit elements with 32 bit offsets using HVX */
void vector_scatter_16_32_acc(void)
{
- HVX_VectorPair offsets;
- HVX_Vector values;
-
- /* get the word offsets in a vector pair */
- offsets = *(HVX_VectorPair *)word_offsets;
-
- /* these values need to be shuffled for the scatter */
- values = *(HVX_Vector *)half_values_acc;
- values = VSHUFF_H(values);
-
- VSCATTER_16_32_ACC(&vtcm.vscatter16_32, region_len, offsets, values);
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%2 + #1)\n\t"
+ "v2 = vmem(%3 + #0)\n\t" \
+ "v2.h = vshuff(v2.h)\n\t" /* shuffle the values for the scatter */
+ "vscatter(%0, m0, v1:0.w).h += v2\n\t"
+ : : "r"(vtcm.vscatter16_32), "r"(region_len),
+ "r"(word_offsets), "r"(half_values_acc)
+ : "m0", "v0", "v1", "v2", "memory");
sync_scatter(vtcm.vscatter16_32);
}
-/* masked scatter the 16 bit elements with 32 bit offsets using intrinsics */
+/* masked scatter the 16 bit elements with 32 bit offsets using HVX */
void vector_scatter_16_32_masked(void)
{
- HVX_VectorPair offsets;
- HVX_Vector values;
- HVX_Vector pred_reg;
-
- /* get the word offsets in a vector pair */
- offsets = *(HVX_VectorPair *)word_offsets;
-
- /* these values need to be shuffled for the scatter */
- values = *(HVX_Vector *)half_values_masked;
- values = VSHUFF_H(values);
-
- pred_reg = *(HVX_Vector *)half_predicates;
- pred_reg = VSHUFF_H(pred_reg);
- HVX_VectorPred preds = VAND_VAL(pred_reg, ~0);
-
- VSCATTER_16_32_MASKED(preds, &vtcm.vscatter16_32, region_len, offsets,
- values);
+ asm ("r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "v0.h = vshuff(v0.h)\n\t" /* shuffle the predicates */
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "v1 = vmem(%3 + #1)\n\t"
+ "v2 = vmem(%4 + #0)\n\t" \
+ "v2.h = vshuff(v2.h)\n\t" /* shuffle the values for the scatter */
+ "if (q0) vscatter(%1, m0, v1:0.w).h = v2\n\t"
+ : : "r"(half_predicates), "r"(vtcm.vscatter16_32), "r"(region_len),
+ "r"(word_offsets), "r"(half_values_masked)
+ : "r1", "q0", "m0", "v0", "v1", "v2", "memory");
sync_scatter(vtcm.vscatter16_32);
}
-/* gather the elements from the scatter16 buffer */
+/* gather the elements from the scatter16 buffer using HVX */
void vector_gather_16(void)
{
- HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16;
- HVX_Vector offsets = *(HVX_Vector *)half_offsets;
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "{ vtmp.h = vgather(%0, m0, v0.h).h\n\t"
+ " vmem(%3 + #0) = vtmp.new }\n\t"
+ : : "r"(vtcm.vscatter16), "r"(region_len),
+ "r"(half_offsets), "r"(vtcm.vgather16)
+ : "m0", "v0", "memory");
- VGATHER_16(vgather, &vtcm.vscatter16, region_len, offsets);
-
- sync_gather(vgather);
+ sync_gather(vtcm.vgather16);
}
static unsigned short gather_16_masked_init(void)
@@ -427,31 +423,51 @@
return letter | (letter << 8);
}
+/* masked gather the elements from the scatter16 buffer using HVX */
void vector_gather_16_masked(void)
{
- HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16;
- HVX_Vector offsets = *(HVX_Vector *)half_offsets;
- HVX_Vector pred_reg = *(HVX_Vector *)half_predicates;
- HVX_VectorPred preds = VAND_VAL(pred_reg, ~0);
+ unsigned short init = gather_16_masked_init();
- *vgather = VSPLAT_H(gather_16_masked_init());
- VGATHER_16_MASKED(vgather, preds, &vtcm.vscatter16, region_len, offsets);
+ asm ("v0.h = vsplat(%5)\n\t"
+ "vmem(%4 + #0) = v0\n\t" /* initialize the write area */
+ "r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "{ if (q0) vtmp.h = vgather(%1, m0, v0.h).h\n\t"
+ " vmem(%4 + #0) = vtmp.new }\n\t"
+ : : "r"(half_predicates), "r"(vtcm.vscatter16), "r"(region_len),
+ "r"(half_offsets), "r"(vtcm.vgather16), "r"(init)
+ : "r1", "q0", "m0", "v0", "memory");
- sync_gather(vgather);
+ sync_gather(vtcm.vgather16);
}
-/* gather the elements from the scatter32 buffer */
+/* gather the elements from the scatter32 buffer using HVX */
void vector_gather_32(void)
{
- HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32;
- HVX_Vector *vgatherhi =
- (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2));
- HVX_Vector offsetslo = *(HVX_Vector *)word_offsets;
- HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
+ HVX_Vector *vgatherlo = (HVX_Vector *)vtcm.vgather32;
+ HVX_Vector *vgatherhi = (HVX_Vector *)&vtcm.vgather32[MATRIX_SIZE / 2];
+ HVX_Vector *offsetslo = (HVX_Vector *)word_offsets;
+ HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
- VGATHER_32(vgatherlo, &vtcm.vscatter32, region_len, offsetslo);
- VGATHER_32(vgatherhi, &vtcm.vscatter32, region_len, offsetshi);
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "{ vtmp.w = vgather(%0, m0, v0.w).w\n\t"
+ " vmem(%3 + #0) = vtmp.new }\n\t"
+ : : "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetslo), "r"(vgatherlo)
+ : "m0", "v0", "memory");
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "{ vtmp.w = vgather(%0, m0, v0.w).w\n\t"
+ " vmem(%3 + #0) = vtmp.new }\n\t"
+ : : "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetshi), "r"(vgatherhi)
+ : "m0", "v0", "memory");
+ sync_gather(vgatherlo);
sync_gather(vgatherhi);
}
@@ -461,79 +477,88 @@
return letter | (letter << 8) | (letter << 16) | (letter << 24);
}
+/* masked gather the elements from the scatter32 buffer using HVX */
void vector_gather_32_masked(void)
{
- HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32;
- HVX_Vector *vgatherhi =
- (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2));
- HVX_Vector offsetslo = *(HVX_Vector *)word_offsets;
- HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
- HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates;
- HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0);
- HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2];
- HVX_VectorPred predshi = VAND_VAL(pred_reghi, ~0);
+ unsigned int init = gather_32_masked_init();
+ HVX_Vector *vgatherlo = (HVX_Vector *)vtcm.vgather32;
+ HVX_Vector *vgatherhi = (HVX_Vector *)&vtcm.vgather32[MATRIX_SIZE / 2];
+ HVX_Vector *offsetslo = (HVX_Vector *)word_offsets;
+ HVX_Vector *offsetshi = (HVX_Vector *)&word_offsets[MATRIX_SIZE / 2];
+ HVX_Vector *predslo = (HVX_Vector *)word_predicates;
+ HVX_Vector *predshi = (HVX_Vector *)&word_predicates[MATRIX_SIZE / 2];
- *vgatherlo = VSPLAT_H(gather_32_masked_init());
- *vgatherhi = VSPLAT_H(gather_32_masked_init());
- VGATHER_32_MASKED(vgatherlo, predslo, &vtcm.vscatter32, region_len,
- offsetslo);
- VGATHER_32_MASKED(vgatherhi, predshi, &vtcm.vscatter32, region_len,
- offsetshi);
+ asm ("v0.h = vsplat(%5)\n\t"
+ "vmem(%4 + #0) = v0\n\t" /* initialize the write area */
+ "r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "{ if (q0) vtmp.w = vgather(%1, m0, v0.w).w\n\t"
+ " vmem(%4 + #0) = vtmp.new }\n\t"
+ : : "r"(predslo), "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetslo), "r"(vgatherlo), "r"(init)
+ : "r1", "q0", "m0", "v0", "memory");
+ asm ("v0.h = vsplat(%5)\n\t"
+ "vmem(%4 + #0) = v0\n\t" /* initialize the write area */
+ "r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "{ if (q0) vtmp.w = vgather(%1, m0, v0.w).w\n\t"
+ " vmem(%4 + #0) = vtmp.new }\n\t"
+ : : "r"(predshi), "r"(vtcm.vscatter32), "r"(region_len),
+ "r"(offsetshi), "r"(vgatherhi), "r"(init)
+ : "r1", "q0", "m0", "v0", "memory");
sync_gather(vgatherlo);
sync_gather(vgatherhi);
}
-/* gather the elements from the scatter16_32 buffer */
+/* gather the elements from the scatter16_32 buffer using HVX */
void vector_gather_16_32(void)
{
- HVX_Vector *vgather;
- HVX_VectorPair offsets;
- HVX_Vector values;
+ asm ("m0 = %1\n\t"
+ "v0 = vmem(%2 + #0)\n\t"
+ "v1 = vmem(%2 + #1)\n\t"
+ "{ vtmp.h = vgather(%0, m0, v1:0.w).h\n\t"
+ " vmem(%3 + #0) = vtmp.new }\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "v0.h = vdeal(v0.h)\n\t" /* deal the elements to get the order back */
+ "vmem(%3 + #0) = v0\n\t"
+ : : "r"(vtcm.vscatter16_32), "r"(region_len),
+ "r"(word_offsets), "r"(vtcm.vgather16_32)
+ : "m0", "v0", "v1", "memory");
- /* get the vtcm address to gather from */
- vgather = (HVX_Vector *)&vtcm.vgather16_32;
-
- /* get the word offsets in a vector pair */
- offsets = *(HVX_VectorPair *)word_offsets;
-
- VGATHER_16_32(vgather, &vtcm.vscatter16_32, region_len, offsets);
-
- /* deal the elements to get the order back */
- values = *(HVX_Vector *)vgather;
- values = VDEAL_H(values);
-
- /* write it back to vtcm address */
- *(HVX_Vector *)vgather = values;
+ sync_gather(vtcm.vgather16_32);
}
+/* masked gather the elements from the scatter16_32 buffer using HVX */
void vector_gather_16_32_masked(void)
{
- HVX_Vector *vgather;
- HVX_VectorPair offsets;
- HVX_Vector pred_reg;
- HVX_VectorPred preds;
- HVX_Vector values;
+ unsigned short init = gather_16_masked_init();
- /* get the vtcm address to gather from */
- vgather = (HVX_Vector *)&vtcm.vgather16_32;
+ asm ("v0.h = vsplat(%5)\n\t"
+ "vmem(%4 + #0) = v0\n\t" /* initialize the write area */
+ "r1 = #-1\n\t"
+ "v0 = vmem(%0 + #0)\n\t"
+ "v0.h = vshuff(v0.h)\n\t" /* shuffle the predicates */
+ "q0 = vand(v0, r1)\n\t"
+ "m0 = %2\n\t"
+ "v0 = vmem(%3 + #0)\n\t"
+ "v1 = vmem(%3 + #1)\n\t"
+ "{ if (q0) vtmp.h = vgather(%1, m0, v1:0.w).h\n\t"
+ " vmem(%4 + #0) = vtmp.new }\n\t"
+ "v0 = vmem(%4 + #0)\n\t"
+ "v0.h = vdeal(v0.h)\n\t" /* deal the elements to get the order back */
+ "vmem(%4 + #0) = v0\n\t"
+ : : "r"(half_predicates), "r"(vtcm.vscatter16_32), "r"(region_len),
+ "r"(word_offsets), "r"(vtcm.vgather16_32), "r"(init)
+ : "r1", "q0", "m0", "v0", "v1", "memory");
- /* get the word offsets in a vector pair */
- offsets = *(HVX_VectorPair *)word_offsets;
- pred_reg = *(HVX_Vector *)half_predicates;
- pred_reg = VSHUFF_H(pred_reg);
- preds = VAND_VAL(pred_reg, ~0);
-
- *vgather = VSPLAT_H(gather_16_masked_init());
- VGATHER_16_32_MASKED(vgather, preds, &vtcm.vscatter16_32, region_len,
- offsets);
-
- /* deal the elements to get the order back */
- values = *(HVX_Vector *)vgather;
- values = VDEAL_H(values);
-
- /* write it back to vtcm address */
- *(HVX_Vector *)vgather = values;
+ sync_gather(vtcm.vgather16_32);
}
static void check_buffer(const char *name, void *c, void *r, size_t size)
@@ -579,6 +604,7 @@
}
}
+/* scatter-accumulate the 16 bit elements using C */
void check_scatter_16_acc()
{
memset(vscatter16_ref, FILL_CHAR,
@@ -589,7 +615,7 @@
SCATTER_BUFFER_SIZE * sizeof(unsigned short));
}
-/* scatter the 16 bit elements using C */
+/* masked scatter the 16 bit elements using C */
void scalar_scatter_16_masked(unsigned short *vscatter16)
{
for (int i = 0; i < MATRIX_SIZE; i++) {
@@ -628,7 +654,7 @@
SCATTER_BUFFER_SIZE * sizeof(unsigned int));
}
-/* scatter the 32 bit elements using C */
+/* scatter-accumulate the 32 bit elements using C */
void scalar_scatter_32_acc(unsigned int *vscatter32)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
@@ -646,7 +672,7 @@
SCATTER_BUFFER_SIZE * sizeof(unsigned int));
}
-/* scatter the 32 bit elements using C */
+/* masked scatter the 32 bit elements using C */
void scalar_scatter_32_masked(unsigned int *vscatter32)
{
for (int i = 0; i < MATRIX_SIZE; i++) {
@@ -667,7 +693,7 @@
SCATTER_BUFFER_SIZE * sizeof(unsigned int));
}
-/* scatter the 32 bit elements using C */
+/* scatter the 16 bit elements with 32 bit offsets using C */
void scalar_scatter_16_32(unsigned short *vscatter16_32)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
@@ -684,7 +710,7 @@
SCATTER_BUFFER_SIZE * sizeof(unsigned short));
}
-/* scatter the 32 bit elements using C */
+/* scatter-accumulate the 16 bit elements with 32 bit offsets using C */
void scalar_scatter_16_32_acc(unsigned short *vscatter16_32)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
@@ -702,6 +728,7 @@
SCATTER_BUFFER_SIZE * sizeof(unsigned short));
}
+/* masked scatter the 16 bit elements with 32 bit offsets using C */
void scalar_scatter_16_32_masked(unsigned short *vscatter16_32)
{
for (int i = 0; i < MATRIX_SIZE; i++) {
@@ -738,6 +765,7 @@
MATRIX_SIZE * sizeof(unsigned short));
}
+/* masked gather the elements from the scatter buffer using C */
void scalar_gather_16_masked(unsigned short *vgather16)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
@@ -756,7 +784,7 @@
MATRIX_SIZE * sizeof(unsigned short));
}
-/* gather the elements from the scatter buffer using C */
+/* gather the elements from the scatter32 buffer using C */
void scalar_gather_32(unsigned int *vgather32)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
@@ -772,6 +800,7 @@
MATRIX_SIZE * sizeof(unsigned int));
}
+/* masked gather the elements from the scatter32 buffer using C */
void scalar_gather_32_masked(unsigned int *vgather32)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
@@ -781,7 +810,6 @@
}
}
-
void check_gather_32_masked(void)
{
memset(vgather32_ref, gather_32_masked_init(),
@@ -791,7 +819,7 @@
vgather32_ref, MATRIX_SIZE * sizeof(unsigned int));
}
-/* gather the elements from the scatter buffer using C */
+/* gather the elements from the scatter16_32 buffer using C */
void scalar_gather_16_32(unsigned short *vgather16_32)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
@@ -807,6 +835,7 @@
MATRIX_SIZE * sizeof(unsigned short));
}
+/* masked gather the elements from the scatter16_32 buffer using C */
void scalar_gather_16_32_masked(unsigned short *vgather16_32)
{
for (int i = 0; i < MATRIX_SIZE; ++i) {
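The offset, value and predicate arrays above gain __attribute__((aligned(128))) because the rewritten routines load them straight into HVX registers with vmem(), and those accesses assume full-vector, 128-byte alignment. A minimal sketch of what the attribute guarantees (the array name is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* With the attribute, the array's address is a multiple of 128, which is
     * what an aligned HVX vmem() load or store requires. */
    static unsigned short example_offsets[64] __attribute__((aligned(128)));

    int main(void)
    {
        assert(((uintptr_t)example_offsets & 127) == 0);
        return 0;
    }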
diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
index ae8b3d7..373db69 100644
--- a/tests/tcg/multiarch/Makefile.target
+++ b/tests/tcg/multiarch/Makefile.target
@@ -64,6 +64,7 @@
$(call run-test, test-mmap-$*, $(QEMU) -p $* $<, $< ($* byte pages))
ifneq ($(HAVE_GDB_BIN),)
+ifeq ($(HOST_GDB_SUPPORTS_ARCH),y)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py
run-gdbstub-sha1: sha1
@@ -89,6 +90,10 @@
else
run-gdbstub-%:
+ $(call skip-test, "gdbstub test $*", "no guest arch support")
+endif
+else
+run-gdbstub-%:
$(call skip-test, "gdbstub test $*", "need working gdb")
endif
EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \
diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target
index 368b64d..5f432c9 100644
--- a/tests/tcg/multiarch/system/Makefile.softmmu-target
+++ b/tests/tcg/multiarch/system/Makefile.softmmu-target
@@ -15,6 +15,7 @@
MULTIARCH_TESTS = $(patsubst $(MULTIARCH_SYSTEM_SRC)/%.c, %, $(MULTIARCH_TEST_SRCS))
ifneq ($(HAVE_GDB_BIN),)
+ifeq ($(HOST_GDB_SUPPORTS_ARCH),y)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py
run-gdbstub-memory: memory
@@ -26,7 +27,10 @@
"-monitor none -display none -chardev file$(COMMA)path=$<.out$(COMMA)id=output $(QEMU_OPTS)" \
--bin $< --test $(MULTIARCH_SRC)/gdbstub/memory.py, \
softmmu gdbstub support)
-
+else
+run-gdbstub-%:
+ $(call skip-test, "gdbstub test $*", "no guest arch support")
+endif
else
run-gdbstub-%:
$(call skip-test, "gdbstub test $*", "need working gdb")
diff --git a/tests/tcg/s390x/Makefile.softmmu-target b/tests/tcg/s390x/Makefile.softmmu-target
index 725b6c5..3e7f72a 100644
--- a/tests/tcg/s390x/Makefile.softmmu-target
+++ b/tests/tcg/s390x/Makefile.softmmu-target
@@ -1,11 +1,25 @@
S390X_SRC=$(SRC_PATH)/tests/tcg/s390x
VPATH+=$(S390X_SRC)
QEMU_OPTS=-action panic=exit-failure -kernel
+LINK_SCRIPT=$(S390X_SRC)/softmmu.ld
+LDFLAGS=-nostdlib -static -Wl,-T$(LINK_SCRIPT) -Wl,--build-id=none
-%: %.S
- $(CC) -march=z13 -m64 -nostdlib -static -Wl,-Ttext=0 \
- -Wl,--build-id=none $< -o $@
+%.o: %.S
+ $(CC) -march=z13 -m64 -c $< -o $@
+
+%: %.o $(LINK_SCRIPT)
+ $(CC) $< -o $@ $(LDFLAGS)
TESTS += unaligned-lowcore
TESTS += bal
TESTS += sam
+TESTS += lpsw
+TESTS += lpswe-early
+TESTS += ssm-early
+TESTS += stosm-early
+TESTS += exrl-ssm-early
+
+include $(S390X_SRC)/pgm-specification.mak
+$(PGM_SPECIFICATION_TESTS): pgm-specification-softmmu.o
+$(PGM_SPECIFICATION_TESTS): LDFLAGS+=pgm-specification-softmmu.o
+TESTS += $(PGM_SPECIFICATION_TESTS)
diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target
index 72ad309..0031868 100644
--- a/tests/tcg/s390x/Makefile.target
+++ b/tests/tcg/s390x/Makefile.target
@@ -2,6 +2,9 @@
VPATH+=$(S390X_SRC)
CFLAGS+=-march=zEC12 -m64
+%.o: %.c
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -c $< -o $@
+
config-cc.mak: Makefile
$(quiet-@)( \
$(call cc-option,-march=z14, CROSS_CC_HAS_Z14); \
@@ -28,10 +31,20 @@
TESTS+=clst
TESTS+=long-double
TESTS+=cdsg
+TESTS+=chrl
+TESTS+=rxsbg
+TESTS+=ex-relative-long
cdsg: CFLAGS+=-pthread
cdsg: LDFLAGS+=-pthread
+rxsbg: CFLAGS+=-O2
+
+include $(S390X_SRC)/pgm-specification.mak
+$(PGM_SPECIFICATION_TESTS): pgm-specification-user.o
+$(PGM_SPECIFICATION_TESTS): LDFLAGS+=pgm-specification-user.o
+TESTS += $(PGM_SPECIFICATION_TESTS)
+
Z13_TESTS=vistr
$(Z13_TESTS): CFLAGS+=-march=z13 -O2
TESTS+=$(Z13_TESTS)
@@ -51,7 +64,7 @@
TESTS+=$(Z15_TESTS)
endif
-ifneq ($(HAVE_GDB_BIN),)
+ifeq ($(HOST_GDB_SUPPORTS_ARCH),y)
GDB_SCRIPT=$(SRC_PATH)/tests/guest-debug/run-test.py
run-gdbstub-signals-s390x: signals-s390x
diff --git a/tests/tcg/s390x/br-odd.S b/tests/tcg/s390x/br-odd.S
new file mode 100644
index 0000000..2fae47a
--- /dev/null
+++ b/tests/tcg/s390x/br-odd.S
@@ -0,0 +1,16 @@
+/*
+ * Test BRanching to a non-mapped odd address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ lgrl %r1,odd_addr
+ br %r1
+
+ .align 8
+odd_addr:
+ .quad 0xDDDDDDDDDDDDDDDD
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,0xDDDDDDDDDDDDDDDD
diff --git a/tests/tcg/s390x/cgrl-unaligned.S b/tests/tcg/s390x/cgrl-unaligned.S
new file mode 100644
index 0000000..164d68f
--- /dev/null
+++ b/tests/tcg/s390x/cgrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test CGRL with a non-doubleword aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ cgrl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .long 0
+unaligned:
+ .quad 0
diff --git a/tests/tcg/s390x/chrl.c b/tests/tcg/s390x/chrl.c
new file mode 100644
index 0000000..b1c3a1c
--- /dev/null
+++ b/tests/tcg/s390x/chrl.c
@@ -0,0 +1,80 @@
+#include <stdlib.h>
+#include <assert.h>
+#include <stdint.h>
+
+static void test_chrl(void)
+{
+ uint32_t program_mask, cc;
+
+ asm volatile (
+ ".pushsection .rodata\n"
+ "0:\n\t"
+ ".short 1, 0x8000\n\t"
+ ".popsection\n\t"
+
+ "chrl %[r], 0b\n\t"
+ "ipm %[program_mask]\n"
+ : [program_mask] "=r" (program_mask)
+ : [r] "r" (1)
+ );
+
+ cc = program_mask >> 28;
+ assert(!cc);
+
+ asm volatile (
+ ".pushsection .rodata\n"
+ "0:\n\t"
+ ".short -1, 0x8000\n\t"
+ ".popsection\n\t"
+
+ "chrl %[r], 0b\n\t"
+ "ipm %[program_mask]\n"
+ : [program_mask] "=r" (program_mask)
+ : [r] "r" (-1)
+ );
+
+ cc = program_mask >> 28;
+ assert(!cc);
+}
+
+static void test_cghrl(void)
+{
+ uint32_t program_mask, cc;
+
+ asm volatile (
+ ".pushsection .rodata\n"
+ "0:\n\t"
+ ".short 1, 0x8000, 0, 0\n\t"
+ ".popsection\n\t"
+
+ "cghrl %[r], 0b\n\t"
+ "ipm %[program_mask]\n"
+ : [program_mask] "=r" (program_mask)
+ : [r] "r" (1L)
+ );
+
+ cc = program_mask >> 28;
+ assert(!cc);
+
+ asm volatile (
+ ".pushsection .rodata\n"
+ "0:\n\t"
+ ".short -1, 0x8000, 0, 0\n\t"
+ ".popsection\n\t"
+
+ "cghrl %[r], 0b\n\t"
+ "ipm %[program_mask]\n"
+ : [program_mask] "=r" (program_mask)
+ : [r] "r" (-1L)
+ );
+
+ cc = program_mask >> 28;
+ assert(!cc);
+}
+
+int main(void)
+{
+ test_chrl();
+ test_cghrl();
+ return EXIT_SUCCESS;
+}
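chrl.c above (and ex-relative-long.c later in this series) reads the condition code back with IPM: shifting the low word right by 28, optionally masking with 3 since IPM zeroes the two bits above the cc, recovers it. A small helper capturing the idiom (sketch only):

    /* Sketch: recover the condition code from an IPM result, matching
     * "cc = program_mask >> 28" in chrl.c and "(pm >> 28) & 3" in
     * ex-relative-long.c. */
    static inline unsigned int cc_from_ipm(unsigned int ipm_result)
    {
        return (ipm_result >> 28) & 3;
    }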
diff --git a/tests/tcg/s390x/clrl-unaligned.S b/tests/tcg/s390x/clrl-unaligned.S
new file mode 100644
index 0000000..182b1b6
--- /dev/null
+++ b/tests/tcg/s390x/clrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test CLRL with a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ clrl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .short 0
+unaligned:
+ .long 0
diff --git a/tests/tcg/s390x/crl-unaligned.S b/tests/tcg/s390x/crl-unaligned.S
new file mode 100644
index 0000000..b86fbe0
--- /dev/null
+++ b/tests/tcg/s390x/crl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test CRL with a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ crl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .short 0
+unaligned:
+ .long 0
diff --git a/tests/tcg/s390x/ex-odd.S b/tests/tcg/s390x/ex-odd.S
new file mode 100644
index 0000000..4e42a47
--- /dev/null
+++ b/tests/tcg/s390x/ex-odd.S
@@ -0,0 +1,17 @@
+/*
+ * Test EXECUTing a non-mapped odd address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ lgrl %r1,odd_addr
+fail:
+ ex 0,0(%r1)
+
+ .align 8
+odd_addr:
+ .quad 0xDDDDDDDDDDDDDDDD
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,fail
diff --git a/tests/tcg/s390x/ex-relative-long.c b/tests/tcg/s390x/ex-relative-long.c
new file mode 100644
index 0000000..21fbef6
--- /dev/null
+++ b/tests/tcg/s390x/ex-relative-long.c
@@ -0,0 +1,156 @@
+/* Check EXECUTE with relative long instructions as targets. */
+#include <stdlib.h>
+#include <stdio.h>
+
+struct test {
+ const char *name;
+ long (*func)(long reg, long *cc);
+ long exp_reg;
+ long exp_mem;
+ long exp_cc;
+};
+
+/*
+ * Each test sets the MEM_IDXth element of the mem array to MEM and uses a
+ * single relative long instruction on it. The other elements remain zero.
+ * This is in order to prevent stumbling upon MEM in random memory in case
+ * there is an off-by-a-small-value bug.
+ *
+ * Note that while gcc supports the ZL constraint for relative long operands,
+ * clang doesn't, so the assembly code accesses mem[MEM_IDX] using MEM_ASM.
+ */
+static long mem[0x1000];
+#define MEM_IDX 0x800
+#define MEM_ASM "mem+0x800*8"
+
+/* Initial %r2 value. */
+#define REG 0x1234567887654321
+
+/* Initial mem[MEM_IDX] value. */
+#define MEM 0xfedcba9889abcdef
+
+/* Initial cc value. */
+#define CC 0
+
+/* Relative long instructions and their expected effects. */
+#define FOR_EACH_INSN(F) \
+ F(cgfrl, REG, MEM, 2) \
+ F(cghrl, REG, MEM, 2) \
+ F(cgrl, REG, MEM, 2) \
+ F(chrl, REG, MEM, 1) \
+ F(clgfrl, REG, MEM, 2) \
+ F(clghrl, REG, MEM, 2) \
+ F(clgrl, REG, MEM, 1) \
+ F(clhrl, REG, MEM, 2) \
+ F(clrl, REG, MEM, 1) \
+ F(crl, REG, MEM, 1) \
+ F(larl, (long)&mem[MEM_IDX], MEM, CC) \
+ F(lgfrl, 0xfffffffffedcba98, MEM, CC) \
+ F(lghrl, 0xfffffffffffffedc, MEM, CC) \
+ F(lgrl, MEM, MEM, CC) \
+ F(lhrl, 0x12345678fffffedc, MEM, CC) \
+ F(llghrl, 0x000000000000fedc, MEM, CC) \
+ F(llhrl, 0x123456780000fedc, MEM, CC) \
+ F(lrl, 0x12345678fedcba98, MEM, CC) \
+ F(stgrl, REG, REG, CC) \
+ F(sthrl, REG, 0x4321ba9889abcdef, CC) \
+ F(strl, REG, 0x8765432189abcdef, CC)
+
+/* Test functions. */
+#define DEFINE_EX_TEST(insn, exp_reg, exp_mem, exp_cc) \
+ static long test_ex_ ## insn(long reg, long *cc) \
+ { \
+ register long r2 asm("r2"); \
+ char mask = 0x20; /* make target use %r2 */ \
+ long pm, target; \
+ \
+ r2 = reg; \
+ asm("larl %[target],0f\n" \
+ "cr %%r0,%%r0\n" /* initial cc */ \
+ "ex %[mask],0(%[target])\n" \
+ "jg 1f\n" \
+ "0: " #insn " %%r0," MEM_ASM "\n" \
+ "1: ipm %[pm]\n" \
+ : [target] "=&a" (target), [r2] "+r" (r2), [pm] "=r" (pm) \
+ : [mask] "a" (mask) \
+ : "cc", "memory"); \
+ reg = r2; \
+ *cc = (pm >> 28) & 3; \
+ \
+ return reg; \
+ }
+
+#define DEFINE_EXRL_TEST(insn, exp_reg, exp_mem, exp_cc) \
+ static long test_exrl_ ## insn(long reg, long *cc) \
+ { \
+ register long r2 asm("r2"); \
+ char mask = 0x20; /* make target use %r2 */ \
+ long pm; \
+ \
+ r2 = reg; \
+ asm("cr %%r0,%%r0\n" /* initial cc */ \
+ "exrl %[mask],0f\n" \
+ "jg 1f\n" \
+ "0: " #insn " %%r0," MEM_ASM "\n" \
+ "1: ipm %[pm]\n" \
+ : [r2] "+r" (r2), [pm] "=r" (pm) \
+ : [mask] "a" (mask) \
+ : "cc", "memory"); \
+ reg = r2; \
+ *cc = (pm >> 28) & 3; \
+ \
+ return reg; \
+ }
+
+FOR_EACH_INSN(DEFINE_EX_TEST)
+FOR_EACH_INSN(DEFINE_EXRL_TEST)
+
+/* Test definitions. */
+#define REGISTER_EX_EXRL_TEST(ex_insn, insn, _exp_reg, _exp_mem, _exp_cc) \
+ { \
+ .name = #ex_insn " " #insn, \
+ .func = test_ ## ex_insn ## _ ## insn, \
+ .exp_reg = (_exp_reg), \
+ .exp_mem = (_exp_mem), \
+ .exp_cc = (_exp_cc), \
+ },
+
+#define REGISTER_EX_TEST(insn, exp_reg, exp_mem, exp_cc) \
+ REGISTER_EX_EXRL_TEST(ex, insn, exp_reg, exp_mem, exp_cc)
+
+#define REGISTER_EXRL_TEST(insn, exp_reg, exp_mem, exp_cc) \
+ REGISTER_EX_EXRL_TEST(exrl, insn, exp_reg, exp_mem, exp_cc)
+
+static const struct test tests[] = {
+ FOR_EACH_INSN(REGISTER_EX_TEST)
+ FOR_EACH_INSN(REGISTER_EXRL_TEST)
+};
+
+/* Loop over all tests and run them. */
+int main(void)
+{
+ const struct test *test;
+ int ret = EXIT_SUCCESS;
+ long reg, cc;
+ size_t i;
+
+ for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ test = &tests[i];
+ mem[MEM_IDX] = MEM;
+ cc = -1;
+ reg = test->func(REG, &cc);
+#define ASSERT_EQ(expected, actual) do { \
+ if (expected != actual) { \
+ fprintf(stderr, "%s: " #expected " (0x%lx) != " #actual " (0x%lx)\n", \
+ test->name, expected, actual); \
+ ret = EXIT_FAILURE; \
+ } \
+} while (0)
+ ASSERT_EQ(test->exp_reg, reg);
+ ASSERT_EQ(test->exp_mem, mem[MEM_IDX]);
+ ASSERT_EQ(test->exp_cc, cc);
+#undef ASSERT_EQ
+ }
+
+ return ret;
+}
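The mask value 0x20 used by the test functions exploits how EXECUTE modifies its target: the low byte of the mask register is ORed into bits 8-15 of the executed instruction, and for the RIL-format targets used here bits 8-11 hold the R1 field, so the OR turns the encoded %r0 into %r2. An illustrative byte-level computation (the opcode nibble is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t target_byte1 = 0x05;     /* hypothetical RIL byte: R1 = 0, low opcode nibble 5 */
        uint8_t mask = 0x20;             /* what the tests load into the EX/EXRL mask register */
        uint8_t effective = target_byte1 | mask;
        assert((effective >> 4) == 2);   /* R1 field now names %r2 */
        return 0;
    }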
diff --git a/tests/tcg/s390x/exrl-ssm-early.S b/tests/tcg/s390x/exrl-ssm-early.S
new file mode 100644
index 0000000..68fbd87
--- /dev/null
+++ b/tests/tcg/s390x/exrl-ssm-early.S
@@ -0,0 +1,43 @@
+/*
+ * Test early exception recognition using EXRL + SSM.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .org 0x8d
+ilc:
+ .org 0x8e
+program_interruption_code:
+ .org 0x150
+program_old_psw:
+ .org 0x1D0 /* program new PSW */
+ .quad 0,pgm
+ .org 0x200 /* lowcore padding */
+
+ .globl _start
+_start:
+ exrl %r0,ssm
+expected_pswa:
+ j failure
+ssm:
+ ssm ssm_op
+
+pgm:
+ chhsi program_interruption_code,0x6 /* specification exception? */
+ jne failure
+ cli ilc,6 /* ilc for EXRL? */
+ jne failure
+ clc program_old_psw(16),expected_old_psw /* correct old PSW? */
+ jne failure
+ lpswe success_psw
+failure:
+ lpswe failure_psw
+
+ssm_op:
+ .byte 0x08 /* bit 4 set */
+ .align 8
+expected_old_psw:
+ .quad 0x0800000180000000,expected_pswa /* bit 2 set */
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
diff --git a/tests/tcg/s390x/lgrl-unaligned.S b/tests/tcg/s390x/lgrl-unaligned.S
new file mode 100644
index 0000000..ef8d51d
--- /dev/null
+++ b/tests/tcg/s390x/lgrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test LGRL from a non-doubleword aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ lgrl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .long 0
+unaligned:
+ .quad 0
diff --git a/tests/tcg/s390x/llgfrl-unaligned.S b/tests/tcg/s390x/llgfrl-unaligned.S
new file mode 100644
index 0000000..c9b4eea
--- /dev/null
+++ b/tests/tcg/s390x/llgfrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test LLGFRL from a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ llgfrl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .short 0
+unaligned:
+ .long 0
diff --git a/tests/tcg/s390x/lpsw.S b/tests/tcg/s390x/lpsw.S
new file mode 100644
index 0000000..b37dec5
--- /dev/null
+++ b/tests/tcg/s390x/lpsw.S
@@ -0,0 +1,36 @@
+/*
+ * Test the LPSW instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .org 0x140
+svc_old_psw:
+ .org 0x1c0 /* supervisor call new PSW */
+ .quad 0x80000000,svc /* 31-bit mode */
+ .org 0x200 /* lowcore padding */
+
+ .globl _start
+_start:
+ lpsw short_psw
+lpsw_target:
+ svc 0
+expected_pswa:
+ j failure
+
+svc:
+ clc svc_old_psw(16),expected_psw /* correct full PSW? */
+ jne failure
+ lpswe success_psw
+failure:
+ lpswe failure_psw
+
+ .align 8
+short_psw:
+ .long 0x90001,0x80000000+lpsw_target /* problem state,
+ 64-bit mode */
+expected_psw:
+ .quad 0x1000180000000,expected_pswa /* corresponds to short_psw */
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
diff --git a/tests/tcg/s390x/lpswe-early.S b/tests/tcg/s390x/lpswe-early.S
new file mode 100644
index 0000000..90a7f21
--- /dev/null
+++ b/tests/tcg/s390x/lpswe-early.S
@@ -0,0 +1,38 @@
+/*
+ * Test early exception recognition using LPSWE.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .org 0x8d
+ilc:
+ .org 0x8e
+program_interruption_code:
+ .org 0x150
+program_old_psw:
+ .org 0x1D0 /* program new PSW */
+ .quad 0,pgm
+ .org 0x200 /* lowcore padding */
+
+ .globl _start
+_start:
+ lpswe bad_psw
+ j failure
+
+pgm:
+ chhsi program_interruption_code,0x6 /* specification exception? */
+ jne failure
+ cli ilc,0 /* ilc zero? */
+ jne failure
+ clc program_old_psw(16),bad_psw /* correct old PSW? */
+ jne failure
+ lpswe success_psw
+failure:
+ lpswe failure_psw
+
+ .align 8
+bad_psw:
+ .quad 0x8000000000000000,0xfedcba9876543210 /* bit 0 set */
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
diff --git a/tests/tcg/s390x/lpswe-unaligned.S b/tests/tcg/s390x/lpswe-unaligned.S
new file mode 100644
index 0000000..989f249
--- /dev/null
+++ b/tests/tcg/s390x/lpswe-unaligned.S
@@ -0,0 +1,18 @@
+/*
+ * Test LPSWE from a non-doubleword aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ larl %r1,unaligned
+fail:
+ lpswe 0(%r1)
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,fail
+ .long 0
+unaligned:
+ .quad 0
diff --git a/tests/tcg/s390x/lrl-unaligned.S b/tests/tcg/s390x/lrl-unaligned.S
new file mode 100644
index 0000000..11eb07f
--- /dev/null
+++ b/tests/tcg/s390x/lrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test LRL from a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ lrl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .short 0
+unaligned:
+ .long 0
diff --git a/tests/tcg/s390x/pgm-specification-softmmu.S b/tests/tcg/s390x/pgm-specification-softmmu.S
new file mode 100644
index 0000000..d534f4e
--- /dev/null
+++ b/tests/tcg/s390x/pgm-specification-softmmu.S
@@ -0,0 +1,40 @@
+/*
+ * Common softmmu code for specification exception testing.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .section .head
+ .org 0x8d
+ilc:
+ .org 0x8e
+program_interruption_code:
+ .org 0x150
+program_old_psw:
+ .org 0x1D0 /* program new PSW */
+ .quad 0x180000000,pgm /* 64-bit mode */
+ .org 0x200 /* lowcore padding */
+
+ .globl _start
+_start:
+ lpswe test_psw
+
+pgm:
+ chhsi program_interruption_code,0x6 /* PGM_SPECIFICATION? */
+ jne failure
+ lg %r0,expected_old_psw+8 /* ilc adjustment */
+ llgc %r1,ilc
+ agr %r0,%r1
+ stg %r0,expected_old_psw+8
+ clc expected_old_psw(16),program_old_psw /* correct location? */
+ jne failure
+ lpswe success_psw
+failure:
+ lpswe failure_psw
+
+ .align 8
+test_psw:
+ .quad 0x180000000,test /* 64-bit mode */
+success_psw:
+ .quad 0x2000180000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000180000000,0 /* disabled wait */
diff --git a/tests/tcg/s390x/pgm-specification-user.c b/tests/tcg/s390x/pgm-specification-user.c
new file mode 100644
index 0000000..9ee6907
--- /dev/null
+++ b/tests/tcg/s390x/pgm-specification-user.c
@@ -0,0 +1,37 @@
+/*
+ * Common user code for specification exception testing.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+extern void test(void);
+extern long expected_old_psw[2];
+
+static void handle_sigill(int sig, siginfo_t *info, void *ucontext)
+{
+ if ((long)info->si_addr != expected_old_psw[1]) {
+ _exit(EXIT_FAILURE);
+ }
+ _exit(EXIT_SUCCESS);
+}
+
+int main(void)
+{
+ struct sigaction act;
+ int err;
+
+ memset(&act, 0, sizeof(act));
+ act.sa_sigaction = handle_sigill;
+ act.sa_flags = SA_SIGINFO;
+ err = sigaction(SIGILL, &act, NULL);
+ assert(err == 0);
+
+ test();
+
+ return EXIT_FAILURE;
+}
diff --git a/tests/tcg/s390x/pgm-specification.mak b/tests/tcg/s390x/pgm-specification.mak
new file mode 100644
index 0000000..2999aee
--- /dev/null
+++ b/tests/tcg/s390x/pgm-specification.mak
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# List of specification exception tests.
+# Shared between the softmmu and the user makefiles.
+PGM_SPECIFICATION_TESTS = \
+ br-odd \
+ cgrl-unaligned \
+ clrl-unaligned \
+ crl-unaligned \
+ ex-odd \
+ lgrl-unaligned \
+ llgfrl-unaligned \
+ lpswe-unaligned \
+ lrl-unaligned \
+ stgrl-unaligned \
+ strl-unaligned
diff --git a/tests/tcg/s390x/rxsbg.c b/tests/tcg/s390x/rxsbg.c
new file mode 100644
index 0000000..4b155db
--- /dev/null
+++ b/tests/tcg/s390x/rxsbg.c
@@ -0,0 +1,46 @@
+/*
+ * Test the RXSBG instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+static inline __attribute__((__always_inline__)) void
+rxsbg(unsigned long *r1, unsigned long r2, int i3, int i4, int i5, int *cc)
+{
+ asm("rxsbg %[r1],%[r2],%[i3],%[i4],%[i5]\n"
+ "ipm %[cc]"
+ : [r1] "+r" (*r1), [cc] "=r" (*cc)
+ : [r2] "r" (r2) , [i3] "i" (i3) , [i4] "i" (i4) , [i5] "i" (i5)
+ : "cc");
+ *cc = (*cc >> 28) & 3;
+}
+
+void test_cc0(void)
+{
+ unsigned long r1 = 6;
+ int cc;
+
+ rxsbg(&r1, 3, 61 | 0x80, 62, 1, &cc);
+ assert(r1 == 6);
+ assert(cc == 0);
+}
+
+void test_cc1(void)
+{
+ unsigned long r1 = 2;
+ int cc;
+
+ rxsbg(&r1, 3, 61 | 0x80, 62, 1, &cc);
+ assert(r1 == 2);
+ assert(cc == 1);
+}
+
+int main(void)
+{
+ test_cc0();
+ test_cc1();
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/s390x/softmmu.ld b/tests/tcg/s390x/softmmu.ld
new file mode 100644
index 0000000..ea944ea
--- /dev/null
+++ b/tests/tcg/s390x/softmmu.ld
@@ -0,0 +1,20 @@
+/*
+ * Linker script for the softmmu test kernels.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+ENTRY(_start)
+
+SECTIONS {
+ . = 0;
+
+ .text : {
+ *(.head)
+ *(.text)
+ }
+
+ /DISCARD/ : {
+ *(*)
+ }
+}
diff --git a/tests/tcg/s390x/ssm-early.S b/tests/tcg/s390x/ssm-early.S
new file mode 100644
index 0000000..6dfe40c
--- /dev/null
+++ b/tests/tcg/s390x/ssm-early.S
@@ -0,0 +1,41 @@
+/*
+ * Test early exception recognition using SSM.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .org 0x8d
+ilc:
+ .org 0x8e
+program_interruption_code:
+ .org 0x150
+program_old_psw:
+ .org 0x1D0 /* program new PSW */
+ .quad 0,pgm
+ .org 0x200 /* lowcore padding */
+
+ .globl _start
+_start:
+ ssm ssm_op
+expected_pswa:
+ j failure
+
+pgm:
+ chhsi program_interruption_code,0x6 /* specification exception? */
+ jne failure
+ cli ilc,4 /* ilc for SSM? */
+ jne failure
+ clc program_old_psw(16),expected_old_psw /* correct old PSW? */
+ jne failure
+ lpswe success_psw
+failure:
+ lpswe failure_psw
+
+ssm_op:
+ .byte 0x20 /* bit 2 set */
+ .align 8
+expected_old_psw:
+ .quad 0x2000000180000000,expected_pswa /* bit 2 set */
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
diff --git a/tests/tcg/s390x/stgrl-unaligned.S b/tests/tcg/s390x/stgrl-unaligned.S
new file mode 100644
index 0000000..32df377
--- /dev/null
+++ b/tests/tcg/s390x/stgrl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test STGRL to a non-doubleword aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ stgrl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .long 0
+unaligned:
+ .quad 0
diff --git a/tests/tcg/s390x/stosm-early.S b/tests/tcg/s390x/stosm-early.S
new file mode 100644
index 0000000..0689924
--- /dev/null
+++ b/tests/tcg/s390x/stosm-early.S
@@ -0,0 +1,41 @@
+/*
+ * Test early exception recognition using STOSM.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .org 0x8d
+ilc:
+ .org 0x8e
+program_interruption_code:
+ .org 0x150
+program_old_psw:
+ .org 0x1D0 /* program new PSW */
+ .quad 0,pgm
+ .org 0x200 /* lowcore padding */
+
+ .globl _start
+_start:
+ stosm ssm_op,0x10 /* bit 3 set */
+expected_pswa:
+ j failure
+
+pgm:
+ chhsi program_interruption_code,0x6 /* specification exception? */
+ jne failure
+ cli ilc,4 /* ilc for STOSM? */
+ jne failure
+ clc program_old_psw(16),expected_old_psw /* correct old PSW? */
+ jne failure
+ lpswe success_psw
+failure:
+ lpswe failure_psw
+
+ssm_op:
+ .byte 0
+ .align 8
+expected_old_psw:
+ .quad 0x1000000180000000,expected_pswa /* bit 3 set */
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
diff --git a/tests/tcg/s390x/strl-unaligned.S b/tests/tcg/s390x/strl-unaligned.S
new file mode 100644
index 0000000..1d24881
--- /dev/null
+++ b/tests/tcg/s390x/strl-unaligned.S
@@ -0,0 +1,16 @@
+/*
+ * Test STRL to a non-word aligned address.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+ .globl test
+test:
+ strl %r1,unaligned
+
+ .align 8
+ .globl expected_old_psw
+expected_old_psw:
+ .quad 0x180000000,test
+ .short 0
+unaligned:
+ .long 0
diff --git a/tests/tcg/xtensa/Makefile.softmmu-target b/tests/tcg/xtensa/Makefile.softmmu-target
index 973e552..ba6cd9f 100644
--- a/tests/tcg/xtensa/Makefile.softmmu-target
+++ b/tests/tcg/xtensa/Makefile.softmmu-target
@@ -2,7 +2,8 @@
# Xtensa softmmu tests
#
-ifneq ($(TARGET_BIG_ENDIAN),y)
+CORE=dc232b
+ifneq ($(shell $(QEMU) -cpu help | grep -w $(CORE)),)
XTENSA_SRC = $(SRC_PATH)/tests/tcg/xtensa
XTENSA_ALL = $(filter-out $(XTENSA_SRC)/linker.ld.S,$(wildcard $(XTENSA_SRC)/*.S))
@@ -15,7 +16,6 @@
TESTS += $(XTENSA_USABLE_TESTS)
VPATH += $(XTENSA_SRC)
-CORE=dc232b
QEMU_OPTS+=-M sim -cpu $(CORE) -nographic -semihosting -icount 6 $(EXTFLAGS) -kernel
INCLUDE_DIRS = $(SRC_PATH)/target/xtensa/core-$(CORE)
@@ -26,6 +26,7 @@
LDFLAGS = -Tlinker.ld -nostartfiles -nostdlib
CRT = crt.o vectors.o
+CLEANFILES += linker.ld
linker.ld: linker.ld.S
$(CC) $(XTENSA_INC) -E -P $< -o $@
diff --git a/tests/tcg/xtensaeb/Makefile.softmmu-target b/tests/tcg/xtensaeb/Makefile.softmmu-target
new file mode 100644
index 0000000..4204a96
--- /dev/null
+++ b/tests/tcg/xtensaeb/Makefile.softmmu-target
@@ -0,0 +1,5 @@
+#
+# Xtensa softmmu tests
+#
+
+include $(SRC_PATH)/tests/tcg/xtensa/Makefile.softmmu-target
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index 51f453e..fa63cfe 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -11,6 +11,7 @@
'check-qobject': [],
'check-qjson': [],
'check-qlit': [],
+ 'test-error-report': [],
'test-qobject-output-visitor': [testqapi],
'test-clone-visitor': [testqapi],
'test-qobject-input-visitor': [testqapi],
@@ -47,6 +48,7 @@
'ptimer-test': ['ptimer-test-stubs.c', meson.project_source_root() / 'hw/core/ptimer.c'],
'test-qapi-util': [],
'test-interval-tree': [],
+ 'test-xs-node': [qom],
}
if have_system or have_tools
diff --git a/tests/unit/socket-helpers.c b/tests/unit/socket-helpers.c
index eecadf3..6de27ba 100644
--- a/tests/unit/socket-helpers.c
+++ b/tests/unit/socket-helpers.c
@@ -160,7 +160,7 @@
int fd;
fd = socket(PF_UNIX, SOCK_STREAM, 0);
- closesocket(fd);
+ close(fd);
#ifdef _WIN32
*has_afunix = (fd != (int)INVALID_SOCKET);
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index c0426bd..a130f6f 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -531,6 +531,13 @@
g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
- g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
+
+ /*
+ * This test is flaky and sometimes fails in CI and otherwise:
+ * don't run unless user opts in via environment variable.
+ */
+ if (getenv("QEMU_TEST_FLAKY_TESTS")) {
+ g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
+ }
return g_test_run();
}
diff --git a/tests/unit/test-error-report.c b/tests/unit/test-error-report.c
new file mode 100644
index 0000000..54319c8
--- /dev/null
+++ b/tests/unit/test-error-report.c
@@ -0,0 +1,139 @@
+/*
+ * Error reporting test
+ *
+ * Copyright (C) 2022 Red Hat Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "glib-compat.h"
+#include <locale.h>
+
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+
+static void
+test_error_report_simple(void)
+{
+ if (g_test_subprocess()) {
+ error_report("%s", "test error");
+ warn_report("%s", "test warn");
+ info_report("%s", "test info");
+ return;
+ }
+
+ g_test_trap_subprocess(NULL, 0, 0);
+ g_test_trap_assert_passed();
+ g_test_trap_assert_stderr("\
+test-error-report: test error*\
+test-error-report: warning: test warn*\
+test-error-report: info: test info*\
+");
+}
+
+static void
+test_error_report_loc(void)
+{
+ if (g_test_subprocess()) {
+ loc_set_file("some-file.c", 7717);
+ error_report("%s", "test error1");
+ loc_set_none();
+ error_report("%s", "test error2");
+ return;
+ }
+
+ g_test_trap_subprocess(NULL, 0, 0);
+ g_test_trap_assert_passed();
+ g_test_trap_assert_stderr("\
+test-error-report:some-file.c:7717: test error1*\
+test-error-report: test error2*\
+");
+}
+
+static void
+test_error_report_glog(void)
+{
+ if (g_test_subprocess()) {
+ g_message("gmessage");
+ return;
+ }
+
+ g_test_trap_subprocess(NULL, 0, 0);
+ g_test_trap_assert_passed();
+ g_test_trap_assert_stderr("test-error-report: info: gmessage*");
+}
+
+static void
+test_error_report_once(void)
+{
+ int i;
+
+ if (g_test_subprocess()) {
+ for (i = 0; i < 3; i++) {
+ warn_report_once("warn");
+ error_report_once("err");
+ }
+ return;
+ }
+
+ g_test_trap_subprocess(NULL, 0, 0);
+ g_test_trap_assert_passed();
+ g_test_trap_assert_stderr("\
+test-error-report: warning: warn*\
+test-error-report: err*\
+");
+}
+
+static void
+test_error_report_timestamp(void)
+{
+ if (g_test_subprocess()) {
+ message_with_timestamp = true;
+ warn_report("warn");
+ error_report("err");
+ return;
+ }
+
+ g_test_trap_subprocess(NULL, 0, 0);
+ g_test_trap_assert_passed();
+ g_test_trap_assert_stderr("\
+*-*-*:*:* test-error-report: warning: warn*\
+*-*-*:*:* test-error-report: err*\
+");
+}
+
+static void
+test_error_warn(void)
+{
+ if (g_test_subprocess()) {
+ error_setg(&error_warn, "Testing &error_warn");
+ return;
+ }
+
+ g_test_trap_subprocess(NULL, 0, 0);
+ g_test_trap_assert_passed();
+ g_test_trap_assert_stderr("\
+test-error-report: warning: Testing &error_warn*\
+");
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ setlocale(LC_ALL, "");
+
+ g_test_init(&argc, &argv, NULL);
+ error_init("test-error-report");
+
+ g_test_add_func("/error-report/simple", test_error_report_simple);
+ g_test_add_func("/error-report/loc", test_error_report_loc);
+ g_test_add_func("/error-report/glog", test_error_report_glog);
+ g_test_add_func("/error-report/once", test_error_report_once);
+ g_test_add_func("/error-report/timestamp", test_error_report_timestamp);
+ g_test_add_func("/error-report/warn", test_error_warn);
+
+ return g_test_run();
+}
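
For reference, the new test relies on GLib's subprocess-trap pattern: each case re-runs itself in a child process, and the parent checks the child's exit status and matches its stderr against glob patterns. A minimal standalone sketch of that pattern, assuming plain GLib outside the QEMU tree; the test path and message are illustrative:

#include <glib.h>
#include <stdio.h>

static void test_stderr_message(void)
{
    if (g_test_subprocess()) {
        /* This branch runs in the re-spawned child process. */
        fprintf(stderr, "child: hello\n");
        return;
    }

    /* Parent: run the child, then check how it exited and what it printed. */
    g_test_trap_subprocess(NULL, 0, 0);
    g_test_trap_assert_passed();
    g_test_trap_assert_stderr("child: hello*");
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/sketch/stderr-message", test_stderr_message);
    return g_test_run();
}
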
diff --git a/tests/unit/test-io-channel-command.c b/tests/unit/test-io-channel-command.c
index c6e66a8..4f02261 100644
--- a/tests/unit/test-io-channel-command.c
+++ b/tests/unit/test-io-channel-command.c
@@ -35,7 +35,7 @@
static void test_io_channel_command_fifo(bool async)
{
g_autofree gchar *tmpdir = g_dir_make_tmp("qemu-test-io-channel.XXXXXX", NULL);
- g_autofree gchar *fifo = g_strdup_printf("%s/%s", tmpdir, TEST_FIFO);
+ g_autofree gchar *fifo = g_build_filename(tmpdir, TEST_FIFO, NULL);
g_autofree gchar *srcargs = g_strdup_printf("%s - PIPE:%s,wronly", socat, fifo);
g_autofree gchar *dstargs = g_strdup_printf("%s PIPE:%s,rdonly -", socat, fifo);
g_auto(GStrv) srcargv = g_strsplit(srcargs, " ", -1);
diff --git a/tests/unit/test-rcu-list.c b/tests/unit/test-rcu-list.c
index 9964171..8f0adb8 100644
--- a/tests/unit/test-rcu-list.c
+++ b/tests/unit/test-rcu-list.c
@@ -151,7 +151,7 @@
#define TEST_NAME "qslist"
#define TEST_LIST_REMOVE_RCU(el, f) \
- QSLIST_REMOVE_RCU(&Q_list_head, el, list_element, f)
+ QSLIST_REMOVE_RCU(&Q_list_head, el, list_element, f)
#define TEST_LIST_INSERT_AFTER_RCU(list_el, el, f) \
QSLIST_INSERT_AFTER_RCU(&Q_list_head, list_el, el, f)
diff --git a/tests/unit/test-xs-node.c b/tests/unit/test-xs-node.c
new file mode 100644
index 0000000..b80d10f
--- /dev/null
+++ b/tests/unit/test-xs-node.c
@@ -0,0 +1,871 @@
+/*
+ * QEMU XenStore XsNode testing
+ *
+ * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+
+static int nr_xs_nodes;
+static GList *xs_node_list;
+
+#define XS_NODE_UNIT_TEST
+
+/*
+ * We don't need the core Xen definitions. And we *do* want to be able
+ * to run the unit tests even on architectures that Xen doesn't support
+ * (because life's too short to bother doing otherwise, and test coverage
+ * doesn't hurt).
+ */
+#define __XEN_PUBLIC_XEN_H__
+typedef unsigned long xen_pfn_t;
+
+#include "hw/i386/kvm/xenstore_impl.c"
+
+#define DOMID_QEMU 0
+#define DOMID_GUEST 1
+
+static void dump_ref(const char *name, XsNode *n, int indent)
+{
+ int i;
+
+ if (!indent && name) {
+ printf("%s:\n", name);
+ }
+
+ for (i = 0; i < indent; i++) {
+ printf(" ");
+ }
+
+ printf("->%p(%d, '%s'): '%.*s'%s%s\n", n, n->ref, n->name,
+ (int)(n->content ? n->content->len : strlen("<empty>")),
+ n->content ? (char *)n->content->data : "<empty>",
+ n->modified_in_tx ? " MODIFIED" : "",
+ n->deleted_in_tx ? " DELETED" : "");
+
+ if (n->children) {
+ g_hash_table_foreach(n->children, (void *)dump_ref,
+ GINT_TO_POINTER(indent + 2));
+ }
+}
+
+/* This doesn't happen in qemu but we want to make valgrind happy */
+static void xs_impl_delete(XenstoreImplState *s, bool last)
+{
+ int err;
+
+ xs_impl_reset_watches(s, DOMID_GUEST);
+ g_assert(!s->nr_domu_watches);
+
+ err = xs_impl_rm(s, DOMID_QEMU, XBT_NULL, "/local");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 1);
+
+ g_hash_table_unref(s->watches);
+ g_hash_table_unref(s->transactions);
+ xs_node_unref(s->root);
+ g_free(s);
+
+ if (!last) {
+ return;
+ }
+
+ if (xs_node_list) {
+ GList *l;
+ for (l = xs_node_list; l; l = l->next) {
+ XsNode *n = l->data;
+ printf("Remaining node at %p name %s ref %u\n", n, n->name,
+ n->ref);
+ }
+ }
+ g_assert(!nr_xs_nodes);
+}
+
+struct compare_walk {
+ char path[XENSTORE_ABS_PATH_MAX + 1];
+ XsNode *parent_2;
+ bool compare_ok;
+};
+
+
+static bool compare_perms(GList *p1, GList *p2)
+{
+ while (p1) {
+ if (!p2 || g_strcmp0(p1->data, p2->data)) {
+ return false;
+ }
+ p1 = p1->next;
+ p2 = p2->next;
+ }
+ return (p2 == NULL);
+}
+
+static bool compare_content(GByteArray *c1, GByteArray *c2)
+{
+ size_t len1 = 0, len2 = 0;
+
+ if (c1) {
+ len1 = c1->len;
+ }
+ if (c2) {
+ len2 = c2->len;
+ }
+ if (len1 != len2) {
+ return false;
+ }
+
+ if (!len1) {
+ return true;
+ }
+
+ return !memcmp(c1->data, c2->data, len1);
+}
+
+static void compare_child(gpointer, gpointer, gpointer);
+
+static void compare_nodes(struct compare_walk *cw, XsNode *n1, XsNode *n2)
+{
+ int nr_children1 = 0, nr_children2 = 0;
+
+ if (n1->children) {
+ nr_children1 = g_hash_table_size(n1->children);
+ }
+ if (n2->children) {
+ nr_children2 = g_hash_table_size(n2->children);
+ }
+
+ if (n1->ref != n2->ref ||
+ n1->deleted_in_tx != n2->deleted_in_tx ||
+ n1->modified_in_tx != n2->modified_in_tx ||
+ !compare_perms(n1->perms, n2->perms) ||
+ !compare_content(n1->content, n2->content) ||
+ nr_children1 != nr_children2) {
+ cw->compare_ok = false;
+ printf("Compare failure on '%s'\n", cw->path);
+ }
+
+ if (nr_children1) {
+ XsNode *oldparent = cw->parent_2;
+ cw->parent_2 = n2;
+ g_hash_table_foreach(n1->children, compare_child, cw);
+
+ cw->parent_2 = oldparent;
+ }
+}
+
+static void compare_child(gpointer key, gpointer val, gpointer opaque)
+{
+ struct compare_walk *cw = opaque;
+ char *childname = key;
+ XsNode *child1 = val;
+ XsNode *child2 = g_hash_table_lookup(cw->parent_2->children, childname);
+ int pathlen = strlen(cw->path);
+
+ if (!child2) {
+ cw->compare_ok = false;
+ printf("Child '%s' does not exist under '%s'\n", childname, cw->path);
+ return;
+ }
+
+ strncat(cw->path, "/", sizeof(cw->path) - 1);
+ strncat(cw->path, childname, sizeof(cw->path) - 1);
+
+ compare_nodes(cw, child1, child2);
+ cw->path[pathlen] = '\0';
+}
+
+static bool compare_trees(XsNode *n1, XsNode *n2)
+{
+ struct compare_walk cw;
+
+ cw.path[0] = '\0';
+ cw.parent_2 = n2;
+ cw.compare_ok = true;
+
+ if (!n1 || !n2) {
+ return false;
+ }
+
+ compare_nodes(&cw, n1, n2);
+ return cw.compare_ok;
+}
+
+static void compare_tx(gpointer key, gpointer val, gpointer opaque)
+{
+ XenstoreImplState *s2 = opaque;
+ XsTransaction *t1 = val, *t2;
+ unsigned int tx_id = GPOINTER_TO_INT(key);
+
+ t2 = g_hash_table_lookup(s2->transactions, key);
+ g_assert(t2);
+
+ g_assert(t1->tx_id == tx_id);
+ g_assert(t2->tx_id == tx_id);
+ g_assert(t1->base_tx == t2->base_tx);
+ g_assert(t1->dom_id == t2->dom_id);
+ if (!compare_trees(t1->root, t2->root)) {
+ printf("Comparison failure in TX %u after serdes:\n", tx_id);
+ dump_ref("Original", t1->root, 0);
+ dump_ref("Deserialised", t2->root, 0);
+ g_assert(0);
+ }
+ g_assert(t1->nr_nodes == t2->nr_nodes);
+}
+
+static int write_str(XenstoreImplState *s, unsigned int dom_id,
+ unsigned int tx_id, const char *path,
+ const char *content)
+{
+ GByteArray *d = g_byte_array_new();
+ int err;
+
+ g_byte_array_append(d, (void *)content, strlen(content));
+ err = xs_impl_write(s, dom_id, tx_id, path, d);
+ g_byte_array_unref(d);
+ return err;
+}
+
+static void watch_cb(void *_str, const char *path, const char *token)
+{
+ GString *str = _str;
+
+ g_string_append(str, path);
+ g_string_append(str, token);
+}
+
+static void check_serdes(XenstoreImplState *s)
+{
+ XenstoreImplState *s2 = xs_impl_create(DOMID_GUEST);
+ GByteArray *bytes = xs_impl_serialize(s);
+ int nr_transactions1, nr_transactions2;
+ int ret;
+
+ ret = xs_impl_deserialize(s2, bytes, DOMID_GUEST, watch_cb, NULL);
+ g_assert(!ret);
+
+ g_byte_array_unref(bytes);
+
+ g_assert(s->last_tx == s2->last_tx);
+ g_assert(s->root_tx == s2->root_tx);
+
+ if (!compare_trees(s->root, s2->root)) {
+ printf("Comparison failure in main tree after serdes:\n");
+ dump_ref("Original", s->root, 0);
+ dump_ref("Deserialised", s2->root, 0);
+ g_assert(0);
+ }
+
+ nr_transactions1 = g_hash_table_size(s->transactions);
+ nr_transactions2 = g_hash_table_size(s2->transactions);
+ g_assert(nr_transactions1 == nr_transactions2);
+
+ g_hash_table_foreach(s->transactions, compare_tx, s2);
+
+ g_assert(s->nr_domu_watches == s2->nr_domu_watches);
+ g_assert(s->nr_domu_transactions == s2->nr_domu_transactions);
+ g_assert(s->nr_nodes == s2->nr_nodes);
+ xs_impl_delete(s2, false);
+}
+
+static XenstoreImplState *setup(void)
+{
+ XenstoreImplState *s = xs_impl_create(DOMID_GUEST);
+ char *abspath;
+ GList *perms;
+ int err;
+
+ abspath = g_strdup_printf("/local/domain/%u", DOMID_GUEST);
+
+ err = write_str(s, DOMID_QEMU, XBT_NULL, abspath, "");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 4);
+
+ perms = g_list_append(NULL, g_strdup_printf("n%u", DOMID_QEMU));
+ perms = g_list_append(perms, g_strdup_printf("r%u", DOMID_GUEST));
+
+ err = xs_impl_set_perms(s, DOMID_QEMU, XBT_NULL, abspath, perms);
+ g_assert(!err);
+
+ g_list_free_full(perms, g_free);
+ g_free(abspath);
+
+ abspath = g_strdup_printf("/local/domain/%u/some", DOMID_GUEST);
+
+ err = write_str(s, DOMID_QEMU, XBT_NULL, abspath, "");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 5);
+
+ perms = g_list_append(NULL, g_strdup_printf("n%u", DOMID_GUEST));
+
+ err = xs_impl_set_perms(s, DOMID_QEMU, XBT_NULL, abspath, perms);
+ g_assert(!err);
+
+ g_list_free_full(perms, g_free);
+ g_free(abspath);
+
+ return s;
+}
+
+static void test_xs_node_simple(void)
+{
+ GByteArray *data = g_byte_array_new();
+ XenstoreImplState *s = setup();
+ GString *guest_watches = g_string_new(NULL);
+ GString *qemu_watches = g_string_new(NULL);
+ GList *items = NULL;
+ XsNode *old_root;
+ uint64_t gencnt;
+ int err;
+
+ g_assert(s);
+
+ err = xs_impl_watch(s, DOMID_GUEST, "some", "guestwatch",
+ watch_cb, guest_watches);
+ g_assert(!err);
+ g_assert(guest_watches->len == strlen("someguestwatch"));
+ g_assert(!strcmp(guest_watches->str, "someguestwatch"));
+ g_string_truncate(guest_watches, 0);
+
+ err = xs_impl_watch(s, 0, "/local/domain/1/some", "qemuwatch",
+ watch_cb, qemu_watches);
+ g_assert(!err);
+ g_assert(qemu_watches->len == strlen("/local/domain/1/someqemuwatch"));
+ g_assert(!strcmp(qemu_watches->str, "/local/domain/1/someqemuwatch"));
+ g_string_truncate(qemu_watches, 0);
+
+ /* Read gives ENOENT when it should */
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "foo", data);
+ g_assert(err == ENOENT);
+
+ /* Write works */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path",
+ "something");
+ g_assert(s->nr_nodes == 7);
+ g_assert(!err);
+ g_assert(!strcmp(guest_watches->str,
+ "some/relative/pathguestwatch"));
+ g_assert(!strcmp(qemu_watches->str,
+ "/local/domain/1/some/relative/pathqemuwatch"));
+
+ g_string_truncate(qemu_watches, 0);
+ g_string_truncate(guest_watches, 0);
+ xs_impl_reset_watches(s, 0);
+
+ /* Read gives back what we wrote */
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative/path", data);
+ g_assert(!err);
+ g_assert(data->len == strlen("something"));
+ g_assert(!memcmp(data->data, "something", data->len));
+
+ /* Even if we use an absolute path */
+ g_byte_array_set_size(data, 0);
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL,
+ "/local/domain/1/some/relative/path", data);
+ g_assert(!err);
+ g_assert(data->len == strlen("something"));
+
+ g_assert(!qemu_watches->len);
+ g_assert(!guest_watches->len);
+ /* Keep a copy, to force COW mode */
+ old_root = xs_node_ref(s->root);
+
+ /* Write somewhere we aren't allowed, in COW mode */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "/local/domain/badplace",
+ "moredata");
+ g_assert(err == EACCES);
+ g_assert(s->nr_nodes == 7);
+
+ /* Write works again */
+ err = write_str(s, DOMID_GUEST, XBT_NULL,
+ "/local/domain/1/some/relative/path2",
+ "something else");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 8);
+ g_assert(!qemu_watches->len);
+ g_assert(!strcmp(guest_watches->str, "some/relative/path2guestwatch"));
+ g_string_truncate(guest_watches, 0);
+
+ /* Overwrite an existing node */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path",
+ "another thing");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 8);
+ g_assert(!qemu_watches->len);
+ g_assert(!strcmp(guest_watches->str, "some/relative/pathguestwatch"));
+ g_string_truncate(guest_watches, 0);
+
+ /* We can list the two files we wrote */
+ err = xs_impl_directory(s, DOMID_GUEST, XBT_NULL, "some/relative", &gencnt,
+ &items);
+ g_assert(!err);
+ g_assert(items);
+ g_assert(gencnt == 2);
+ g_assert(!strcmp(items->data, "path"));
+ g_assert(items->next);
+ g_assert(!strcmp(items->next->data, "path2"));
+ g_assert(!items->next->next);
+ g_list_free_full(items, g_free);
+
+ err = xs_impl_unwatch(s, DOMID_GUEST, "some", "guestwatch",
+ watch_cb, guest_watches);
+ g_assert(!err);
+
+ err = xs_impl_unwatch(s, DOMID_GUEST, "some", "guestwatch",
+ watch_cb, guest_watches);
+ g_assert(err == ENOENT);
+
+ err = xs_impl_watch(s, DOMID_GUEST, "some/relative/path2", "watchp2",
+ watch_cb, guest_watches);
+ g_assert(!err);
+ g_assert(guest_watches->len == strlen("some/relative/path2watchp2"));
+ g_assert(!strcmp(guest_watches->str, "some/relative/path2watchp2"));
+ g_string_truncate(guest_watches, 0);
+
+ err = xs_impl_watch(s, DOMID_GUEST, "/local/domain/1/some/relative",
+ "watchrel", watch_cb, guest_watches);
+ g_assert(!err);
+ g_assert(guest_watches->len ==
+ strlen("/local/domain/1/some/relativewatchrel"));
+ g_assert(!strcmp(guest_watches->str,
+ "/local/domain/1/some/relativewatchrel"));
+ g_string_truncate(guest_watches, 0);
+
+ /* Write somewhere else which already existed */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative", "moredata");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 8);
+
+ /* Write somewhere we aren't allowed */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "/local/domain/badplace",
+ "moredata");
+ g_assert(err == EACCES);
+
+ g_assert(!strcmp(guest_watches->str,
+ "/local/domain/1/some/relativewatchrel"));
+ g_string_truncate(guest_watches, 0);
+
+ g_byte_array_set_size(data, 0);
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative", data);
+ g_assert(!err);
+ g_assert(data->len == strlen("moredata"));
+ g_assert(!memcmp(data->data, "moredata", data->len));
+
+ /* Overwrite existing data */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative", "otherdata");
+ g_assert(!err);
+ g_string_truncate(guest_watches, 0);
+
+ g_byte_array_set_size(data, 0);
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative", data);
+ g_assert(!err);
+ g_assert(data->len == strlen("otherdata"));
+ g_assert(!memcmp(data->data, "otherdata", data->len));
+
+ /* Remove the subtree */
+ err = xs_impl_rm(s, DOMID_GUEST, XBT_NULL, "some/relative");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 5);
+
+ /* Each watch fires with the least specific relevant path */
+ g_assert(strstr(guest_watches->str,
+ "some/relative/path2watchp2"));
+ g_assert(strstr(guest_watches->str,
+ "/local/domain/1/some/relativewatchrel"));
+ g_string_truncate(guest_watches, 0);
+
+ g_byte_array_set_size(data, 0);
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative", data);
+ g_assert(err == ENOENT);
+ g_byte_array_unref(data);
+
+ xs_impl_reset_watches(s, DOMID_GUEST);
+ g_string_free(qemu_watches, true);
+ g_string_free(guest_watches, true);
+ xs_node_unref(old_root);
+ xs_impl_delete(s, true);
+}
+
+
+static void do_test_xs_node_tx(bool fail, bool commit)
+{
+ XenstoreImplState *s = setup();
+ GString *watches = g_string_new(NULL);
+ GByteArray *data = g_byte_array_new();
+ unsigned int tx_id = XBT_NULL;
+ int err;
+
+ g_assert(s);
+
+ /* Set a watch */
+ err = xs_impl_watch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+ g_assert(watches->len == strlen("somewatch"));
+ g_assert(!strcmp(watches->str, "somewatch"));
+ g_string_truncate(watches, 0);
+
+ /* Write something */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path",
+ "something");
+ g_assert(s->nr_nodes == 7);
+ g_assert(!err);
+ g_assert(!strcmp(watches->str,
+ "some/relative/pathwatch"));
+ g_string_truncate(watches, 0);
+
+ /* Create a transaction */
+ err = xs_impl_transaction_start(s, DOMID_GUEST, &tx_id);
+ g_assert(!err);
+
+ if (fail) {
+ /* Write something else in the root */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/relative/path",
+ "another thing");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 7);
+ g_assert(!strcmp(watches->str,
+ "some/relative/pathwatch"));
+ g_string_truncate(watches, 0);
+ }
+
+ g_assert(!watches->len);
+
+ /* Perform a write in the transaction */
+ err = write_str(s, DOMID_GUEST, tx_id, "some/relative/path",
+ "something else");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 7);
+ g_assert(!watches->len);
+
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative/path", data);
+ g_assert(!err);
+ if (fail) {
+ g_assert(data->len == strlen("another thing"));
+ g_assert(!memcmp(data->data, "another thing", data->len));
+ } else {
+ g_assert(data->len == strlen("something"));
+ g_assert(!memcmp(data->data, "something", data->len));
+ }
+ g_byte_array_set_size(data, 0);
+
+ err = xs_impl_read(s, DOMID_GUEST, tx_id, "some/relative/path", data);
+ g_assert(!err);
+ g_assert(data->len == strlen("something else"));
+ g_assert(!memcmp(data->data, "something else", data->len));
+ g_byte_array_set_size(data, 0);
+
+ check_serdes(s);
+
+ /* Attempt to commit the transaction */
+ err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, commit);
+ if (commit && fail) {
+ g_assert(err == EAGAIN);
+ } else {
+ g_assert(!err);
+ }
+ if (commit && !fail) {
+ g_assert(!strcmp(watches->str,
+ "some/relative/pathwatch"));
+ g_string_truncate(watches, 0);
+ } else {
+ g_assert(!watches->len);
+ }
+ g_assert(s->nr_nodes == 7);
+
+ check_serdes(s);
+
+ err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/relative/path", data);
+ g_assert(!err);
+ if (fail) {
+ g_assert(data->len == strlen("another thing"));
+ g_assert(!memcmp(data->data, "another thing", data->len));
+ } else if (commit) {
+ g_assert(data->len == strlen("something else"));
+ g_assert(!memcmp(data->data, "something else", data->len));
+ } else {
+ g_assert(data->len == strlen("something"));
+ g_assert(!memcmp(data->data, "something", data->len));
+ }
+ g_byte_array_unref(data);
+ g_string_free(watches, true);
+ xs_impl_delete(s, true);
+}
+
+static void test_xs_node_tx_fail(void)
+{
+ do_test_xs_node_tx(true, true);
+}
+
+static void test_xs_node_tx_abort(void)
+{
+ do_test_xs_node_tx(false, false);
+ do_test_xs_node_tx(true, false);
+}
+static void test_xs_node_tx_succeed(void)
+{
+ do_test_xs_node_tx(false, true);
+}
+
+static void test_xs_node_tx_rm(void)
+{
+ XenstoreImplState *s = setup();
+ GString *watches = g_string_new(NULL);
+ GByteArray *data = g_byte_array_new();
+ unsigned int tx_id = XBT_NULL;
+ int err;
+
+ g_assert(s);
+
+ /* Set a watch */
+ err = xs_impl_watch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+ g_assert(watches->len == strlen("somewatch"));
+ g_assert(!strcmp(watches->str, "somewatch"));
+ g_string_truncate(watches, 0);
+
+ /* Write something */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path",
+ "something");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 9);
+ g_assert(!strcmp(watches->str,
+ "some/deep/dark/relative/pathwatch"));
+ g_string_truncate(watches, 0);
+
+ /* Create a transaction */
+ err = xs_impl_transaction_start(s, DOMID_GUEST, &tx_id);
+ g_assert(!err);
+
+ /* Delete the tree in the transaction */
+ err = xs_impl_rm(s, DOMID_GUEST, tx_id, "some/deep/dark");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 9);
+ g_assert(!watches->len);
+
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path",
+ data);
+ g_assert(!err);
+ g_assert(data->len == strlen("something"));
+ g_assert(!memcmp(data->data, "something", data->len));
+ g_byte_array_set_size(data, 0);
+
+ check_serdes(s);
+
+ /* Commit the transaction */
+ err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, true);
+ g_assert(!err);
+ g_assert(s->nr_nodes == 6);
+
+ g_assert(!strcmp(watches->str, "some/deep/darkwatch"));
+ g_string_truncate(watches, 0);
+
+ /* Now the node is gone */
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path",
+ data);
+ g_assert(err == ENOENT);
+ g_byte_array_unref(data);
+
+ err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+
+ g_string_free(watches, true);
+ xs_impl_delete(s, true);
+}
+
+static void test_xs_node_tx_resurrect(void)
+{
+ XenstoreImplState *s = setup();
+ GString *watches = g_string_new(NULL);
+ GByteArray *data = g_byte_array_new();
+ unsigned int tx_id = XBT_NULL;
+ int err;
+
+ g_assert(s);
+
+ /* Write something */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path",
+ "something");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 9);
+
+ /* Another node to remain shared */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/place/safe", "keepme");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ /* This node will be wiped and resurrected */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark",
+ "foo");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ /* Set a watch */
+ err = xs_impl_watch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+ g_assert(watches->len == strlen("somewatch"));
+ g_assert(!strcmp(watches->str, "somewatch"));
+ g_string_truncate(watches, 0);
+
+ /* Create a transaction */
+ err = xs_impl_transaction_start(s, DOMID_GUEST, &tx_id);
+ g_assert(!err);
+
+ /* Delete the tree in the transaction */
+ err = xs_impl_rm(s, DOMID_GUEST, tx_id, "some/deep");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+ g_assert(!watches->len);
+
+ /* Resurrect part of it */
+ err = write_str(s, DOMID_GUEST, tx_id, "some/deep/dark/different/path",
+ "something");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ check_serdes(s);
+
+ /* Commit the transaction */
+ err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, true);
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ check_serdes(s);
+
+ /* lost data */
+ g_assert(strstr(watches->str, "some/deep/dark/different/pathwatch"));
+ /* topmost deleted */
+ g_assert(strstr(watches->str, "some/deep/dark/relativewatch"));
+ /* lost data */
+ g_assert(strstr(watches->str, "some/deep/darkwatch"));
+
+ g_string_truncate(watches, 0);
+
+ /* Now the node is gone */
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path",
+ data);
+ g_assert(err == ENOENT);
+ g_byte_array_unref(data);
+
+ check_serdes(s);
+
+ err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+
+ g_string_free(watches, true);
+ xs_impl_delete(s, true);
+}
+
+static void test_xs_node_tx_resurrect2(void)
+{
+ XenstoreImplState *s = setup();
+ GString *watches = g_string_new(NULL);
+ GByteArray *data = g_byte_array_new();
+ unsigned int tx_id = XBT_NULL;
+ int err;
+
+ g_assert(s);
+
+ /* Write something */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path",
+ "something");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 9);
+
+ /* Another node to remain shared */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/place/safe", "keepme");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ /* This node will be wiped and resurrected */
+ err = write_str(s, DOMID_GUEST, XBT_NULL, "some/deep/dark",
+ "foo");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ /* Set a watch */
+ err = xs_impl_watch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+ g_assert(watches->len == strlen("somewatch"));
+ g_assert(!strcmp(watches->str, "somewatch"));
+ g_string_truncate(watches, 0);
+
+ /* Create a transaction */
+ err = xs_impl_transaction_start(s, DOMID_GUEST, &tx_id);
+ g_assert(!err);
+
+ /* Delete the tree in the transaction */
+ err = xs_impl_rm(s, DOMID_GUEST, tx_id, "some/deep");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+ g_assert(!watches->len);
+
+ /* Resurrect part of it */
+ err = write_str(s, DOMID_GUEST, tx_id, "some/deep/dark/relative/path",
+ "something");
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ check_serdes(s);
+
+ /* Commit the transaction */
+ err = xs_impl_transaction_end(s, DOMID_GUEST, tx_id, true);
+ g_assert(!err);
+ g_assert(s->nr_nodes == 11);
+
+ check_serdes(s);
+
+ /* lost data */
+ g_assert(strstr(watches->str, "some/deep/dark/relative/pathwatch"));
+ /* lost data */
+ g_assert(strstr(watches->str, "some/deep/darkwatch"));
+
+ g_string_truncate(watches, 0);
+
+ /* The node is still there, since the transaction resurrected it */
+ err = xs_impl_read(s, DOMID_GUEST, XBT_NULL, "some/deep/dark/relative/path",
+ data);
+ g_assert(!err);
+ g_assert(data->len == strlen("something"));
+ g_assert(!memcmp(data->data, "something", data->len));
+
+ g_byte_array_unref(data);
+
+ check_serdes(s);
+
+ err = xs_impl_unwatch(s, DOMID_GUEST, "some", "watch",
+ watch_cb, watches);
+ g_assert(!err);
+
+ g_string_free(watches, true);
+ xs_impl_delete(s, true);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ module_call_init(MODULE_INIT_QOM);
+
+ g_test_add_func("/xs_node/simple", test_xs_node_simple);
+ g_test_add_func("/xs_node/tx_abort", test_xs_node_tx_abort);
+ g_test_add_func("/xs_node/tx_fail", test_xs_node_tx_fail);
+ g_test_add_func("/xs_node/tx_succeed", test_xs_node_tx_succeed);
+ g_test_add_func("/xs_node/tx_rm", test_xs_node_tx_rm);
+ g_test_add_func("/xs_node/tx_resurrect", test_xs_node_tx_resurrect);
+ g_test_add_func("/xs_node/tx_resurrect2", test_xs_node_tx_resurrect2);
+
+ return g_test_run();
+}
diff --git a/tests/vm/openbsd b/tests/vm/openbsd
index eaeb201..6b4fc29 100755
--- a/tests/vm/openbsd
+++ b/tests/vm/openbsd
@@ -106,8 +106,7 @@
self.console_wait("Password for root account")
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait_send("Start sshd(8)", "yes\n")
- self.console_wait_send("X Window System", "\n")
- self.console_wait_send("xenodm", "\n")
+ self.console_wait_send("X Window System", "no\n")
self.console_wait_send("console to com0", "\n")
self.console_wait_send("Which speed", "\n")
@@ -124,7 +123,32 @@
self.console_wait_send("timezone", "UTC\n")
self.console_wait_send("root disk", "\n")
self.console_wait_send("(W)hole disk", "\n")
- self.console_wait_send("(A)uto layout", "\n")
+ self.console_wait_send("(A)uto layout", "c\n")
+
+ # 4000 MB / as /dev/sd0a, at start of disk
+ self.console_wait_send("sd0>", "a a\n")
+ self.console_wait_send("offset:", "\n")
+ self.console_wait_send("size:", "4000M\n")
+ self.console_wait_send("FS type", "4.2BSD\n")
+ self.console_wait_send("mount point:", "/\n")
+
+ # 256 MB swap as /dev/sd0b
+ self.console_wait_send("sd0*>", "a b\n")
+ self.console_wait_send("offset:", "\n")
+ self.console_wait_send("size:", "256M\n")
+ self.console_wait_send("FS type", "swap\n")
+
+ # All remaining space for /home as /dev/sd0d
+ # NB: partition 'c' is reserved for the whole disk and can't be used.
+ self.console_wait_send("sd0*>", "a d\n")
+ self.console_wait_send("offset:", "\n")
+ self.console_wait_send("size:", "\n")
+ self.console_wait_send("FS type", "4.2BSD\n")
+ self.console_wait_send("mount point:", "/home\n")
+
+ self.console_wait_send("sd0*>", "q\n")
+ self.console_wait_send("Write new label?:", "y\n")
+
self.console_wait_send("Location of sets", "cd0\n")
self.console_wait_send("Pathname to the sets", "\n")
self.console_wait_send("Set name(s)", "\n")
diff --git a/tools/ebpf/Makefile.ebpf b/tools/ebpf/Makefile.ebpf
index 8f327ae..3391e7c 100755
--- a/tools/ebpf/Makefile.ebpf
+++ b/tools/ebpf/Makefile.ebpf
@@ -1,9 +1,9 @@
OBJS = rss.bpf.o
-LLC ?= llc
+LLVM_STRIP ?= llvm-strip
CLANG ?= clang
INC_FLAGS = `$(CLANG) -print-file-name=include`
-EXTRA_CFLAGS ?= -O2 -emit-llvm -fno-stack-protector
+EXTRA_CFLAGS ?= -O2 -g -target bpf
all: $(OBJS)
@@ -11,11 +11,13 @@
clean:
rm -f $(OBJS)
+ rm -f rss.bpf.skeleton.h
$(OBJS): %.o:%.c
$(CLANG) $(INC_FLAGS) \
-D__KERNEL__ -D__ASM_SYSREG_H \
-I../include $(LINUXINCLUDE) \
- $(EXTRA_CFLAGS) -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+ $(EXTRA_CFLAGS) -c $< -o $@
+ $(LLVM_STRIP) -g $@
bpftool gen skeleton rss.bpf.o > rss.bpf.skeleton.h
cp rss.bpf.skeleton.h ../../ebpf/
diff --git a/tools/ebpf/rss.bpf.c b/tools/ebpf/rss.bpf.c
index e85ec55..20f227e 100644
--- a/tools/ebpf/rss.bpf.c
+++ b/tools/ebpf/rss.bpf.c
@@ -76,29 +76,26 @@
};
};
-struct bpf_map_def SEC("maps")
-tap_rss_map_configurations = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct rss_config_t),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct rss_config_t));
+ __uint(max_entries, 1);
+} tap_rss_map_configurations SEC(".maps");
-struct bpf_map_def SEC("maps")
-tap_rss_map_toeplitz_key = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct toeplitz_key_data_t),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct toeplitz_key_data_t));
+ __uint(max_entries, 1);
+} tap_rss_map_toeplitz_key SEC(".maps");
-struct bpf_map_def SEC("maps")
-tap_rss_map_indirection_table = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u16),
- .max_entries = INDIRECTION_TABLE_SIZE,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u16));
+ __uint(max_entries, INDIRECTION_TABLE_SIZE);
+} tap_rss_map_indirection_table SEC(".maps");
static inline void net_rx_rss_add_chunk(__u8 *rss_input, size_t *bytes_written,
const void *ptr, size_t size) {
diff --git a/ui/cocoa.m b/ui/cocoa.m
index 289a2b1..168170a 100644
--- a/ui/cocoa.m
+++ b/ui/cocoa.m
@@ -46,6 +46,7 @@
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
+#include "qemu/error-report.h"
#include <Carbon/Carbon.h>
#include "hw/core/cpu.h"
@@ -1330,10 +1331,15 @@
return NO;
}
-/* Called when QEMU goes into the background */
-- (void) applicationWillResignActive: (NSNotification *)aNotification
+/*
+ * Called when QEMU goes into the background. Note that
+ * [-NSWindowDelegate windowDidResignKey:] is used here instead of
+ * [-NSApplicationDelegate applicationWillResignActive:] because the latter cannot
+ * detect that the window loses focus when the Dock is clicked on macOS 13.2.1.
+ */
+- (void) windowDidResignKey: (NSNotification *)aNotification
{
- COCOA_DEBUG("QemuCocoaAppController: applicationWillResignActive\n");
+ COCOA_DEBUG("%s\n", __func__);
[cocoaView ungrabMouse];
[cocoaView raiseAllKeys];
}
diff --git a/ui/console.c b/ui/console.c
index 98b701f..6e8a3cd 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -94,6 +94,8 @@
uint32_t head;
QemuUIInfo ui_info;
QEMUTimer *ui_timer;
+ QEMUCursor *cursor;
+ int cursor_x, cursor_y, cursor_on;
const GraphicHwOps *hw_ops;
void *hw;
@@ -1661,6 +1663,12 @@
con = active_console;
}
displaychangelistener_display_console(dcl, con, dcl->con ? &error_fatal : NULL);
+ if (con && con->cursor && dcl->ops->dpy_cursor_define) {
+ dcl->ops->dpy_cursor_define(dcl, con->cursor);
+ }
+ if (con && dcl->ops->dpy_mouse_set) {
+ dcl->ops->dpy_mouse_set(dcl, con->cursor_x, con->cursor_y, con->cursor_on);
+ }
text_console_update_cursor(NULL);
}
@@ -1905,6 +1913,9 @@
DisplayState *s = con->ds;
DisplayChangeListener *dcl;
+ con->cursor_x = x;
+ con->cursor_y = y;
+ con->cursor_on = on;
if (!qemu_console_is_visible(con)) {
return;
}
@@ -1923,6 +1934,8 @@
DisplayState *s = con->ds;
DisplayChangeListener *dcl;
+ cursor_unref(con->cursor);
+ con->cursor = cursor_ref(cursor);
if (!qemu_console_is_visible(con)) {
return;
}
@@ -2288,6 +2301,14 @@
return NULL;
}
+QEMUCursor *qemu_console_get_cursor(QemuConsole *con)
+{
+ if (con == NULL) {
+ con = active_console;
+ }
+ return con->cursor;
+}
+
bool qemu_console_is_visible(QemuConsole *con)
{
return (con == active_console) || (con->dcls > 0);
diff --git a/ui/cursor.c b/ui/cursor.c
index 835f080..6fe6799 100644
--- a/ui/cursor.c
+++ b/ui/cursor.c
@@ -106,12 +106,13 @@
return c;
}
-void cursor_get(QEMUCursor *c)
+QEMUCursor *cursor_ref(QEMUCursor *c)
{
c->refcount++;
+ return c;
}
-void cursor_put(QEMUCursor *c)
+void cursor_unref(QEMUCursor *c)
{
if (c == NULL)
return;
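
Because cursor_ref() now returns its argument, a caller can take a reference in the same expression that hands the cursor off, as the dbus-listener hunk below does with its GDestroyNotify. A hedged sketch of the renamed API, assuming the declarations stay in "ui/console.h" as before; the wrapper names are illustrative:

#include "qemu/osdep.h"
#include "ui/console.h"

static QEMUCursor *hold_cursor(QEMUCursor *c)
{
    /* Take a reference and pass the cursor along in one expression. */
    return cursor_ref(c);
}

static void release_cursor(QEMUCursor *c)
{
    /* Like the old cursor_put(), cursor_unref() tolerates NULL. */
    cursor_unref(c);
}
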
diff --git a/ui/dbus-clipboard.c b/ui/dbus-clipboard.c
index df9a754..fe7fcde 100644
--- a/ui/dbus-clipboard.c
+++ b/ui/dbus-clipboard.c
@@ -204,15 +204,6 @@
g_clear_object(&dpy->clipboard_proxy);
}
-static void
-dbus_on_clipboard_proxy_name_owner_changed(
- DBusDisplay *dpy,
- GObject *object,
- GParamSpec *pspec)
-{
- dbus_clipboard_unregister_proxy(dpy);
-}
-
static gboolean
dbus_clipboard_register(
DBusDisplay *dpy,
@@ -220,6 +211,7 @@
{
g_autoptr(GError) err = NULL;
const char *name = NULL;
+ GDBusConnection *connection = g_dbus_method_invocation_get_connection(invocation);
if (dpy->clipboard_proxy) {
g_dbus_method_invocation_return_error(
@@ -232,7 +224,7 @@
dpy->clipboard_proxy =
qemu_dbus_display1_clipboard_proxy_new_sync(
- g_dbus_method_invocation_get_connection(invocation),
+ connection,
G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START,
g_dbus_method_invocation_get_sender(invocation),
"/org/qemu/Display1/Clipboard",
@@ -252,7 +244,11 @@
g_object_connect(dpy->clipboard_proxy,
"swapped-signal::notify::g-name-owner",
- dbus_on_clipboard_proxy_name_owner_changed, dpy,
+ dbus_clipboard_unregister_proxy, dpy,
+ NULL);
+ g_object_connect(connection,
+ "swapped-signal::closed",
+ dbus_clipboard_unregister_proxy, dpy,
NULL);
qemu_clipboard_reset_serial();
diff --git a/ui/dbus-console.c b/ui/dbus-console.c
index 0bfaa22..f77bc49 100644
--- a/ui/dbus-console.c
+++ b/ui/dbus-console.c
@@ -412,14 +412,20 @@
}
static void
+dbus_mouse_update_is_absolute(DBusDisplayConsole *ddc)
+{
+ g_object_set(ddc->iface_mouse,
+ "is-absolute", qemu_input_is_absolute(),
+ NULL);
+}
+
+static void
dbus_mouse_mode_change(Notifier *notify, void *data)
{
DBusDisplayConsole *ddc =
container_of(notify, DBusDisplayConsole, mouse_mode_notifier);
- g_object_set(ddc->iface_mouse,
- "is-absolute", qemu_input_is_absolute(),
- NULL);
+ dbus_mouse_update_is_absolute(ddc);
}
int dbus_display_console_get_index(DBusDisplayConsole *ddc)
@@ -492,6 +498,7 @@
register_displaychangelistener(&ddc->dcl);
ddc->mouse_mode_notifier.notify = dbus_mouse_mode_change;
qemu_add_mouse_mode_change_notifier(&ddc->mouse_mode_notifier);
+ dbus_mouse_update_is_absolute(ddc);
return ddc;
}
diff --git a/ui/dbus-listener.c b/ui/dbus-listener.c
index 57d4e40..911acdc 100644
--- a/ui/dbus-listener.c
+++ b/ui/dbus-listener.c
@@ -27,9 +27,11 @@
#include "dbus.h"
#include <gio/gunixfdlist.h>
+#ifdef CONFIG_OPENGL
#include "ui/shader.h"
#include "ui/egl-helpers.h"
#include "ui/egl-context.h"
+#endif
#include "trace.h"
struct _DBusDisplayListener {
@@ -48,6 +50,7 @@
G_DEFINE_TYPE(DBusDisplayListener, dbus_display_listener, G_TYPE_OBJECT)
+#ifdef CONFIG_GBM
static void dbus_update_gl_cb(GObject *source_object,
GAsyncResult *res,
gpointer user_data)
@@ -149,7 +152,7 @@
DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
DisplaySurface *ds;
GVariant *v_data = NULL;
- egl_fb cursor_fb;
+ egl_fb cursor_fb = EGL_FB_INIT;
if (!dmabuf) {
qemu_dbus_display1_listener_call_mouse_set(
@@ -229,12 +232,14 @@
ddl->gl_updates = 0;
}
}
+#endif
static void dbus_refresh(DisplayChangeListener *dcl)
{
graphic_hw_update(dcl->con);
}
+#ifdef CONFIG_GBM
static void dbus_gl_gfx_update(DisplayChangeListener *dcl,
int x, int y, int w, int h)
{
@@ -242,6 +247,7 @@
ddl->gl_updates++;
}
+#endif
static void dbus_gfx_update(DisplayChangeListener *dcl,
int x, int y, int w, int h)
@@ -296,6 +302,7 @@
DBUS_DEFAULT_TIMEOUT, NULL, NULL, NULL);
}
+#ifdef CONFIG_GBM
static void dbus_gl_gfx_switch(DisplayChangeListener *dcl,
struct DisplaySurface *new_surface)
{
@@ -311,6 +318,7 @@
width, height, 0, 0, width, height);
}
}
+#endif
static void dbus_gfx_switch(DisplayChangeListener *dcl,
struct DisplaySurface *new_surface)
@@ -339,14 +347,13 @@
DBusDisplayListener *ddl = container_of(dcl, DBusDisplayListener, dcl);
GVariant *v_data = NULL;
- cursor_get(c);
v_data = g_variant_new_from_data(
G_VARIANT_TYPE("ay"),
c->data,
c->width * c->height * 4,
TRUE,
- (GDestroyNotify)cursor_put,
- c);
+ (GDestroyNotify)cursor_unref,
+ cursor_ref(c));
qemu_dbus_display1_listener_call_cursor_define(
ddl->proxy,
@@ -362,6 +369,7 @@
NULL);
}
+#ifdef CONFIG_GBM
const DisplayChangeListenerOps dbus_gl_dcl_ops = {
.dpy_name = "dbus-gl",
.dpy_gfx_update = dbus_gl_gfx_update,
@@ -379,6 +387,7 @@
.dpy_gl_release_dmabuf = dbus_release_dmabuf,
.dpy_gl_update = dbus_scanout_update,
};
+#endif
const DisplayChangeListenerOps dbus_dcl_ops = {
.dpy_name = "dbus",
@@ -407,11 +416,12 @@
{
DBusDisplayListener *ddl = DBUS_DISPLAY_LISTENER(object);
+ ddl->dcl.ops = &dbus_dcl_ops;
+#ifdef CONFIG_GBM
if (display_opengl) {
ddl->dcl.ops = &dbus_gl_dcl_ops;
- } else {
- ddl->dcl.ops = &dbus_dcl_ops;
}
+#endif
G_OBJECT_CLASS(dbus_display_listener_parent_class)->constructed(object);
}
diff --git a/ui/dbus.c b/ui/dbus.c
index f2dcba0..b9e9698 100644
--- a/ui/dbus.c
+++ b/ui/dbus.c
@@ -30,8 +30,10 @@
#include "qom/object_interfaces.h"
#include "sysemu/sysemu.h"
#include "ui/dbus-module.h"
+#ifdef CONFIG_OPENGL
#include "ui/egl-helpers.h"
#include "ui/egl-context.h"
+#endif
#include "audio/audio.h"
#include "audio/audio_int.h"
#include "qapi/error.h"
@@ -41,11 +43,14 @@
static DBusDisplay *dbus_display;
+#ifdef CONFIG_OPENGL
static QEMUGLContext dbus_create_context(DisplayGLCtx *dgc,
QEMUGLParams *params)
{
+#ifdef CONFIG_GBM
eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
qemu_egl_rn_ctx);
+#endif
return qemu_egl_create_context(dgc, params);
}
@@ -53,7 +58,11 @@
dbus_is_compatible_dcl(DisplayGLCtx *dgc,
DisplayChangeListener *dcl)
{
- return dcl->ops == &dbus_gl_dcl_ops || dcl->ops == &dbus_console_dcl_ops;
+ return
+#ifdef CONFIG_GBM
+ dcl->ops == &dbus_gl_dcl_ops ||
+#endif
+ dcl->ops == &dbus_console_dcl_ops;
}
static void
@@ -84,6 +93,7 @@
.dpy_gl_ctx_destroy_texture = dbus_destroy_texture,
.dpy_gl_ctx_update_texture = dbus_update_texture,
};
+#endif
static NotifierList dbus_display_notifiers =
NOTIFIER_LIST_INITIALIZER(dbus_display_notifiers);
@@ -112,10 +122,12 @@
DBusDisplay *dd = DBUS_DISPLAY(o);
g_autoptr(GDBusObjectSkeleton) vm = NULL;
+#ifdef CONFIG_OPENGL
dd->glctx.ops = &dbus_gl_ops;
if (display_opengl) {
dd->glctx.gls = qemu_gl_init_shader();
}
+#endif
dd->iface = qemu_dbus_display1_vm_skeleton_new();
dd->consoles = g_ptr_array_new_with_free_func(g_object_unref);
@@ -152,7 +164,9 @@
g_clear_object(&dd->iface);
g_free(dd->dbus_addr);
g_free(dd->audiodev);
+#ifdef CONFIG_OPENGL
g_clear_pointer(&dd->glctx.gls, qemu_gl_fini_shader);
+#endif
dbus_display = NULL;
}
@@ -220,7 +234,7 @@
dd->audiodev);
return;
}
- audio_state->drv->set_dbus_server(audio_state, dd->server);
+ audio_state->drv->set_dbus_server(audio_state, dd->server, dd->p2p);
}
consoles = g_array_new(FALSE, FALSE, sizeof(guint32));
@@ -290,11 +304,20 @@
g_cancellable_cancel(dbus_display->add_client_cancellable);
}
+#ifdef WIN32
+ socket = g_socket_new_from_fd(_get_osfhandle(csock), &err);
+#else
socket = g_socket_new_from_fd(csock, &err);
+#endif
if (!socket) {
error_setg(errp, "Failed to setup D-Bus socket: %s", err->message);
+ close(csock);
return false;
}
+#ifdef WIN32
+ /* socket owns the SOCKET handle now, so release our osf handle */
+ qemu_close_socket_osfhandle(csock);
+#endif
conn = g_socket_connection_factory_create_connection(socket);
@@ -451,12 +474,11 @@
DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_OFF;
if (mode != DISPLAYGL_MODE_OFF) {
- if (egl_rendernode_init(opts->u.dbus.rendernode, mode) < 0) {
- error_report("dbus: render node init failed");
- exit(1);
- }
-
- display_opengl = 1;
+#ifdef CONFIG_OPENGL
+ egl_init(opts->u.dbus.rendernode, mode, &error_fatal);
+#else
+ error_report("dbus: GL rendering is not supported");
+#endif
}
type_register(&dbus_vc_type_info);
diff --git a/ui/egl-headless.c b/ui/egl-headless.c
index ae07e91..ef70e6a 100644
--- a/ui/egl-headless.c
+++ b/ui/egl-headless.c
@@ -1,7 +1,7 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
-#include "sysemu/sysemu.h"
+#include "qapi/error.h"
#include "ui/console.h"
#include "ui/egl-helpers.h"
#include "ui/egl-context.h"
@@ -191,21 +191,21 @@
static void early_egl_headless_init(DisplayOptions *opts)
{
- display_opengl = 1;
+ DisplayGLMode mode = DISPLAYGL_MODE_ON;
+
+ if (opts->has_gl) {
+ mode = opts->gl;
+ }
+
+ egl_init(opts->u.egl_headless.rendernode, mode, &error_fatal);
}
static void egl_headless_init(DisplayState *ds, DisplayOptions *opts)
{
- DisplayGLMode mode = opts->has_gl ? opts->gl : DISPLAYGL_MODE_ON;
QemuConsole *con;
egl_dpy *edpy;
int idx;
- if (egl_rendernode_init(opts->u.egl_headless.rendernode, mode) < 0) {
- error_report("egl: render node init failed");
- exit(1);
- }
-
for (idx = 0;; idx++) {
DisplayGLCtx *ctx;
diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c
index 3a88245..4203163 100644
--- a/ui/egl-helpers.c
+++ b/ui/egl-helpers.c
@@ -19,6 +19,8 @@
#include "qemu/error-report.h"
#include "ui/console.h"
#include "ui/egl-helpers.h"
+#include "sysemu/sysemu.h"
+#include "qapi/error.h"
EGLDisplay *qemu_egl_display;
EGLConfig qemu_egl_config;
@@ -26,6 +28,48 @@
/* ------------------------------------------------------------------ */
+#if defined(CONFIG_X11) || defined(CONFIG_GBM)
+static const char *egl_get_error_string(void)
+{
+ EGLint error = eglGetError();
+
+ switch (error) {
+ case EGL_SUCCESS:
+ return "EGL_SUCCESS";
+ case EGL_NOT_INITIALIZED:
+ return "EGL_NOT_INITIALIZED";
+ case EGL_BAD_ACCESS:
+ return "EGL_BAD_ACCESS";
+ case EGL_BAD_ALLOC:
+ return "EGL_BAD_ALLOC";
+ case EGL_BAD_ATTRIBUTE:
+ return "EGL_BAD_ATTRIBUTE";
+ case EGL_BAD_CONTEXT:
+ return "EGL_BAD_CONTEXT";
+ case EGL_BAD_CONFIG:
+ return "EGL_BAD_CONFIG";
+ case EGL_BAD_CURRENT_SURFACE:
+ return "EGL_BAD_CURRENT_SURFACE";
+ case EGL_BAD_DISPLAY:
+ return "EGL_BAD_DISPLAY";
+ case EGL_BAD_SURFACE:
+ return "EGL_BAD_SURFACE";
+ case EGL_BAD_MATCH:
+ return "EGL_BAD_MATCH";
+ case EGL_BAD_PARAMETER:
+ return "EGL_BAD_PARAMETER";
+ case EGL_BAD_NATIVE_PIXMAP:
+ return "EGL_BAD_NATIVE_PIXMAP";
+ case EGL_BAD_NATIVE_WINDOW:
+ return "EGL_BAD_NATIVE_WINDOW";
+ case EGL_CONTEXT_LOST:
+ return "EGL_CONTEXT_LOST";
+ default:
+ return "Unknown EGL error";
+ }
+}
+#endif
+
static void egl_fb_delete_texture(egl_fb *fb)
{
if (!fb->delete_texture) {
@@ -438,20 +482,20 @@
qemu_egl_display = qemu_egl_get_display(dpy, platform);
if (qemu_egl_display == EGL_NO_DISPLAY) {
- error_report("egl: eglGetDisplay failed");
+ error_report("egl: eglGetDisplay failed: %s", egl_get_error_string());
return -1;
}
b = eglInitialize(qemu_egl_display, &major, &minor);
if (b == EGL_FALSE) {
- error_report("egl: eglInitialize failed");
+ error_report("egl: eglInitialize failed: %s", egl_get_error_string());
return -1;
}
b = eglBindAPI(gles ? EGL_OPENGL_ES_API : EGL_OPENGL_API);
if (b == EGL_FALSE) {
- error_report("egl: eglBindAPI failed (%s mode)",
- gles ? "gles" : "core");
+ error_report("egl: eglBindAPI failed (%s mode): %s",
+ gles ? "gles" : "core", egl_get_error_string());
return -1;
}
@@ -459,8 +503,8 @@
gles ? conf_att_gles : conf_att_core,
&qemu_egl_config, 1, &n);
if (b == EGL_FALSE || n != 1) {
- error_report("egl: eglChooseConfig failed (%s mode)",
- gles ? "gles" : "core");
+ error_report("egl: eglChooseConfig failed (%s mode): %s",
+ gles ? "gles" : "core", egl_get_error_string());
return -1;
}
@@ -527,3 +571,25 @@
return ectx;
}
+
+bool egl_init(const char *rendernode, DisplayGLMode mode, Error **errp)
+{
+ ERRP_GUARD();
+
+ if (mode == DISPLAYGL_MODE_OFF) {
+ error_setg(errp, "egl: turning off GL doesn't make sense");
+ return false;
+ }
+
+#ifdef CONFIG_GBM
+ if (egl_rendernode_init(rendernode, mode) < 0) {
+ error_setg(errp, "egl: render node init failed");
+ return false;
+ }
+ display_opengl = 1;
+ return true;
+#else
+ error_setg(errp, "egl: not available on this platform");
+ return false;
+#endif
+}
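
The new egl_init() reports failure through an Error ** instead of exiting on its own, which is why the dbus, egl-headless and spice-core callers elsewhere in this series can simply pass &error_fatal. A hedged sketch of a caller that degrades gracefully instead; the function name is illustrative and not part of the patch:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "ui/egl-helpers.h"

static bool try_enable_gl(const char *rendernode, DisplayGLMode mode)
{
    Error *local_err = NULL;

    if (!egl_init(rendernode, mode, &local_err)) {
        /* Report the failure and let the caller fall back to a non-GL path. */
        error_report_err(local_err);
        return false;
    }
    return true;
}
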
diff --git a/ui/gtk.c b/ui/gtk.c
index fd82e9b..f16e0f8 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -450,7 +450,8 @@
GdkDisplay *dpy;
gint x_root, y_root;
- if (qemu_input_is_absolute()) {
+ if (!gtk_widget_get_realized(vc->gfx.drawing_area) ||
+ qemu_input_is_absolute()) {
return;
}
@@ -1783,7 +1784,9 @@
VCChardev *vcd = VC_CHARDEV(chr);
VirtualConsole *vc = vcd->console;
- gd_vc_send_chars(vc);
+ if (vc) {
+ gd_vc_send_chars(vc);
+ }
}
static void gd_vc_chr_set_echo(Chardev *chr, bool echo)
diff --git a/ui/keycodemapdb b/ui/keycodemapdb
index d21009b..f5772a6 160000
--- a/ui/keycodemapdb
+++ b/ui/keycodemapdb
@@ -1 +1 @@
-Subproject commit d21009b1c9f94b740ea66be8e48a1d8ad8124023
+Subproject commit f5772a62ec52591ff6870b7e8ef32482371f22c6
diff --git a/ui/meson.build b/ui/meson.build
index 612ea23..3303697 100644
--- a/ui/meson.build
+++ b/ui/meson.build
@@ -83,7 +83,9 @@
'--interface-prefix', 'org.qemu.',
'--c-namespace', 'QemuDBus',
'--generate-c-code', '@BASENAME@'])
- dbus_ss.add(when: [gio, pixman, opengl, gbm],
+ dbus_display1_lib = static_library('dbus-display1', dbus_display1, dependencies: gio)
+ dbus_display1_dep = declare_dependency(link_with: dbus_display1_lib, include_directories: include_directories('.'))
+ dbus_ss.add(when: [gio, pixman, dbus_display1_dep],
if_true: [files(
'dbus-chardev.c',
'dbus-clipboard.c',
@@ -91,7 +93,7 @@
'dbus-error.c',
'dbus-listener.c',
'dbus.c',
- ), dbus_display1])
+ ), opengl, gbm])
ui_modules += {'dbus' : dbus_ss}
endif
diff --git a/ui/sdl2.c b/ui/sdl2.c
index 8cb7741..b12dec4 100644
--- a/ui/sdl2.c
+++ b/ui/sdl2.c
@@ -58,6 +58,11 @@
#define SDL2_MAX_IDLE_COUNT (2 * GUI_REFRESH_INTERVAL_DEFAULT \
/ SDL2_REFRESH_INTERVAL_BUSY + 1)
+/* introduced in SDL 2.0.10 */
+#ifndef SDL_HINT_RENDER_BATCHING
+#define SDL_HINT_RENDER_BATCHING "SDL_RENDER_BATCHING"
+#endif
+
static void sdl_update_caption(struct sdl2_console *scon);
static struct sdl2_console *get_scon_from_window(uint32_t window_id)
@@ -99,9 +104,20 @@
surface_width(scon->surface),
surface_height(scon->surface),
flags);
- scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0);
if (scon->opengl) {
- scon->winctx = SDL_GL_GetCurrentContext();
+ const char *driver = "opengl";
+
+ if (scon->opts->gl == DISPLAYGL_MODE_ES) {
+ driver = "opengles2";
+ }
+
+ SDL_SetHint(SDL_HINT_RENDER_DRIVER, driver);
+ SDL_SetHint(SDL_HINT_RENDER_BATCHING, "1");
+ }
+ scon->real_renderer = SDL_CreateRenderer(scon->real_window, -1, 0);
+
+ if (scon->opengl) {
+ scon->winctx = SDL_GL_CreateContext(scon->real_window);
}
sdl_update_caption(scon);
}
@@ -112,6 +128,8 @@
return;
}
+ SDL_GL_DeleteContext(scon->winctx);
+ scon->winctx = NULL;
SDL_DestroyRenderer(scon->real_renderer);
scon->real_renderer = NULL;
SDL_DestroyWindow(scon->real_window);
@@ -825,21 +843,9 @@
assert(o->type == DISPLAY_TYPE_SDL);
-#ifdef __linux__
- /* on Linux, SDL may use fbcon|directfb|svgalib when run without
- * accessible $DISPLAY to open X11 window. This is often the case
- * when qemu is run using sudo. But in this case, and when actually
- * run in X11 environment, SDL fights with X11 for the video card,
- * making current display unavailable, often until reboot.
- * So make x11 the default SDL video driver if this variable is unset.
- * This is a bit hackish but saves us from bigger problem.
- * Maybe it's a good idea to fix this in SDL instead.
- */
- if (!g_setenv("SDL_VIDEODRIVER", "x11", 0)) {
- fprintf(stderr, "Could not set SDL_VIDEODRIVER environment variable\n");
- exit(1);
+ if (SDL_GetHintBoolean("QEMU_ENABLE_SDL_LOGGING", SDL_FALSE)) {
+ SDL_LogSetAllPriority(SDL_LOG_PRIORITY_VERBOSE);
}
-#endif
if (SDL_Init(SDL_INIT_VIDEO)) {
fprintf(stderr, "Could not initialize SDL(%s) - exiting\n",
diff --git a/ui/shader/texture-blit-flip.vert b/ui/shader/texture-blit-flip.vert
index ba081fa..f7a448d 100644
--- a/ui/shader/texture-blit-flip.vert
+++ b/ui/shader/texture-blit-flip.vert
@@ -1,4 +1,3 @@
-
#version 300 es
in vec2 in_position;
diff --git a/ui/shader/texture-blit.frag b/ui/shader/texture-blit.frag
index bfa202c..8ed95a4 100644
--- a/ui/shader/texture-blit.frag
+++ b/ui/shader/texture-blit.frag
@@ -1,4 +1,3 @@
-
#version 300 es
uniform sampler2D image;
diff --git a/ui/shader/texture-blit.vert b/ui/shader/texture-blit.vert
index 6fe2744..fb48d70 100644
--- a/ui/shader/texture-blit.vert
+++ b/ui/shader/texture-blit.vert
@@ -1,4 +1,3 @@
-
#version 300 es
in vec2 in_position;
diff --git a/ui/spice-core.c b/ui/spice-core.c
index 76f7c2b..67cfd3c 100644
--- a/ui/spice-core.c
+++ b/ui/spice-core.c
@@ -90,13 +90,23 @@
static void watch_read(void *opaque)
{
SpiceWatch *watch = opaque;
- watch->func(watch->fd, SPICE_WATCH_EVENT_READ, watch->opaque);
+ int fd = watch->fd;
+
+#ifdef WIN32
+ fd = _get_osfhandle(fd);
+#endif
+ watch->func(fd, SPICE_WATCH_EVENT_READ, watch->opaque);
}
static void watch_write(void *opaque)
{
SpiceWatch *watch = opaque;
- watch->func(watch->fd, SPICE_WATCH_EVENT_WRITE, watch->opaque);
+ int fd = watch->fd;
+
+#ifdef WIN32
+ fd = _get_osfhandle(fd);
+#endif
+ watch->func(fd, SPICE_WATCH_EVENT_WRITE, watch->opaque);
}
static void watch_update_mask(SpiceWatch *watch, int event_mask)
@@ -117,6 +127,14 @@
{
SpiceWatch *watch;
+#ifdef WIN32
+ fd = _open_osfhandle(fd, _O_BINARY);
+ if (fd < 0) {
+ error_setg_win32(&error_warn, WSAGetLastError(), "Couldn't associate a FD with the SOCKET");
+ return NULL;
+ }
+#endif
+
watch = g_malloc0(sizeof(*watch));
watch->fd = fd;
watch->func = func;
@@ -129,6 +147,10 @@
static void watch_remove(SpiceWatch *watch)
{
qemu_set_fd_handler(watch->fd, NULL, NULL, NULL);
+#ifdef WIN32
+ /* SOCKET is owned by spice */
+ qemu_close_to_socket(watch->fd);
+#endif
g_free(watch);
}
@@ -820,12 +842,7 @@
"incompatible with -spice port/tls-port");
exit(1);
}
- if (egl_rendernode_init(qemu_opt_get(opts, "rendernode"),
- DISPLAYGL_MODE_ON) != 0) {
- error_report("Failed to initialize EGL render node for SPICE GL");
- exit(1);
- }
- display_opengl = 1;
+ egl_init(qemu_opt_get(opts, "rendernode"), DISPLAYGL_MODE_ON, &error_fatal);
spice_opengl = 1;
}
#endif
@@ -913,6 +930,9 @@
static int qemu_spice_display_add_client(int csock, int skipauth, int tls)
{
+#ifdef WIN32
+ csock = qemu_close_socket_osfhandle(csock);
+#endif
if (tls) {
return spice_server_add_ssl_client(spice_server, csock, skipauth);
} else {
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 16802f9..5bee19a 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -460,11 +460,11 @@
if (ssd->cursor) {
QEMUCursor *c = ssd->cursor;
assert(ssd->dcl.con);
- cursor_get(c);
+ cursor_ref(c);
qemu_mutex_unlock(&ssd->lock);
dpy_cursor_define(ssd->dcl.con, c);
qemu_mutex_lock(&ssd->lock);
- cursor_put(c);
+ cursor_unref(c);
}
if (ssd->mouse_x != -1 && ssd->mouse_y != -1) {
@@ -765,8 +765,8 @@
SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
qemu_mutex_lock(&ssd->lock);
- cursor_get(c);
- cursor_put(ssd->cursor);
+ cursor_ref(c);
+ cursor_unref(ssd->cursor);
ssd->cursor = c;
ssd->hot_x = c->hot_x;
ssd->hot_y = c->hot_y;
diff --git a/ui/vnc.c b/ui/vnc.c
index d9eacad..bbd8b6b 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -988,10 +988,10 @@
static int vnc_cursor_define(VncState *vs)
{
- QEMUCursor *c = vs->vd->cursor;
+ QEMUCursor *c = qemu_console_get_cursor(vs->vd->dcl.con);
int isize;
- if (!vs->vd->cursor) {
+ if (!c) {
return -1;
}
@@ -1029,11 +1029,7 @@
VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
VncState *vs;
- cursor_put(vd->cursor);
g_free(vd->cursor_mask);
-
- vd->cursor = c;
- cursor_get(vd->cursor);
vd->cursor_msize = cursor_get_mono_bpl(c) * c->height;
vd->cursor_mask = g_malloc0(vd->cursor_msize);
cursor_get_mono_mask(c, 0, vd->cursor_mask);
diff --git a/ui/vnc.h b/ui/vnc.h
index a60fb13..757fa83 100644
--- a/ui/vnc.h
+++ b/ui/vnc.h
@@ -159,7 +159,6 @@
QKbdState *kbd;
QemuMutex mutex;
- QEMUCursor *cursor;
int cursor_msize;
uint8_t *cursor_mask;
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 6cc6256..a8be940 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -180,9 +180,9 @@
}
}
-void aio_set_fd_poll(AioContext *ctx, int fd,
- IOHandler *io_poll_begin,
- IOHandler *io_poll_end)
+static void aio_set_fd_poll(AioContext *ctx, int fd,
+ IOHandler *io_poll_begin,
+ IOHandler *io_poll_end)
{
AioHandler *node = find_aio_handler(ctx, fd);
diff --git a/util/aio-win32.c b/util/aio-win32.c
index 80cfe01..6bded00 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -22,6 +22,7 @@
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"
+#include "qemu/error-report.h"
struct AioHandler {
EventNotifier *e;
@@ -70,13 +71,20 @@
IOHandler *io_poll_ready,
void *opaque)
{
- /* fd is a SOCKET in our case */
AioHandler *old_node;
AioHandler *node = NULL;
+ SOCKET s;
+
+ if (!fd_is_socket(fd)) {
+ error_report("fd=%d is not a socket, AIO implementation is missing", fd);
+ return;
+ }
+
+ s = _get_osfhandle(fd);
qemu_lockcnt_lock(&ctx->list_lock);
QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
- if (old_node->pfd.fd == fd && !old_node->deleted) {
+ if (old_node->pfd.fd == s && !old_node->deleted) {
break;
}
}
@@ -87,7 +95,7 @@
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
- node->pfd.fd = fd;
+ node->pfd.fd = s;
node->pfd.events = 0;
if (node->io_read) {
@@ -115,7 +123,7 @@
QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
event = event_notifier_get_handle(&ctx->notifier);
- WSAEventSelect(node->pfd.fd, event, bitmask);
+ qemu_socket_select(fd, event, bitmask, NULL);
}
if (old_node) {
aio_remove_fd_handler(ctx, old_node);
@@ -125,13 +133,6 @@
aio_notify(ctx);
}
-void aio_set_fd_poll(AioContext *ctx, int fd,
- IOHandler *io_poll_begin,
- IOHandler *io_poll_end)
-{
- /* Not implemented */
-}
-
void aio_set_event_notifier(AioContext *ctx,
EventNotifier *e,
bool is_external,
diff --git a/util/async.c b/util/async.c
index 0657b75..21016a1 100644
--- a/util/async.c
+++ b/util/async.c
@@ -74,14 +74,21 @@
unsigned old_flags;
/*
- * The memory barrier implicit in qatomic_fetch_or makes sure that:
- * 1. idle & any writes needed by the callback are done before the
- * locations are read in the aio_bh_poll.
- * 2. ctx is loaded before the callback has a chance to execute and bh
- * could be freed.
+ * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
+ * insertion starts after BH_PENDING is set.
*/
old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
+
if (!(old_flags & BH_PENDING)) {
+ /*
+ * At this point the bottom half becomes visible to aio_bh_poll().
+ * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
+ * aio_bh_poll(), ensuring that:
+ * 1. any writes needed by the callback are visible from the callback
+ * after aio_bh_dequeue() returns bh.
+ * 2. ctx is loaded before the callback has a chance to execute and bh
+ * could be freed.
+ */
QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
}
@@ -107,11 +114,8 @@
QSLIST_REMOVE_HEAD(head, next);
/*
- * The qatomic_and is paired with aio_bh_enqueue(). The implicit memory
- * barrier ensures that the callback sees all writes done by the scheduling
- * thread. It also ensures that the scheduling thread sees the cleared
- * flag before bh->cb has run, and thus will call aio_notify again if
- * necessary.
+ * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
+ * the removal finishes before BH_PENDING is reset.
*/
*flags = qatomic_fetch_and(&bh->flags,
~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
@@ -158,6 +162,7 @@
BHListSlice *s;
int ret = 0;
+ /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
@@ -448,15 +453,15 @@
void aio_notify(AioContext *ctx)
{
/*
- * Write e.g. bh->flags before writing ctx->notified. Pairs with smp_mb in
- * aio_notify_accept.
+ * Write e.g. ctx->bh_list before writing ctx->notified. Pairs with
+ * smp_mb() in aio_notify_accept().
*/
smp_wmb();
qatomic_set(&ctx->notified, true);
/*
- * Write ctx->notified before reading ctx->notify_me. Pairs
- * with smp_mb in aio_ctx_prepare or aio_poll.
+ * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
+ * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
*/
smp_mb();
if (qatomic_read(&ctx->notify_me)) {
@@ -469,8 +474,9 @@
qatomic_set(&ctx->notified, false);
/*
- * Write ctx->notified before reading e.g. bh->flags. Pairs with smp_wmb
- * in aio_notify.
+ * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
+ * above clearing of ctx->notified before reads of e.g. bh->flags. Pairs
+ * with smp_wmb() in aio_notify().
*/
smp_mb();
}
@@ -493,6 +499,11 @@
EventNotifier *e = opaque;
AioContext *ctx = container_of(e, AioContext, notifier);
+ /*
+ * No need for load-acquire because we just want to kick the
+ * event loop. aio_notify_accept() takes care of synchronizing
+ * the event loop with the producers.
+ */
return qatomic_read(&ctx->notified);
}
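The reworked comments in util/async.c describe a simple protocol: the enqueue side sets BH_PENDING with an atomic RMW and only then inserts the BH into the list, while the dequeue side removes the BH first and only then clears BH_PENDING. A stripped-down model of that protocol using C11 atomics rather than QEMU's qatomic_* wrappers (illustrative only; names and types are not QEMU's):

#include <stdatomic.h>
#include <stdbool.h>

enum { BH_PENDING = 1 };

struct bh {
    _Atomic unsigned flags;
    /* list linkage omitted */
};

/* Enqueue side: the full barrier implied by the RMW orders the later list
 * insertion after BH_PENDING becomes visible (cf. aio_bh_enqueue()). */
static bool bh_mark_pending(struct bh *bh)
{
    unsigned old = atomic_fetch_or(&bh->flags, BH_PENDING);
    return !(old & BH_PENDING);   /* true: caller must insert into the list */
}

/* Dequeue side: the RMW orders the list removal before BH_PENDING is
 * cleared (cf. aio_bh_dequeue()). */
static unsigned bh_clear_pending(struct bh *bh)
{
    return atomic_fetch_and(&bh->flags, ~BH_PENDING);
}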
diff --git a/util/bitops.c b/util/bitops.c
index 3fe6b1c..4b647b3 100644
--- a/util/bitops.c
+++ b/util/bitops.c
@@ -71,8 +71,8 @@
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
- if (tmp == 0UL) { /* Are any bits set? */
- return result + size; /* Nope. */
+ if (tmp == 0UL) { /* Are any bits set? */
+ return result + size; /* Nope. */
}
found_middle:
return result + ctzl(tmp);
@@ -120,8 +120,8 @@
found_first:
tmp |= ~0UL << size;
- if (tmp == ~0UL) { /* Are any bits zero? */
- return result + size; /* Nope. */
+ if (tmp == ~0UL) { /* Are any bits zero? */
+ return result + size; /* Nope. */
}
found_middle:
return result + ctzl(~tmp);
diff --git a/util/envlist.c b/util/envlist.c
index ab55534..db937c0 100644
--- a/util/envlist.c
+++ b/util/envlist.c
@@ -3,13 +3,13 @@
#include "qemu/envlist.h"
struct envlist_entry {
- const char *ev_var; /* actual env value */
- QLIST_ENTRY(envlist_entry) ev_link;
+ const char *ev_var; /* actual env value */
+ QLIST_ENTRY(envlist_entry) ev_link;
};
struct envlist {
- QLIST_HEAD(, envlist_entry) el_entries; /* actual entries */
- size_t el_count; /* number of entries */
+ QLIST_HEAD(, envlist_entry) el_entries; /* actual entries */
+ size_t el_count; /* number of entries */
};
static int envlist_parse(envlist_t *envlist,
@@ -21,14 +21,14 @@
envlist_t *
envlist_create(void)
{
- envlist_t *envlist;
+ envlist_t *envlist;
- envlist = g_malloc(sizeof(*envlist));
+ envlist = g_malloc(sizeof(*envlist));
- QLIST_INIT(&envlist->el_entries);
- envlist->el_count = 0;
+ QLIST_INIT(&envlist->el_entries);
+ envlist->el_count = 0;
- return (envlist);
+ return (envlist);
}
/*
@@ -37,18 +37,18 @@
void
envlist_free(envlist_t *envlist)
{
- struct envlist_entry *entry;
+ struct envlist_entry *entry;
- assert(envlist != NULL);
+ assert(envlist != NULL);
- while (envlist->el_entries.lh_first != NULL) {
- entry = envlist->el_entries.lh_first;
- QLIST_REMOVE(entry, ev_link);
+ while (envlist->el_entries.lh_first != NULL) {
+ entry = envlist->el_entries.lh_first;
+ QLIST_REMOVE(entry, ev_link);
- g_free((char *)entry->ev_var);
- g_free(entry);
- }
- g_free(envlist);
+ g_free((char *)entry->ev_var);
+ g_free(entry);
+ }
+ g_free(envlist);
}
/*
@@ -65,7 +65,7 @@
int
envlist_parse_set(envlist_t *envlist, const char *env)
{
- return (envlist_parse(envlist, env, &envlist_setenv));
+ return (envlist_parse(envlist, env, &envlist_setenv));
}
/*
@@ -77,7 +77,7 @@
int
envlist_parse_unset(envlist_t *envlist, const char *env)
{
- return (envlist_parse(envlist, env, &envlist_unsetenv));
+ return (envlist_parse(envlist, env, &envlist_unsetenv));
}
/*
@@ -90,15 +90,15 @@
envlist_parse(envlist_t *envlist, const char *env,
int (*callback)(envlist_t *, const char *))
{
- char *tmpenv, *envvar;
- char *envsave = NULL;
+ char *tmpenv, *envvar;
+ char *envsave = NULL;
int ret = 0;
assert(callback != NULL);
- if ((envlist == NULL) || (env == NULL))
- return (EINVAL);
+ if ((envlist == NULL) || (env == NULL))
+ return (EINVAL);
- tmpenv = g_strdup(env);
+ tmpenv = g_strdup(env);
envsave = tmpenv;
do {
@@ -109,7 +109,7 @@
if ((*callback)(envlist, tmpenv) != 0) {
ret = errno;
break;
- }
+ }
tmpenv = envvar + 1;
} while (envvar != NULL);
@@ -126,42 +126,42 @@
int
envlist_setenv(envlist_t *envlist, const char *env)
{
- struct envlist_entry *entry = NULL;
- const char *eq_sign;
- size_t envname_len;
+ struct envlist_entry *entry = NULL;
+ const char *eq_sign;
+ size_t envname_len;
- if ((envlist == NULL) || (env == NULL))
- return (EINVAL);
+ if ((envlist == NULL) || (env == NULL))
+ return (EINVAL);
- /* find out first equals sign in given env */
- if ((eq_sign = strchr(env, '=')) == NULL)
- return (EINVAL);
- envname_len = eq_sign - env + 1;
+ /* find out first equals sign in given env */
+ if ((eq_sign = strchr(env, '=')) == NULL)
+ return (EINVAL);
+ envname_len = eq_sign - env + 1;
- /*
- * If there already exists variable with given name
- * we remove and release it before allocating a whole
- * new entry.
- */
- for (entry = envlist->el_entries.lh_first; entry != NULL;
- entry = entry->ev_link.le_next) {
- if (strncmp(entry->ev_var, env, envname_len) == 0)
- break;
- }
+ /*
+ * If there already exists variable with given name
+ * we remove and release it before allocating a whole
+ * new entry.
+ */
+ for (entry = envlist->el_entries.lh_first; entry != NULL;
+ entry = entry->ev_link.le_next) {
+ if (strncmp(entry->ev_var, env, envname_len) == 0)
+ break;
+ }
- if (entry != NULL) {
- QLIST_REMOVE(entry, ev_link);
- g_free((char *)entry->ev_var);
- g_free(entry);
- } else {
- envlist->el_count++;
- }
+ if (entry != NULL) {
+ QLIST_REMOVE(entry, ev_link);
+ g_free((char *)entry->ev_var);
+ g_free(entry);
+ } else {
+ envlist->el_count++;
+ }
- entry = g_malloc(sizeof(*entry));
- entry->ev_var = g_strdup(env);
- QLIST_INSERT_HEAD(&envlist->el_entries, entry, ev_link);
+ entry = g_malloc(sizeof(*entry));
+ entry->ev_var = g_strdup(env);
+ QLIST_INSERT_HEAD(&envlist->el_entries, entry, ev_link);
- return (0);
+ return (0);
}
/*
@@ -171,34 +171,34 @@
int
envlist_unsetenv(envlist_t *envlist, const char *env)
{
- struct envlist_entry *entry;
- size_t envname_len;
+ struct envlist_entry *entry;
+ size_t envname_len;
- if ((envlist == NULL) || (env == NULL))
- return (EINVAL);
+ if ((envlist == NULL) || (env == NULL))
+ return (EINVAL);
- /* env is not allowed to contain '=' */
- if (strchr(env, '=') != NULL)
- return (EINVAL);
+ /* env is not allowed to contain '=' */
+ if (strchr(env, '=') != NULL)
+ return (EINVAL);
- /*
- * Find out the requested entry and remove
- * it from the list.
- */
- envname_len = strlen(env);
- for (entry = envlist->el_entries.lh_first; entry != NULL;
- entry = entry->ev_link.le_next) {
- if (strncmp(entry->ev_var, env, envname_len) == 0)
- break;
- }
- if (entry != NULL) {
- QLIST_REMOVE(entry, ev_link);
- g_free((char *)entry->ev_var);
- g_free(entry);
+ /*
+ * Find out the requested entry and remove
+ * it from the list.
+ */
+ envname_len = strlen(env);
+ for (entry = envlist->el_entries.lh_first; entry != NULL;
+ entry = entry->ev_link.le_next) {
+ if (strncmp(entry->ev_var, env, envname_len) == 0)
+ break;
+ }
+ if (entry != NULL) {
+ QLIST_REMOVE(entry, ev_link);
+ g_free((char *)entry->ev_var);
+ g_free(entry);
- envlist->el_count--;
- }
- return (0);
+ envlist->el_count--;
+ }
+ return (0);
}
/*
@@ -214,19 +214,19 @@
char **
envlist_to_environ(const envlist_t *envlist, size_t *count)
{
- struct envlist_entry *entry;
- char **env, **penv;
+ struct envlist_entry *entry;
+ char **env, **penv;
- penv = env = g_new(char *, envlist->el_count + 1);
+ penv = env = g_new(char *, envlist->el_count + 1);
- for (entry = envlist->el_entries.lh_first; entry != NULL;
- entry = entry->ev_link.le_next) {
- *(penv++) = g_strdup(entry->ev_var);
- }
- *penv = NULL; /* NULL terminate the list */
+ for (entry = envlist->el_entries.lh_first; entry != NULL;
+ entry = entry->ev_link.le_next) {
+ *(penv++) = g_strdup(entry->ev_var);
+ }
+ *penv = NULL; /* NULL terminate the list */
- if (count != NULL)
- *count = envlist->el_count;
+ if (count != NULL)
+ *count = envlist->el_count;
- return (env);
+ return (env);
}
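The envlist.c change is whitespace-only (tabs converted to four-space indentation); the API itself is unchanged. For reference, a typical caller looks roughly like this (a sketch assuming qemu/envlist.h; error checking trimmed):

#include "qemu/osdep.h"
#include "qemu/envlist.h"

static char **build_child_environ(void)
{
    envlist_t *envlist = envlist_create();
    size_t count;
    char **env;

    envlist_setenv(envlist, "LANG=C");    /* add or replace LANG */
    envlist_unsetenv(envlist, "TMPDIR");  /* drop TMPDIR if present */

    env = envlist_to_environ(envlist, &count);  /* NULL-terminated copy */
    envlist_free(envlist);
    return env;   /* caller g_free()s the strings and the array */
}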
diff --git a/util/error.c b/util/error.c
index 1e7af66..5537245 100644
--- a/util/error.c
+++ b/util/error.c
@@ -27,8 +27,9 @@
Error *error_abort;
Error *error_fatal;
+Error *error_warn;
-static void error_handle_fatal(Error **errp, Error *err)
+static void error_handle(Error **errp, Error *err)
{
if (errp == &error_abort) {
fprintf(stderr, "Unexpected error in %s() at %s:%d:\n",
@@ -43,6 +44,9 @@
error_report_err(err);
exit(1);
}
+ if (errp == &error_warn) {
+ warn_report_err(err);
+ }
}
G_GNUC_PRINTF(6, 0)
@@ -71,7 +75,7 @@
err->line = line;
err->func = func;
- error_handle_fatal(errp, err);
+ error_handle(errp, err);
*errp = err;
errno = saved_errno;
@@ -284,7 +288,7 @@
if (!local_err) {
return;
}
- error_handle_fatal(dst_errp, local_err);
+ error_handle(dst_errp, local_err);
if (dst_errp && !*dst_errp) {
*dst_errp = local_err;
} else {
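The new error_warn sink rounds out error_abort and error_fatal: an error delivered to it is printed via warn_report_err() and execution simply continues. A hypothetical caller is sketched below; configure_foo() is an illustrative function with the conventional trailing Error **errp parameter, not something this patch adds:

#include "qemu/osdep.h"
#include "qapi/error.h"

static void apply_optional_setting(void)
{
    /*
     * Best-effort configuration: if configure_foo() (hypothetical) reports
     * an error, it is emitted as a warning instead of being propagated or
     * aborting the process.
     */
    configure_foo(true, &error_warn);
}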
diff --git a/util/log.c b/util/log.c
index 7837ff9..53b4f6c 100644
--- a/util/log.c
+++ b/util/log.c
@@ -489,7 +489,7 @@
"do not chain compiled TBs so that \"exec\" and \"cpu\" show\n"
"complete traces" },
#ifdef CONFIG_PLUGIN
- { CPU_LOG_PLUGIN, "plugin", "output from TCG plugins\n"},
+ { CPU_LOG_PLUGIN, "plugin", "output from TCG plugins"},
#endif
{ LOG_STRACE, "strace",
"log every user-mode syscall, its input, and its result" },
diff --git a/util/main-loop.c b/util/main-loop.c
index 3c0f525..e180c85 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -252,10 +252,6 @@
static int glib_pollfds_idx;
static int glib_n_poll_fds;
-void qemu_fd_register(int fd)
-{
-}
-
static void glib_pollfds_fill(int64_t *cur_timeout)
{
GMainContext *context = g_main_context_default();
@@ -414,13 +410,6 @@
}
}
-void qemu_fd_register(int fd)
-{
- WSAEventSelect(fd, event_notifier_get_handle(&qemu_aio_context->notifier),
- FD_READ | FD_ACCEPT | FD_CLOSE |
- FD_CONNECT | FD_WRITE | FD_OOB);
-}
-
static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds,
fd_set *xfds)
{
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index 77d882e..760390b 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -583,76 +583,6 @@
}
-pid_t qemu_fork(Error **errp)
-{
- sigset_t oldmask, newmask;
- struct sigaction sig_action;
- int saved_errno;
- pid_t pid;
-
- /*
- * Need to block signals now, so that child process can safely
- * kill off caller's signal handlers without a race.
- */
- sigfillset(&newmask);
- if (pthread_sigmask(SIG_SETMASK, &newmask, &oldmask) != 0) {
- error_setg_errno(errp, errno,
- "cannot block signals");
- return -1;
- }
-
- pid = fork();
- saved_errno = errno;
-
- if (pid < 0) {
- /* attempt to restore signal mask, but ignore failure, to
- * avoid obscuring the fork failure */
- (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- error_setg_errno(errp, saved_errno,
- "cannot fork child process");
- errno = saved_errno;
- return -1;
- } else if (pid) {
- /* parent process */
-
- /* Restore our original signal mask now that the child is
- * safely running. Only documented failures are EFAULT (not
- * possible, since we are using just-grabbed mask) or EINVAL
- * (not possible, since we are using correct arguments). */
- (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- } else {
- /* child process */
- size_t i;
-
- /* Clear out all signal handlers from parent so nothing
- * unexpected can happen in our child once we unblock
- * signals */
- sig_action.sa_handler = SIG_DFL;
- sig_action.sa_flags = 0;
- sigemptyset(&sig_action.sa_mask);
-
- for (i = 1; i < NSIG; i++) {
- /* Only possible errors are EFAULT or EINVAL The former
- * won't happen, the latter we expect, so no need to check
- * return value */
- (void)sigaction(i, &sig_action, NULL);
- }
-
- /* Unmask all signals in child, since we've no idea what the
- * caller's done with their signal mask and don't want to
- * propagate that to children */
- sigemptyset(&newmask);
- if (pthread_sigmask(SIG_SETMASK, &newmask, NULL) != 0) {
- Error *local_err = NULL;
- error_setg_errno(&local_err, errno,
- "cannot unblock signals");
- error_report_err(local_err);
- _exit(1);
- }
- }
- return pid;
-}
-
void *qemu_alloc_stack(size_t *sz)
{
void *ptr, *guardpage;
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index 07ade41..a986387 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -180,7 +180,7 @@
void qemu_socket_set_block(int fd)
{
unsigned long opt = 0;
- WSAEventSelect(fd, NULL, 0);
+ qemu_socket_unselect(fd, NULL);
ioctlsocket(fd, FIONBIO, &opt);
}
@@ -283,21 +283,155 @@
}
-pid_t qemu_fork(Error **errp)
+bool qemu_socket_select(int sockfd, WSAEVENT hEventObject,
+ long lNetworkEvents, Error **errp)
{
- errno = ENOSYS;
- error_setg_errno(errp, errno,
- "cannot fork child process");
- return -1;
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (errp == NULL) {
+ errp = &error_warn;
+ }
+
+ if (s == INVALID_SOCKET) {
+ error_setg(errp, "invalid socket fd=%d", sockfd);
+ return false;
+ }
+
+ if (WSAEventSelect(s, hEventObject, lNetworkEvents) != 0) {
+ error_setg_win32(errp, WSAGetLastError(), "failed to WSAEventSelect()");
+ return false;
+ }
+
+ return true;
}
+bool qemu_socket_unselect(int sockfd, Error **errp)
+{
+ return qemu_socket_select(sockfd, NULL, 0, errp);
+}
+
+int qemu_socketpair(int domain, int type, int protocol, int sv[2])
+{
+ struct sockaddr_un addr = {
+ 0,
+ };
+ socklen_t socklen;
+ int listener = -1;
+ int client = -1;
+ int server = -1;
+ g_autofree char *path = NULL;
+ int tmpfd;
+ u_long arg;
+ int ret = -1;
+
+ g_return_val_if_fail(sv != NULL, -1);
+
+ addr.sun_family = AF_UNIX;
+ socklen = sizeof(addr);
+
+ tmpfd = g_file_open_tmp(NULL, &path, NULL);
+ if (tmpfd == -1 || !path) {
+ errno = EACCES;
+ goto out;
+ }
+
+ close(tmpfd);
+
+ if (strlen(path) >= sizeof(addr.sun_path)) {
+ errno = EINVAL;
+ goto out;
+ }
+
+ strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
+
+ listener = socket(domain, type, protocol);
+ if (listener == -1) {
+ goto out;
+ }
+
+ if (DeleteFile(path) == 0 && GetLastError() != ERROR_FILE_NOT_FOUND) {
+ errno = EACCES;
+ goto out;
+ }
+ g_clear_pointer(&path, g_free);
+
+ if (bind(listener, (struct sockaddr *)&addr, socklen) == -1) {
+ goto out;
+ }
+
+ if (listen(listener, 1) == -1) {
+ goto out;
+ }
+
+ client = socket(domain, type, protocol);
+ if (client == -1) {
+ goto out;
+ }
+
+ arg = 1;
+ if (ioctlsocket(client, FIONBIO, &arg) != NO_ERROR) {
+ goto out;
+ }
+
+ if (connect(client, (struct sockaddr *)&addr, socklen) == -1 &&
+ WSAGetLastError() != WSAEWOULDBLOCK) {
+ goto out;
+ }
+
+ server = accept(listener, NULL, NULL);
+ if (server == -1) {
+ goto out;
+ }
+
+ arg = 0;
+ if (ioctlsocket(client, FIONBIO, &arg) != NO_ERROR) {
+ goto out;
+ }
+
+ arg = 0;
+ if (ioctlsocket(client, SIO_AF_UNIX_GETPEERPID, &arg) != NO_ERROR) {
+ goto out;
+ }
+
+ if (arg != GetCurrentProcessId()) {
+ errno = EPERM;
+ goto out;
+ }
+
+ sv[0] = server;
+ server = -1;
+ sv[1] = client;
+ client = -1;
+ ret = 0;
+
+out:
+ if (listener != -1) {
+ close(listener);
+ }
+ if (client != -1) {
+ close(client);
+ }
+ if (server != -1) {
+ close(server);
+ }
+ if (path) {
+ DeleteFile(path);
+ }
+ return ret;
+}
#undef connect
int qemu_connect_wrap(int sockfd, const struct sockaddr *addr,
socklen_t addrlen)
{
int ret;
- ret = connect(sockfd, addr, addrlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = connect(s, addr, addrlen);
if (ret < 0) {
if (WSAGetLastError() == WSAEWOULDBLOCK) {
errno = EINPROGRESS;
@@ -313,7 +447,13 @@
int qemu_listen_wrap(int sockfd, int backlog)
{
int ret;
- ret = listen(sockfd, backlog);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = listen(s, backlog);
if (ret < 0) {
errno = socket_error();
}
@@ -326,23 +466,101 @@
socklen_t addrlen)
{
int ret;
- ret = bind(sockfd, addr, addrlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = bind(s, addr, addrlen);
if (ret < 0) {
errno = socket_error();
}
return ret;
}
+#undef close
+int qemu_close_socket_osfhandle(int fd)
+{
+ SOCKET s = _get_osfhandle(fd);
+ DWORD flags = 0;
+
+ /*
+ * If we were to just call _close on the descriptor, it would close the
+ * HANDLE, but it wouldn't free any of the resources associated with the
+ * SOCKET, and we can't call _close after calling closesocket, because
+ * closesocket has already closed the HANDLE, and _close would attempt to
+ * close the HANDLE again, resulting in a double free. We can however
+ * protect the HANDLE from actually being closed long enough to close the
+ * file descriptor, then close the socket itself.
+ */
+ if (!GetHandleInformation((HANDLE)s, &flags)) {
+ errno = EACCES;
+ return -1;
+ }
+
+ if (!SetHandleInformation((HANDLE)s, HANDLE_FLAG_PROTECT_FROM_CLOSE,
+ HANDLE_FLAG_PROTECT_FROM_CLOSE)) {
+ errno = EACCES;
+ return -1;
+ }
+
+ /*
+ * close() returns EBADF since we PROTECT_FROM_CLOSE the underlying handle,
+ * but the FD is actually freed
+ */
+ if (close(fd) < 0 && errno != EBADF) {
+ return -1;
+ }
+
+ if (!SetHandleInformation((HANDLE)s, flags, flags)) {
+ errno = EACCES;
+ return -1;
+ }
+
+ return 0;
+}
+
+int qemu_close_wrap(int fd)
+{
+ SOCKET s = INVALID_SOCKET;
+ int ret = -1;
+
+ if (!fd_is_socket(fd)) {
+ return close(fd);
+ }
+
+ s = _get_osfhandle(fd);
+ qemu_close_socket_osfhandle(fd);
+
+ ret = closesocket(s);
+ if (ret < 0) {
+ errno = socket_error();
+ }
+
+ return ret;
+}
+
#undef socket
int qemu_socket_wrap(int domain, int type, int protocol)
{
- int ret;
- ret = socket(domain, type, protocol);
- if (ret < 0) {
+ SOCKET s;
+ int fd;
+
+ s = socket(domain, type, protocol);
+ if (s == INVALID_SOCKET) {
errno = socket_error();
+ return -1;
}
- return ret;
+
+ fd = _open_osfhandle(s, _O_BINARY);
+ if (fd < 0) {
+ closesocket(s);
+ /* _open_osfhandle may not set errno, and closesocket() may override it */
+ errno = ENOMEM;
+ }
+
+ return fd;
}
@@ -350,12 +568,27 @@
int qemu_accept_wrap(int sockfd, struct sockaddr *addr,
socklen_t *addrlen)
{
- int ret;
- ret = accept(sockfd, addr, addrlen);
- if (ret < 0) {
- errno = socket_error();
+ int fd;
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
}
- return ret;
+
+ s = accept(s, addr, addrlen);
+ if (s == INVALID_SOCKET) {
+ errno = socket_error();
+ return -1;
+ }
+
+ fd = _open_osfhandle(s, _O_BINARY);
+ if (fd < 0) {
+ closesocket(s);
+ /* _open_osfhandle may not set errno, and closesocket() may override it */
+ errno = ENOMEM;
+ }
+
+ return fd;
}
@@ -363,7 +596,13 @@
int qemu_shutdown_wrap(int sockfd, int how)
{
int ret;
- ret = shutdown(sockfd, how);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = shutdown(s, how);
if (ret < 0) {
errno = socket_error();
}
@@ -375,19 +614,13 @@
int qemu_ioctlsocket_wrap(int fd, int req, void *val)
{
int ret;
- ret = ioctlsocket(fd, req, val);
- if (ret < 0) {
- errno = socket_error();
+ SOCKET s = _get_osfhandle(fd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
}
- return ret;
-}
-
-#undef closesocket
-int qemu_closesocket_wrap(int fd)
-{
- int ret;
- ret = closesocket(fd);
+ ret = ioctlsocket(s, req, val);
if (ret < 0) {
errno = socket_error();
}
@@ -400,7 +633,13 @@
void *optval, socklen_t *optlen)
{
int ret;
- ret = getsockopt(sockfd, level, optname, optval, optlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = getsockopt(s, level, optname, optval, optlen);
if (ret < 0) {
errno = socket_error();
}
@@ -413,7 +652,13 @@
const void *optval, socklen_t optlen)
{
int ret;
- ret = setsockopt(sockfd, level, optname, optval, optlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = setsockopt(s, level, optname, optval, optlen);
if (ret < 0) {
errno = socket_error();
}
@@ -426,7 +671,13 @@
socklen_t *addrlen)
{
int ret;
- ret = getpeername(sockfd, addr, addrlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = getpeername(s, addr, addrlen);
if (ret < 0) {
errno = socket_error();
}
@@ -439,7 +690,13 @@
socklen_t *addrlen)
{
int ret;
- ret = getsockname(sockfd, addr, addrlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = getsockname(s, addr, addrlen);
if (ret < 0) {
errno = socket_error();
}
@@ -451,7 +708,13 @@
ssize_t qemu_send_wrap(int sockfd, const void *buf, size_t len, int flags)
{
int ret;
- ret = send(sockfd, buf, len, flags);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = send(s, buf, len, flags);
if (ret < 0) {
errno = socket_error();
}
@@ -464,7 +727,13 @@
const struct sockaddr *addr, socklen_t addrlen)
{
int ret;
- ret = sendto(sockfd, buf, len, flags, addr, addrlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = sendto(s, buf, len, flags, addr, addrlen);
if (ret < 0) {
errno = socket_error();
}
@@ -476,7 +745,13 @@
ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags)
{
int ret;
- ret = recv(sockfd, buf, len, flags);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = recv(s, buf, len, flags);
if (ret < 0) {
errno = socket_error();
}
@@ -489,7 +764,13 @@
struct sockaddr *addr, socklen_t *addrlen)
{
int ret;
- ret = recvfrom(sockfd, buf, len, flags, addr, addrlen);
+ SOCKET s = _get_osfhandle(sockfd);
+
+ if (s == INVALID_SOCKET) {
+ return -1;
+ }
+
+ ret = recvfrom(s, buf, len, flags, addr, addrlen);
if (ret < 0) {
errno = socket_error();
}
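Every Winsock wrapper above follows the same shape: translate the CRT descriptor to the underlying SOCKET with _get_osfhandle(), bail out with -1 if the descriptor is stale, call the Winsock function on the SOCKET, and fold WSA errors into errno. The two directions of that mapping, condensed into a standalone sketch (Win32-only, illustrative):

#include <winsock2.h>
#include <windows.h>
#include <io.h>
#include <fcntl.h>
#include <errno.h>

/* SOCKET -> CRT fd, as qemu_socket_wrap() and qemu_accept_wrap() do. */
static int wrap_socket_as_fd(SOCKET s)
{
    int fd = _open_osfhandle((intptr_t)s, _O_BINARY);

    if (fd < 0) {
        closesocket(s);   /* don't leak the SOCKET if wrapping fails */
        errno = ENOMEM;
    }
    return fd;
}

/* CRT fd -> SOCKET, the first step of every qemu_*_wrap() above.
 * Returns INVALID_SOCKET if fd does not refer to a live handle. */
static SOCKET socket_from_fd(int fd)
{
    return (SOCKET)_get_osfhandle(fd);
}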
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 58f3f77..84a50a9 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -201,10 +201,16 @@
trace_qemu_co_mutex_lock_entry(mutex, self);
push_waiter(mutex, &w);
+ /*
+ * Add waiter before reading mutex->handoff. Pairs with qatomic_mb_set
+ * in qemu_co_mutex_unlock.
+ */
+ smp_mb__after_rmw();
+
/* This is the "Responsibility Hand-Off" protocol; a lock() picks from
* a concurrent unlock() the responsibility of waking somebody up.
*/
- old_handoff = qatomic_mb_read(&mutex->handoff);
+ old_handoff = qatomic_read(&mutex->handoff);
if (old_handoff &&
has_waiters(mutex) &&
qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
@@ -303,6 +309,7 @@
}
our_handoff = mutex->sequence;
+ /* Set handoff before checking for waiters. */
qatomic_mb_set(&mutex->handoff, our_handoff);
if (!has_waiters(mutex)) {
/* The concurrent lock has not added itself yet, so it
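The smp_mb__after_rmw() added to the lock slow path only affects the internal hand-off protocol; CoMutex users are untouched. For reference, the locking pattern the protocol serves (a sketch assuming qemu/coroutine.h and a mutex initialized with qemu_co_mutex_init()):

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

static CoMutex lock;   /* qemu_co_mutex_init(&lock) once at startup */

static void coroutine_fn update_shared_state(void *opaque)
{
    qemu_co_mutex_lock(&lock);
    /* critical section; may yield, later lockers queue as waiters */
    qemu_co_mutex_unlock(&lock);
}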
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 6538859..c06a4dc 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -326,7 +326,7 @@
* recover from this situation, so we need to recreate the
* socket to allow bind attempts for subsequent ports:
*/
- closesocket(slisten);
+ close(slisten);
slisten = -1;
}
}
@@ -337,7 +337,7 @@
listen_failed:
saved_errno = errno;
if (slisten >= 0) {
- closesocket(slisten);
+ close(slisten);
}
freeaddrinfo(res);
errno = saved_errno;
@@ -380,7 +380,7 @@
if (rc < 0) {
error_setg_errno(errp, errno, "Failed to connect to '%s:%s'",
saddr->host, saddr->port);
- closesocket(sock);
+ close(sock);
return -1;
}
@@ -483,7 +483,7 @@
if (ret < 0) {
error_setg_errno(errp, errno, "Unable to set KEEPALIVE");
- closesocket(sock);
+ close(sock);
return -1;
}
}
@@ -580,7 +580,7 @@
err:
if (sock != -1) {
- closesocket(sock);
+ close(sock);
}
if (local) {
freeaddrinfo(local);
@@ -777,7 +777,7 @@
if (rc < 0) {
error_setg_errno(errp, errno, "Failed to connect to '%s:%s'",
vaddr->cid, vaddr->port);
- closesocket(sock);
+ close(sock);
return -1;
}
@@ -814,13 +814,13 @@
if (bind(slisten, (const struct sockaddr *)&svm, sizeof(svm)) != 0) {
error_setg_errno(errp, errno, "Failed to bind socket");
- closesocket(slisten);
+ close(slisten);
return -1;
}
if (listen(slisten, num) != 0) {
error_setg_errno(errp, errno, "Failed to listen on socket");
- closesocket(slisten);
+ close(slisten);
return -1;
}
return slisten;
@@ -978,7 +978,7 @@
err:
g_free(pathbuf);
- closesocket(sock);
+ close(sock);
return -1;
}
@@ -1041,7 +1041,7 @@
return sock;
err:
- closesocket(sock);
+ close(sock);
return -1;
}
@@ -1238,7 +1238,7 @@
*/
if (listen(fd, num) != 0) {
error_setg_errno(errp, errno, "Failed to listen on fd socket");
- closesocket(fd);
+ close(fd);
return -1;
}
break;
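All the closesocket() calls in qemu-sockets.c become plain close(): now that Windows socket descriptors are real CRT file descriptors, the close() wrapper (qemu_close_wrap() above) detects sockets via fd_is_socket() and tears down both the descriptor and the SOCKET. A small sketch of what a portable call site looks like after this change (illustrative only):

#include "qemu/osdep.h"
#include "qemu/sockets.h"

static void probe_and_close(void)
{
    int sock = socket(AF_INET, SOCK_STREAM, 0);

    if (sock < 0) {
        return;
    }
    /* ... bind/connect/use the socket ... */
    close(sock);   /* previously closesocket(sock) on the Windows build */
}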
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index 93d2505..b2e26e2 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -384,13 +384,21 @@
void qemu_event_set(QemuEvent *ev)
{
- /* qemu_event_set has release semantics, but because it *loads*
+ assert(ev->initialized);
+
+ /*
+ * Pairs with both qemu_event_reset() and qemu_event_wait().
+ *
+ * qemu_event_set has release semantics, but because it *loads*
* ev->value we need a full memory barrier here.
*/
- assert(ev->initialized);
smp_mb();
if (qatomic_read(&ev->value) != EV_SET) {
- if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
+ int old = qatomic_xchg(&ev->value, EV_SET);
+
+ /* Pairs with memory barrier in kernel futex_wait system call. */
+ smp_mb__after_rmw();
+ if (old == EV_BUSY) {
/* There were waiters, wake them up. */
qemu_futex_wake(ev, INT_MAX);
}
@@ -399,18 +407,19 @@
void qemu_event_reset(QemuEvent *ev)
{
- unsigned value;
-
assert(ev->initialized);
- value = qatomic_read(&ev->value);
- smp_mb_acquire();
- if (value == EV_SET) {
- /*
- * If there was a concurrent reset (or even reset+wait),
- * do nothing. Otherwise change EV_SET->EV_FREE.
- */
- qatomic_or(&ev->value, EV_FREE);
- }
+
+ /*
+ * If there was a concurrent reset (or even reset+wait),
+ * do nothing. Otherwise change EV_SET->EV_FREE.
+ */
+ qatomic_or(&ev->value, EV_FREE);
+
+ /*
+ * Order reset before checking the condition in the caller.
+ * Pairs with the first memory barrier in qemu_event_set().
+ */
+ smp_mb__after_rmw();
}
void qemu_event_wait(QemuEvent *ev)
@@ -418,20 +427,40 @@
unsigned value;
assert(ev->initialized);
- value = qatomic_read(&ev->value);
- smp_mb_acquire();
+
+ /*
+ * qemu_event_wait must synchronize with qemu_event_set even if it does
+ * not go down the slow path, so a load-acquire is needed here; it
+ * synchronizes with the first memory barrier in qemu_event_set().
+ *
+ * If we do go down the slow path, there is no requirement at all: we
+ * might miss a qemu_event_set() here but ultimately the memory barrier in
+ * qemu_futex_wait() will ensure the check is done correctly.
+ */
+ value = qatomic_load_acquire(&ev->value);
if (value != EV_SET) {
if (value == EV_FREE) {
/*
- * Leave the event reset and tell qemu_event_set that there
- * are waiters. No need to retry, because there cannot be
- * a concurrent busy->free transition. After the CAS, the
- * event will be either set or busy.
+ * Leave the event reset and tell qemu_event_set that there are
+ * waiters. No need to retry, because there cannot be a concurrent
+ * busy->free transition. After the CAS, the event will be either
+ * set or busy.
+ *
+ * This cmpxchg doesn't have particular ordering requirements if it
+ * succeeds (moving the store earlier can only cause qemu_event_set()
+ * to issue _more_ wakeups); the failing case needs acquire semantics
+ * like the load above.
*/
if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
return;
}
}
+
+ /*
+ * This is the final check for a concurrent set, so it does need
+ * a smp_mb() pairing with the second barrier of qemu_event_set().
+ * The barrier is inside the FUTEX_WAIT system call.
+ */
qemu_futex_wait(ev, EV_BUSY);
}
}
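The QemuEvent changes are easiest to read against the usage pattern they protect: a producer publishes data and calls qemu_event_set(), while a consumer resets the event, re-checks the condition, and only then waits. A sketch under that assumption ('work' is an illustrative variable, not part of the patch):

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"

static QemuEvent ev;    /* qemu_event_init(&ev, false) at startup */
static void *work;

static void producer(void *item)
{
    qatomic_store_release(&work, item);
    qemu_event_set(&ev);
}

static void *consumer(void)
{
    void *item;

    for (;;) {
        /* The reset must be ordered before the re-check; that is exactly
         * what the new smp_mb__after_rmw() in qemu_event_reset() provides. */
        qemu_event_reset(&ev);
        item = qatomic_load_acquire(&work);
        if (item) {
            return item;
        }
        qemu_event_wait(&ev);
    }
}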
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 69db254..a7fe3cc 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -272,12 +272,20 @@
void qemu_event_set(QemuEvent *ev)
{
assert(ev->initialized);
- /* qemu_event_set has release semantics, but because it *loads*
+
+ /*
+ * Pairs with both qemu_event_reset() and qemu_event_wait().
+ *
+ * qemu_event_set has release semantics, but because it *loads*
* ev->value we need a full memory barrier here.
*/
smp_mb();
if (qatomic_read(&ev->value) != EV_SET) {
- if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
+ int old = qatomic_xchg(&ev->value, EV_SET);
+
+ /* Pairs with memory barrier after ResetEvent. */
+ smp_mb__after_rmw();
+ if (old == EV_BUSY) {
/* There were waiters, wake them up. */
SetEvent(ev->event);
}
@@ -286,17 +294,19 @@
void qemu_event_reset(QemuEvent *ev)
{
- unsigned value;
-
assert(ev->initialized);
- value = qatomic_read(&ev->value);
- smp_mb_acquire();
- if (value == EV_SET) {
- /* If there was a concurrent reset (or even reset+wait),
- * do nothing. Otherwise change EV_SET->EV_FREE.
- */
- qatomic_or(&ev->value, EV_FREE);
- }
+
+ /*
+ * If there was a concurrent reset (or even reset+wait),
+ * do nothing. Otherwise change EV_SET->EV_FREE.
+ */
+ qatomic_or(&ev->value, EV_FREE);
+
+ /*
+ * Order reset before checking the condition in the caller.
+ * Pairs with the first memory barrier in qemu_event_set().
+ */
+ smp_mb__after_rmw();
}
void qemu_event_wait(QemuEvent *ev)
@@ -304,29 +314,49 @@
unsigned value;
assert(ev->initialized);
- value = qatomic_read(&ev->value);
- smp_mb_acquire();
+
+ /*
+ * qemu_event_wait must synchronize with qemu_event_set even if it does
+ * not go down the slow path, so a load-acquire is needed here; it
+ * synchronizes with the first memory barrier in qemu_event_set().
+ *
+ * If we do go down the slow path, there is no requirement at all: we
+ * might miss a qemu_event_set() here, but the barrier after ResetEvent()
+ * and the EV_SET re-check below ensure the check is done correctly.
+ */
+ value = qatomic_load_acquire(&ev->value);
if (value != EV_SET) {
if (value == EV_FREE) {
- /* qemu_event_set is not yet going to call SetEvent, but we are
- * going to do another check for EV_SET below when setting EV_BUSY.
- * At that point it is safe to call WaitForSingleObject.
+ /*
+ * Here the underlying kernel event is reset, but qemu_event_set is
+ * not yet going to call SetEvent. However, there will be another
+ * check for EV_SET below when setting EV_BUSY. At that point it
+ * is safe to call WaitForSingleObject.
*/
ResetEvent(ev->event);
- /* Tell qemu_event_set that there are waiters. No need to retry
- * because there cannot be a concurrent busy->free transition.
- * After the CAS, the event will be either set or busy.
+ /*
+ * It is not clear whether ResetEvent provides this barrier; kernel
+ * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry!
+ */
+ smp_mb();
+
+ /*
+ * Leave the event reset and tell qemu_event_set that there are
+ * waiters. No need to retry, because there cannot be a concurrent
+ * busy->free transition. After the CAS, the event will be either
+ * set or busy.
*/
if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
- value = EV_SET;
- } else {
- value = EV_BUSY;
+ return;
}
}
- if (value == EV_BUSY) {
- WaitForSingleObject(ev->event, INFINITE);
- }
+
+ /*
+ * ev->value is now EV_BUSY. Since we didn't observe EV_SET,
+ * qemu_event_set() must observe EV_BUSY and call SetEvent().
+ */
+ WaitForSingleObject(ev->event, INFINITE);
}
}
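The Win32 backend mirrors the POSIX changes one for one; only the slow-path blocking primitive differs, with the per-event kernel object standing in for the futex. A compact illustration of that correspondence (helper names are hypothetical; the bodies simply repeat the calls used above):

#include "qemu/osdep.h"
#include "qemu/thread.h"

/* POSIX equivalent: qemu_futex_wait(ev, EV_BUSY) -- block until woken. */
static void event_slowpath_wait(QemuEvent *ev)
{
    WaitForSingleObject(ev->event, INFINITE);
}

/* POSIX equivalent: qemu_futex_wake(ev, INT_MAX) -- wake all waiters
 * (ev->event is a manual-reset event, so SetEvent releases them all). */
static void event_slowpath_wake(QemuEvent *ev)
{
    SetEvent(ev->event);
}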