Merge tag 'trivial-branch-for-8.0-pull-request' of https://gitlab.com/laurent_vivier/qemu into staging
trivial branch pull request 20230118
# -----BEGIN PGP SIGNATURE-----
#
# iQJGBAABCAAwFiEEzS913cjjpNwuT1Fz8ww4vT8vvjwFAmPHpRASHGxhdXJlbnRA
# dml2aWVyLmV1AAoJEPMMOL0/L748fwEP+wTA6dBYqRnZMCPEkk6yy0nSVr6GF8FA
# i9JrUbRuBf8WT2RAFJEwOyACTaYgCwqU9tu6UxG2ekGfGDtR84HH1yozTAbBuPct
# qoT/cvrQ0/Nfymw1Ia1vH5D6EQiAn+j6/1C41PEHvqTQBMe8E4U8jDIwbXTaJS7j
# QSUDplRfCbSBXQ9ctFrcD6XxX06dj4U9l8L4gl5Uc4B1OmFacyJnfzMIyVRTIhvF
# S4sKB/8B36emFITw/gk+MW5HnBgjEIWvZjof71eglMqo79jmacGeOe8NQi1+ApQ1
# lVmllKewdgLHVwdOGVX4dCJQdhSL/7DjreqtKGrUmhZfJdmCWJdl3jVWqhr4lfME
# U7ytd68iLdKgfKqepc3+WbhA8pWT+brPVpTU9hq17DsNJqeZa6628OguDEtjz9dP
# 4Y4XFZMxFadYl4YaCpGzTN1tqsrO8Ct+Kvq/90nt5FUeTX+i+/WM/9XXNf9MD4VS
# OVCwHXCa4yHMMq2LGV0sWaL7vSI26lv0asKtalAelbVZhVyB1kSZfde2rZXuhoD5
# S2d9x2bcFG6WNlDfyaANkCKyHlxUaOroQVE+y0SqgtaC2oPhuXtG6fusiyvjG9+l
# 9O6jy87e4uR+Xach6MmybMjiPDi0VMvPayVz3BR/6hBZZB/GkLO1OmNQcZiXcbOd
# yROzKPmyZ/q+
# =35x0
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 18 Jan 2023 07:51:44 GMT
# gpg: using RSA key CD2F75DDC8E3A4DC2E4F5173F30C38BD3F2FBE3C
# gpg: issuer "laurent@vivier.eu"
# gpg: Good signature from "Laurent Vivier <lvivier@redhat.com>" [full]
# gpg: aka "Laurent Vivier <laurent@vivier.eu>" [full]
# gpg: aka "Laurent Vivier (Red Hat) <lvivier@redhat.com>" [full]
# Primary key fingerprint: CD2F 75DD C8E3 A4DC 2E4F 5173 F30C 38BD 3F2F BE3C
* tag 'trivial-branch-for-8.0-pull-request' of https://gitlab.com/laurent_vivier/qemu:
hw/ssi/sifive_spi.c: spelling: reigster
hw/cxl/cxl-host: Fix an error message typo
hw/cxl/cxl-cdat.c: spelling: missmatch
hw/pvrdma: Protect against buggy or malicious guest driver
ccid-card-emulated: fix cast warning/error
hw/i386/pc: Remove unused 'owner' argument from pc_pci_as_mapping_init
tests/qtest/test-hmp: Improve the check for verbose mode
hw/usb: Mark the XLNX_VERSAL-related files as target-independent
hw/intc: Mark more interrupt-controller files as target independent
hw/cpu: Mark arm11 and realview mpcore as target-independent code
hw/arm: Move various units to softmmu_ss[]
hw/tpm: Move tpm_ppi.c out of target-specific source set
hw/intc: Move some files out of the target-specific source set
hw/display: Move omap_lcdc.c out of target-specific source set
Call qemu_socketpair() instead of socketpair() when possible
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml
index 785b163..502dfd6 100644
--- a/.gitlab-ci.d/cirrus.yml
+++ b/.gitlab-ci.d/cirrus.yml
@@ -53,7 +53,7 @@
CIRRUS_VM_IMAGE_NAME: freebsd-12-4
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
- UPDATE_COMMAND: pkg update
+ UPDATE_COMMAND: pkg update; pkg upgrade -y
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
@@ -66,7 +66,7 @@
CIRRUS_VM_IMAGE_NAME: freebsd-13-1
CIRRUS_VM_CPUS: 8
CIRRUS_VM_RAM: 8G
- UPDATE_COMMAND: pkg update
+ UPDATE_COMMAND: pkg update; pkg upgrade -y
INSTALL_COMMAND: pkg install -y
TEST_TARGETS: check
diff --git a/MAINTAINERS b/MAINTAINERS
index 0fe50d0..08ad1e5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -511,7 +511,6 @@
Guest CPU Cores (NVMM)
----------------------
NetBSD Virtual Machine Monitor (NVMM) CPU support
-M: Kamil Rytarowski <kamil@netbsd.org>
M: Reinoud Zandijk <reinoud@netbsd.org>
S: Maintained
F: include/sysemu/nvmm.h
@@ -536,7 +535,6 @@
F: include/qemu/*posix*.h
NETBSD
-M: Kamil Rytarowski <kamil@netbsd.org>
M: Reinoud Zandijk <reinoud@netbsd.org>
M: Ryo ONODERA <ryoon@netbsd.org>
S: Maintained
diff --git a/Makefile b/Makefile
index a48103c..ce2f83a 100644
--- a/Makefile
+++ b/Makefile
@@ -150,7 +150,7 @@
ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
-makefile-targets := build.ninja ctags TAGS cscope dist clean uninstall
+makefile-targets := build.ninja ctags TAGS cscope dist clean
# "ninja -t targets" also lists all prerequisites. If build system
# files are marked as PHONY, however, Make will always try to execute
# "ninja build.ninja".
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 356fe34..04cd1f3 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -572,15 +572,18 @@
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
- if (TCG_TARGET_HAS_direct_jump) {
- uintptr_t offset = tb->jmp_target_arg[n];
- uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
- uintptr_t jmp_rx = tc_ptr + offset;
- uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
- tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
- } else {
- tb->jmp_target_arg[n] = addr;
- }
+ /*
+ * Get the rx view of the structure, from which we find the
+ * executable code address, and tb_target_set_jmp_target can
+ * produce a pc-relative displacement to jmp_target_addr[n].
+ */
+ const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
+ uintptr_t offset = tb->jmp_insn_offset[n];
+ uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
+ uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
+
+ tb->jmp_target_addr[n] = addr;
+ tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}
static inline void tb_add_jump(TranslationBlock *tb, int n,
@@ -909,64 +912,10 @@
/* main execution loop */
-int cpu_exec(CPUState *cpu)
+static int __attribute__((noinline))
+cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
int ret;
- SyncClocks sc = { 0 };
-
- /* replay_interrupt may need current_cpu */
- current_cpu = cpu;
-
- if (cpu_handle_halt(cpu)) {
- return EXCP_HALTED;
- }
-
- rcu_read_lock();
-
- cpu_exec_enter(cpu);
-
- /* Calculate difference between guest clock and host clock.
- * This delay includes the delay of the last cycle, so
- * what we have to do is sleep until it is 0. As for the
- * advance/delay we gain here, we try to fix it next time.
- */
- init_delay_params(&sc, cpu);
-
- /* prepare setjmp context for exception handling */
- if (sigsetjmp(cpu->jmp_env, 0) != 0) {
-#if defined(__clang__)
- /*
- * Some compilers wrongly smash all local variables after
- * siglongjmp (the spec requires that only non-volatile locals
- * which are changed between the sigsetjmp and siglongjmp are
- * permitted to be trashed). There were bug reports for gcc
- * 4.5.0 and clang. The bug is fixed in all versions of gcc
- * that we support, but is still unfixed in clang:
- * https://bugs.llvm.org/show_bug.cgi?id=21183
- *
- * Reload an essential local variable here for those compilers.
- * Newer versions of gcc would complain about this code (-Wclobbered),
- * so we only perform the workaround for clang.
- */
- cpu = current_cpu;
-#else
- /* Non-buggy compilers preserve this; assert the correct value. */
- g_assert(cpu == current_cpu);
-#endif
-
-#ifndef CONFIG_SOFTMMU
- clear_helper_retaddr();
- if (have_mmap_lock()) {
- mmap_unlock();
- }
-#endif
- if (qemu_mutex_iothread_locked()) {
- qemu_mutex_unlock_iothread();
- }
- qemu_plugin_disable_mem_helpers(cpu);
-
- assert_no_pages_locked();
- }
/* if an exception is pending, we execute it here */
while (!cpu_handle_exception(cpu, &ret)) {
@@ -1033,9 +982,60 @@
/* Try to align the host and virtual clocks
if the guest is in advance */
- align_clocks(&sc, cpu);
+ align_clocks(sc, cpu);
}
}
+ return ret;
+}
+
+static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
+{
+ /* Prepare setjmp context for exception handling. */
+ if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
+ /* Non-buggy compilers preserve this; assert the correct value. */
+ g_assert(cpu == current_cpu);
+
+#ifndef CONFIG_SOFTMMU
+ clear_helper_retaddr();
+ if (have_mmap_lock()) {
+ mmap_unlock();
+ }
+#endif
+ if (qemu_mutex_iothread_locked()) {
+ qemu_mutex_unlock_iothread();
+ }
+ qemu_plugin_disable_mem_helpers(cpu);
+
+ assert_no_pages_locked();
+ }
+
+ return cpu_exec_loop(cpu, sc);
+}
+
+int cpu_exec(CPUState *cpu)
+{
+ int ret;
+ SyncClocks sc = { 0 };
+
+ /* replay_interrupt may need current_cpu */
+ current_cpu = cpu;
+
+ if (cpu_handle_halt(cpu)) {
+ return EXCP_HALTED;
+ }
+
+ rcu_read_lock();
+ cpu_exec_enter(cpu);
+
+ /*
+ * Calculate difference between guest clock and host clock.
+ * This delay includes the delay of the last cycle, so
+ * what we have to do is sleep until it is 0. As for the
+ * advance/delay we gain here, we try to fix it next time.
+ */
+ init_delay_params(&sc, cpu);
+
+ ret = cpu_exec_setjmp(cpu, &sc);
cpu_exec_exit(cpu);
rcu_read_unlock();
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 4948729..4e040a1 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1142,7 +1142,7 @@
&xlat, &sz, full->attrs, &prot);
assert(sz >= TARGET_PAGE_SIZE);
- tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
+ tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx
" prot=%x idx=%d\n",
vaddr, full->phys_addr, prot, mmu_idx);
diff --git a/accel/tcg/debuginfo.c b/accel/tcg/debuginfo.c
new file mode 100644
index 0000000..71c66d0
--- /dev/null
+++ b/accel/tcg/debuginfo.c
@@ -0,0 +1,96 @@
+/*
+ * Debug information support.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/lockable.h"
+
+#include <elfutils/libdwfl.h>
+
+#include "debuginfo.h"
+
+static QemuMutex lock;
+static Dwfl *dwfl;
+static const Dwfl_Callbacks dwfl_callbacks = {
+ .find_elf = NULL,
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .section_address = NULL,
+ .debuginfo_path = NULL,
+};
+
+__attribute__((constructor))
+static void debuginfo_init(void)
+{
+ qemu_mutex_init(&lock);
+}
+
+void debuginfo_report_elf(const char *name, int fd, uint64_t bias)
+{
+ QEMU_LOCK_GUARD(&lock);
+
+ if (dwfl) {
+ dwfl_report_begin_add(dwfl);
+ } else {
+ dwfl = dwfl_begin(&dwfl_callbacks);
+ }
+
+ if (dwfl) {
+ dwfl_report_elf(dwfl, name, name, fd, bias, true);
+ dwfl_report_end(dwfl, NULL, NULL);
+ }
+}
+
+void debuginfo_lock(void)
+{
+ qemu_mutex_lock(&lock);
+}
+
+void debuginfo_query(struct debuginfo_query *q, size_t n)
+{
+ const char *symbol, *file;
+ Dwfl_Module *dwfl_module;
+ Dwfl_Line *dwfl_line;
+ GElf_Off dwfl_offset;
+ GElf_Sym dwfl_sym;
+ size_t i;
+ int line;
+
+ if (!dwfl) {
+ return;
+ }
+
+ for (i = 0; i < n; i++) {
+ dwfl_module = dwfl_addrmodule(dwfl, q[i].address);
+ if (!dwfl_module) {
+ continue;
+ }
+
+ if (q[i].flags & DEBUGINFO_SYMBOL) {
+ symbol = dwfl_module_addrinfo(dwfl_module, q[i].address,
+ &dwfl_offset, &dwfl_sym,
+ NULL, NULL, NULL);
+ if (symbol) {
+ q[i].symbol = symbol;
+ q[i].offset = dwfl_offset;
+ }
+ }
+
+ if (q[i].flags & DEBUGINFO_LINE) {
+ dwfl_line = dwfl_module_getsrc(dwfl_module, q[i].address);
+ if (dwfl_line) {
+ file = dwfl_lineinfo(dwfl_line, NULL, &line, 0, NULL, NULL);
+ if (file) {
+ q[i].file = file;
+ q[i].line = line;
+ }
+ }
+ }
+ }
+}
+
+void debuginfo_unlock(void)
+{
+ qemu_mutex_unlock(&lock);
+}
diff --git a/accel/tcg/debuginfo.h b/accel/tcg/debuginfo.h
new file mode 100644
index 0000000..7542cfe
--- /dev/null
+++ b/accel/tcg/debuginfo.h
@@ -0,0 +1,77 @@
+/*
+ * Debug information support.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ACCEL_TCG_DEBUGINFO_H
+#define ACCEL_TCG_DEBUGINFO_H
+
+/*
+ * Debuginfo describing a certain address.
+ */
+struct debuginfo_query {
+ uint64_t address; /* Input: address. */
+ int flags; /* Input: debuginfo subset. */
+ const char *symbol; /* Symbol that the address is part of. */
+ uint64_t offset; /* Offset from the symbol. */
+ const char *file; /* Source file associated with the address. */
+ int line; /* Line number in the source file. */
+};
+
+/*
+ * Debuginfo subsets.
+ */
+#define DEBUGINFO_SYMBOL BIT(1)
+#define DEBUGINFO_LINE BIT(2)
+
+#if defined(CONFIG_TCG) && defined(CONFIG_LIBDW)
+/*
+ * Load debuginfo for the specified guest ELF image.
+ */
+void debuginfo_report_elf(const char *name, int fd, uint64_t bias);
+
+/*
+ * Take the debuginfo lock.
+ */
+void debuginfo_lock(void);
+
+/*
+ * Fill each of the N Qs with the debuginfo about Q->ADDRESS as specified by
+ * Q->FLAGS:
+ *
+ * - DEBUGINFO_SYMBOL: update Q->SYMBOL and Q->OFFSET. If symbol debuginfo is
+ * missing, then leave them as is.
+ * - DEBUGINFO_LINE: update Q->FILE and Q->LINE. If line debuginfo is missing,
+ * then leave them as is.
+ *
+ * This function must be called under the debuginfo lock. The results can be
+ * accessed only until the debuginfo lock is released.
+ */
+void debuginfo_query(struct debuginfo_query *q, size_t n);
+
+/*
+ * Release the debuginfo lock.
+ */
+void debuginfo_unlock(void);
+#else
+static inline void debuginfo_report_elf(const char *image_name, int image_fd,
+ uint64_t load_bias)
+{
+}
+
+static inline void debuginfo_lock(void)
+{
+}
+
+static inline void debuginfo_query(struct debuginfo_query *q, size_t n)
+{
+}
+
+static inline void debuginfo_unlock(void)
+{
+}
+#endif
+
+#endif
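
The locking contract described in the comments above is easiest to see from a caller's point of view. The following is a minimal sketch (not part of the patch) of the intended lock/query/unlock sequence, mirroring how accel/tcg/perf.c below uses this API; the helper name show_guest_addr is made up for the example:

    #include "qemu/osdep.h"
    #include "accel/tcg/debuginfo.h"

    /* Resolve one guest address to a symbol and source line, if known. */
    static void show_guest_addr(uint64_t guest_addr)
    {
        struct debuginfo_query q = {
            .address = guest_addr,
            .flags = DEBUGINFO_SYMBOL | DEBUGINFO_LINE,
        };

        debuginfo_lock();
        debuginfo_query(&q, 1);
        if (q.symbol) {
            /* q.symbol and q.file are only valid while the lock is held. */
            printf("0x%" PRIx64 " -> %s+0x%" PRIx64 "\n",
                   guest_addr, q.symbol, q.offset);
        }
        debuginfo_unlock();
    }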
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
index 75e1dff..77740b1 100644
--- a/accel/tcg/meson.build
+++ b/accel/tcg/meson.build
@@ -12,6 +12,8 @@
tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c'))
tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')])
+tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
+tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
diff --git a/accel/tcg/perf.c b/accel/tcg/perf.c
new file mode 100644
index 0000000..ae19f6e
--- /dev/null
+++ b/accel/tcg/perf.c
@@ -0,0 +1,375 @@
+/*
+ * Linux perf perf-<pid>.map and jit-<pid>.dump integration.
+ *
+ * The jitdump spec can be found at [1].
+ *
+ * [1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/tools/perf/Documentation/jitdump-specification.txt
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "elf.h"
+#include "exec/exec-all.h"
+#include "qemu/timer.h"
+#include "tcg/tcg.h"
+
+#include "debuginfo.h"
+#include "perf.h"
+
+static FILE *safe_fopen_w(const char *path)
+{
+ int saved_errno;
+ FILE *f;
+ int fd;
+
+ /* Delete the old file, if any. */
+ unlink(path);
+
+ /* Avoid symlink attacks by using O_CREAT | O_EXCL. */
+ fd = open(path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+ if (fd == -1) {
+ return NULL;
+ }
+
+ /* Convert fd to FILE*. */
+ f = fdopen(fd, "w");
+ if (f == NULL) {
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ return NULL;
+ }
+
+ return f;
+}
+
+static FILE *perfmap;
+
+void perf_enable_perfmap(void)
+{
+ char map_file[32];
+
+ snprintf(map_file, sizeof(map_file), "/tmp/perf-%d.map", getpid());
+ perfmap = safe_fopen_w(map_file);
+ if (perfmap == NULL) {
+ warn_report("Could not open %s: %s, proceeding without perfmap",
+ map_file, strerror(errno));
+ }
+}
+
+/* Get PC and size of code JITed for guest instruction #INSN. */
+static void get_host_pc_size(uintptr_t *host_pc, uint16_t *host_size,
+ const void *start, size_t insn)
+{
+ uint16_t start_off = insn ? tcg_ctx->gen_insn_end_off[insn - 1] : 0;
+
+ if (host_pc) {
+ *host_pc = (uintptr_t)start + start_off;
+ }
+ if (host_size) {
+ *host_size = tcg_ctx->gen_insn_end_off[insn] - start_off;
+ }
+}
+
+static const char *pretty_symbol(const struct debuginfo_query *q, size_t *len)
+{
+ static __thread char buf[64];
+ int tmp;
+
+ if (!q->symbol) {
+ tmp = snprintf(buf, sizeof(buf), "guest-0x%"PRIx64, q->address);
+ if (len) {
+ *len = MIN(tmp + 1, sizeof(buf));
+ }
+ return buf;
+ }
+
+ if (!q->offset) {
+ if (len) {
+ *len = strlen(q->symbol) + 1;
+ }
+ return q->symbol;
+ }
+
+ tmp = snprintf(buf, sizeof(buf), "%s+0x%"PRIx64, q->symbol, q->offset);
+ if (len) {
+ *len = MIN(tmp + 1, sizeof(buf));
+ }
+ return buf;
+}
+
+static void write_perfmap_entry(const void *start, size_t insn,
+ const struct debuginfo_query *q)
+{
+ uint16_t host_size;
+ uintptr_t host_pc;
+
+ get_host_pc_size(&host_pc, &host_size, start, insn);
+ fprintf(perfmap, "%"PRIxPTR" %"PRIx16" %s\n",
+ host_pc, host_size, pretty_symbol(q, NULL));
+}
+
+static FILE *jitdump;
+
+#define JITHEADER_MAGIC 0x4A695444
+#define JITHEADER_VERSION 1
+
+struct jitheader {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t total_size;
+ uint32_t elf_mach;
+ uint32_t pad1;
+ uint32_t pid;
+ uint64_t timestamp;
+ uint64_t flags;
+};
+
+enum jit_record_type {
+ JIT_CODE_LOAD = 0,
+ JIT_CODE_DEBUG_INFO = 2,
+};
+
+struct jr_prefix {
+ uint32_t id;
+ uint32_t total_size;
+ uint64_t timestamp;
+};
+
+struct jr_code_load {
+ struct jr_prefix p;
+
+ uint32_t pid;
+ uint32_t tid;
+ uint64_t vma;
+ uint64_t code_addr;
+ uint64_t code_size;
+ uint64_t code_index;
+};
+
+struct debug_entry {
+ uint64_t addr;
+ int lineno;
+ int discrim;
+ const char name[];
+};
+
+struct jr_code_debug_info {
+ struct jr_prefix p;
+
+ uint64_t code_addr;
+ uint64_t nr_entry;
+ struct debug_entry entries[];
+};
+
+static uint32_t get_e_machine(void)
+{
+ Elf64_Ehdr elf_header;
+ FILE *exe;
+ size_t n;
+
+ QEMU_BUILD_BUG_ON(offsetof(Elf32_Ehdr, e_machine) !=
+ offsetof(Elf64_Ehdr, e_machine));
+
+ exe = fopen("/proc/self/exe", "r");
+ if (exe == NULL) {
+ return EM_NONE;
+ }
+
+ n = fread(&elf_header, sizeof(elf_header), 1, exe);
+ fclose(exe);
+ if (n != 1) {
+ return EM_NONE;
+ }
+
+ return elf_header.e_machine;
+}
+
+void perf_enable_jitdump(void)
+{
+ struct jitheader header;
+ char jitdump_file[32];
+ void *perf_marker;
+
+ if (!use_rt_clock) {
+ warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump");
+ return;
+ }
+
+ snprintf(jitdump_file, sizeof(jitdump_file), "jit-%d.dump", getpid());
+ jitdump = safe_fopen_w(jitdump_file);
+ if (jitdump == NULL) {
+ warn_report("Could not open %s: %s, proceeding without jitdump",
+ jitdump_file, strerror(errno));
+ return;
+ }
+
+ /*
+ * `perf inject` will see that the mapped file name in the corresponding
+ * PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump
+ * and will process it as a jitdump file.
+ */
+ perf_marker = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_EXEC,
+ MAP_PRIVATE, fileno(jitdump), 0);
+ if (perf_marker == MAP_FAILED) {
+ warn_report("Could not map %s: %s, proceeding without jitdump",
+ jitdump_file, strerror(errno));
+ fclose(jitdump);
+ jitdump = NULL;
+ return;
+ }
+
+ header.magic = JITHEADER_MAGIC;
+ header.version = JITHEADER_VERSION;
+ header.total_size = sizeof(header);
+ header.elf_mach = get_e_machine();
+ header.pad1 = 0;
+ header.pid = getpid();
+ header.timestamp = get_clock();
+ header.flags = 0;
+ fwrite(&header, sizeof(header), 1, jitdump);
+}
+
+void perf_report_prologue(const void *start, size_t size)
+{
+ if (perfmap) {
+ fprintf(perfmap, "%"PRIxPTR" %zx tcg-prologue-buffer\n",
+ (uintptr_t)start, size);
+ }
+}
+
+/* Write a JIT_CODE_DEBUG_INFO jitdump entry. */
+static void write_jr_code_debug_info(const void *start,
+ const struct debuginfo_query *q,
+ size_t icount)
+{
+ struct jr_code_debug_info rec;
+ struct debug_entry ent;
+ uintptr_t host_pc;
+ int insn;
+
+ /* Write the header. */
+ rec.p.id = JIT_CODE_DEBUG_INFO;
+ rec.p.total_size = sizeof(rec) + sizeof(ent) + 1;
+ rec.p.timestamp = get_clock();
+ rec.code_addr = (uintptr_t)start;
+ rec.nr_entry = 1;
+ for (insn = 0; insn < icount; insn++) {
+ if (q[insn].file) {
+ rec.p.total_size += sizeof(ent) + strlen(q[insn].file) + 1;
+ rec.nr_entry++;
+ }
+ }
+ fwrite(&rec, sizeof(rec), 1, jitdump);
+
+ /* Write the main debug entries. */
+ for (insn = 0; insn < icount; insn++) {
+ if (q[insn].file) {
+ get_host_pc_size(&host_pc, NULL, start, insn);
+ ent.addr = host_pc;
+ ent.lineno = q[insn].line;
+ ent.discrim = 0;
+ fwrite(&ent, sizeof(ent), 1, jitdump);
+ fwrite(q[insn].file, strlen(q[insn].file) + 1, 1, jitdump);
+ }
+ }
+
+ /* Write the trailing debug_entry. */
+ ent.addr = (uintptr_t)start + tcg_ctx->gen_insn_end_off[icount - 1];
+ ent.lineno = 0;
+ ent.discrim = 0;
+ fwrite(&ent, sizeof(ent), 1, jitdump);
+ fwrite("", 1, 1, jitdump);
+}
+
+/* Write a JIT_CODE_LOAD jitdump entry. */
+static void write_jr_code_load(const void *start, uint16_t host_size,
+ const struct debuginfo_query *q)
+{
+ static uint64_t code_index;
+ struct jr_code_load rec;
+ const char *symbol;
+ size_t symbol_size;
+
+ symbol = pretty_symbol(q, &symbol_size);
+ rec.p.id = JIT_CODE_LOAD;
+ rec.p.total_size = sizeof(rec) + symbol_size + host_size;
+ rec.p.timestamp = get_clock();
+ rec.pid = getpid();
+ rec.tid = qemu_get_thread_id();
+ rec.vma = (uintptr_t)start;
+ rec.code_addr = (uintptr_t)start;
+ rec.code_size = host_size;
+ rec.code_index = code_index++;
+ fwrite(&rec, sizeof(rec), 1, jitdump);
+ fwrite(symbol, symbol_size, 1, jitdump);
+ fwrite(start, host_size, 1, jitdump);
+}
+
+void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
+ const void *start)
+{
+ struct debuginfo_query *q;
+ size_t insn;
+
+ if (!perfmap && !jitdump) {
+ return;
+ }
+
+ q = g_try_malloc0_n(tb->icount, sizeof(*q));
+ if (!q) {
+ return;
+ }
+
+ debuginfo_lock();
+
+ /* Query debuginfo for each guest instruction. */
+ for (insn = 0; insn < tb->icount; insn++) {
+ /* FIXME: This replicates the restore_state_to_opc() logic. */
+ q[insn].address = tcg_ctx->gen_insn_data[insn][0];
+ if (TARGET_TB_PCREL) {
+ q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
+ } else {
+#if defined(TARGET_I386)
+ q[insn].address -= tb->cs_base;
+#endif
+ }
+ q[insn].flags = DEBUGINFO_SYMBOL | (jitdump ? DEBUGINFO_LINE : 0);
+ }
+ debuginfo_query(q, tb->icount);
+
+ /* Emit perfmap entries if needed. */
+ if (perfmap) {
+ flockfile(perfmap);
+ for (insn = 0; insn < tb->icount; insn++) {
+ write_perfmap_entry(start, insn, &q[insn]);
+ }
+ funlockfile(perfmap);
+ }
+
+ /* Emit jitdump entries if needed. */
+ if (jitdump) {
+ flockfile(jitdump);
+ write_jr_code_debug_info(start, q, tb->icount);
+ write_jr_code_load(start, tcg_ctx->gen_insn_end_off[tb->icount - 1],
+ q);
+ funlockfile(jitdump);
+ }
+
+ debuginfo_unlock();
+ g_free(q);
+}
+
+void perf_exit(void)
+{
+ if (perfmap) {
+ fclose(perfmap);
+ perfmap = NULL;
+ }
+
+ if (jitdump) {
+ fclose(jitdump);
+ jitdump = NULL;
+ }
+}
diff --git a/accel/tcg/perf.h b/accel/tcg/perf.h
new file mode 100644
index 0000000..f92dd52
--- /dev/null
+++ b/accel/tcg/perf.h
@@ -0,0 +1,49 @@
+/*
+ * Linux perf perf-<pid>.map and jit-<pid>.dump integration.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ACCEL_TCG_PERF_H
+#define ACCEL_TCG_PERF_H
+
+#if defined(CONFIG_TCG) && defined(CONFIG_LINUX)
+/* Start writing perf-<pid>.map. */
+void perf_enable_perfmap(void);
+
+/* Start writing jit-<pid>.dump. */
+void perf_enable_jitdump(void);
+
+/* Add information about TCG prologue to profiler maps. */
+void perf_report_prologue(const void *start, size_t size);
+
+/* Add information about JITted guest code to profiler maps. */
+void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
+ const void *start);
+
+/* Stop writing perf-<pid>.map and/or jit-<pid>.dump. */
+void perf_exit(void);
+#else
+static inline void perf_enable_perfmap(void)
+{
+}
+
+static inline void perf_enable_jitdump(void)
+{
+}
+
+static inline void perf_report_prologue(const void *start, size_t size)
+{
+}
+
+static inline void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
+ const void *start)
+{
+}
+
+static inline void perf_exit(void)
+{
+}
+#endif
+
+#endif
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 51ac1f6..9e925c1 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -62,6 +62,7 @@
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"
+#include "perf.h"
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
@@ -349,7 +350,7 @@
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
- tcg_ctx->tb_cflags = cflags;
+ tcg_ctx->gen_tb = tb;
tb_overflow:
#ifdef CONFIG_PROFILER
@@ -406,6 +407,12 @@
}
tb->tc.size = gen_code_size;
+ /*
+ * For TARGET_TB_PCREL, attribute all executions of the generated
+ * code to its first mapping.
+ */
+ perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
+
#ifdef CONFIG_PROFILER
qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
@@ -501,10 +508,10 @@
tb->jmp_dest[1] = (uintptr_t)NULL;
/* init original jump addresses which have been set during tcg_gen_code() */
- if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
tb_reset_jump(tb, 0);
}
- if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
tb_reset_jump(tb, 1);
}
@@ -686,9 +693,9 @@
if (tb_page_addr1(tb) != -1) {
tst->cross_page++;
}
- if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp_count++;
- if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp2_count++;
}
}
diff --git a/docs/devel/tcg.rst b/docs/devel/tcg.rst
index 136a7a0..b4096a1 100644
--- a/docs/devel/tcg.rst
+++ b/docs/devel/tcg.rst
@@ -188,3 +188,26 @@
Finally, the MMU helps tracking dirty pages and pages pointed to by
translation blocks.
+Profiling JITted code
+---------------------
+
+The Linux ``perf`` tool will treat all JITted code as a single block,
+because unlike the main code it cannot use debug information to link
+individual program counter samples to larger functions. To overcome this
+limitation you can use the ``-perfmap`` or the ``-jitdump`` option to
+generate map files. ``-perfmap`` is lightweight and produces only
+guest-host mappings. ``-jitdump`` additionally saves JITted code and guest
+debug information (if available); its output needs to be integrated with
+the ``perf.data`` file before the final report can be viewed.
+
+.. code::
+
+ perf record $QEMU -perfmap $REMAINING_ARGS
+ perf report
+
+ perf record -k 1 $QEMU -jitdump $REMAINING_ARGS
+ DEBUGINFOD_URLS= perf inject -j -i perf.data -o perf.data.jitted
+ perf report -i perf.data.jitted
+
+Note that qemu-system generates mappings only for ``-kernel`` files in ELF
+format.
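
For reference, each ``-perfmap`` entry written by this series is a single line of the form ``<host-pc> <size> <symbol>`` in hexadecimal (see write_perfmap_entry() in accel/tcg/perf.c above). A hypothetical perf-<pid>.map excerpt, with made-up addresses and sizes, might look like:

    7f6b1c2002a0 18 guest-0x401136
    7f6b1c2002b8 24 main+0x16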
diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c
index 39b8f01..cc73145 100644
--- a/hw/arm/strongarm.c
+++ b/hw/arm/strongarm.c
@@ -151,7 +151,7 @@
case ICPR:
return s->pending;
default:
- printf("%s: Bad register offset 0x" TARGET_FMT_plx "\n",
+ printf("%s: Bad register offset 0x" HWADDR_FMT_plx "\n",
__func__, offset);
return 0;
}
@@ -173,7 +173,7 @@
s->int_idle = (value & 1) ? 0 : ~0;
break;
default:
- printf("%s: Bad register offset 0x" TARGET_FMT_plx "\n",
+ printf("%s: Bad register offset 0x" HWADDR_FMT_plx "\n",
__func__, offset);
break;
}
@@ -333,7 +333,7 @@
((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) /
(1000 * ((s->rttr & 0xffff) + 1));
default:
- printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr);
+ printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr);
return 0;
}
}
@@ -375,7 +375,7 @@
break;
default:
- printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr);
+ printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr);
}
}
@@ -581,7 +581,7 @@
return s->status;
default:
- printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
+ printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset);
}
return 0;
@@ -626,7 +626,7 @@
break;
default:
- printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
+ printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset);
}
}
@@ -782,7 +782,7 @@
return s->ppfr | ~0x7f001;
default:
- printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
+ printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset);
}
return 0;
@@ -817,7 +817,7 @@
break;
default:
- printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
+ printf("%s: Bad offset 0x" HWADDR_FMT_plx "\n", __func__, offset);
}
}
@@ -1164,7 +1164,7 @@
return s->utsr1;
default:
- printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr);
+ printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr);
return 0;
}
}
@@ -1221,7 +1221,7 @@
break;
default:
- printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr);
+ printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr);
}
}
@@ -1443,7 +1443,7 @@
strongarm_ssp_fifo_update(s);
return retval;
default:
- printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr);
+ printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr);
break;
}
return 0;
@@ -1509,7 +1509,7 @@
break;
default:
- printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr);
+ printf("%s: Bad register 0x" HWADDR_FMT_plx "\n", __func__, addr);
break;
}
}
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index 0cbc2fb..36d68c7 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -645,7 +645,7 @@
error_flash:
qemu_log_mask(LOG_UNIMP, "%s: Unimplemented flash cmd sequence "
- "(offset " TARGET_FMT_plx ", wcycle 0x%x cmd 0x%x value 0x%x)"
+ "(offset " HWADDR_FMT_plx ", wcycle 0x%x cmd 0x%x value 0x%x)"
"\n", __func__, offset, pfl->wcycle, pfl->cmd, value);
mode_read_array:
diff --git a/hw/char/digic-uart.c b/hw/char/digic-uart.c
index 00e5df5..51d4e7d 100644
--- a/hw/char/digic-uart.c
+++ b/hw/char/digic-uart.c
@@ -63,7 +63,7 @@
default:
qemu_log_mask(LOG_UNIMP,
"digic-uart: read access to unknown register 0x"
- TARGET_FMT_plx "\n", addr << 2);
+ HWADDR_FMT_plx "\n", addr << 2);
}
return ret;
@@ -101,7 +101,7 @@
default:
qemu_log_mask(LOG_UNIMP,
"digic-uart: write access to unknown register 0x"
- TARGET_FMT_plx "\n", addr << 2);
+ HWADDR_FMT_plx "\n", addr << 2);
}
}
diff --git a/hw/char/etraxfs_ser.c b/hw/char/etraxfs_ser.c
index e8c3017..8d6422d 100644
--- a/hw/char/etraxfs_ser.c
+++ b/hw/char/etraxfs_ser.c
@@ -113,7 +113,7 @@
break;
default:
r = s->regs[addr];
- D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr, r));
+ D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr, r));
break;
}
return r;
@@ -127,7 +127,7 @@
uint32_t value = val64;
unsigned char ch = val64;
- D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr, value));
+ D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr, value));
addr >>= 2;
switch (addr)
{
diff --git a/hw/core/loader.c b/hw/core/loader.c
index 0548830..173f8f6 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -61,6 +61,7 @@
#include "hw/boards.h"
#include "qemu/cutils.h"
#include "sysemu/runstate.h"
+#include "accel/tcg/debuginfo.h"
#include <zlib.h>
@@ -503,6 +504,10 @@
clear_lsb, data_swab, as, load_rom, sym_cb);
}
+ if (ret != ELF_LOAD_FAILED) {
+ debuginfo_report_elf(filename, fd, 0);
+ }
+
fail:
close(fd);
return ret;
@@ -1054,7 +1059,7 @@
rom->mr = mr;
snprintf(devpath, sizeof(devpath), "/rom@%s", file);
} else {
- snprintf(devpath, sizeof(devpath), "/rom@" TARGET_FMT_plx, addr);
+ snprintf(devpath, sizeof(devpath), "/rom@" HWADDR_FMT_plx, addr);
}
}
@@ -1238,10 +1243,10 @@
"\nThe following two regions overlap (in the %s address space):\n",
rom_as_name(rom));
error_printf(
- " %s (addresses 0x" TARGET_FMT_plx " - 0x" TARGET_FMT_plx ")\n",
+ " %s (addresses 0x" HWADDR_FMT_plx " - 0x" HWADDR_FMT_plx ")\n",
last_rom->name, last_rom->addr, last_rom->addr + last_rom->romsize);
error_printf(
- " %s (addresses 0x" TARGET_FMT_plx " - 0x" TARGET_FMT_plx ")\n",
+ " %s (addresses 0x" HWADDR_FMT_plx " - 0x" HWADDR_FMT_plx ")\n",
rom->name, rom->addr, rom->addr + rom->romsize);
}
@@ -1595,7 +1600,7 @@
rom->romsize,
rom->name);
} else if (!rom->fw_file) {
- g_string_append_printf(buf, "addr=" TARGET_FMT_plx
+ g_string_append_printf(buf, "addr=" HWADDR_FMT_plx
" size=0x%06zx mem=%s name=\"%s\"\n",
rom->addr, rom->romsize,
rom->isrom ? "rom" : "ram",
diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c
index 05c1da3..35f902b 100644
--- a/hw/core/sysbus.c
+++ b/hw/core/sysbus.c
@@ -269,7 +269,7 @@
for (i = 0; i < s->num_mmio; i++) {
size = memory_region_size(s->mmio[i].memory);
- monitor_printf(mon, "%*smmio " TARGET_FMT_plx "/" TARGET_FMT_plx "\n",
+ monitor_printf(mon, "%*smmio " HWADDR_FMT_plx "/" HWADDR_FMT_plx "\n",
indent, "", s->mmio[i].addr, size);
}
}
@@ -289,7 +289,7 @@
}
}
if (s->num_mmio) {
- return g_strdup_printf("%s@" TARGET_FMT_plx, qdev_fw_name(dev),
+ return g_strdup_printf("%s@" HWADDR_FMT_plx, qdev_fw_name(dev),
s->mmio[0].addr);
}
if (s->num_pio) {
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 55c32e3..b80f98b 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -2041,7 +2041,7 @@
} else {
val = 0xff;
qemu_log_mask(LOG_GUEST_ERROR,
- "cirrus: mem_readb 0x" TARGET_FMT_plx "\n", addr);
+ "cirrus: mem_readb 0x" HWADDR_FMT_plx "\n", addr);
}
return val;
}
@@ -2105,7 +2105,7 @@
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
- "cirrus: mem_writeb 0x" TARGET_FMT_plx " "
+ "cirrus: mem_writeb 0x" HWADDR_FMT_plx " "
"value 0x%02" PRIx64 "\n", addr, mem_value);
}
}
diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c
index caca86d..2903cab 100644
--- a/hw/display/g364fb.c
+++ b/hw/display/g364fb.c
@@ -320,7 +320,7 @@
break;
default:
{
- error_report("g364: invalid read at [" TARGET_FMT_plx "]",
+ error_report("g364: invalid read at [" HWADDR_FMT_plx "]",
addr);
val = 0;
break;
@@ -424,7 +424,7 @@
break;
default:
error_report("g364: invalid write of 0x%" PRIx64
- " at [" TARGET_FMT_plx "]", val, addr);
+ " at [" HWADDR_FMT_plx "]", val, addr);
break;
}
}
diff --git a/hw/display/vga.c b/hw/display/vga.c
index 0cb26a7..7a5fdff 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -875,7 +875,7 @@
uint32_t write_mask, bit_mask, set_mask;
#ifdef DEBUG_VGA_MEM
- printf("vga: [0x" TARGET_FMT_plx "] = 0x%02x\n", addr, val);
+ printf("vga: [0x" HWADDR_FMT_plx "] = 0x%02x\n", addr, val);
#endif
/* convert to VGA memory offset */
memory_map_mode = (s->gr[VGA_GFX_MISC] >> 2) & 3;
@@ -909,7 +909,7 @@
assert(addr < s->vram_size);
s->vram_ptr[addr] = val;
#ifdef DEBUG_VGA_MEM
- printf("vga: chain4: [0x" TARGET_FMT_plx "]\n", addr);
+ printf("vga: chain4: [0x" HWADDR_FMT_plx "]\n", addr);
#endif
s->plane_updated |= mask; /* only used to detect font change */
memory_region_set_dirty(&s->vram, addr, 1);
@@ -925,7 +925,7 @@
}
s->vram_ptr[addr] = val;
#ifdef DEBUG_VGA_MEM
- printf("vga: odd/even: [0x" TARGET_FMT_plx "]\n", addr);
+ printf("vga: odd/even: [0x" HWADDR_FMT_plx "]\n", addr);
#endif
s->plane_updated |= mask; /* only used to detect font change */
memory_region_set_dirty(&s->vram, addr, 1);
@@ -1003,7 +1003,7 @@
(((uint32_t *)s->vram_ptr)[addr] & ~write_mask) |
(val & write_mask);
#ifdef DEBUG_VGA_MEM
- printf("vga: latch: [0x" TARGET_FMT_plx "] mask=0x%08x val=0x%08x\n",
+ printf("vga: latch: [0x" HWADDR_FMT_plx "] mask=0x%08x val=0x%08x\n",
addr * 4, write_mask, val);
#endif
memory_region_set_dirty(&s->vram, addr << 2, sizeof(uint32_t));
diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c
index c4334e8..0fef00c 100644
--- a/hw/dma/etraxfs_dma.c
+++ b/hw/dma/etraxfs_dma.c
@@ -269,34 +269,34 @@
static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
- hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+ hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
- /* Load and decode. FIXME: handle endianness. */
- D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+ /* Load and decode. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
cpu_physical_memory_read(addr, &ctrl->channels[c].current_d,
sizeof(ctrl->channels[c].current_d));
- D(dump_d(c, &ctrl->channels[c].current_d));
- ctrl->channels[c].regs[RW_DATA] = addr;
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ ctrl->channels[c].regs[RW_DATA] = addr;
}
static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
{
- hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+ hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
- /* Encode and store. FIXME: handle endianness. */
- D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
- D(dump_d(c, &ctrl->channels[c].current_d));
+ /* Encode and store. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
+ D(dump_d(c, &ctrl->channels[c].current_d));
cpu_physical_memory_write(addr, &ctrl->channels[c].current_c,
sizeof(ctrl->channels[c].current_c));
}
static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
{
- hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+ hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
- /* Encode and store. FIXME: handle endianness. */
- D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+ /* Encode and store. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
cpu_physical_memory_write(addr, &ctrl->channels[c].current_d,
sizeof(ctrl->channels[c].current_d));
}
@@ -574,8 +574,8 @@
static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
{
- hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
- return 0;
+ hw_error("Unsupported short raccess. reg=" HWADDR_FMT_plx "\n", addr);
+ return 0;
}
static uint64_t
@@ -603,7 +603,7 @@
default:
r = ctrl->channels[c].regs[addr];
- D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n",
+ D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n",
__func__, c, addr));
break;
}
@@ -613,7 +613,7 @@
static void
dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
{
- hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr);
+ hw_error("Unsupported short waccess. reg=" HWADDR_FMT_plx "\n", addr);
}
static void
@@ -686,7 +686,7 @@
break;
default:
- D(printf ("%s c=%d " TARGET_FMT_plx "\n",
+ D(printf("%s c=%d " HWADDR_FMT_plx "\n",
__func__, c, addr));
break;
}
diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
index e5d521c..e7e67dd 100644
--- a/hw/dma/pl330.c
+++ b/hw/dma/pl330.c
@@ -1373,7 +1373,7 @@
pl330_exec(s);
} else {
qemu_log_mask(LOG_GUEST_ERROR, "pl330: write of illegal value %u "
- "for offset " TARGET_FMT_plx "\n", (unsigned)value,
+ "for offset " HWADDR_FMT_plx "\n", (unsigned)value,
offset);
}
break;
@@ -1384,7 +1384,7 @@
s->dbg[1] = value;
break;
default:
- qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad write offset " TARGET_FMT_plx
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad write offset " HWADDR_FMT_plx
"\n", offset);
break;
}
@@ -1409,7 +1409,7 @@
chan_id = offset >> 5;
if (chan_id >= s->num_chnls) {
qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
return 0;
}
switch (offset & 0x1f) {
@@ -1425,7 +1425,7 @@
return s->chan[chan_id].lc[1];
default:
qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
return 0;
}
}
@@ -1434,7 +1434,7 @@
chan_id = offset >> 3;
if (chan_id >= s->num_chnls) {
qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
return 0;
}
switch ((offset >> 2) & 1) {
@@ -1456,7 +1456,7 @@
chan_id = offset >> 2;
if (chan_id >= s->num_chnls) {
qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
return 0;
}
return s->chan[chan_id].fault_type;
@@ -1495,7 +1495,7 @@
return s->debug_status;
default:
qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
}
return 0;
}
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
index cbb8f0f..6030c76 100644
--- a/hw/dma/xilinx_axidma.c
+++ b/hw/dma/xilinx_axidma.c
@@ -456,7 +456,7 @@
break;
default:
r = s->regs[addr];
- D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+ D(qemu_log("%s ch=%d addr=" HWADDR_FMT_plx " v=%x\n",
__func__, sid, addr * 4, r));
break;
}
@@ -509,7 +509,7 @@
}
break;
default:
- D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+ D(qemu_log("%s: ch=%d addr=" HWADDR_FMT_plx " v=%x\n",
__func__, sid, addr * 4, (unsigned)value));
s->regs[addr] = value;
break;
diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c
index 1ce52ea..8800269 100644
--- a/hw/dma/xlnx_csu_dma.c
+++ b/hw/dma/xlnx_csu_dma.c
@@ -211,7 +211,7 @@
if (result == MEMTX_OK) {
xlnx_csu_dma_data_process(s, buf, len);
} else {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " HWADDR_FMT_plx
" for mem read", __func__, addr);
s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
xlnx_csu_dma_update_irq(s);
@@ -241,7 +241,7 @@
}
if (result != MEMTX_OK) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " HWADDR_FMT_plx
" for mem write", __func__, addr);
s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
xlnx_csu_dma_update_irq(s);
diff --git a/hw/i2c/mpc_i2c.c b/hw/i2c/mpc_i2c.c
index 8453925..219c548 100644
--- a/hw/i2c/mpc_i2c.c
+++ b/hw/i2c/mpc_i2c.c
@@ -224,7 +224,7 @@
break;
}
- DPRINTF("%s: addr " TARGET_FMT_plx " %02" PRIx32 "\n", __func__,
+ DPRINTF("%s: addr " HWADDR_FMT_plx " %02" PRIx32 "\n", __func__,
addr, value);
return (uint64_t)value;
}
@@ -234,7 +234,7 @@
{
MPCI2CState *s = opaque;
- DPRINTF("%s: addr " TARGET_FMT_plx " val %08" PRIx64 "\n", __func__,
+ DPRINTF("%s: addr " HWADDR_FMT_plx " val %08" PRIx64 "\n", __func__,
addr, value);
switch (addr) {
case MPC_I2C_ADR:
diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c
index 963e293..3332712 100644
--- a/hw/i386/multiboot.c
+++ b/hw/i386/multiboot.c
@@ -137,7 +137,7 @@
stl_p(p + MB_MOD_END, end);
stl_p(p + MB_MOD_CMDLINE, cmdline_phys);
- mb_debug("mod%02d: "TARGET_FMT_plx" - "TARGET_FMT_plx,
+ mb_debug("mod%02d: "HWADDR_FMT_plx" - "HWADDR_FMT_plx,
s->mb_mods_count, start, end);
s->mb_mods_count++;
@@ -353,7 +353,7 @@
mb_add_mod(&mbs, mbs.mb_buf_phys + offs,
mbs.mb_buf_phys + offs + mb_mod_length, c);
- mb_debug("mod_start: %p\nmod_end: %p\n cmdline: "TARGET_FMT_plx,
+ mb_debug("mod_start: %p\nmod_end: %p\n cmdline: "HWADDR_FMT_plx,
(char *)mbs.mb_buf + offs,
(char *)mbs.mb_buf + offs + mb_mod_length, c);
g_free(one_file);
@@ -382,8 +382,8 @@
stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP);
mb_debug("multiboot: entry_addr = %#x", mh_entry_addr);
- mb_debug(" mb_buf_phys = "TARGET_FMT_plx, mbs.mb_buf_phys);
- mb_debug(" mod_start = "TARGET_FMT_plx,
+ mb_debug(" mb_buf_phys = "HWADDR_FMT_plx, mbs.mb_buf_phys);
+ mb_debug(" mod_start = "HWADDR_FMT_plx,
mbs.mb_buf_phys + mbs.offset_mods);
mb_debug(" mb_mods_count = %d", mbs.mb_mods_count);
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index e4293d6..b9a6f7f 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -516,13 +516,13 @@
if (xen_set_mem_type(xen_domid, mem_type,
start_addr >> TARGET_PAGE_BITS,
size >> TARGET_PAGE_BITS)) {
- DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
+ DPRINTF("xen_set_mem_type error, addr: "HWADDR_FMT_plx"\n",
start_addr);
}
}
} else {
if (xen_remove_from_physmap(state, start_addr, size) < 0) {
- DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
+ DPRINTF("physmapping does not exist at "HWADDR_FMT_plx"\n", start_addr);
}
}
}
@@ -642,8 +642,8 @@
#endif
if (errno == ENODATA) {
memory_region_set_dirty(framebuffer, 0, size);
- DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
- ", 0x" TARGET_FMT_plx "): %s\n",
+ DPRINTF("xen: track_dirty_vram failed (0x" HWADDR_FMT_plx
+ ", 0x" HWADDR_FMT_plx "): %s\n",
start_addr, start_addr + size, strerror(errno));
}
return;
diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
index a2f9309..1d0879d 100644
--- a/hw/i386/xen/xen-mapcache.c
+++ b/hw/i386/xen/xen-mapcache.c
@@ -357,7 +357,7 @@
entry->lock++;
if (entry->lock == 0) {
fprintf(stderr,
- "mapcache entry lock overflow: "TARGET_FMT_plx" -> %p\n",
+ "mapcache entry lock overflow: "HWADDR_FMT_plx" -> %p\n",
entry->paddr_index, entry->vaddr_base);
abort();
}
@@ -404,7 +404,7 @@
if (!found) {
fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
- DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
+ DPRINTF(" "HWADDR_FMT_plx" -> %p is present\n", reventry->paddr_index,
reventry->vaddr_req);
}
abort();
@@ -445,7 +445,7 @@
if (!found) {
DPRINTF("%s, could not find %p\n", __func__, buffer);
QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
- DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
+ DPRINTF(" "HWADDR_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
}
return;
}
@@ -503,7 +503,7 @@
continue;
}
fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
- " "TARGET_FMT_plx" -> %p is present\n",
+ " "HWADDR_FMT_plx" -> %p is present\n",
reventry->paddr_index, reventry->vaddr_req);
}
@@ -562,7 +562,7 @@
entry = entry->next;
}
if (!entry) {
- DPRINTF("Trying to update an entry for "TARGET_FMT_plx \
+ DPRINTF("Trying to update an entry for "HWADDR_FMT_plx \
"that is not in the mapcache!\n", old_phys_addr);
return NULL;
}
@@ -570,15 +570,15 @@
address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT;
address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1);
- fprintf(stderr, "Replacing a dummy mapcache entry for "TARGET_FMT_plx \
- " with "TARGET_FMT_plx"\n", old_phys_addr, new_phys_addr);
+ fprintf(stderr, "Replacing a dummy mapcache entry for "HWADDR_FMT_plx \
+ " with "HWADDR_FMT_plx"\n", old_phys_addr, new_phys_addr);
xen_remap_bucket(entry, entry->vaddr_base,
cache_size, address_index, false);
if (!test_bits(address_offset >> XC_PAGE_SHIFT,
test_bit_size >> XC_PAGE_SHIFT,
entry->valid_mapping)) {
- DPRINTF("Unable to update a mapcache entry for "TARGET_FMT_plx"!\n",
+ DPRINTF("Unable to update a mapcache entry for "HWADDR_FMT_plx"!\n",
old_phys_addr);
return NULL;
}
diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
index 7db0d94..66e6de3 100644
--- a/hw/i386/xen/xen_platform.c
+++ b/hw/i386/xen/xen_platform.c
@@ -445,7 +445,7 @@
unsigned size)
{
DPRINTF("Warning: attempted read from physical address "
- "0x" TARGET_FMT_plx " in xen platform mmio space\n", addr);
+ "0x" HWADDR_FMT_plx " in xen platform mmio space\n", addr);
return 0;
}
@@ -454,7 +454,7 @@
uint64_t val, unsigned size)
{
DPRINTF("Warning: attempted write of 0x%"PRIx64" to physical "
- "address 0x" TARGET_FMT_plx " in xen platform mmio space\n",
+ "address 0x" HWADDR_FMT_plx " in xen platform mmio space\n",
val, addr);
}
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
index d599fef..35e8506 100644
--- a/hw/intc/arm_gicv3_dist.c
+++ b/hw/intc/arm_gicv3_dist.c
@@ -564,7 +564,7 @@
/* WO registers, return unknown value */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest read from WO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
*data = 0;
return true;
default:
@@ -773,7 +773,7 @@
/* RO registers, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
return true;
default:
return false;
@@ -838,7 +838,7 @@
if (!r) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid guest read at offset " TARGET_FMT_plx
+ "%s: invalid guest read at offset " HWADDR_FMT_plx
" size %u\n", __func__, offset, size);
trace_gicv3_dist_badread(offset, size, attrs.secure);
/* The spec requires that reserved registers are RAZ/WI;
@@ -879,7 +879,7 @@
if (!r) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid guest write at offset " TARGET_FMT_plx
+ "%s: invalid guest write at offset " HWADDR_FMT_plx
" size %u\n", __func__, offset, size);
trace_gicv3_dist_badwrite(offset, data, size, attrs.secure);
/* The spec requires that reserved registers are RAZ/WI;
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
index 57c79da..43dfd7a 100644
--- a/hw/intc/arm_gicv3_its.c
+++ b/hw/intc/arm_gicv3_its.c
@@ -1633,7 +1633,7 @@
/* RO register, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
}
break;
case GITS_CREADR + 4:
@@ -1643,7 +1643,7 @@
/* RO register, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
}
break;
case GITS_BASER ... GITS_BASER + 0x3f:
@@ -1675,7 +1675,7 @@
/* RO registers, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
break;
default:
result = false;
@@ -1785,14 +1785,14 @@
/* RO register, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
}
break;
case GITS_TYPER:
/* RO registers, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
break;
default:
result = false;
@@ -1851,7 +1851,7 @@
if (!result) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid guest read at offset " TARGET_FMT_plx
+ "%s: invalid guest read at offset " HWADDR_FMT_plx
" size %u\n", __func__, offset, size);
trace_gicv3_its_badread(offset, size);
/*
@@ -1887,7 +1887,7 @@
if (!result) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid guest write at offset " TARGET_FMT_plx
+ "%s: invalid guest write at offset " HWADDR_FMT_plx
" size %u\n", __func__, offset, size);
trace_gicv3_its_badwrite(offset, data, size);
/*
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
index c92ceec..297f7f0 100644
--- a/hw/intc/arm_gicv3_redist.c
+++ b/hw/intc/arm_gicv3_redist.c
@@ -601,7 +601,7 @@
/* RO registers, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
return MEMTX_OK;
/*
* VLPI frame registers. We don't need a version check for
@@ -668,7 +668,7 @@
/* RO register, ignore the write */
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write to RO register at offset "
- TARGET_FMT_plx "\n", __func__, offset);
+ HWADDR_FMT_plx "\n", __func__, offset);
return MEMTX_OK;
/*
* VLPI frame registers. We don't need a version check for
@@ -727,7 +727,7 @@
if (r != MEMTX_OK) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid guest read at offset " TARGET_FMT_plx
+ "%s: invalid guest read at offset " HWADDR_FMT_plx
" size %u\n", __func__, offset, size);
trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
size, attrs.secure);
@@ -786,7 +786,7 @@
if (r != MEMTX_OK) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid guest write at offset " TARGET_FMT_plx
+ "%s: invalid guest write at offset " HWADDR_FMT_plx
" size %u\n", __func__, offset, size);
trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
size, attrs.secure);
diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c
index a289510..4ba448f 100644
--- a/hw/intc/exynos4210_combiner.c
+++ b/hw/intc/exynos4210_combiner.c
@@ -120,7 +120,7 @@
default:
if (offset >> 2 >= IIC_REGSET_SIZE) {
hw_error("exynos4210.combiner: overflow of reg_set by 0x"
- TARGET_FMT_plx "offset\n", offset);
+ HWADDR_FMT_plx "offset\n", offset);
}
val = s->reg_set[offset >> 2];
}
@@ -184,19 +184,19 @@
if (req_quad_base_n >= IIC_NGRP) {
hw_error("exynos4210.combiner: unallowed write access at offset 0x"
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
return;
}
if (reg_n > 1) {
hw_error("exynos4210.combiner: unallowed write access at offset 0x"
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
return;
}
if (offset >> 2 >= IIC_REGSET_SIZE) {
hw_error("exynos4210.combiner: overflow of reg_set by 0x"
- TARGET_FMT_plx "offset\n", offset);
+ HWADDR_FMT_plx "offset\n", offset);
}
s->reg_set[offset >> 2] = val;
@@ -246,7 +246,7 @@
break;
default:
hw_error("exynos4210.combiner: unallowed write access at offset 0x"
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
break;
}
}
diff --git a/hw/misc/auxbus.c b/hw/misc/auxbus.c
index 8a8012f..28d50d9 100644
--- a/hw/misc/auxbus.c
+++ b/hw/misc/auxbus.c
@@ -299,7 +299,7 @@
s = AUX_SLAVE(dev);
- monitor_printf(mon, "%*smemory " TARGET_FMT_plx "/" TARGET_FMT_plx "\n",
+ monitor_printf(mon, "%*smemory " HWADDR_FMT_plx "/" HWADDR_FMT_plx "\n",
indent, "",
object_property_get_uint(OBJECT(s->mmio), "addr", NULL),
memory_region_size(s->mmio));
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 8270db5..d66d912 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -179,7 +179,7 @@
addr &= 0xfc;
- IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
+ IVSHMEM_DPRINTF("writing to addr " HWADDR_FMT_plx "\n", addr);
switch (addr)
{
case INTRMASK:
@@ -207,7 +207,7 @@
}
break;
default:
- IVSHMEM_DPRINTF("Unhandled write " TARGET_FMT_plx "\n", addr);
+ IVSHMEM_DPRINTF("Unhandled write " HWADDR_FMT_plx "\n", addr);
}
}
@@ -233,7 +233,7 @@
break;
default:
- IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
+ IVSHMEM_DPRINTF("why are we reading " HWADDR_FMT_plx "\n", addr);
ret = 0;
}
diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c
index efcc026..43bb1f5 100644
--- a/hw/misc/macio/mac_dbdma.c
+++ b/hw/misc/macio/mac_dbdma.c
@@ -704,7 +704,7 @@
DBDMA_channel *ch = &s->channels[channel];
int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
- DBDMA_DPRINTFCH(ch, "writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n",
+ DBDMA_DPRINTFCH(ch, "writel 0x" HWADDR_FMT_plx " <= 0x%08"PRIx64"\n",
addr, value);
DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
(uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
@@ -786,7 +786,7 @@
break;
}
- DBDMA_DPRINTFCH(ch, "readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
+ DBDMA_DPRINTFCH(ch, "readl 0x" HWADDR_FMT_plx " => 0x%08x\n", addr, value);
DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
(uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
diff --git a/hw/misc/mst_fpga.c b/hw/misc/mst_fpga.c
index 2aaadfa..7692825 100644
--- a/hw/misc/mst_fpga.c
+++ b/hw/misc/mst_fpga.c
@@ -131,7 +131,7 @@
return s->pcmcia1;
default:
printf("Mainstone - mst_fpga_readb: Bad register offset "
- "0x" TARGET_FMT_plx "\n", addr);
+ "0x" HWADDR_FMT_plx "\n", addr);
}
return 0;
}
@@ -185,7 +185,7 @@
break;
default:
printf("Mainstone - mst_fpga_writeb: Bad register offset "
- "0x" TARGET_FMT_plx "\n", addr);
+ "0x" HWADDR_FMT_plx "\n", addr);
}
}
diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c
index ecc0245..b861d8f 100644
--- a/hw/net/allwinner-sun8i-emac.c
+++ b/hw/net/allwinner-sun8i-emac.c
@@ -663,7 +663,7 @@
break;
default:
qemu_log_mask(LOG_UNIMP, "allwinner-h3-emac: read access to unknown "
- "EMAC register 0x" TARGET_FMT_plx "\n",
+ "EMAC register 0x" HWADDR_FMT_plx "\n",
offset);
}
@@ -760,7 +760,7 @@
break;
default:
qemu_log_mask(LOG_UNIMP, "allwinner-h3-emac: write access to unknown "
- "EMAC register 0x" TARGET_FMT_plx "\n",
+ "EMAC register 0x" HWADDR_FMT_plx "\n",
offset);
}
}
diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c
index ddddf35..372e5b6 100644
--- a/hw/net/allwinner_emac.c
+++ b/hw/net/allwinner_emac.c
@@ -304,7 +304,7 @@
default:
qemu_log_mask(LOG_UNIMP,
"allwinner_emac: read access to unknown register 0x"
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
ret = 0;
}
@@ -407,7 +407,7 @@
default:
qemu_log_mask(LOG_UNIMP,
"allwinner_emac: write access to unknown register 0x"
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
}
}
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
index b75d8e3..c753bfb 100644
--- a/hw/net/fsl_etsec/etsec.c
+++ b/hw/net/fsl_etsec/etsec.c
@@ -99,7 +99,7 @@
break;
}
- DPRINTF("Read 0x%08x @ 0x" TARGET_FMT_plx
+ DPRINTF("Read 0x%08x @ 0x" HWADDR_FMT_plx
" : %s (%s)\n",
ret, addr, reg->name, reg->desc);
@@ -276,7 +276,7 @@
}
}
- DPRINTF("Write 0x%08x @ 0x" TARGET_FMT_plx
+ DPRINTF("Write 0x%08x @ 0x" HWADDR_FMT_plx
" val:0x%08x->0x%08x : %s (%s)\n",
(unsigned int)value, addr, before, reg->value,
reg->name, reg->desc);
diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c
index a32589e..788463f 100644
--- a/hw/net/fsl_etsec/rings.c
+++ b/hw/net/fsl_etsec/rings.c
@@ -109,7 +109,7 @@
{
assert(bd != NULL);
- RING_DEBUG("READ Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr);
+ RING_DEBUG("READ Buffer Descriptor @ 0x" HWADDR_FMT_plx"\n", addr);
cpu_physical_memory_read(addr,
bd,
sizeof(eTSEC_rxtx_bd));
@@ -141,7 +141,7 @@
stl_be_p(&bd->bufptr, bd->bufptr);
}
- RING_DEBUG("Write Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr);
+ RING_DEBUG("Write Buffer Descriptor @ 0x" HWADDR_FMT_plx"\n", addr);
cpu_physical_memory_write(addr,
bd,
sizeof(eTSEC_rxtx_bd));
diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
index e63e524..d456094 100644
--- a/hw/net/pcnet.c
+++ b/hw/net/pcnet.c
@@ -908,11 +908,11 @@
s->csr[37] = nnrd >> 16;
#ifdef PCNET_DEBUG
if (bad) {
- printf("pcnet: BAD RMD RECORDS AFTER 0x" TARGET_FMT_plx "\n",
+ printf("pcnet: BAD RMD RECORDS AFTER 0x" HWADDR_FMT_plx "\n",
crda);
}
} else {
- printf("pcnet: BAD RMD RDA=0x" TARGET_FMT_plx "\n", crda);
+ printf("pcnet: BAD RMD RDA=0x" HWADDR_FMT_plx "\n", crda);
#endif
}
}
diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c
index cf54ddf..7ea8eb6 100644
--- a/hw/net/rocker/rocker.c
+++ b/hw/net/rocker/rocker.c
@@ -815,7 +815,7 @@
}
break;
default:
- DPRINTF("not implemented dma reg write(l) addr=0x" TARGET_FMT_plx
+ DPRINTF("not implemented dma reg write(l) addr=0x" HWADDR_FMT_plx
" val=0x%08x (ring %d, addr=0x%02x)\n",
addr, val, index, offset);
break;
@@ -857,7 +857,7 @@
r->lower32 = 0;
break;
default:
- DPRINTF("not implemented write(l) addr=0x" TARGET_FMT_plx
+ DPRINTF("not implemented write(l) addr=0x" HWADDR_FMT_plx
" val=0x%08x\n", addr, val);
break;
}
@@ -876,8 +876,8 @@
desc_ring_set_base_addr(r->rings[index], val);
break;
default:
- DPRINTF("not implemented dma reg write(q) addr=0x" TARGET_FMT_plx
- " val=0x" TARGET_FMT_plx " (ring %d, offset=0x%02x)\n",
+ DPRINTF("not implemented dma reg write(q) addr=0x" HWADDR_FMT_plx
+ " val=0x" HWADDR_FMT_plx " (ring %d, offset=0x%02x)\n",
addr, val, index, offset);
break;
}
@@ -895,8 +895,8 @@
rocker_port_phys_enable_write(r, val);
break;
default:
- DPRINTF("not implemented write(q) addr=0x" TARGET_FMT_plx
- " val=0x" TARGET_FMT_plx "\n", addr, val);
+ DPRINTF("not implemented write(q) addr=0x" HWADDR_FMT_plx
+ " val=0x" HWADDR_FMT_plx "\n", addr, val);
break;
}
}
@@ -987,8 +987,8 @@
static void rocker_mmio_write(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
{
- DPRINTF("Write %s addr " TARGET_FMT_plx
- ", size %u, val " TARGET_FMT_plx "\n",
+ DPRINTF("Write %s addr " HWADDR_FMT_plx
+ ", size %u, val " HWADDR_FMT_plx "\n",
rocker_reg_name(opaque, addr), addr, size, val);
switch (size) {
@@ -1060,7 +1060,7 @@
ret = desc_ring_get_credits(r->rings[index]);
break;
default:
- DPRINTF("not implemented dma reg read(l) addr=0x" TARGET_FMT_plx
+ DPRINTF("not implemented dma reg read(l) addr=0x" HWADDR_FMT_plx
" (ring %d, addr=0x%02x)\n", addr, index, offset);
ret = 0;
break;
@@ -1115,7 +1115,7 @@
ret = (uint32_t)(r->switch_id >> 32);
break;
default:
- DPRINTF("not implemented read(l) addr=0x" TARGET_FMT_plx "\n", addr);
+ DPRINTF("not implemented read(l) addr=0x" HWADDR_FMT_plx "\n", addr);
ret = 0;
break;
}
@@ -1136,7 +1136,7 @@
ret = desc_ring_get_base_addr(r->rings[index]);
break;
default:
- DPRINTF("not implemented dma reg read(q) addr=0x" TARGET_FMT_plx
+ DPRINTF("not implemented dma reg read(q) addr=0x" HWADDR_FMT_plx
" (ring %d, addr=0x%02x)\n", addr, index, offset);
ret = 0;
break;
@@ -1165,7 +1165,7 @@
ret = r->switch_id;
break;
default:
- DPRINTF("not implemented read(q) addr=0x" TARGET_FMT_plx "\n", addr);
+ DPRINTF("not implemented read(q) addr=0x" HWADDR_FMT_plx "\n", addr);
ret = 0;
break;
}
@@ -1174,7 +1174,7 @@
static uint64_t rocker_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
- DPRINTF("Read %s addr " TARGET_FMT_plx ", size %u\n",
+ DPRINTF("Read %s addr " HWADDR_FMT_plx ", size %u\n",
rocker_reg_name(opaque, addr), addr, size);
switch (size) {
diff --git a/hw/net/rocker/rocker_desc.c b/hw/net/rocker/rocker_desc.c
index f3068c9..675383d 100644
--- a/hw/net/rocker/rocker_desc.c
+++ b/hw/net/rocker/rocker_desc.c
@@ -104,7 +104,7 @@
bool desc_ring_set_base_addr(DescRing *ring, uint64_t base_addr)
{
if (base_addr & 0x7) {
- DPRINTF("ERROR: ring[%d] desc base addr (0x" TARGET_FMT_plx
+ DPRINTF("ERROR: ring[%d] desc base addr (0x" HWADDR_FMT_plx
") not 8-byte aligned\n", ring->index, base_addr);
return false;
}
diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
index 990ff3a..7e00965 100644
--- a/hw/net/xilinx_axienet.c
+++ b/hw/net/xilinx_axienet.c
@@ -524,7 +524,7 @@
if (addr < ARRAY_SIZE(s->regs)) {
r = s->regs[addr];
}
- DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
+ DENET(qemu_log("%s addr=" HWADDR_FMT_plx " v=%x\n",
__func__, addr * 4, r));
break;
}
@@ -630,7 +630,7 @@
break;
default:
- DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
+ DENET(qemu_log("%s addr=" HWADDR_FMT_plx " v=%x\n",
__func__, addr * 4, (unsigned)value));
if (addr < ARRAY_SIZE(s->regs)) {
s->regs[addr] = value;
diff --git a/hw/net/xilinx_ethlite.c b/hw/net/xilinx_ethlite.c
index 6e09f7e..99c2281 100644
--- a/hw/net/xilinx_ethlite.c
+++ b/hw/net/xilinx_ethlite.c
@@ -99,7 +99,7 @@
case R_RX_CTRL1:
case R_RX_CTRL0:
r = s->regs[addr];
- D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr * 4, r));
+ D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr * 4, r));
break;
default:
@@ -125,7 +125,7 @@
if (addr == R_TX_CTRL1)
base = 0x800 / 4;
- D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n",
+ D(qemu_log("%s addr=" HWADDR_FMT_plx " val=%x\n",
__func__, addr * 4, value));
if ((value & (CTRL_P | CTRL_S)) == CTRL_S) {
qemu_send_packet(qemu_get_queue(s->nic),
@@ -155,7 +155,7 @@
case R_TX_LEN0:
case R_TX_LEN1:
case R_TX_GIE0:
- D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n",
+ D(qemu_log("%s addr=" HWADDR_FMT_plx " val=%x\n",
__func__, addr * 4, value));
s->regs[addr] = value;
break;
diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c
index 870d9ba..e752a21 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -155,7 +155,7 @@
main_host_sbd = SYS_BUS_DEVICE(main_host);
if (main_host_sbd->num_mmio > 0) {
- return g_strdup_printf(TARGET_FMT_plx ",%x",
+ return g_strdup_printf(HWADDR_FMT_plx ",%x",
main_host_sbd->mmio[0].addr, position + 1);
}
if (main_host_sbd->num_pio > 0) {
diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c
index ac1eebf..1cf25ba 100644
--- a/hw/pci-host/bonito.c
+++ b/hw/pci-host/bonito.c
@@ -251,7 +251,7 @@
saddr = addr >> 2;
- DPRINTF("bonito_writel "TARGET_FMT_plx" val %lx saddr %x\n",
+ DPRINTF("bonito_writel "HWADDR_FMT_plx" val %lx saddr %x\n",
addr, val, saddr);
switch (saddr) {
case BONITO_BONPONCFG:
@@ -314,7 +314,7 @@
saddr = addr >> 2;
- DPRINTF("bonito_readl "TARGET_FMT_plx"\n", addr);
+ DPRINTF("bonito_readl "HWADDR_FMT_plx"\n", addr);
switch (saddr) {
case BONITO_INTISR:
return s->regs[saddr];
@@ -339,7 +339,7 @@
PCIBonitoState *s = opaque;
PCIDevice *d = PCI_DEVICE(s);
- DPRINTF("bonito_pciconf_writel "TARGET_FMT_plx" val %lx\n", addr, val);
+ DPRINTF("bonito_pciconf_writel "HWADDR_FMT_plx" val %lx\n", addr, val);
d->config_write(d, addr, val, 4);
}
@@ -350,7 +350,7 @@
PCIBonitoState *s = opaque;
PCIDevice *d = PCI_DEVICE(s);
- DPRINTF("bonito_pciconf_readl "TARGET_FMT_plx"\n", addr);
+ DPRINTF("bonito_pciconf_readl "HWADDR_FMT_plx"\n", addr);
return d->config_read(d, addr, 4);
}
@@ -466,7 +466,7 @@
regno = (cfgaddr & BONITO_PCICONF_REG_MASK_HW) >> BONITO_PCICONF_REG_OFFSET;
if (idsel == 0) {
- error_report("error in bonito pci config address 0x" TARGET_FMT_plx
+ error_report("error in bonito pci config address 0x" HWADDR_FMT_plx
",pcimap_cfg=0x%x", addr, s->regs[BONITO_PCIMAP_CFG]);
exit(1);
}
@@ -486,7 +486,7 @@
uint32_t pciaddr;
uint16_t status;
- DPRINTF("bonito_spciconf_write "TARGET_FMT_plx" size %d val %lx\n",
+ DPRINTF("bonito_spciconf_write "HWADDR_FMT_plx" size %d val %lx\n",
addr, size, val);
pciaddr = bonito_sbridge_pciaddr(s, addr);
@@ -516,7 +516,7 @@
uint32_t pciaddr;
uint16_t status;
- DPRINTF("bonito_spciconf_read "TARGET_FMT_plx" size %d\n", addr, size);
+ DPRINTF("bonito_spciconf_read "HWADDR_FMT_plx" size %d\n", addr, size);
pciaddr = bonito_sbridge_pciaddr(s, addr);
diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c
index 568849e..3881424 100644
--- a/hw/pci-host/ppce500.c
+++ b/hw/pci-host/ppce500.c
@@ -189,7 +189,7 @@
break;
}
- pci_debug("%s: win:%lx(addr:" TARGET_FMT_plx ") -> value:%x\n", __func__,
+ pci_debug("%s: win:%lx(addr:" HWADDR_FMT_plx ") -> value:%x\n", __func__,
win, addr, value);
return value;
}
@@ -268,7 +268,7 @@
win = addr & 0xfe0;
- pci_debug("%s: value:%x -> win:%lx(addr:" TARGET_FMT_plx ")\n",
+ pci_debug("%s: value:%x -> win:%lx(addr:" HWADDR_FMT_plx ")\n",
__func__, (unsigned)value, win, addr);
switch (win) {
diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c
index ead1d3e..dfd185b 100644
--- a/hw/pci/pci_host.c
+++ b/hw/pci/pci_host.c
@@ -149,7 +149,7 @@
{
PCIHostState *s = opaque;
- PCI_DPRINTF("%s addr " TARGET_FMT_plx " len %d val %"PRIx64"\n",
+ PCI_DPRINTF("%s addr " HWADDR_FMT_plx " len %d val %"PRIx64"\n",
__func__, addr, len, val);
if (addr != 0 || len != 4) {
return;
@@ -163,7 +163,7 @@
PCIHostState *s = opaque;
uint32_t val = s->config_reg;
- PCI_DPRINTF("%s addr " TARGET_FMT_plx " len %d val %"PRIx32"\n",
+ PCI_DPRINTF("%s addr " HWADDR_FMT_plx " len %d val %"PRIx32"\n",
__func__, addr, len, val);
return val;
}
diff --git a/hw/ppc/ppc4xx_sdram.c b/hw/ppc/ppc4xx_sdram.c
index a24c80b..4501fb2 100644
--- a/hw/ppc/ppc4xx_sdram.c
+++ b/hw/ppc/ppc4xx_sdram.c
@@ -500,7 +500,7 @@
bcr = 0x8000;
break;
default:
- error_report("invalid RAM size " TARGET_FMT_plx, ram_size);
+ error_report("invalid RAM size " HWADDR_FMT_plx, ram_size);
return 0;
}
bcr |= ram_base >> 2 & 0xffe00000;
diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c
index d1620c7..2b8a38a 100644
--- a/hw/rtc/exynos4210_rtc.c
+++ b/hw/rtc/exynos4210_rtc.c
@@ -374,7 +374,7 @@
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "exynos4210.rtc: bad read offset " TARGET_FMT_plx,
+ "exynos4210.rtc: bad read offset " HWADDR_FMT_plx,
offset);
break;
}
@@ -508,7 +508,7 @@
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "exynos4210.rtc: bad write offset " TARGET_FMT_plx,
+ "exynos4210.rtc: bad write offset " HWADDR_FMT_plx,
offset);
break;
diff --git a/hw/s390x/pv.c b/hw/s390x/pv.c
index 8dfe92d..8a1c714 100644
--- a/hw/s390x/pv.c
+++ b/hw/s390x/pv.c
@@ -20,6 +20,7 @@
#include "exec/confidential-guest-support.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/pv.h"
+#include "hw/s390x/sclp.h"
#include "target/s390x/kvm/kvm_s390x.h"
static bool info_valid;
@@ -249,6 +250,41 @@
ConfidentialGuestSupportClass parent_class;
};
+/*
+ * If protected virtualization is enabled, the amount of data that the
+ * Read SCP Info Service Call can use is limited to one page. The
+ * available space also depends on the Extended-Length SCCB (ELS)
+ * feature which can take more buffer space to store feature
+ * information. This impacts the maximum number of CPUs supported in
+ * the machine.
+ */
+static uint32_t s390_pv_get_max_cpus(void)
+{
+ int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ?
+ offsetof(ReadInfo, entries) : SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET;
+
+ return (TARGET_PAGE_SIZE - offset_cpu) / sizeof(CPUEntry);
+}
+
+static bool s390_pv_check_cpus(Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ uint32_t pv_max_cpus = s390_pv_get_max_cpus();
+
+ if (ms->smp.max_cpus > pv_max_cpus) {
+ error_setg(errp, "Protected VMs support a maximum of %d CPUs",
+ pv_max_cpus);
+ return false;
+ }
+
+ return true;
+}
+
+static bool s390_pv_guest_check(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ return s390_pv_check_cpus(errp);
+}
+
int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
{
if (!object_dynamic_cast(OBJECT(cgs), TYPE_S390_PV_GUEST)) {
@@ -261,6 +297,10 @@
return -1;
}
+ if (!s390_pv_guest_check(cgs, errp)) {
+ return -1;
+ }
+
cgs->ready = true;
return 0;
diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c
index c77792d..ebe0fd9 100644
--- a/hw/sh4/sh7750.c
+++ b/hw/sh4/sh7750.c
@@ -207,13 +207,13 @@
static void error_access(const char *kind, hwaddr addr)
{
- fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") not supported\n",
+ fprintf(stderr, "%s to %s (0x" HWADDR_FMT_plx ") not supported\n",
kind, regname(addr), addr);
}
static void ignore_access(const char *kind, hwaddr addr)
{
- fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") ignored\n",
+ fprintf(stderr, "%s to %s (0x" HWADDR_FMT_plx ") ignored\n",
kind, regname(addr), addr);
}
diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c
index b2819a7..5529276 100644
--- a/hw/ssi/xilinx_spi.c
+++ b/hw/ssi/xilinx_spi.c
@@ -232,7 +232,7 @@
break;
}
- DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr * 4, r);
+ DB_PRINT("addr=" HWADDR_FMT_plx " = %x\n", addr * 4, r);
xlx_spi_update_irq(s);
return r;
}
@@ -244,7 +244,7 @@
XilinxSPI *s = opaque;
uint32_t value = val64;
- DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr, value);
+ DB_PRINT("addr=" HWADDR_FMT_plx " = %x\n", addr, value);
addr >>= 2;
switch (addr) {
case R_SRR:
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
index 1e9dba2..97009d3 100644
--- a/hw/ssi/xilinx_spips.c
+++ b/hw/ssi/xilinx_spips.c
@@ -887,7 +887,7 @@
case R_INTR_STATUS:
ret = s->regs[addr] & IXR_ALL;
s->regs[addr] = 0;
- DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret);
+ DB_PRINT_L(0, "addr=" HWADDR_FMT_plx " = %x\n", addr * 4, ret);
xilinx_spips_update_ixr(s);
return ret;
case R_INTR_MASK:
@@ -916,12 +916,12 @@
if (!(s->regs[R_CONFIG] & R_CONFIG_ENDIAN)) {
ret <<= 8 * shortfall;
}
- DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret);
+ DB_PRINT_L(0, "addr=" HWADDR_FMT_plx " = %x\n", addr * 4, ret);
xilinx_spips_check_flush(s);
xilinx_spips_update_ixr(s);
return ret;
}
- DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4,
+ DB_PRINT_L(0, "addr=" HWADDR_FMT_plx " = %x\n", addr * 4,
s->regs[addr] & mask);
return s->regs[addr] & mask;
@@ -971,7 +971,7 @@
XilinxSPIPS *s = opaque;
bool try_flush = true;
- DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr, (unsigned)value);
+ DB_PRINT_L(0, "addr=" HWADDR_FMT_plx " = %x\n", addr, (unsigned)value);
addr >>= 2;
switch (addr) {
case R_CONFIG:
diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c
index d5186f4..973eab4 100644
--- a/hw/timer/digic-timer.c
+++ b/hw/timer/digic-timer.c
@@ -76,7 +76,7 @@
default:
qemu_log_mask(LOG_UNIMP,
"digic-timer: read access to unknown register 0x"
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
}
return ret;
@@ -116,7 +116,7 @@
default:
qemu_log_mask(LOG_UNIMP,
"digic-timer: read access to unknown register 0x"
- TARGET_FMT_plx "\n", offset);
+ HWADDR_FMT_plx "\n", offset);
}
}
diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c
index ecc2831..2d6d92e 100644
--- a/hw/timer/etraxfs_timer.c
+++ b/hw/timer/etraxfs_timer.c
@@ -324,8 +324,7 @@
t->rw_ack_intr = 0;
break;
default:
- printf ("%s " TARGET_FMT_plx " %x\n",
- __func__, addr, value);
+ printf("%s " HWADDR_FMT_plx " %x\n", __func__, addr, value);
break;
}
}
diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c
index e175a9f..c17b247 100644
--- a/hw/timer/exynos4210_mct.c
+++ b/hw/timer/exynos4210_mct.c
@@ -1445,7 +1445,7 @@
case L0_ICNTO: case L1_ICNTO:
case L0_FRCNTO: case L1_FRCNTO:
qemu_log_mask(LOG_GUEST_ERROR,
- "exynos4210.mct: write to RO register " TARGET_FMT_plx,
+ "exynos4210.mct: write to RO register " HWADDR_FMT_plx,
offset);
break;
diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c
index 02924a9..3528d0f 100644
--- a/hw/timer/exynos4210_pwm.c
+++ b/hw/timer/exynos4210_pwm.c
@@ -257,7 +257,7 @@
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "exynos4210.pwm: bad read offset " TARGET_FMT_plx,
+ "exynos4210.pwm: bad read offset " HWADDR_FMT_plx,
offset);
break;
}
@@ -352,7 +352,7 @@
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "exynos4210.pwm: bad write offset " TARGET_FMT_plx,
+ "exynos4210.pwm: bad write offset " HWADDR_FMT_plx,
offset);
break;
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 103260e..23ba625 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -829,10 +829,10 @@
assert(section.mr);
if (proxy_path) {
- path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
+ path = g_strdup_printf("%s/virtio-mmio@" HWADDR_FMT_plx, proxy_path,
section.offset_within_address_space);
} else {
- path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
+ path = g_strdup_printf("virtio-mmio@" HWADDR_FMT_plx,
section.offset_within_address_space);
}
memory_region_unref(section.mr);
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index 0ec7e52..8db0532 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -434,7 +434,7 @@
PCIDevice *d = o;
/* if this function is called, that probably means that there is a
* misconfiguration of the IOMMU. */
- XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n",
+ XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"HWADDR_FMT_plx"\n",
addr);
return 0;
}
@@ -443,7 +443,7 @@
{
PCIDevice *d = o;
/* Same comment as xen_pt_bar_read function */
- XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n",
+ XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"HWADDR_FMT_plx"\n",
addr);
}
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 25e11b0..54585a9 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -585,9 +585,10 @@
* setting one of the jump targets (or patching the jump instruction). Only
* two of such jumps are supported.
*/
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
uint16_t jmp_reset_offset[2]; /* offset of original jump target */
-#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
- uintptr_t jmp_target_arg[2]; /* target address or offset */
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
+ uintptr_t jmp_target_addr[2]; /* target address */
/*
* Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
index 8f16d17..50fbb2d 100644
--- a/include/exec/hwaddr.h
+++ b/include/exec/hwaddr.h
@@ -10,7 +10,7 @@
typedef uint64_t hwaddr;
#define HWADDR_MAX UINT64_MAX
-#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_FMT_plx "%016" PRIx64
#define HWADDR_PRId PRId64
#define HWADDR_PRIi PRIi64
#define HWADDR_PRIo PRIo64
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index b949d75..6f49717 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -552,20 +552,15 @@
int nb_indirects;
int nb_ops;
- /* goto_tb support */
- tcg_insn_unit *code_buf;
- uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
- uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
- uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
-
TCGRegSet reserved_regs;
- uint32_t tb_cflags; /* cflags of the current TB */
intptr_t current_frame_offset;
intptr_t frame_start;
intptr_t frame_end;
TCGTemp *frame_temp;
- tcg_insn_unit *code_ptr;
+ TranslationBlock *gen_tb; /* tb for which code is being generated */
+ tcg_insn_unit *code_buf; /* pointer for start of tb */
+ tcg_insn_unit *code_ptr; /* pointer for running end of tb */
#ifdef CONFIG_PROFILER
TCGProfile prof;
@@ -838,6 +833,9 @@
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start);
+void tb_target_set_jmp_target(const TranslationBlock *, int,
+ uintptr_t, uintptr_t);
+
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 20894b6..5928c14 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -19,6 +19,7 @@
#include "qemu/selfmap.h"
#include "qapi/error.h"
#include "target_signal.h"
+#include "accel/tcg/debuginfo.h"
#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
@@ -3261,6 +3262,8 @@
load_symbols(ehdr, image_fd, load_bias);
}
+ debuginfo_report_elf(image_name, image_fd, load_bias);
+
mmap_unlock();
close(image_fd);
diff --git a/linux-user/exit.c b/linux-user/exit.c
index fa6ef0b..607b6da 100644
--- a/linux-user/exit.c
+++ b/linux-user/exit.c
@@ -17,6 +17,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "accel/tcg/perf.h"
#include "exec/gdbstub.h"
#include "qemu.h"
#include "user-internals.h"
@@ -38,4 +39,5 @@
#endif
gdb_exit(code);
qemu_plugin_user_exit();
+ perf_exit();
}
diff --git a/linux-user/main.c b/linux-user/main.c
index a17fed0..4290651 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -53,6 +53,7 @@
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
+#include "accel/tcg/perf.h"
#ifdef CONFIG_SEMIHOSTING
#include "semihosting/semihost.h"
@@ -423,6 +424,16 @@
}
#endif
+static void handle_arg_perfmap(const char *arg)
+{
+ perf_enable_perfmap();
+}
+
+static void handle_arg_jitdump(const char *arg)
+{
+ perf_enable_jitdump();
+}
+
static QemuPluginList plugins = QTAILQ_HEAD_INITIALIZER(plugins);
#ifdef CONFIG_PLUGIN
@@ -493,6 +504,10 @@
{"xtensa-abi-call0", "QEMU_XTENSA_ABI_CALL0", false, handle_arg_abi_call0,
"", "assume CALL0 Xtensa ABI"},
#endif
+ {"perfmap", "QEMU_PERFMAP", false, handle_arg_perfmap,
+ "", "Generate a /tmp/perf-${pid}.map file for perf"},
+ {"jitdump", "QEMU_JITDUMP", false, handle_arg_jitdump,
+ "", "Generate a jit-${pid}.dump file for perf"},
{NULL, NULL, false, NULL, NULL, NULL}
};
diff --git a/linux-user/meson.build b/linux-user/meson.build
index de4320a..7171dc6 100644
--- a/linux-user/meson.build
+++ b/linux-user/meson.build
@@ -22,6 +22,7 @@
'uname.c',
))
linux_user_ss.add(rt)
+linux_user_ss.add(libdw)
linux_user_ss.add(when: 'TARGET_HAS_BFLT', if_true: files('flatload.c'))
linux_user_ss.add(when: 'TARGET_I386', if_true: files('vm86.c'))
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 61c6fa3..098f3a7 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -695,7 +695,7 @@
/* abort execution with signal */
static G_NORETURN
-void dump_core_and_abort(int target_sig)
+void dump_core_and_abort(CPUArchState *cpu_env, int target_sig)
{
CPUState *cpu = thread_cpu;
CPUArchState *env = cpu->env_ptr;
@@ -724,6 +724,8 @@
target_sig, strsignal(host_sig), "core dumped" );
}
+ preexit_cleanup(cpu_env, 128 + target_sig);
+
/* The proper exit code for dying from an uncaught signal is
* -<signal>. The kernel doesn't allow exit() or _exit() to pass
* a negative value. To get the proper exit code we need to
@@ -1058,12 +1060,12 @@
sig != TARGET_SIGURG &&
sig != TARGET_SIGWINCH &&
sig != TARGET_SIGCONT) {
- dump_core_and_abort(sig);
+ dump_core_and_abort(cpu_env, sig);
}
} else if (handler == TARGET_SIG_IGN) {
/* ignore sig */
} else if (handler == TARGET_SIG_ERR) {
- dump_core_and_abort(sig);
+ dump_core_and_abort(cpu_env, sig);
} else {
/* compute the blocked signals during the handler execution */
sigset_t *blocked_set;
diff --git a/meson.build b/meson.build
index 5d68a8f..58d8cd6 100644
--- a/meson.build
+++ b/meson.build
@@ -1648,6 +1648,12 @@
endif
endif
+# libdw
+libdw = dependency('libdw',
+ method: 'pkg-config',
+ kwargs: static_kwargs,
+ required: false)
+
#################
# config-host.h #
#################
@@ -1923,6 +1929,7 @@
config_host_data.set('CONFIG_CFI', get_option('cfi'))
config_host_data.set('CONFIG_SELINUX', selinux.found())
config_host_data.set('CONFIG_XEN_BACKEND', xen.found())
+config_host_data.set('CONFIG_LIBDW', libdw.found())
if xen.found()
# protect from xen.version() having less than three components
xen_version = xen.version().split('.') + ['0', '0']
@@ -2331,11 +2338,9 @@
config_host_data.set('CONFIG_AVX2_OPT', get_option('avx2') \
.require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX2') \
.require(cc.links('''
- #pragma GCC push_options
- #pragma GCC target("avx2")
#include <cpuid.h>
#include <immintrin.h>
- static int bar(void *a) {
+ static int __attribute__((target("avx2"))) bar(void *a) {
__m256i x = *(__m256i *)a;
return _mm256_testz_si256(x, x);
}
@@ -2345,11 +2350,9 @@
config_host_data.set('CONFIG_AVX512F_OPT', get_option('avx512f') \
.require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512F') \
.require(cc.links('''
- #pragma GCC push_options
- #pragma GCC target("avx512f")
#include <cpuid.h>
#include <immintrin.h>
- static int bar(void *a) {
+ static int __attribute__((target("avx512f"))) bar(void *a) {
__m512i x = *(__m512i *)a;
return _mm512_test_epi64_mask(x, x);
}
@@ -3976,6 +3979,7 @@
# Dummy dependency, keep .found()
summary_info += {'FUSE lseek': fuse_lseek.found()}
summary_info += {'selinux': selinux}
+summary_info += {'libdw': libdw}
summary(summary_info, bool_yn: true, section: 'Dependencies')
if not supported_cpus.contains(cpu)
diff --git a/monitor/misc.c b/monitor/misc.c
index bf3f1c6..fa0a42c 100644
--- a/monitor/misc.c
+++ b/monitor/misc.c
@@ -566,7 +566,7 @@
while (len > 0) {
if (is_physical) {
- monitor_printf(mon, TARGET_FMT_plx ":", addr);
+ monitor_printf(mon, HWADDR_FMT_plx ":", addr);
} else {
monitor_printf(mon, TARGET_FMT_lx ":", (target_ulong)addr);
}
diff --git a/qemu-options.hx b/qemu-options.hx
index 3aa3a2f..d59d197 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -4838,6 +4838,26 @@
Enable synchronization profiling.
ERST
+#if defined(CONFIG_TCG) && defined(CONFIG_LINUX)
+DEF("perfmap", 0, QEMU_OPTION_perfmap,
+ "-perfmap generate a /tmp/perf-${pid}.map file for perf\n",
+ QEMU_ARCH_ALL)
+SRST
+``-perfmap``
+ Generate a map file for Linux perf tools that will allow basic profiling
+ information to be broken down into basic blocks.
+ERST
+
+DEF("jitdump", 0, QEMU_OPTION_jitdump,
+ "-jitdump generate a jit-${pid}.dump file for perf\n",
+ QEMU_ARCH_ALL)
+SRST
+``-jitdump``
+ Generate a dump file for Linux perf tools that maps basic blocks to symbol
+ names, line numbers and JITted code.
+ERST
+#endif
+
DEFHEADING()
DEFHEADING(Generic object creation:)
diff --git a/softmmu/memory.c b/softmmu/memory.c
index e05332d..9d64efc 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -1281,7 +1281,7 @@
unsigned size)
{
#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
+ printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
#endif
return 0;
}
@@ -1290,7 +1290,7 @@
uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
+ printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}
@@ -3220,9 +3220,9 @@
for (i = 0; i < level; i++) {
qemu_printf(MTREE_INDENT);
}
- qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
- " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
- "-" TARGET_FMT_plx "%s",
+ qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
+ " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
+ "-" HWADDR_FMT_plx "%s",
cur_start, cur_end,
mr->priority,
mr->nonvolatile ? "nv-" : "",
@@ -3242,7 +3242,7 @@
for (i = 0; i < level; i++) {
qemu_printf(MTREE_INDENT);
}
- qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
+ qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
" (prio %d, %s%s): %s%s",
cur_start, cur_end,
mr->priority,
@@ -3329,8 +3329,8 @@
while (n--) {
mr = range->mr;
if (range->offset_in_region) {
- qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
- " (prio %d, %s%s): %s @" TARGET_FMT_plx,
+ qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
+ " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
int128_get64(range->addr.start),
int128_get64(range->addr.start)
+ MR_SIZE(range->addr.size),
@@ -3340,7 +3340,7 @@
memory_region_name(mr),
range->offset_in_region);
} else {
- qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
+ qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
" (prio %d, %s%s): %s",
int128_get64(range->addr.start),
int128_get64(range->addr.start)
diff --git a/softmmu/memory_mapping.c b/softmmu/memory_mapping.c
index f6f0a82..d7f1d09 100644
--- a/softmmu/memory_mapping.c
+++ b/softmmu/memory_mapping.c
@@ -241,8 +241,8 @@
}
#ifdef DEBUG_GUEST_PHYS_REGION_ADD
- fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
- TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start,
+ fprintf(stderr, "%s: target_start=" HWADDR_FMT_plx " target_end="
+ HWADDR_FMT_plx ": %s (count: %u)\n", __func__, target_start,
target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index edec095..bf585e4 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -2475,7 +2475,7 @@
MemTxResult res;
#if defined(DEBUG_SUBPAGE)
- printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
+ printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__,
subpage, len, addr);
#endif
res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
@@ -2493,7 +2493,7 @@
uint8_t buf[8];
#if defined(DEBUG_SUBPAGE)
- printf("%s: subpage %p len %u addr " TARGET_FMT_plx
+ printf("%s: subpage %p len %u addr " HWADDR_FMT_plx
" value %"PRIx64"\n",
__func__, subpage, len, addr, value);
#endif
@@ -2507,7 +2507,7 @@
{
subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
- printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
+ printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n",
__func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif
@@ -2558,7 +2558,7 @@
NULL, TARGET_PAGE_SIZE);
mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
- printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
+ printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__,
mmio, base, TARGET_PAGE_SIZE);
#endif
@@ -3703,7 +3703,7 @@
const char *names[] = { " [unassigned]", " [not dirty]",
" [ROM]", " [watch]" };
- qemu_printf(" #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx
+ qemu_printf(" #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx
" %s%s%s%s%s",
i,
s->offset_within_address_space,
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 9bd0e52..9177d95 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -96,6 +96,9 @@
#include "fsdev/qemu-fsdev.h"
#endif
#include "sysemu/qtest.h"
+#ifdef CONFIG_TCG
+#include "accel/tcg/perf.h"
+#endif
#include "disas/disas.h"
@@ -2926,6 +2929,14 @@
case QEMU_OPTION_DFILTER:
qemu_set_dfilter_ranges(optarg, &error_fatal);
break;
+#if defined(CONFIG_TCG) && defined(CONFIG_LINUX)
+ case QEMU_OPTION_perfmap:
+ perf_enable_perfmap();
+ break;
+ case QEMU_OPTION_jitdump:
+ perf_enable_jitdump();
+ break;
+#endif
case QEMU_OPTION_seed:
qemu_guest_random_seed_main(optarg, &error_fatal);
break;
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 8e4b4d6..ad5b7b8 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -57,7 +57,7 @@
{
addr = addr_canonical(env, addr);
- monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
+ monitor_printf(mon, HWADDR_FMT_plx ": " HWADDR_FMT_plx
" %c%c%c%c%c%c%c%c%c\n",
addr,
pte & mask,
@@ -258,8 +258,8 @@
prot1 = *plast_prot;
if (prot != prot1) {
if (*pstart != -1) {
- monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
- TARGET_FMT_plx " %c%c%c\n",
+ monitor_printf(mon, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
+ HWADDR_FMT_plx " %c%c%c\n",
addr_canonical(env, *pstart),
addr_canonical(env, end),
addr_canonical(env, end - *pstart),
diff --git a/target/loongarch/tlb_helper.c b/target/loongarch/tlb_helper.c
index c6d1de5..cce1db1 100644
--- a/target/loongarch/tlb_helper.c
+++ b/target/loongarch/tlb_helper.c
@@ -655,7 +655,7 @@
physical & TARGET_PAGE_MASK, prot,
mmu_idx, TARGET_PAGE_SIZE);
qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
+ "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
" prot %d\n", __func__, address, physical, prot);
return true;
} else {
diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c
index fdc4937..3a37d8f 100644
--- a/target/m68k/fpu_helper.c
+++ b/target/m68k/fpu_helper.c
@@ -515,37 +515,50 @@
return fmovem_postinc(env, addr, mask, cpu_ld_float64_ra);
}
-static void make_quotient(CPUM68KState *env, floatx80 val)
+static void make_quotient(CPUM68KState *env, int sign, uint32_t quotient)
{
- int32_t quotient;
- int sign;
-
- if (floatx80_is_any_nan(val)) {
- return;
- }
-
- quotient = floatx80_to_int32(val, &env->fp_status);
- sign = quotient < 0;
- if (sign) {
- quotient = -quotient;
- }
-
quotient = (sign << 7) | (quotient & 0x7f);
env->fpsr = (env->fpsr & ~FPSR_QT_MASK) | (quotient << FPSR_QT_SHIFT);
}
void HELPER(fmod)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
{
- res->d = floatx80_mod(val1->d, val0->d, &env->fp_status);
+ uint64_t quotient;
+ int sign = extractFloatx80Sign(val1->d) ^ extractFloatx80Sign(val0->d);
- make_quotient(env, res->d);
+ res->d = floatx80_modrem(val1->d, val0->d, true, &quotient,
+ &env->fp_status);
+
+ if (floatx80_is_any_nan(res->d)) {
+ return;
+ }
+
+ make_quotient(env, sign, quotient);
}
void HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
{
- res->d = floatx80_rem(val1->d, val0->d, &env->fp_status);
+ FPReg fp_quot;
+ floatx80 fp_rem;
- make_quotient(env, res->d);
+ fp_rem = floatx80_rem(val1->d, val0->d, &env->fp_status);
+ if (!floatx80_is_any_nan(fp_rem)) {
+ float_status fp_status = { };
+ uint32_t quotient;
+ int sign;
+
+ /* Calculate quotient directly using round to nearest mode */
+ set_float_rounding_mode(float_round_nearest_even, &fp_status);
+ set_floatx80_rounding_precision(
+ get_floatx80_rounding_precision(&env->fp_status), &fp_status);
+ fp_quot.d = floatx80_div(val1->d, val0->d, &fp_status);
+
+ sign = extractFloatx80Sign(fp_quot.d);
+ quotient = floatx80_to_int32(floatx80_abs(fp_quot.d), &env->fp_status);
+ make_quotient(env, sign, quotient);
+ }
+
+ res->d = fp_rem;
}
void HELPER(fgetexp)(CPUM68KState *env, FPReg *res, FPReg *val)
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index 5b745d0..f637803 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -403,7 +403,7 @@
CPUMBState *env = &cpu->env;
qemu_log_mask(CPU_LOG_INT, "Transaction failed: vaddr 0x%" VADDR_PRIx
- " physaddr 0x" TARGET_FMT_plx " size %d access type %s\n",
+ " physaddr 0x" HWADDR_FMT_plx " size %d access type %s\n",
addr, physaddr, size,
access_type == MMU_INST_FETCH ? "INST_FETCH" :
(access_type == MMU_DATA_LOAD ? "DATA_LOAD" : "DATA_STORE"));
diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c
index 9d16859..e5e1e9d 100644
--- a/target/mips/tcg/sysemu/tlb_helper.c
+++ b/target/mips/tcg/sysemu/tlb_helper.c
@@ -924,7 +924,7 @@
switch (ret) {
case TLBRET_MATCH:
qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
+ "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
" prot %d\n", __func__, address, physical, prot);
break;
default:
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
index cc091c3..3976416 100644
--- a/target/ppc/mmu-hash32.c
+++ b/target/ppc/mmu-hash32.c
@@ -346,24 +346,24 @@
ptem = (vsid << 7) | (pgidx >> 10);
/* Page address translation */
- qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
- " htab_mask " TARGET_FMT_plx
- " hash " TARGET_FMT_plx "\n",
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx
+ " htab_mask " HWADDR_FMT_plx
+ " hash " HWADDR_FMT_plx "\n",
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
/* Primary PTEG lookup */
- qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=%" PRIx32 " ptem=%" PRIx32
- " hash=" TARGET_FMT_plx "\n",
+ " hash=" HWADDR_FMT_plx "\n",
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
vsid, ptem, hash);
pteg_off = get_pteg_offset32(cpu, hash);
pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
- qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=%" PRIx32 " api=%" PRIx32
- " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
+ " hash=" HWADDR_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
pteg_off = get_pteg_offset32(cpu, ~hash);
pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index b9b31fd..900f906 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -697,15 +697,15 @@
/* Page address translation */
qemu_log_mask(CPU_LOG_MMU,
- "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
- " hash " TARGET_FMT_plx "\n",
+ "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
+ " hash " HWADDR_FMT_plx "\n",
ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
/* Primary PTEG lookup */
qemu_log_mask(CPU_LOG_MMU,
- "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
- " hash=" TARGET_FMT_plx "\n",
+ " hash=" HWADDR_FMT_plx "\n",
ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
vsid, ptem, hash);
ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
@@ -714,9 +714,9 @@
/* Secondary PTEG lookup */
ptem |= HPTE64_V_SECONDARY;
qemu_log_mask(CPU_LOG_MMU,
- "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
- " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
+ " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index 8901f4d..7235a4b 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -252,7 +252,7 @@
}
if (best != -1) {
done:
- qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
" prot=%01x ret=%d\n",
ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
/* Update page flags */
@@ -328,7 +328,7 @@
ctx->prot = prot;
ret = check_prot(ctx->prot, access_type);
if (ret == 0) {
- qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
" prot=%c%c\n", i, ctx->raddr,
ctx->prot & PAGE_READ ? 'R' : '-',
ctx->prot & PAGE_WRITE ? 'W' : '-');
@@ -403,9 +403,9 @@
/* Check if instruction fetch is allowed, if needed */
if (type != ACCESS_CODE || ctx->nx == 0) {
/* Page address translation */
- qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
- " htab_mask " TARGET_FMT_plx
- " hash " TARGET_FMT_plx "\n",
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx
+ " htab_mask " HWADDR_FMT_plx
+ " hash " HWADDR_FMT_plx "\n",
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
ctx->hash[0] = hash;
ctx->hash[1] = ~hash;
@@ -420,7 +420,7 @@
hwaddr curaddr;
uint32_t a0, a1, a2, a3;
- qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
+ qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx
"\n", ppc_hash32_hpt_base(cpu),
ppc_hash32_hpt_mask(cpu) + 0x80);
for (curaddr = ppc_hash32_hpt_base(cpu);
@@ -432,7 +432,7 @@
a2 = ldl_phys(cs->as, curaddr + 8);
a3 = ldl_phys(cs->as, curaddr + 12);
if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
- qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
+ qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
curaddr, a0, a1, a2, a3);
}
}
@@ -578,14 +578,14 @@
if (ret >= 0) {
ctx->raddr = raddr;
qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
- " => " TARGET_FMT_plx
+ " => " HWADDR_FMT_plx
" %d %d\n", __func__, address, ctx->raddr, ctx->prot,
ret);
return 0;
}
}
qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
- " => " TARGET_FMT_plx
+ " => " HWADDR_FMT_plx
" %d %d\n", __func__, address, raddr, ctx->prot, ret);
return ret;
@@ -666,11 +666,11 @@
if (ret >= 0) {
ctx->raddr = raddr;
qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
- " => " TARGET_FMT_plx " %d %d\n", __func__,
+ " => " HWADDR_FMT_plx " %d %d\n", __func__,
address, ctx->raddr, ctx->prot, ret);
} else {
qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
- " => " TARGET_FMT_plx " %d %d\n", __func__,
+ " => " HWADDR_FMT_plx " %d %d\n", __func__,
address, raddr, ctx->prot, ret);
}
@@ -894,11 +894,11 @@
if (ret >= 0) {
ctx->raddr = raddr;
qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
- " => " TARGET_FMT_plx " %d %d\n", __func__, address,
+ " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
ctx->raddr, ctx->prot, ret);
} else {
qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
- " => " TARGET_FMT_plx " %d %d\n", __func__, address,
+ " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
raddr, ctx->prot, ret);
}
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 2a91f3f..64e3043 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -826,7 +826,7 @@
tlb->prot &= ~PAGE_VALID;
}
tlb->PID = env->spr[SPR_40x_PID]; /* PID */
- qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
" EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__,
(int)entry, tlb->RPN, tlb->EPN, tlb->size,
@@ -864,7 +864,7 @@
if (val & PPC4XX_TLBLO_WR) {
tlb->prot |= PAGE_WRITE;
}
- qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
" EPN " TARGET_FMT_lx
" size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
(int)entry, tlb->RPN, tlb->EPN, tlb->size,
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 8ea3442..9a28816 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1272,7 +1272,7 @@
qemu_log_mask(CPU_LOG_MMU,
"%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
- TARGET_FMT_plx " prot %d\n",
+ HWADDR_FMT_plx " prot %d\n",
__func__, address, ret, pa, prot);
if (ret == TRANSLATE_SUCCESS) {
@@ -1285,7 +1285,7 @@
qemu_log_mask(CPU_LOG_MMU,
"%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
- TARGET_FMT_plx " prot %d\n",
+ HWADDR_FMT_plx " prot %d\n",
__func__, im_address, ret, pa, prot2);
prot &= prot2;
@@ -1295,7 +1295,7 @@
size, access_type, mode);
qemu_log_mask(CPU_LOG_MMU,
- "%s PMP address=" TARGET_FMT_plx " ret %d prot"
+ "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
" %d tlb_size " TARGET_FMT_lu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
@@ -1320,7 +1320,7 @@
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical "
- TARGET_FMT_plx " prot %d\n",
+ HWADDR_FMT_plx " prot %d\n",
__func__, address, ret, pa, prot);
if (ret == TRANSLATE_SUCCESS) {
@@ -1328,7 +1328,7 @@
size, access_type, mode);
qemu_log_mask(CPU_LOG_MMU,
- "%s PMP address=" TARGET_FMT_plx " ret %d prot"
+ "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
" %d tlb_size " TARGET_FMT_lu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c
index 17e63fa..236f93b 100644
--- a/target/riscv/monitor.c
+++ b/target/riscv/monitor.c
@@ -64,7 +64,7 @@
return;
}
- monitor_printf(mon, TARGET_FMT_lx " " TARGET_FMT_plx " " TARGET_FMT_lx
+ monitor_printf(mon, TARGET_FMT_lx " " HWADDR_FMT_plx " " TARGET_FMT_lx
" %c%c%c%c%c%c%c\n",
addr_canonical(va_bits, vaddr),
paddr, size,
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index ec4fae7..a53580d 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -430,12 +430,12 @@
#ifdef DEBUG_UNASSIGNED
if (is_asi) {
- printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ printf("Unassigned mem %s access of %d byte%s to " HWADDR_FMT_plx
" asi 0x%02x from " TARGET_FMT_lx "\n",
is_exec ? "exec" : is_write ? "write" : "read", size,
size == 1 ? "" : "s", addr, is_asi, env->pc);
} else {
- printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ printf("Unassigned mem %s access of %d byte%s to " HWADDR_FMT_plx
" from " TARGET_FMT_lx "\n",
is_exec ? "exec" : is_write ? "write" : "read", size,
size == 1 ? "" : "s", addr, env->pc);
@@ -490,7 +490,7 @@
CPUSPARCState *env = &cpu->env;
#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
+ printf("Unassigned mem access to " HWADDR_FMT_plx " from " TARGET_FMT_lx
"\n", addr, env->pc);
#endif
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 919448a..158ec2a 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -230,7 +230,7 @@
if (likely(error_code == 0)) {
qemu_log_mask(CPU_LOG_MMU,
"Translate at %" VADDR_PRIx " -> "
- TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
+ HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
address, paddr, vaddr);
tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return true;
@@ -356,27 +356,27 @@
hwaddr pa;
uint32_t pde;
- qemu_printf("Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
+ qemu_printf("Root ptr: " HWADDR_FMT_plx ", ctx: %d\n",
(hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
pde = mmu_probe(env, va, 2);
if (pde) {
pa = cpu_get_phys_page_debug(cs, va);
- qemu_printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
+ qemu_printf("VA: " TARGET_FMT_lx ", PA: " HWADDR_FMT_plx
" PDE: " TARGET_FMT_lx "\n", va, pa, pde);
for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
pde = mmu_probe(env, va1, 1);
if (pde) {
pa = cpu_get_phys_page_debug(cs, va1);
qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
- TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
+ HWADDR_FMT_plx " PDE: " TARGET_FMT_lx "\n",
va1, pa, pde);
for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
pde = mmu_probe(env, va2, 0);
if (pde) {
pa = cpu_get_phys_page_debug(cs, va2);
qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
- TARGET_FMT_plx " PTE: "
+ HWADDR_FMT_plx " PTE: "
TARGET_FMT_lx "\n",
va2, pa, pde);
}
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
index 1db3280..114685c 100644
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -79,7 +79,7 @@
address, rw, mmu_idx);
qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical "
- TARGET_FMT_plx " prot %d\n",
+ HWADDR_FMT_plx " prot %d\n",
__func__, (target_ulong)address, ret, physical, prot);
if (ret == TLBRET_MATCH) {
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index ad1816e..330d26b 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1353,32 +1353,6 @@
tcg_out_call_int(s, target);
}
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- tcg_insn_unit i1, i2;
- TCGType rt = TCG_TYPE_I64;
- TCGReg rd = TCG_REG_TMP;
- uint64_t pair;
-
- ptrdiff_t offset = addr - jmp_rx;
-
- if (offset == sextract64(offset, 0, 26)) {
- i1 = I3206_B | ((offset >> 2) & 0x3ffffff);
- i2 = NOP;
- } else {
- offset = (addr >> 12) - (jmp_rx >> 12);
-
- /* patch ADRP */
- i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd;
- /* patch ADDI */
- i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
- }
- pair = (uint64_t)i2 << 32 | i1;
- qatomic_set((uint64_t *)jmp_rw, pair);
- flush_idcache_range(jmp_rx, jmp_rw, 8);
-}
-
static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
{
if (!l->has_value) {
@@ -1887,6 +1861,54 @@
static const tcg_insn_unit *tb_ret_addr;
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_goto_long(s, tcg_code_gen_epilogue);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
+ tcg_out_goto_long(s, tb_ret_addr);
+ }
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Direct branch, or indirect address load, will be patched
+ * by tb_target_set_jmp_target. Assert indirect load offset
+ * in range early, regardless of direct branch distance.
+ */
+ intptr_t i_off = tcg_pcrel_diff(s, (void *)get_jmp_target_addr(s, which));
+ tcg_debug_assert(i_off == sextract64(i_off, 0, 21));
+
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, I3206_B);
+ tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
+ set_jmp_reset_offset(s, which);
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ uintptr_t d_addr = tb->jmp_target_addr[n];
+ ptrdiff_t d_offset = d_addr - jmp_rx;
+ tcg_insn_unit insn;
+
+ /* Either directly branch, or indirect branch load. */
+ if (d_offset == sextract64(d_offset, 0, 28)) {
+ insn = deposit32(I3206_B, 0, 26, d_offset >> 2);
+ } else {
+ uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
+ ptrdiff_t i_offset = i_addr - jmp_rx;
+
+ /* Note that we asserted this in range in tcg_out_goto_tb. */
+ insn = deposit32(I3305_LDR | TCG_REG_TMP, 5, 19, i_offset >> 2);
+ }
+ qatomic_set((uint32_t *)jmp_rw, insn);
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1906,36 +1928,6 @@
#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
switch (opc) {
- case INDEX_op_exit_tb:
- /* Reuse the zeroing that exists for goto_ptr. */
- if (a0 == 0) {
- tcg_out_goto_long(s, tcg_code_gen_epilogue);
- } else {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
- tcg_out_goto_long(s, tb_ret_addr);
- }
- break;
-
- case INDEX_op_goto_tb:
- tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
- /*
- * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
- * write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out32(s, NOP);
- }
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_insn(s, 3207, BR, a0);
break;
@@ -2305,6 +2297,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 413a541..8d24429 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -15,7 +15,7 @@
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 24
-#define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
typedef enum {
TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
@@ -123,7 +123,6 @@
#define TCG_TARGET_HAS_muls2_i64 0
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
-#define TCG_TARGET_HAS_direct_jump 1
#define TCG_TARGET_HAS_v64 1
#define TCG_TARGET_HAS_v128 1
@@ -151,9 +150,6 @@
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
-
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-
#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 9245ea8..6abe941 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -135,6 +135,8 @@
ARITH_BIC = 0xe << 21,
ARITH_MVN = 0xf << 21,
+ INSN_B = 0x0a000000,
+
INSN_CLZ = 0x016f0f10,
INSN_RBIT = 0x06ff0f30,
@@ -546,7 +548,7 @@
static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
- tcg_out32(s, (cond << 28) | 0x0a000000 |
+ tcg_out32(s, (cond << 28) | INSN_B |
(((offset - 8) >> 2) & 0x00ffffff));
}
@@ -1933,6 +1935,62 @@
static void tcg_out_epilogue(TCGContext *s);
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
+{
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
+ tcg_out_epilogue(s);
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ uintptr_t i_addr;
+ intptr_t i_disp;
+
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, INSN_NOP);
+
+ /* When branch is out of range, fall through to indirect. */
+ i_addr = get_jmp_target_addr(s, which);
+ i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
+ tcg_debug_assert(i_disp < 0);
+ if (i_disp >= -0xfff) {
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
+ } else {
+ /*
+ * The TB is close, but outside the 12 bits addressable by
+ * the load. We can extend this to 20 bits with a sub of a
+ * shifted immediate from pc.
+ */
+ int h = -i_disp;
+ int l = h & 0xfff;
+
+ h = encode_imm_nofail(h - l);
+ tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l);
+ }
+ set_jmp_reset_offset(s, which);
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ uintptr_t addr = tb->jmp_target_addr[n];
+ ptrdiff_t offset = addr - (jmp_rx + 8);
+ tcg_insn_unit insn;
+
+ /* Either directly branch, or fall through to indirect branch. */
+ if (offset == sextract64(offset, 0, 26)) {
+ /* B <addr> */
+ insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
+ } else {
+ insn = INSN_NOP;
+ }
+
+ qatomic_set((uint32_t *)jmp_rw, insn);
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1941,33 +1999,6 @@
int c;
switch (opc) {
- case INDEX_op_exit_tb:
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
- tcg_out_epilogue(s);
- break;
- case INDEX_op_goto_tb:
- {
- /* Indirect jump method */
- intptr_t ptr, dif, dil;
- TCGReg base = TCG_REG_PC;
-
- tcg_debug_assert(s->tb_jmp_insn_offset == 0);
- ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
- dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
- dil = sextract32(dif, 0, 12);
- if (dif != dil) {
- /* The TB is close, but outside the 12 bits addressable by
- the load. We can extend this to 20 bits with a sub of a
- shifted immediate from pc. In the vastly unlikely event
- the code requires more than 1MB, we'll use 2 insns and
- be no worse off. */
- base = TCG_REG_R0;
- tcg_out_movi32(s, COND_AL, base, ptr - dil);
- }
- tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
- set_jmp_reset_offset(s, args[0]);
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_b_reg(s, COND_AL, args[0]);
break;
@@ -2256,6 +2287,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index b7843d2..91b8954 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -121,7 +121,6 @@
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
#define TCG_TARGET_HAS_rem_i32 0
-#define TCG_TARGET_HAS_direct_jump 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
#define TCG_TARGET_HAS_v64 use_neon_instructions
@@ -150,10 +149,6 @@
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
-
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-
#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 58bd587..c71c3e6 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -2347,6 +2347,42 @@
#endif
}
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_jmp(s, tcg_code_gen_epilogue);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
+ tcg_out_jmp(s, tb_ret_addr);
+ }
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Jump displacement must be aligned for atomic patching;
+ * see if we need to add extra nops before jump
+ */
+ int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
+ if (gap != 1) {
+ tcg_out_nopn(s, gap - 1);
+ }
+ tcg_out8(s, OPC_JMP_long); /* jmp im */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, 0);
+ set_jmp_reset_offset(s, which);
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ /* patch the branch destination */
+ uintptr_t addr = tb->jmp_target_addr[n];
+ qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
+ /* no need to flush icache explicitly */
+}
+
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -2371,36 +2407,6 @@
const_a2 = const_args[2];
switch (opc) {
- case INDEX_op_exit_tb:
- /* Reuse the zeroing that exists for goto_ptr. */
- if (a0 == 0) {
- tcg_out_jmp(s, tcg_code_gen_epilogue);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
- tcg_out_jmp(s, tb_ret_addr);
- }
- break;
- case INDEX_op_goto_tb:
- if (s->tb_jmp_insn_offset) {
- /* direct jump method */
- int gap;
- /* jump displacement must be aligned for atomic patching;
- * see if we need to add extra nops before jump
- */
- gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
- if (gap != 1) {
- tcg_out_nopn(s, gap - 1);
- }
- tcg_out8(s, OPC_JMP_long); /* jmp im */
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
- tcg_out32(s, 0);
- } else {
- /* indirect jump method */
- tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
- (intptr_t)(s->tb_jmp_target_addr + a0));
- }
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
@@ -2794,6 +2800,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 7edb7f1..5797a55 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -141,7 +141,6 @@
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_direct_jump 1
#if TCG_TARGET_REG_BITS == 64
/* Keep target addresses zero-extended in a register. */
@@ -220,14 +219,6 @@
#define TCG_TARGET_extract_i64_valid(ofs, len) \
(((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- /* patch the branch destination */
- qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
- /* no need to flush icache explicitly */
-}
-
/* This defines the natural memory order supported by this
* architecture before guarantees made by various barrier
* instructions.
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index c9e99e8..3174557 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1039,11 +1039,12 @@
tcg_out32(s, NOP);
}
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
{
tcg_insn_unit i1, i2;
ptrdiff_t upper, lower;
+ uintptr_t addr = tb->jmp_target_addr[n];
ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2;
if (offset == sextreg(offset, 0, 26)) {
@@ -1068,6 +1069,36 @@
static const tcg_insn_unit *tb_ret_addr;
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_call_int(s, tcg_code_gen_epilogue, true);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
+ tcg_out_call_int(s, tb_ret_addr, true);
+ }
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Ensure that patch area is 8-byte aligned so that an
+ * atomic write can be used to patch the target address.
+ */
+ if ((uintptr_t)s->code_ptr & 7) {
+ tcg_out_nop(s);
+ }
+ set_jmp_insn_offset(s, which);
+ /*
+ * actual branch destination will be patched by
+ * tb_target_set_jmp_target later
+ */
+ tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1078,35 +1109,6 @@
int c2 = const_args[2];
switch (opc) {
- case INDEX_op_exit_tb:
- /* Reuse the zeroing that exists for goto_ptr. */
- if (a0 == 0) {
- tcg_out_call_int(s, tcg_code_gen_epilogue, true);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
- tcg_out_call_int(s, tb_ret_addr, true);
- }
- break;
-
- case INDEX_op_goto_tb:
- tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
- /*
- * Ensure that patch area is 8-byte aligned so that an
- * atomic write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out_nop(s);
- }
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
- tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -1500,6 +1502,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index e5f7a1f..1c3e48d 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -128,7 +128,6 @@
#define TCG_TARGET_HAS_clz_i32 1
#define TCG_TARGET_HAS_ctz_i32 1
#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_direct_jump 1
#define TCG_TARGET_HAS_brcond2 0
#define TCG_TARGET_HAS_setcond2 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -171,8 +170,6 @@
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_NEED_LDST_LABELS
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index 292e490..6e000d8 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -1951,6 +1951,37 @@
}
}
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+ TCGReg b0 = TCG_REG_ZERO;
+
+ if (a0 & ~0xffff) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
+ b0 = TCG_REG_V0;
+ }
+ if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)tb_ret_addr);
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
+ }
+ tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* indirect jump method */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
+ tcg_out_nop(s);
+ set_jmp_reset_offset(s, which);
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ /* Always indirect, nothing to do */
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1970,32 +2001,6 @@
c2 = const_args[2];
switch (opc) {
- case INDEX_op_exit_tb:
- {
- TCGReg b0 = TCG_REG_ZERO;
-
- a0 = (intptr_t)a0;
- if (a0 & ~0xffff) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
- b0 = TCG_REG_V0;
- }
- if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
- (uintptr_t)tb_ret_addr);
- tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
- }
- tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
- }
- break;
- case INDEX_op_goto_tb:
- /* indirect jump method */
- tcg_debug_assert(s->tb_jmp_insn_offset == 0);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
- (uintptr_t)(s->tb_jmp_target_addr + a0));
- tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
- tcg_out_nop(s);
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
@@ -2403,6 +2408,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index 15721c3..7bc8e15 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -134,7 +134,6 @@
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_direct_jump 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
@@ -205,10 +204,6 @@
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t)
- QEMU_ERROR("code path is reachable");
-
#define TCG_TARGET_NEED_LDST_LABELS
#endif
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index e062146..8d6899c 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -1854,103 +1854,6 @@
tcg_out32(s, insn);
}
-static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
-{
- if (HOST_BIG_ENDIAN) {
- return (uint64_t)i1 << 32 | i2;
- }
- return (uint64_t)i2 << 32 | i1;
-}
-
-static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
- tcg_insn_unit i0, tcg_insn_unit i1)
-{
-#if TCG_TARGET_REG_BITS == 64
- qatomic_set((uint64_t *)rw, make_pair(i0, i1));
- flush_idcache_range(rx, rw, 8);
-#else
- qemu_build_not_reached();
-#endif
-}
-
-static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
- tcg_insn_unit i0, tcg_insn_unit i1,
- tcg_insn_unit i2, tcg_insn_unit i3)
-{
- uint64_t p[2];
-
- p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
- p[HOST_BIG_ENDIAN] = make_pair(i2, i3);
-
- /*
- * There's no convenient way to get the compiler to allocate a pair
- * of registers at an even index, so copy into r6/r7 and clobber.
- */
- asm("mr %%r6, %1\n\t"
- "mr %%r7, %2\n\t"
- "stq %%r6, %0"
- : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
- flush_idcache_range(rx, rw, 16);
-}
-
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- tcg_insn_unit i0, i1, i2, i3;
- intptr_t tb_diff = addr - tc_ptr;
- intptr_t br_diff = addr - (jmp_rx + 4);
- intptr_t lo, hi;
-
- if (TCG_TARGET_REG_BITS == 32) {
- intptr_t diff = addr - jmp_rx;
- tcg_debug_assert(in_range_b(diff));
- qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
- flush_idcache_range(jmp_rx, jmp_rw, 4);
- return;
- }
-
- /*
- * For 16-bit displacements, we can use a single add + branch.
- * This happens quite often.
- */
- if (tb_diff == (int16_t)tb_diff) {
- i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
- i1 = B | (br_diff & 0x3fffffc);
- ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
- return;
- }
-
- lo = (int16_t)tb_diff;
- hi = (int32_t)(tb_diff - lo);
- assert(tb_diff == hi + lo);
- i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
- i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
-
- /*
- * Without stq from 2.07, we can only update two insns,
- * and those must be the ones that load the target address.
- */
- if (!have_isa_2_07) {
- ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
- return;
- }
-
- /*
- * For 26-bit displacements, we can use a direct branch.
- * Otherwise we still need the indirect branch, which we
- * must restore after a potential direct branch write.
- */
- br_diff -= 4;
- if (in_range_b(br_diff)) {
- i2 = B | (br_diff & 0x3fffffc);
- i3 = NOP;
- } else {
- i2 = MTSPR | RS(TCG_REG_TB) | CTR;
- i3 = BCCTR | BO_ALWAYS;
- }
- ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
-}
-
static void tcg_out_call_int(TCGContext *s, int lk,
const tcg_insn_unit *target)
{
@@ -2616,6 +2519,64 @@
tcg_out32(s, BCLR | BO_ALWAYS);
}
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
+{
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
+ tcg_out_b(s, 0, tcg_code_gen_epilogue);
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ uintptr_t ptr = get_jmp_target_addr(s, which);
+
+ if (USE_REG_TB) {
+ ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
+ tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
+
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
+
+ /* When branch is out of range, fall through to indirect. */
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+
+ /* For the unlinked case, need to reset TCG_REG_TB. */
+ set_jmp_reset_offset(s, which);
+ tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
+ -tcg_current_code_size(s));
+ } else {
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, NOP);
+
+ /* When branch is out of range, fall through to indirect. */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
+ tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+ set_jmp_reset_offset(s, which);
+ }
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ uintptr_t addr = tb->jmp_target_addr[n];
+ intptr_t diff = addr - jmp_rx;
+ tcg_insn_unit insn;
+
+ if (in_range_b(diff)) {
+ insn = B | (diff & 0x3fffffc);
+ } else if (USE_REG_TB) {
+ insn = MTSPR | RS(TCG_REG_TB) | CTR;
+ } else {
+ insn = NOP;
+ }
+
+ qatomic_set((uint32_t *)jmp_rw, insn);
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -2623,42 +2584,6 @@
TCGArg a0, a1, a2;
switch (opc) {
- case INDEX_op_exit_tb:
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
- tcg_out_b(s, 0, tcg_code_gen_epilogue);
- break;
- case INDEX_op_goto_tb:
- if (s->tb_jmp_insn_offset) {
- /* Direct jump. */
- if (TCG_TARGET_REG_BITS == 64) {
- /* Ensure the next insns are 8 or 16-byte aligned. */
- while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
- tcg_out32(s, NOP);
- }
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
- tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- } else {
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
- tcg_out32(s, B);
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
- break;
- }
- } else {
- /* Indirect jump. */
- tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
- (intptr_t)(s->tb_jmp_insn_offset + args[0]));
- }
- tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS);
- set_jmp_reset_offset(s, args[0]);
- if (USE_REG_TB) {
- /* For the unlinked case, need to reset TCG_REG_TB. */
- tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
- -tcg_current_code_size(s));
- }
- break;
case INDEX_op_goto_ptr:
tcg_out32(s, MTSPR | RS(args[0]) | CTR);
if (USE_REG_TB) {
@@ -3185,6 +3110,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index b5cd225..af81c5a 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -27,11 +27,10 @@
#ifdef _ARCH_PPC64
# define TCG_TARGET_REG_BITS 64
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#else
# define TCG_TARGET_REG_BITS 32
-# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
#endif
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#define TCG_TARGET_NB_REGS 64
#define TCG_TARGET_INSN_UNIT_SIZE 4
@@ -108,7 +107,6 @@
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
-#define TCG_TARGET_HAS_direct_jump 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
#if TCG_TARGET_REG_BITS == 64
@@ -180,11 +178,8 @@
#define TCG_TARGET_HAS_bitsel_vec have_vsx
#define TCG_TARGET_HAS_cmpsel_vec 0
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-
#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index f741e05..fc0edd8 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -267,6 +267,7 @@
#endif
OPC_FENCE = 0x0000000f,
+ OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
} RISCVInsn;
/*
@@ -403,7 +404,7 @@
{
int i;
for (i = 0; i < count; ++i) {
- p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
+ p[i] = OPC_NOP;
}
}
@@ -1289,6 +1290,47 @@
static const tcg_insn_unit *tb_ret_addr;
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_call_int(s, tcg_code_gen_epilogue, true);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
+ tcg_out_call_int(s, tb_ret_addr, true);
+ }
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, OPC_JAL);
+
+ /* When branch is out of range, fall through to indirect. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
+ tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ uintptr_t addr = tb->jmp_target_addr[n];
+ ptrdiff_t offset = addr - jmp_rx;
+ tcg_insn_unit insn;
+
+ /* Either directly branch, or fall through to indirect branch. */
+ if (offset == sextreg(offset, 0, 20)) {
+ insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
+ } else {
+ insn = OPC_NOP;
+ }
+ qatomic_set((uint32_t *)jmp_rw, insn);
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1299,25 +1341,6 @@
int c2 = const_args[2];
switch (opc) {
- case INDEX_op_exit_tb:
- /* Reuse the zeroing that exists for goto_ptr. */
- if (a0 == 0) {
- tcg_out_call_int(s, tcg_code_gen_epilogue, true);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
- tcg_out_call_int(s, tb_ret_addr, true);
- }
- break;
-
- case INDEX_op_goto_tb:
- assert(s->tb_jmp_insn_offset == 0);
- /* indirect jump method */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
- (uintptr_t)(s->tb_jmp_target_addr + a0));
- tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
break;
@@ -1617,6 +1640,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index 232537c..1337bc1 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -121,7 +121,6 @@
#define TCG_TARGET_HAS_clz_i32 0
#define TCG_TARGET_HAS_ctz_i32 0
#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_direct_jump 0
#define TCG_TARGET_HAS_brcond2 1
#define TCG_TARGET_HAS_setcond2 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -165,9 +164,6 @@
#define TCG_TARGET_HAS_mulsh_i64 1
#endif
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_NEED_LDST_LABELS
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 2b38fd9..218318f 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -1944,6 +1944,45 @@
#endif
}
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
+ tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
+ }
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Branch displacement must be aligned for atomic patching;
+ * see if we need to add extra nop before branch
+ */
+ if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
+ tcg_out16(s, NOP);
+ }
+ tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
+ set_jmp_insn_offset(s, which);
+ s->code_ptr += 2;
+ set_jmp_reset_offset(s, which);
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ if (!HAVE_FACILITY(GEN_INST_EXT)) {
+ return;
+ }
+ /* patch the branch destination */
+ uintptr_t addr = tb->jmp_target_addr[n];
+ intptr_t disp = addr - (jmp_rx - 2);
+ qatomic_set((int32_t *)jmp_rw, disp / 2);
+ /* no need to flush icache explicitly */
+}
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
case glue(glue(INDEX_op_,x),_i64)
@@ -1956,32 +1995,6 @@
TCGArg a0, a1, a2;
switch (opc) {
- case INDEX_op_exit_tb:
- /* Reuse the zeroing that exists for goto_ptr. */
- a0 = args[0];
- if (a0 == 0) {
- tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
- tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
- }
- break;
-
- case INDEX_op_goto_tb:
- a0 = args[0];
- /*
- * branch displacement must be aligned for atomic patching;
- * see if we need to add extra nop before branch
- */
- if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
- tcg_out16(s, NOP);
- }
- tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
- s->code_ptr += 2;
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
a0 = args[0];
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
@@ -2619,6 +2632,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index 68dcbc6..e597e47 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -105,7 +105,6 @@
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
-#define TCG_TARGET_HAS_direct_jump 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
#define TCG_TARGET_HAS_div2_i64 1
@@ -174,16 +173,6 @@
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
-
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- /* patch the branch destination */
- intptr_t disp = addr - (jmp_rx - 2);
- qatomic_set((int32_t *)jmp_rw, disp / 2);
- /* no need to flush icache explicitly */
-}
-
#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index eb913f3..dd406bc 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -92,7 +92,6 @@
#endif
#define TCG_REG_TB TCG_REG_I1
-#define USE_REG_TB (sizeof(void *) > 4)
static const int tcg_target_reg_alloc_order[] = {
TCG_REG_L0,
@@ -439,7 +438,7 @@
}
/* A 13-bit constant relative to the TB. */
- if (!in_prologue && USE_REG_TB) {
+ if (!in_prologue) {
test = tcg_tbrel_diff(s, (void *)arg);
if (check_fit_ptr(test, 13)) {
tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
@@ -468,7 +467,7 @@
}
/* Use the constant pool, if possible. */
- if (!in_prologue && USE_REG_TB) {
+ if (!in_prologue) {
new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
tcg_tbrel_diff(s, NULL));
tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
@@ -537,17 +536,6 @@
return false;
}
-static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
-{
- intptr_t diff = tcg_tbrel_diff(s, arg);
- if (USE_REG_TB && check_fit_ptr(diff, 13)) {
- tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
- return;
- }
- tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
- tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
-}
-
static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
@@ -1026,10 +1014,8 @@
#endif
/* We choose TCG_REG_TB such that no move is required. */
- if (USE_REG_TB) {
- QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
- }
+ QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
/* delay slot */
@@ -1428,6 +1414,78 @@
#endif /* CONFIG_SOFTMMU */
}
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+ if (check_fit_ptr(a0, 13)) {
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
+ tcg_out_movi_imm13(s, TCG_REG_O0, a0);
+ return;
+ } else {
+ intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
+ if (check_fit_ptr(tb_diff, 13)) {
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
+ /* Note that TCG_REG_TB has been unwound to O1. */
+ tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
+ return;
+ }
+ }
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
+ tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
+
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, CALL);
+ /* delay slot */
+ tcg_debug_assert(check_fit_ptr(off, 13));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
+ set_jmp_reset_offset(s, which);
+
+ /*
+ * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
+ * to the beginning of this TB.
+ */
+ off = -tcg_current_code_size(s);
+ if (check_fit_i32(off, 13)) {
+ tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+ }
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ uintptr_t addr = tb->jmp_target_addr[n];
+    intptr_t br_disp = (intptr_t)(addr - jmp_rx);
+ tcg_insn_unit insn;
+
+ br_disp >>= 2;
+ if (check_fit_ptr(br_disp, 19)) {
+ /* ba,pt %icc, addr */
+ insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
+ | BPCC_ICC | BPCC_PT, 0, 19, br_disp);
+ } else if (check_fit_ptr(br_disp, 22)) {
+ /* ba addr */
+ insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A),
+ 0, 22, br_disp);
+ } else {
+ /* The code_gen_buffer can't be larger than 2GB. */
+ tcg_debug_assert(check_fit_ptr(br_disp, 30));
+ /* call addr */
+ insn = deposit32(CALL, 0, 30, br_disp);
+ }
+
+ qatomic_set((uint32_t *)jmp_rw, insn);
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1442,70 +1500,9 @@
c2 = const_args[2];
switch (opc) {
- case INDEX_op_exit_tb:
- if (check_fit_ptr(a0, 13)) {
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
- tcg_out_movi_imm13(s, TCG_REG_O0, a0);
- break;
- } else if (USE_REG_TB) {
- intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
- if (check_fit_ptr(tb_diff, 13)) {
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
- /* Note that TCG_REG_TB has been unwound to O1. */
- tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
- break;
- }
- }
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
- tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
- break;
- case INDEX_op_goto_tb:
- if (s->tb_jmp_insn_offset) {
- /* direct jump method */
- if (USE_REG_TB) {
- /* make sure the patch is 8-byte aligned. */
- if ((intptr_t)s->code_ptr & 4) {
- tcg_out_nop(s);
- }
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
- tcg_out_sethi(s, TCG_REG_T1, 0);
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
- tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
- } else {
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
- tcg_out32(s, CALL);
- tcg_out_nop(s);
- }
- } else {
- /* indirect jump method */
- tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
- tcg_out_nop(s);
- }
- set_jmp_reset_offset(s, a0);
-
- /* For the unlinked path of goto_tb, we need to reset
- TCG_REG_TB to the beginning of this TB. */
- if (USE_REG_TB) {
- c = -tcg_current_code_size(s);
- if (check_fit_i32(c, 13)) {
- tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
- TCG_REG_T1, ARITH_ADD);
- }
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
- if (USE_REG_TB) {
- tcg_out_mov_delay(s, TCG_REG_TB, a0);
- } else {
- tcg_out_nop(s);
- }
+ tcg_out_mov_delay(s, TCG_REG_TB, a0);
break;
case INDEX_op_br:
tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
@@ -1716,6 +1713,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
@@ -1895,45 +1894,3 @@
{
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
-
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- intptr_t tb_disp = addr - tc_ptr;
- intptr_t br_disp = addr - jmp_rx;
- tcg_insn_unit i1, i2;
-
- /* We can reach the entire address space for ILP32.
- For LP64, the code_gen_buffer can't be larger than 2GB. */
- tcg_debug_assert(tb_disp == (int32_t)tb_disp);
- tcg_debug_assert(br_disp == (int32_t)br_disp);
-
- if (!USE_REG_TB) {
- qatomic_set((uint32_t *)jmp_rw,
- deposit32(CALL, 0, 30, br_disp >> 2));
- flush_idcache_range(jmp_rx, jmp_rw, 4);
- return;
- }
-
- /* This does not exercise the range of the branch, but we do
- still need to be able to load the new value of TCG_REG_TB.
- But this does still happen quite often. */
- if (check_fit_ptr(tb_disp, 13)) {
- /* ba,pt %icc, addr */
- i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
- | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
- i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
- | INSN_IMM13(tb_disp));
- } else if (tb_disp >= 0) {
- i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
- i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
- | INSN_IMM13(tb_disp & 0x3ff));
- } else {
- i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
- i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
- | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
- }
-
- qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
- flush_idcache_range(jmp_rx, jmp_rw, 8);
-}
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index 0044ac8..1d6a5c8 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -111,7 +111,6 @@
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_direct_jump 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 1
@@ -154,9 +153,6 @@
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-
#define TCG_TARGET_NEED_POOL_LABELS
#endif
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index cd1cd4e..9fa9f1b 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -86,7 +86,7 @@
void tcg_gen_mb(TCGBar mb_type)
{
- if (tcg_ctx->tb_cflags & CF_PARALLEL) {
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {
tcg_gen_op1(INDEX_op_mb, mb_type);
}
}
@@ -2782,7 +2782,7 @@
void tcg_gen_goto_tb(unsigned idx)
{
/* We tested CF_NO_GOTO_TB in translator_use_goto_tb. */
- tcg_debug_assert(!(tcg_ctx->tb_cflags & CF_NO_GOTO_TB));
+ tcg_debug_assert(!(tcg_ctx->gen_tb->cflags & CF_NO_GOTO_TB));
/* We only support two chained exits. */
tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
#ifdef CONFIG_DEBUG_TCG
@@ -2798,7 +2798,7 @@
{
TCGv_ptr ptr;
- if (tcg_ctx->tb_cflags & CF_NO_GOTO_PTR) {
+ if (tcg_ctx->gen_tb->cflags & CF_NO_GOTO_PTR) {
tcg_gen_exit_tb(NULL, 0);
return;
}
@@ -3165,7 +3165,7 @@
{
memop = tcg_canonicalize_memop(memop, 0, 0);
- if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
+ if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
@@ -3203,7 +3203,7 @@
{
memop = tcg_canonicalize_memop(memop, 1, 0);
- if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
+ if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
@@ -3364,7 +3364,7 @@
void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \
{ \
- if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
@@ -3374,7 +3374,7 @@
void tcg_gen_atomic_##NAME##_i64 \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \
{ \
- if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
diff --git a/tcg/tcg.c b/tcg/tcg.c
index da91779..d502327 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -61,6 +61,7 @@
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg-internal.h"
+#include "accel/tcg/perf.h"
/* Forward declarations for functions declared in tcg-target.c.inc and
used here. */
@@ -103,6 +104,8 @@
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
+static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS]);
@@ -308,7 +311,25 @@
* We will check for overflow at the end of the opcode loop in
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
*/
- s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
+ s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
+}
+
+static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
+{
+ /*
+ * We will check for overflow at the end of the opcode loop in
+ * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
+ */
+ s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
+}
+
+static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
+{
+ /*
+ * Return the read-execute version of the pointer, for the benefit
+ * of any pc-relative addressing mode.
+ */
+ return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}
/* Signal overflow, starting over with fewer guest insns. */
@@ -913,6 +934,7 @@
#endif
prologue_size = tcg_current_code_size(s);
+ perf_report_prologue(s->code_gen_ptr, prologue_size);
#ifndef CONFIG_TCG_INTERPRETER
flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
@@ -4643,16 +4665,10 @@
#endif
/* Initialize goto_tb jump offsets. */
- tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
- tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
- tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
- if (TCG_TARGET_HAS_direct_jump) {
- tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
- tcg_ctx->tb_jmp_target_addr = NULL;
- } else {
- tcg_ctx->tb_jmp_insn_offset = NULL;
- tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
- }
+ tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
tcg_reg_alloc_start(s);
@@ -4716,6 +4732,12 @@
case INDEX_op_call:
tcg_reg_alloc_call(s, op);
break;
+ case INDEX_op_exit_tb:
+ tcg_out_exit_tb(s, op->args[0]);
+ break;
+ case INDEX_op_goto_tb:
+ tcg_out_goto_tb(s, op->args[0]);
+ break;
case INDEX_op_dup2_vec:
if (tcg_reg_alloc_dup2(s, op)) {
break;
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index d36a7eb..bc45200 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -590,6 +590,24 @@
# define CASE_64(x)
#endif
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
+{
+ tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
+}
+
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* indirect jump method. */
+ tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
+ set_jmp_reset_offset(s, which);
+}
+
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ /* Always indirect, nothing to do */
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -597,17 +615,6 @@
TCGOpcode exts;
switch (opc) {
- case INDEX_op_exit_tb:
- tcg_out_op_p(s, opc, (void *)args[0]);
- break;
-
- case INDEX_op_goto_tb:
- tcg_debug_assert(s->tb_jmp_insn_offset == 0);
- /* indirect jump method. */
- tcg_out_op_p(s, opc, s->tb_jmp_target_addr + args[0]);
- set_jmp_reset_offset(s, args[0]);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_op_r(s, opc, args[0]);
break;
@@ -779,6 +786,8 @@
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 94ec541..1414ab4 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -82,7 +82,6 @@
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_direct_jump 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
#if TCG_TARGET_REG_BITS == 64
@@ -176,7 +175,4 @@
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-
#endif /* TCG_TARGET_H */
diff --git a/tests/qtest/e1000e-test.c b/tests/qtest/e1000e-test.c
index 3fc9204..b63a4d3 100644
--- a/tests/qtest/e1000e-test.c
+++ b/tests/qtest/e1000e-test.c
@@ -1,4 +1,4 @@
- /*
+/*
* QTest testcase for e1000e NIC
*
* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
diff --git a/tests/qtest/libqos/e1000e.c b/tests/qtest/libqos/e1000e.c
index 37c794b..28fb305 100644
--- a/tests/qtest/libqos/e1000e.c
+++ b/tests/qtest/libqos/e1000e.c
@@ -51,13 +51,13 @@
void e1000e_tx_ring_push(QE1000E *d, void *descr)
{
QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e);
- uint32_t tail = e1000e_macreg_read(d, E1000E_TDT);
- uint32_t len = e1000e_macreg_read(d, E1000E_TDLEN) / E1000_RING_DESC_LEN;
+ uint32_t tail = e1000e_macreg_read(d, E1000_TDT);
+ uint32_t len = e1000e_macreg_read(d, E1000_TDLEN) / E1000_RING_DESC_LEN;
qtest_memwrite(d_pci->pci_dev.bus->qts,
d->tx_ring + tail * E1000_RING_DESC_LEN,
descr, E1000_RING_DESC_LEN);
- e1000e_macreg_write(d, E1000E_TDT, (tail + 1) % len);
+ e1000e_macreg_write(d, E1000_TDT, (tail + 1) % len);
/* Read WB data for the packet transmitted */
qtest_memread(d_pci->pci_dev.bus->qts,
@@ -68,13 +68,13 @@
void e1000e_rx_ring_push(QE1000E *d, void *descr)
{
QE1000E_PCI *d_pci = container_of(d, QE1000E_PCI, e1000e);
- uint32_t tail = e1000e_macreg_read(d, E1000E_RDT);
- uint32_t len = e1000e_macreg_read(d, E1000E_RDLEN) / E1000_RING_DESC_LEN;
+ uint32_t tail = e1000e_macreg_read(d, E1000_RDT);
+ uint32_t len = e1000e_macreg_read(d, E1000_RDLEN) / E1000_RING_DESC_LEN;
qtest_memwrite(d_pci->pci_dev.bus->qts,
d->rx_ring + tail * E1000_RING_DESC_LEN,
descr, E1000_RING_DESC_LEN);
- e1000e_macreg_write(d, E1000E_RDT, (tail + 1) % len);
+ e1000e_macreg_write(d, E1000_RDT, (tail + 1) % len);
/* Read WB data for the packet received */
qtest_memread(d_pci->pci_dev.bus->qts,
@@ -145,8 +145,8 @@
(uint32_t) d->e1000e.tx_ring);
e1000e_macreg_write(&d->e1000e, E1000_TDBAH,
(uint32_t) (d->e1000e.tx_ring >> 32));
- e1000e_macreg_write(&d->e1000e, E1000E_TDLEN, E1000E_RING_LEN);
- e1000e_macreg_write(&d->e1000e, E1000E_TDT, 0);
+ e1000e_macreg_write(&d->e1000e, E1000_TDLEN, E1000E_RING_LEN);
+ e1000e_macreg_write(&d->e1000e, E1000_TDT, 0);
e1000e_macreg_write(&d->e1000e, E1000_TDH, 0);
/* Enable transmit */
@@ -156,8 +156,8 @@
(uint32_t)d->e1000e.rx_ring);
e1000e_macreg_write(&d->e1000e, E1000_RDBAH,
(uint32_t)(d->e1000e.rx_ring >> 32));
- e1000e_macreg_write(&d->e1000e, E1000E_RDLEN, E1000E_RING_LEN);
- e1000e_macreg_write(&d->e1000e, E1000E_RDT, 0);
+ e1000e_macreg_write(&d->e1000e, E1000_RDLEN, E1000E_RING_LEN);
+ e1000e_macreg_write(&d->e1000e, E1000_RDT, 0);
e1000e_macreg_write(&d->e1000e, E1000_RDH, 0);
/* Enable receive */
@@ -222,8 +222,10 @@
.device_id = E1000_DEV_ID_82574L,
};
- /* FIXME: every test using this node needs to setup a -netdev socket,id=hs0
- * otherwise QEMU is not going to start */
+ /*
+ * FIXME: every test using this node needs to setup a -netdev socket,id=hs0
+ * otherwise QEMU is not going to start
+ */
QOSGraphEdgeOptions opts = {
.extra_device_opts = "netdev=hs0",
};
diff --git a/tests/qtest/libqos/e1000e.h b/tests/qtest/libqos/e1000e.h
index 3bf285a..091ce13 100644
--- a/tests/qtest/libqos/e1000e.h
+++ b/tests/qtest/libqos/e1000e.h
@@ -25,11 +25,6 @@
#define E1000E_RX0_MSG_ID (0)
#define E1000E_TX0_MSG_ID (1)
-#define E1000E_TDLEN (0x3808)
-#define E1000E_TDT (0x3818)
-#define E1000E_RDLEN (0x2808)
-#define E1000E_RDT (0x2818)
-
typedef struct QE1000E QE1000E;
typedef struct QE1000E_PCI QE1000E_PCI;
diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index 5cb38f9..6b2216c 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -49,6 +49,8 @@
# define DEV_NULL "nul"
#endif
+#define WAITPID_TIMEOUT 30
+
typedef void (*QTestSendFn)(QTestState *s, const char *buf);
typedef void (*ExternalSendFn)(void *s, const char *buf);
typedef GString* (*QTestRecvFn)(QTestState *);
@@ -202,8 +204,24 @@
{
#ifndef _WIN32
pid_t pid;
+ uint64_t end;
- pid = RETRY_ON_EINTR(waitpid(s->qemu_pid, &s->wstatus, 0));
+ /* poll for a while until sending SIGKILL */
+ end = g_get_monotonic_time() + WAITPID_TIMEOUT * G_TIME_SPAN_SECOND;
+
+ do {
+ pid = waitpid(s->qemu_pid, &s->wstatus, WNOHANG);
+ if (pid != 0) {
+ break;
+ }
+ g_usleep(100 * 1000);
+ } while (g_get_monotonic_time() < end);
+
+ if (pid == 0) {
+ kill(s->qemu_pid, SIGKILL);
+ pid = RETRY_ON_EINTR(waitpid(s->qemu_pid, &s->wstatus, 0));
+ }
+
assert(pid == s->qemu_pid);
#else
DWORD ret;
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index f0ebb5f..1af63f8 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -207,11 +207,11 @@
(config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? ['tpm-tis-device-test'] : []) + \
(config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? ['tpm-tis-device-swtpm-test'] : []) + \
(config_all_devices.has_key('CONFIG_XLNX_ZYNQMP_ARM') ? ['xlnx-can-test', 'fuzz-xlnx-dp-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test'] : []) + \
['arm-cpu-features',
'numa-test',
'boot-serial-test',
- 'migration-test',
- 'bcm2835-dma-test']
+ 'migration-test']
qtests_s390x = \
(slirp.found() ? ['pxe-test', 'test-netfilter'] : []) + \
diff --git a/tests/qtest/qom-test.c b/tests/qtest/qom-test.c
index 13510bc..d380261 100644
--- a/tests/qtest/qom-test.c
+++ b/tests/qtest/qom-test.c
@@ -14,6 +14,8 @@
#include "qemu/cutils.h"
#include "libqtest.h"
+static bool verbose;
+
static void test_properties(QTestState *qts, const char *path, bool recurse)
{
char *child_path;
@@ -49,7 +51,9 @@
}
} else {
const char *prop = qdict_get_str(tuple, "name");
- g_test_message("-> %s", prop);
+ if (verbose) {
+ g_test_message("-> %s", prop);
+ }
tmp = qtest_qmp(qts,
"{ 'execute': 'qom-get',"
" 'arguments': { 'path': %s, 'property': %s } }",
@@ -103,6 +107,12 @@
int main(int argc, char **argv)
{
+ char *v_env = getenv("V");
+
+ if (v_env && atoi(v_env) >= 2) {
+ verbose = true;
+ }
+
g_test_init(&argc, &argv, NULL);
qtest_cb_for_every_machine(add_machine_test_case, g_test_quick());
diff --git a/tests/qtest/tpm-emu.c b/tests/qtest/tpm-emu.c
index 2994d1c..73e0000 100644
--- a/tests/qtest/tpm-emu.c
+++ b/tests/qtest/tpm-emu.c
@@ -36,11 +36,18 @@
g_mutex_unlock(&s->data_mutex);
}
+static void tpm_emu_close_ioc(void *ioc)
+{
+ qio_channel_close(ioc, NULL);
+}
+
static void *tpm_emu_tpm_thread(void *data)
{
TPMTestState *s = data;
QIOChannel *ioc = s->tpm_ioc;
+ qtest_add_abrt_handler(tpm_emu_close_ioc, ioc);
+
s->tpm_msg = g_new(struct tpm_hdr, 1);
while (true) {
int minhlen = sizeof(s->tpm_msg->tag) + sizeof(s->tpm_msg->len);
@@ -77,6 +84,7 @@
&error_abort);
}
+ qtest_remove_abrt_handler(ioc);
g_free(s->tpm_msg);
s->tpm_msg = NULL;
object_unref(OBJECT(s->tpm_ioc));
@@ -99,6 +107,7 @@
qio_channel_wait(QIO_CHANNEL(lioc), G_IO_IN);
ioc = QIO_CHANNEL(qio_channel_socket_accept(lioc, &error_abort));
g_assert(ioc);
+ qtest_add_abrt_handler(tpm_emu_close_ioc, ioc);
{
uint32_t cmd = 0;
@@ -190,6 +199,7 @@
}
}
+ qtest_remove_abrt_handler(ioc);
object_unref(OBJECT(ioc));
object_unref(OBJECT(lioc));
return NULL;
diff --git a/tests/vm/haiku.x86_64 b/tests/vm/haiku.x86_64
index 29668bc..71cf75a 100755
--- a/tests/vm/haiku.x86_64
+++ b/tests/vm/haiku.x86_64
@@ -48,8 +48,8 @@
name = "haiku"
arch = "x86_64"
- link = "https://app.vagrantup.com/haiku-os/boxes/r1beta3-x86_64/versions/20220216/providers/libvirt.box"
- csum = "e67d4aacbcc687013d5cc91990ddd86cc5d70a5d28432ae2691944f8ce5d5041"
+ link = "https://app.vagrantup.com/haiku-os/boxes/r1beta4-x86_64/versions/20230114/providers/libvirt.box"
+ csum = "6e72a2a470e03dbc3c5e808664e057bb4022b390dca88e4c7da6188f26f6a3c9"
poweroff = "shutdown"
@@ -80,13 +80,12 @@
"ninja",
]
- # https://dev.haiku-os.org/ticket/16512 virtio disk1 shows up as 0 (reversed order)
BUILD_SCRIPT = """
set -e;
rm -rf /tmp/qemu-test.*
cd $(mktemp -d /tmp/qemu-test.XXXXXX);
mkdir src build; cd src;
- tar -xf /dev/disk/virtual/virtio_block/0/raw;
+ tar -xf /dev/disk/virtual/virtio_block/1/raw;
mkdir -p /usr/bin
ln -s /boot/system/bin/env /usr/bin/env
cd ../build
diff --git a/util/bufferiszero.c b/util/bufferiszero.c
index ec3cd4c..1790ded 100644
--- a/util/bufferiszero.c
+++ b/util/bufferiszero.c
@@ -64,18 +64,11 @@
}
#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
-/* Do not use push_options pragmas unnecessarily, because clang
- * does not support them.
- */
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-#pragma GCC push_options
-#pragma GCC target("sse2")
-#endif
-#include <emmintrin.h>
+#include <immintrin.h>
/* Note that each of these vectorized functions require len >= 64. */
-static bool
+static bool __attribute__((target("sse2")))
buffer_zero_sse2(const void *buf, size_t len)
{
__m128i t = _mm_loadu_si128(buf);
@@ -104,20 +97,9 @@
return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
}
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-#pragma GCC pop_options
-#endif
#ifdef CONFIG_AVX2_OPT
-/* Note that due to restrictions/bugs wrt __builtin functions in gcc <= 4.8,
- * the includes have to be within the corresponding push_options region, and
- * therefore the regions themselves have to be ordered with increasing ISA.
- */
-#pragma GCC push_options
-#pragma GCC target("sse4")
-#include <smmintrin.h>
-
-static bool
+static bool __attribute__((target("sse4")))
buffer_zero_sse4(const void *buf, size_t len)
{
__m128i t = _mm_loadu_si128(buf);
@@ -145,12 +127,7 @@
return _mm_testz_si128(t, t);
}
-#pragma GCC pop_options
-#pragma GCC push_options
-#pragma GCC target("avx2")
-#include <immintrin.h>
-
-static bool
+static bool __attribute__((target("avx2")))
buffer_zero_avx2(const void *buf, size_t len)
{
/* Begin with an unaligned head of 32 bytes. */
@@ -176,15 +153,10 @@
return _mm256_testz_si256(t, t);
}
-#pragma GCC pop_options
#endif /* CONFIG_AVX2_OPT */
#ifdef CONFIG_AVX512F_OPT
-#pragma GCC push_options
-#pragma GCC target("avx512f")
-#include <immintrin.h>
-
-static bool
+static bool __attribute__((target("avx512f")))
buffer_zero_avx512(const void *buf, size_t len)
{
/* Begin with an unaligned head of 64 bytes. */
@@ -210,8 +182,7 @@
return !_mm512_test_epi64_mask(t, t);
}
-#pragma GCC pop_options
-#endif
+#endif /* CONFIG_AVX512F_OPT */
/* Note that for test_buffer_is_zero_next_accel, the most preferred