Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
virtio,pci: features, cleanups
vdpa:
      shadow vq vlan support
      net migration with cvq
cxl:
      support emulating 4 HDM decoders
      serial number extended capability
virtio:
      shared dma-buf

Fixes, cleanups all over the place.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (53 commits)
libvhost-user: handle shared_object msg
vhost-user: add shared_object msg
hw/display: introduce virtio-dmabuf
util/uuid: add a hash function
virtio: remove unused next argument from virtqueue_split_read_next_desc()
virtio: remove unnecessary thread fence while reading next descriptor
virtio: use shadow_avail_idx while checking number of heads
libvhost-user.c: add assertion to vu_message_read_default
pcie_sriov: unregister_vfs(): fix error path
hw/i386/pc: improve physical address space bound check for 32-bit x86 systems
amd_iommu: Fix APIC address check
vdpa net: follow VirtIO initialization properly at cvq isolation probing
vdpa net: stop probing if cannot set features
vdpa net: fix error message setting virtio status
hw/pci-bridge/cxl-upstream: Add serial number extended capability support
hw/cxl: Support 4 HDM decoders at all levels of topology
hw/cxl: Fix and use same calculation for HDM decoder block size everywhere
hw/cxl: Add utility functions decoder interleave ways and target count.
hw/cxl: Push cxl_decoder_count_enc() and cxl_decode_ig() into .c
vdpa net: zero vhost_vdpa iova_tree pointer at cleanup
...
Conflicts:
    hw/core/machine.c

Context conflict with commit 314e0a84cd5d ("hw/core: remove needless
includes"), which removed an adjacent #include.
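For anyone redoing this merge locally, the sketch below is a hypothetical
illustration of such a context conflict (the file content and header names are
invented, not the real hw/core/machine.c hunk): one side removed a neighbouring
#include, the other side added a new line next to it, so git cannot auto-merge
even though the two changes do not overlap. The resolution keeps both intents:
accept the removal from 314e0a84cd5d and keep the line added by this tag.

<<<<<<< HEAD
#include "hw/example/kept.h"          /* 314e0a84cd5d dropped the needless include above this line */
=======
#include "hw/example/needless.h"      /* removed on master by 314e0a84cd5d */
#include "hw/example/kept.h"
#include "hw/example/new-feature.h"   /* added by this pull request (hypothetical) */
>>>>>>> tag 'for_upstream'

Resolved as:

#include "hw/example/kept.h"
#include "hw/example/new-feature.h"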
diff --git a/MAINTAINERS b/MAINTAINERS
index 5e27ce3..ea91f9e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -139,8 +139,9 @@
S: Maintained
F: softmmu/cpus.c
F: softmmu/watchpoint.c
-F: cpus-common.c
-F: page-vary.c
+F: cpu-common.c
+F: cpu-target.c
+F: page-vary-target.c
F: page-vary-common.c
F: accel/tcg/
F: accel/stubs/tcg-stub.c
@@ -1766,7 +1767,6 @@
R: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Yanan Wang <wangyanan55@huawei.com>
S: Supported
-F: cpu.c
F: hw/core/cpu.c
F: hw/core/machine-qmp-cmds.c
F: hw/core/machine.c
@@ -2913,7 +2913,6 @@
F: softmmu/cpus.c
F: softmmu/cpu-throttle.c
F: softmmu/cpu-timers.c
-F: softmmu/icount.c
F: softmmu/runstate*
F: qapi/run-state.json
@@ -3177,6 +3176,7 @@
Tracing
M: Stefan Hajnoczi <stefanha@redhat.com>
+R: Mads Ynddal <mads@ynddal.dk>
S: Maintained
F: trace/
F: trace-events
@@ -3189,6 +3189,11 @@
F: docs/devel/tracing.rst
T: git https://github.com/stefanha/qemu.git tracing
+Simpletrace
+M: Mads Ynddal <mads@ynddal.dk>
+S: Maintained
+F: scripts/simpletrace.py
+
TPM
M: Stefan Berger <stefanb@linux.ibm.com>
S: Maintained
@@ -3208,7 +3213,8 @@
Migration
M: Juan Quintela <quintela@redhat.com>
-R: Peter Xu <peterx@redhat.com>
+M: Peter Xu <peterx@redhat.com>
+M: Fabiano Rosas <farosas@suse.de>
R: Leonardo Bras <leobras@redhat.com>
S: Maintained
F: hw/core/vmstate-if.c
@@ -3223,6 +3229,15 @@
F: qapi/migration.json
F: tests/migration/
F: util/userfaultfd.c
+X: migration/rdma*
+
+RDMA Migration
+M: Juan Quintela <quintela@redhat.com>
+R: Li Zhijian <lizhijian@fujitsu.com>
+R: Peter Xu <peterx@redhat.com>
+R: Leonardo Bras <leobras@redhat.com>
+S: Odd Fixes
+F: migration/rdma*
Migration dirty limit and dirty page rate
M: Hyman Huang <yong.huang@smartx.com>
diff --git a/accel/accel-common.c b/accel/accel-target.c
similarity index 85%
rename from accel/accel-common.c
rename to accel/accel-target.c
index df72cc9..11d74b4 100644
--- a/accel/accel-common.c
+++ b/accel/accel-target.c
@@ -119,16 +119,37 @@
}
}
-bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
- if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
- return cc->accel_cpu->cpu_realizefn(cpu, errp);
+ /* target specific realization */
+ if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
+ && !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
+ return false;
}
+
+ /* generic realization */
+ if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
+ return false;
+ }
+
return true;
}
+void accel_cpu_common_unrealize(CPUState *cpu)
+{
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+ /* generic unrealization */
+ if (acc->cpu_common_unrealize) {
+ acc->cpu_common_unrealize(cpu);
+ }
+}
+
int accel_supported_gdbstub_sstep_flags(void)
{
AccelState *accel = current_accel();
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index d6a1b8d..b75c919 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -27,7 +27,7 @@
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
current_cpu = cpu;
#ifndef _WIN32
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 3c94c79..abe7adf 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -428,7 +428,7 @@
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
current_cpu = cpu;
hvf_init_vcpu(cpu);
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 457eafa..6195150 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -36,7 +36,7 @@
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
current_cpu = cpu;
r = kvm_init_vcpu(cpu, &error_fatal);
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index ff1578b..72e1d11 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -2851,7 +2851,13 @@
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
if (!cpu->vcpu_dirty) {
- kvm_arch_get_registers(cpu);
+ int ret = kvm_arch_get_registers(cpu);
+ if (ret) {
+ error_report("Failed to get registers: %s", strerror(-ret));
+ cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
+ vm_stop(RUN_STATE_INTERNAL_ERROR);
+ }
+
cpu->vcpu_dirty = true;
}
}
@@ -2865,7 +2871,13 @@
static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
- kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
+ int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
+ if (ret) {
+ error_report("Failed to put registers after reset: %s", strerror(-ret));
+ cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
+ vm_stop(RUN_STATE_INTERNAL_ERROR);
+ }
+
cpu->vcpu_dirty = false;
}
@@ -2876,7 +2888,12 @@
static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
- kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
+ int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
+ if (ret) {
+ error_report("Failed to put registers after init: %s", strerror(-ret));
+ exit(1);
+ }
+
cpu->vcpu_dirty = false;
}
@@ -2969,7 +2986,14 @@
MemTxAttrs attrs;
if (cpu->vcpu_dirty) {
- kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
+ ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
+ if (ret) {
+ error_report("Failed to put registers after init: %s",
+ strerror(-ret));
+ ret = -1;
+ break;
+ }
+
cpu->vcpu_dirty = false;
}
diff --git a/accel/meson.build b/accel/meson.build
index 638a9a0..fda3157 100644
--- a/accel/meson.build
+++ b/accel/meson.build
@@ -1,5 +1,5 @@
-specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
-system_ss.add(files('accel-softmmu.c'))
+specific_ss.add(files('accel-target.c'))
+system_ss.add(files('accel-softmmu.c', 'accel-blocker.c'))
user_ss.add(files('accel-user.c'))
subdir('tcg')
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 84c08b1..1dc2151 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -73,7 +73,8 @@
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@@ -90,7 +91,8 @@
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
DATA_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, val);
@@ -104,7 +106,7 @@
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -135,7 +137,7 @@
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@@ -176,7 +178,8 @@
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@@ -193,7 +196,8 @@
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
ABI_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -207,7 +211,7 @@
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -235,7 +239,7 @@
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index 7e35d7f..bc9b1a2 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -20,9 +20,8 @@
#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
-#include "exec/exec-all.h"
#include "qemu/plugin.h"
-#include "internal.h"
+#include "internal-common.h"
bool tcg_allowed;
@@ -36,7 +35,7 @@
void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
/* Undo any setting in generated code. */
qemu_plugin_disable_mem_helpers(cpu);
siglongjmp(cpu->jmp_env, 1);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index e2c494e..1a5bc90 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -42,7 +42,8 @@
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
/* -icount align implementation. */
@@ -73,7 +74,7 @@
return;
}
- cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+ cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
sc->last_cpu_icount = cpu_icount;
@@ -124,7 +125,7 @@
sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
sc->last_cpu_icount
- = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+ = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
if (sc->diff_clk < max_delay) {
max_delay = sc->diff_clk;
}
@@ -222,7 +223,7 @@
struct tb_desc desc;
uint32_t h;
- desc.env = cpu->env_ptr;
+ desc.env = cpu_env(cpu);
desc.cs_base = cs_base;
desc.flags = flags;
desc.cflags = cflags;
@@ -444,7 +445,7 @@
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
uintptr_t ret;
TranslationBlock *last_tb;
const void *tb_ptr = itb->tc.ptr;
@@ -455,7 +456,7 @@
qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
qemu_plugin_disable_mem_helpers(cpu);
/*
* TODO: Delay swapping back to the read-write region of the TB
@@ -565,7 +566,7 @@
void cpu_exec_step_atomic(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb;
vaddr pc;
uint64_t cs_base;
@@ -717,10 +718,10 @@
if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
if (replay_has_exception()
- && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
+ && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
/* Execute just one insn to trigger exception pending in the log */
cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
- | CF_NOIRQ | 1;
+ | CF_LAST_IO | CF_NOIRQ | 1;
}
#endif
return false;
@@ -807,7 +808,7 @@
* Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
- qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+ qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request;
@@ -898,7 +899,7 @@
if (unlikely(qatomic_read(&cpu->exit_request))
|| (icount_enabled()
&& (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
- && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
+ && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
qatomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT;
@@ -923,7 +924,7 @@
}
*last_tb = NULL;
- insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
+ insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
if (insns_left < 0) {
/* Something asked us to stop executing chained TBs; just
* continue round the main loop. Whatever requested the exit
@@ -942,7 +943,7 @@
icount_update(cpu);
/* Refill decrementer and continue execution. */
insns_left = MIN(0xffff, cpu->icount_budget);
- cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+ cpu->neg.icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
/*
@@ -976,7 +977,7 @@
uint64_t cs_base;
uint32_t flags, cflags;
- cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+ cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
/*
* When requested, use an exact setting for cflags for the next
@@ -1088,7 +1089,7 @@
return ret;
}
-void tcg_exec_realizefn(CPUState *cpu, Error **errp)
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
static bool tcg_target_initialized;
CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -1104,6 +1105,8 @@
tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
/* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
+
+ return true;
}
/* undo the initializations in reverse order */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 3270f65..b8c5e34 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -35,7 +35,8 @@
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
@@ -240,11 +241,11 @@
memset(desc->vtable, -1, sizeof(desc->vtable));
}
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
int64_t now)
{
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
- CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+ CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+ CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
tlb_mmu_resize_locked(desc, fast, now);
tlb_mmu_flush_locked(desc, fast);
@@ -262,41 +263,39 @@
tlb_mmu_flush_locked(desc, fast);
}
-static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
+static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
{
- env_tlb(env)->d[mmu_idx].n_used_entries++;
+ cpu->neg.tlb.d[mmu_idx].n_used_entries++;
}
-static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
+static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
{
- env_tlb(env)->d[mmu_idx].n_used_entries--;
+ cpu->neg.tlb.d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
int64_t now = get_clock_realtime();
int i;
- qemu_spin_init(&env_tlb(env)->c.lock);
+ qemu_spin_init(&cpu->neg.tlb.c.lock);
/* All tlbs are initialized flushed. */
- env_tlb(env)->c.dirty = 0;
+ cpu->neg.tlb.c.dirty = 0;
for (i = 0; i < NB_MMU_MODES; i++) {
- tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+ tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
}
}
void tlb_destroy(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
int i;
- qemu_spin_destroy(&env_tlb(env)->c.lock);
+ qemu_spin_destroy(&cpu->neg.tlb.c.lock);
for (i = 0; i < NB_MMU_MODES; i++) {
- CPUTLBDesc *desc = &env_tlb(env)->d[i];
- CPUTLBDescFast *fast = &env_tlb(env)->f[i];
+ CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
+ CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
g_free(fast->table);
g_free(desc->fulltlb);
@@ -328,11 +327,9 @@
size_t full = 0, part = 0, elide = 0;
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- full += qatomic_read(&env_tlb(env)->c.full_flush_count);
- part += qatomic_read(&env_tlb(env)->c.part_flush_count);
- elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
+ full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
+ part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
+ elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
}
*pfull = full;
*ppart = part;
@@ -341,7 +338,6 @@
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
- CPUArchState *env = cpu->env_ptr;
uint16_t asked = data.host_int;
uint16_t all_dirty, work, to_clean;
int64_t now = get_clock_realtime();
@@ -350,32 +346,32 @@
tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
- all_dirty = env_tlb(env)->c.dirty;
+ all_dirty = cpu->neg.tlb.c.dirty;
to_clean = asked & all_dirty;
all_dirty &= ~to_clean;
- env_tlb(env)->c.dirty = all_dirty;
+ cpu->neg.tlb.c.dirty = all_dirty;
for (work = to_clean; work != 0; work &= work - 1) {
int mmu_idx = ctz32(work);
- tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
+ tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
tcg_flush_jmp_cache(cpu);
if (to_clean == ALL_MMUIDX_BITS) {
- qatomic_set(&env_tlb(env)->c.full_flush_count,
- env_tlb(env)->c.full_flush_count + 1);
+ qatomic_set(&cpu->neg.tlb.c.full_flush_count,
+ cpu->neg.tlb.c.full_flush_count + 1);
} else {
- qatomic_set(&env_tlb(env)->c.part_flush_count,
- env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
+ qatomic_set(&cpu->neg.tlb.c.part_flush_count,
+ cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
if (to_clean != asked) {
- qatomic_set(&env_tlb(env)->c.elide_flush_count,
- env_tlb(env)->c.elide_flush_count +
- ctpop16(asked & ~to_clean));
+ qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
+ cpu->neg.tlb.c.elide_flush_count +
+ ctpop16(asked & ~to_clean));
}
}
}
@@ -470,43 +466,43 @@
}
/* Called with tlb_c.lock held */
-static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
+static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
vaddr page,
vaddr mask)
{
- CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
+ CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
int k;
- assert_cpu_is_self(env_cpu(env));
+ assert_cpu_is_self(cpu);
for (k = 0; k < CPU_VTLB_SIZE; k++) {
if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
- tlb_n_used_entries_dec(env, mmu_idx);
+ tlb_n_used_entries_dec(cpu, mmu_idx);
}
}
}
-static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
+static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
vaddr page)
{
- tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
+ tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
}
-static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
+static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
{
- vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
- vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;
+ vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
+ vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
/* Check if we need to flush due to large pages. */
if ((page & lp_mask) == lp_addr) {
tlb_debug("forcing full flush midx %d (%016"
VADDR_PRIx "/%016" VADDR_PRIx ")\n",
midx, lp_addr, lp_mask);
- tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
} else {
- if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
- tlb_n_used_entries_dec(env, midx);
+ if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
+ tlb_n_used_entries_dec(cpu, midx);
}
- tlb_flush_vtlb_page_locked(env, midx, page);
+ tlb_flush_vtlb_page_locked(cpu, midx, page);
}
}
@@ -523,20 +519,19 @@
vaddr addr,
uint16_t idxmap)
{
- CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if ((idxmap >> mmu_idx) & 1) {
- tlb_flush_page_locked(env, mmu_idx, addr);
+ tlb_flush_page_locked(cpu, mmu_idx, addr);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
/*
* Discard jump cache entries for any tb which might potentially
@@ -709,12 +704,12 @@
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
-static void tlb_flush_range_locked(CPUArchState *env, int midx,
+static void tlb_flush_range_locked(CPUState *cpu, int midx,
vaddr addr, vaddr len,
unsigned bits)
{
- CPUTLBDesc *d = &env_tlb(env)->d[midx];
- CPUTLBDescFast *f = &env_tlb(env)->f[midx];
+ CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
+ CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
vaddr mask = MAKE_64BIT_MASK(0, bits);
/*
@@ -731,7 +726,7 @@
tlb_debug("forcing full flush midx %d ("
"%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
midx, addr, mask, len);
- tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
return;
}
@@ -744,18 +739,18 @@
tlb_debug("forcing full flush midx %d ("
"%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
midx, d->large_page_addr, d->large_page_mask);
- tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
return;
}
for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
vaddr page = addr + i;
- CPUTLBEntry *entry = tlb_entry(env, midx, page);
+ CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
if (tlb_flush_entry_mask_locked(entry, page, mask)) {
- tlb_n_used_entries_dec(env, midx);
+ tlb_n_used_entries_dec(cpu, midx);
}
- tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
+ tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
}
}
@@ -769,7 +764,6 @@
static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
TLBFlushRangeData d)
{
- CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
@@ -777,13 +771,13 @@
tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
d.addr, d.bits, d.len, d.idxmap);
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if ((d.idxmap >> mmu_idx) & 1) {
- tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
+ tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
/*
* If the length is larger than the jump cache size, then it will take
@@ -1028,27 +1022,24 @@
*/
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
- CPUArchState *env;
-
int mmu_idx;
- env = cpu->env_ptr;
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
unsigned int i;
- unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
+ unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
for (i = 0; i < n; i++) {
- tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
+ tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
start1, length);
}
for (i = 0; i < CPU_VTLB_SIZE; i++) {
- tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
+ tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
start1, length);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}
/* Called with tlb_c.lock held */
@@ -1064,32 +1055,31 @@
so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, vaddr addr)
{
- CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
addr &= TARGET_PAGE_MASK;
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
+ tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
}
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
int k;
for (k = 0; k < CPU_VTLB_SIZE; k++) {
- tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
+ tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
large pages and trigger a full TLB flush if these are invalidated. */
-static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
+static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
vaddr addr, uint64_t size)
{
- vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
+ vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
vaddr lp_mask = ~(size - 1);
if (lp_addr == (vaddr)-1) {
@@ -1099,13 +1089,13 @@
/* Extend the existing region to include the new page.
This is a compromise between unnecessary flushes and
the cost of maintaining a full variable size TLB. */
- lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
+ lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
while (((lp_addr ^ addr) & lp_mask) != 0) {
lp_mask <<= 1;
}
}
- env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
- env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
+ cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
+ cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
}
static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
@@ -1137,8 +1127,7 @@
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
vaddr addr, CPUTLBEntryFull *full)
{
- CPUArchState *env = cpu->env_ptr;
- CPUTLB *tlb = env_tlb(env);
+ CPUTLB *tlb = &cpu->neg.tlb;
CPUTLBDesc *desc = &tlb->d[mmu_idx];
MemoryRegionSection *section;
unsigned int index, read_flags, write_flags;
@@ -1155,7 +1144,7 @@
sz = TARGET_PAGE_SIZE;
} else {
sz = (hwaddr)1 << full->lg_page_size;
- tlb_add_large_page(env, mmu_idx, addr, sz);
+ tlb_add_large_page(cpu, mmu_idx, addr, sz);
}
addr_page = addr & TARGET_PAGE_MASK;
paddr_page = full->phys_addr & TARGET_PAGE_MASK;
@@ -1222,8 +1211,8 @@
wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
TARGET_PAGE_SIZE);
- index = tlb_index(env, mmu_idx, addr_page);
- te = tlb_entry(env, mmu_idx, addr_page);
+ index = tlb_index(cpu, mmu_idx, addr_page);
+ te = tlb_entry(cpu, mmu_idx, addr_page);
/*
* Hold the TLB lock for the rest of the function. We could acquire/release
@@ -1238,7 +1227,7 @@
tlb->c.dirty |= 1 << mmu_idx;
/* Make sure there's no cached translation for the new page. */
- tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);
+ tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
/*
* Only evict the old entry to the victim tlb if it's for a
@@ -1251,7 +1240,7 @@
/* Evict the old entry into the victim tlb. */
copy_tlb_helper_locked(tv, te);
desc->vfulltlb[vidx] = desc->fulltlb[index];
- tlb_n_used_entries_dec(env, mmu_idx);
+ tlb_n_used_entries_dec(cpu, mmu_idx);
}
/* refill the tlb */
@@ -1296,7 +1285,7 @@
MMU_DATA_STORE, prot & PAGE_WRITE);
copy_tlb_helper_locked(te, &tn);
- tlb_n_used_entries_inc(env, mmu_idx);
+ tlb_n_used_entries_inc(cpu, mmu_idx);
qemu_spin_unlock(&tlb->c.lock);
}
@@ -1351,17 +1340,16 @@
}
static MemoryRegionSection *
-io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
+io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
- CPUState *cpu = env_cpu(env);
MemoryRegionSection *section;
hwaddr mr_offset;
section = iotlb_to_section(cpu, xlat, attrs);
mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
- if (!cpu->can_do_io) {
+ if (!cpu->neg.can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
@@ -1369,49 +1357,44 @@
return section;
}
-static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
+static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
unsigned size, MMUAccessType access_type, int mmu_idx,
MemTxResult response, uintptr_t retaddr)
{
- CPUState *cpu = env_cpu(env);
+ if (!cpu->ignore_memory_transaction_failures
+ && cpu->cc->tcg_ops->do_transaction_failed) {
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
- if (!cpu->ignore_memory_transaction_failures) {
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->tcg_ops->do_transaction_failed) {
- hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
-
- cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
- access_type, mmu_idx,
- full->attrs, response, retaddr);
- }
+ cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+ access_type, mmu_idx,
+ full->attrs, response, retaddr);
}
}
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
-static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
+static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
MMUAccessType access_type, vaddr page)
{
size_t vidx;
- assert_cpu_is_self(env_cpu(env));
+ assert_cpu_is_self(cpu);
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
- CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
+ CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
uint64_t cmp = tlb_read_idx(vtlb, access_type);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
- CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
+ CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
copy_tlb_helper_locked(&tmptlb, tlb);
copy_tlb_helper_locked(tlb, vtlb);
copy_tlb_helper_locked(vtlb, &tmptlb);
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
- CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
+ CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
+ CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
CPUTLBEntryFull tmpf;
tmpf = *f1; *f1 = *f2; *f2 = tmpf;
return true;
@@ -1444,26 +1427,24 @@
}
}
-static int probe_access_internal(CPUArchState *env, vaddr addr,
+static int probe_access_internal(CPUState *cpu, vaddr addr,
int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr, bool check_mem_cbs)
{
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t index = tlb_index(cpu, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
vaddr page_addr = addr & TARGET_PAGE_MASK;
int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
- bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env));
+ bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
CPUTLBEntryFull *full;
if (!tlb_hit_page(tlb_addr, page_addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
- CPUState *cs = env_cpu(env);
-
- if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
- mmu_idx, nonfault, retaddr)) {
+ if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
+ if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
+ mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
*pfull = NULL;
@@ -1471,8 +1452,8 @@
}
/* TLB resize via tlb_fill may have moved the entry. */
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ entry = tlb_entry(cpu, mmu_idx, addr);
/*
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
@@ -1485,7 +1466,7 @@
}
flags &= tlb_addr;
- *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
@@ -1506,8 +1487,9 @@
bool nonfault, void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr)
{
- int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- nonfault, phost, pfull, retaddr, true);
+ int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, nonfault, phost, pfull, retaddr,
+ true);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
@@ -1529,8 +1511,8 @@
phost = phost ? phost : &discard_phost;
pfull = pfull ? pfull : &discard_tlb;
- int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- true, phost, pfull, 0, false);
+ int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, true, phost, pfull, 0, false);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
@@ -1550,8 +1532,9 @@
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
- flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- nonfault, phost, &full, retaddr, true);
+ flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, nonfault, phost, &full, retaddr,
+ true);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
@@ -1571,8 +1554,9 @@
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
- flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- false, &host, &full, retaddr, true);
+ flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, false, &host, &full, retaddr,
+ true);
/* Per the interface, size == 0 merely faults the access. */
if (size == 0) {
@@ -1604,7 +1588,7 @@
void *host;
int flags;
- flags = probe_access_internal(env, addr, 0, access_type,
+ flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
mmu_idx, true, &host, &full, 0, false);
/* No combination of flags are expected by the caller. */
@@ -1627,7 +1611,7 @@
CPUTLBEntryFull *full;
void *p;
- (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
+ (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
cpu_mmu_index(env, true), false,
&p, &full, 0, false);
if (p == NULL) {
@@ -1662,9 +1646,8 @@
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data)
{
- CPUArchState *env = cpu->env_ptr;
- CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
+ uintptr_t index = tlb_index(cpu, mmu_idx, addr);
MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
CPUTLBEntryFull *full;
@@ -1673,7 +1656,7 @@
return false;
}
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
/* We must have an iotlb entry for MMIO */
@@ -1712,7 +1695,7 @@
/**
* mmu_lookup1: translate one page
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @mmu_idx: virtual address context
* @access_type: load/store/code
@@ -1723,12 +1706,12 @@
* tlb_fill will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t index = tlb_index(cpu, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false;
CPUTLBEntryFull *full;
@@ -1736,17 +1719,17 @@
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, access_type,
+ if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
+ tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
maybe_resized = true;
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ entry = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
@@ -1760,7 +1743,7 @@
/**
* mmu_watch_or_dirty
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
@@ -1768,7 +1751,7 @@
* Trigger watchpoints for @data.addr:@data.size;
* record writes to protected clean pages.
*/
-static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
+static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
MMUAccessType access_type, uintptr_t ra)
{
CPUTLBEntryFull *full = data->full;
@@ -1779,13 +1762,13 @@
/* On watchpoint hit, this will longjmp out. */
if (flags & TLB_WATCHPOINT) {
int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
- cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
+ cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
flags &= ~TLB_WATCHPOINT;
}
/* Note that notdirty is only set for writes. */
if (flags & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, size, full, ra);
+ notdirty_write(cpu, addr, size, full, ra);
flags &= ~TLB_NOTDIRTY;
}
data->flags = flags;
@@ -1793,7 +1776,7 @@
/**
* mmu_lookup: translate page(s)
- * @env: cpu context
+ * @cpu: generic cpu state
* @addr: virtual address
* @oi: combined mmu_idx and MemOp
* @ra: return address into tcg generated code, or 0
@@ -1803,7 +1786,7 @@
* Resolve the translation for the page(s) beginning at @addr, for MemOp.size
* bytes. Return true if the lookup crosses a page boundary.
*/
-static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
unsigned a_bits;
@@ -1818,7 +1801,7 @@
/* Handle CPU specific unaligned behaviour */
a_bits = get_alignment_bits(l->memop);
if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
+ cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
l->page[0].addr = addr;
@@ -1828,11 +1811,11 @@
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
if (likely(!crosspage)) {
- mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
+ mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
- mmu_watch_or_dirty(env, &l->page[0], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
}
if (unlikely(flags & TLB_BSWAP)) {
l->memop ^= MO_BSWAP;
@@ -1847,16 +1830,16 @@
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
- mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
- if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
- uintptr_t index = tlb_index(env, l->mmu_idx, addr);
- l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
+ mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+ if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+ uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
+ l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
flags = l->page[0].flags | l->page[1].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
- mmu_watch_or_dirty(env, &l->page[0], type, ra);
- mmu_watch_or_dirty(env, &l->page[1], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
}
/*
@@ -1874,7 +1857,7 @@
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
@@ -1894,7 +1877,7 @@
/* Enforce guest required alignment. */
if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -1907,18 +1890,18 @@
goto stop_the_world;
}
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);
/* Check TLB entry and enforce page permissions. */
tlb_addr = tlb_addr_write(tlbe);
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
+ if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, size,
+ tlb_fill(cpu, addr, size,
MMU_DATA_STORE, mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
@@ -1930,7 +1913,7 @@
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
@@ -1950,10 +1933,10 @@
}
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, size, full, retaddr);
+ notdirty_write(cpu, addr, size, full, retaddr);
}
if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
@@ -1966,7 +1949,7 @@
wp_flags |= BP_MEM_READ;
}
if (wp_flags) {
- cpu_check_watchpoint(env_cpu(env), addr, size,
+ cpu_check_watchpoint(cpu, addr, size,
full->attrs, wp_flags, retaddr);
}
}
@@ -1974,7 +1957,7 @@
return hostaddr;
stop_the_world:
- cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ cpu_loop_exit_atomic(cpu, retaddr);
}
/*
@@ -1996,7 +1979,7 @@
/**
* do_ld_mmio_beN:
- * @env: cpu context
+ * @cpu: generic cpu state
* @full: page parameters
* @ret_be: accumulated data
* @addr: virtual address
@@ -2008,7 +1991,7 @@
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
*/
-static uint64_t int_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t ret_be, vaddr addr, int size,
int mmu_idx, MMUAccessType type, uintptr_t ra,
MemoryRegion *mr, hwaddr mr_offset)
@@ -2027,7 +2010,7 @@
r = memory_region_dispatch_read(mr, mr_offset, &val,
this_mop, full->attrs);
if (unlikely(r != MEMTX_OK)) {
- io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
+ io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
}
if (this_size == 8) {
return val;
@@ -2042,7 +2025,7 @@
return ret_be;
}
-static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t ret_be, vaddr addr, int size,
int mmu_idx, MMUAccessType type, uintptr_t ra)
{
@@ -2055,18 +2038,18 @@
tcg_debug_assert(size > 0 && size <= 8);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
- ret = int_ld_mmio_beN(env, full, ret_be, addr, size, mmu_idx,
+ ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
type, ra, mr, mr_offset);
qemu_mutex_unlock_iothread();
return ret;
}
-static Int128 do_ld16_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t ret_be, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{
@@ -2079,13 +2062,13 @@
tcg_debug_assert(size > 8 && size <= 16);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
- a = int_ld_mmio_beN(env, full, ret_be, addr, size - 8, mmu_idx,
+ a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset);
- b = int_ld_mmio_beN(env, full, ret_be, addr + size - 8, 8, mmu_idx,
+ b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
qemu_mutex_unlock_iothread();
@@ -2186,11 +2169,11 @@
* As do_ld_bytes_beN, but with one atomic load.
* Eight aligned bytes are guaranteed to cover the load.
*/
-static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
+static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 7;
- uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
+ uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
x = cpu_to_be64(x);
x <<= o * 8;
@@ -2206,11 +2189,11 @@
* As do_ld_bytes_beN, but with one atomic load.
* 16 aligned bytes are guaranteed to cover the load.
*/
-static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
+static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 15;
- Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
+ Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
int size = p->size;
if (!HOST_BIG_ENDIAN) {
@@ -2226,7 +2209,7 @@
/*
* Wrapper for the above.
*/
-static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
uint64_t ret_be, int mmu_idx, MMUAccessType type,
MemOp mop, uintptr_t ra)
{
@@ -2234,7 +2217,7 @@
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
+ return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
mmu_idx, type, ra);
}
@@ -2258,7 +2241,7 @@
if (!HAVE_al8_fast && p->size < 4) {
return do_ld_whole_be4(p, ret_be);
} else {
- return do_ld_whole_be8(env, ra, p, ret_be);
+ return do_ld_whole_be8(cpu, ra, p, ret_be);
}
}
/* fall through */
@@ -2276,7 +2259,7 @@
/*
* Wrapper for the above, for 8 < size < 16.
*/
-static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
+static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
{
int size = p->size;
@@ -2284,7 +2267,7 @@
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_ld16_mmio_beN(env, p->full, a, p->addr, size, mmu_idx, ra);
+ return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
}
/*
@@ -2303,7 +2286,7 @@
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
- return do_ld_whole_be16(env, ra, p, a);
+ return do_ld_whole_be16(cpu, ra, p, a);
case MO_ATOM_IFALIGN_PAIR:
/*
@@ -2325,29 +2308,29 @@
return int128_make128(b, a);
}
-static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
+ return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
} else {
return *(uint8_t *)p->haddr;
}
}
-static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint16_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap16(ret);
}
} else {
/* Perform the load host endian, then swap if necessary. */
- ret = load_atom_2(env, ra, p->haddr, memop);
+ ret = load_atom_2(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap16(ret);
}
@@ -2355,19 +2338,19 @@
return ret;
}
-static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint32_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_4(env, ra, p->haddr, memop);
+ ret = load_atom_4(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap32(ret);
}
@@ -2375,19 +2358,19 @@
return ret;
}
-static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint64_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_8(env, ra, p->haddr, memop);
+ ret = load_atom_8(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap64(ret);
}
@@ -2395,27 +2378,20 @@
return ret;
}
-static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage);
- return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
+ return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
}
-tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
- return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
@@ -2424,13 +2400,13 @@
uint8_t a, b;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
- b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);
+ a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
+ b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = a | (b << 8);
@@ -2440,14 +2416,7 @@
return ret;
}
-tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
@@ -2455,27 +2424,20 @@
uint32_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
}
return ret;
}
-tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
@@ -2483,50 +2445,20 @@
uint64_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
}
return ret;
}
-uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-/*
- * Provide signed versions of the load routines as well. We can of course
- * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
- */
-
-tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
-}
-
-static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
+static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
@@ -2536,17 +2468,17 @@
int first;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
- ret = do_ld16_mmio_beN(env, l.page[0].full, 0, addr, 16,
+ ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
l.mmu_idx, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap128(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
+ ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
if (l.memop & MO_BSWAP) {
ret = bswap128(ret);
}
@@ -2558,8 +2490,8 @@
if (first == 8) {
MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
- a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
- b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
if ((mop8 & MO_BSWAP) == MO_LE) {
ret = int128_make128(a, b);
} else {
@@ -2569,15 +2501,15 @@
}
if (first < 8) {
- a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
+ a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
MMU_DATA_LOAD, l.memop, ra);
- ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
+ ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
} else {
- ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
+ ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
b = int128_getlo(ret);
ret = int128_lshift(ret, l.page[1].size * 8);
a = int128_gethi(ret);
- b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
+ b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
MMU_DATA_LOAD, l.memop, ra);
ret = int128_make128(b, a);
}
@@ -2587,88 +2519,13 @@
return ret;
}
-Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
- uint32_t oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- return do_ld16_mmu(env, addr, oi, retaddr);
-}
-
-Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
-{
- return helper_ld16_mmu(env, addr, oi, GETPC());
-}
-
-/*
- * Load helpers for cpu_ldst.h.
- */
-
-static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-}
-
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
-{
- uint8_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
- ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint16_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint32_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint64_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- Int128 ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- ret = do_ld16_mmu(env, addr, oi, ra);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
/*
* Store Helpers
*/
/**
* do_st_mmio_leN:
- * @env: cpu context
+ * @cpu: generic cpu state
* @full: page parameters
* @val_le: data to store
* @addr: virtual address
@@ -2681,7 +2538,7 @@
* The bytes to store are extracted in little-endian order from @val_le;
* return the bytes of @val_le beyond @p->size that have not been stored.
*/
-static uint64_t int_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra,
MemoryRegion *mr, hwaddr mr_offset)
@@ -2699,7 +2556,7 @@
r = memory_region_dispatch_write(mr, mr_offset, val_le,
this_mop, full->attrs);
if (unlikely(r != MEMTX_OK)) {
- io_failed(env, full, addr, this_size, MMU_DATA_STORE,
+ io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
mmu_idx, r, ra);
}
if (this_size == 8) {
@@ -2715,7 +2572,7 @@
return val_le;
}
-static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{
@@ -2728,18 +2585,18 @@
tcg_debug_assert(size > 0 && size <= 8);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
- ret = int_st_mmio_leN(env, full, val_le, addr, size, mmu_idx,
+ ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
ra, mr, mr_offset);
qemu_mutex_unlock_iothread();
return ret;
}
-static uint64_t do_st16_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
Int128 val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{
@@ -2752,13 +2609,13 @@
tcg_debug_assert(size > 8 && size <= 16);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
qemu_mutex_lock_iothread();
- int_st_mmio_leN(env, full, int128_getlo(val_le), addr, 8,
+ int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
mmu_idx, ra, mr, mr_offset);
- ret = int_st_mmio_leN(env, full, int128_gethi(val_le), addr + 8,
+ ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
size - 8, mmu_idx, ra, mr, mr_offset + 8);
qemu_mutex_unlock_iothread();
@@ -2768,7 +2625,7 @@
/*
* Wrapper for the above.
*/
-static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
uint64_t val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
@@ -2776,7 +2633,7 @@
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_st_mmio_leN(env, p->full, val_le, p->addr,
+ return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return val_le >> (p->size * 8);
@@ -2804,7 +2661,7 @@
} else if (HAVE_al8) {
return store_whole_le8(p->haddr, p->size, val_le);
} else {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
}
/* fall through */
@@ -2822,7 +2679,7 @@
/*
* Wrapper for the above, for 8 < size < 16.
*/
-static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
Int128 val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
@@ -2830,7 +2687,7 @@
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_st16_mmio_leN(env, p->full, val_le, p->addr,
+ return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return int128_gethi(val_le) >> ((size - 8) * 8);
@@ -2850,7 +2707,7 @@
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
if (!HAVE_ATOMIC128_RW) {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
return store_whole_le16(p->haddr, p->size, val_le);
@@ -2871,11 +2728,11 @@
}
}
-static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
+static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
int mmu_idx, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
@@ -2883,14 +2740,14 @@
}
}
-static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
+static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap16(val);
}
- do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
@@ -2898,18 +2755,18 @@
if (memop & MO_BSWAP) {
val = bswap16(val);
}
- store_atom_2(env, ra, p->haddr, memop, val);
+ store_atom_2(cpu, ra, p->haddr, memop, val);
}
}
-static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
+static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
@@ -2917,18 +2774,18 @@
if (memop & MO_BSWAP) {
val = bswap32(val);
}
- store_atom_4(env, ra, p->haddr, memop, val);
+ store_atom_4(cpu, ra, p->haddr, memop, val);
}
}
-static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
+static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
@@ -2936,25 +2793,24 @@
if (memop & MO_BSWAP) {
val = bswap64(val);
}
- store_atom_8(env, ra, p->haddr, memop, val);
+ store_atom_8(cpu, ra, p->haddr, memop, val);
}
}
-void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
+static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
- do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
}
-static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
+static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
@@ -2962,9 +2818,9 @@
uint8_t a, b;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
@@ -2973,27 +2829,20 @@
} else {
b = val, a = val >> 8;
}
- do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
- do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
}
-void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- do_st2_mmu(env, addr, val, oi, retaddr);
-}
-
-static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
+static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
@@ -3001,27 +2850,20 @@
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- do_st4_mmu(env, addr, val, oi, retaddr);
-}
-
-static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
+static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
@@ -3029,18 +2871,11 @@
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- do_st8_mmu(env, addr, val, oi, retaddr);
-}
-
-static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
+static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
@@ -3049,13 +2884,13 @@
int first;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap128(val);
}
- do_st16_mmio_leN(env, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
+ do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
} else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
@@ -3063,7 +2898,7 @@
if (l.memop & MO_BSWAP) {
val = bswap128(val);
}
- store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
+ store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
}
return;
}
@@ -3080,8 +2915,8 @@
} else {
a = int128_getlo(val), b = int128_gethi(val);
}
- do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
- do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
+ do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
+ do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
return;
}
@@ -3089,75 +2924,15 @@
val = bswap128(val);
}
if (first < 8) {
- do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
+ do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
val = int128_urshift(val, first * 8);
- do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
} else {
- b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
+ b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
}
}
-void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- do_st16_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
-{
- helper_st16_mmu(env, addr, val, oi, GETPC());
-}
-
-/*
- * Store Helpers for cpu_ldst.h
- */
-
-static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- helper_stb_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- do_st2_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- do_st4_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- do_st8_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- do_st16_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
#include "ldst_common.c.inc"
/*
@@ -3196,47 +2971,47 @@
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
- return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
- return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
- return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
- return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
diff --git a/softmmu/icount.c b/accel/tcg/icount-common.c
similarity index 98%
rename from softmmu/icount.c
rename to accel/tcg/icount-common.c
index 144e248..0bf5bb5 100644
--- a/softmmu/icount.c
+++ b/accel/tcg/icount-common.c
@@ -27,7 +27,6 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
@@ -38,7 +37,7 @@
#include "hw/core/cpu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/cpu-throttle.h"
-#include "timers-state.h"
+#include "softmmu/timers-state.h"
/*
* ICOUNT: Instruction Counter
@@ -75,7 +74,7 @@
static int64_t icount_get_executed(CPUState *cpu)
{
return (cpu->icount_budget -
- (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
+ (cpu->neg.icount_decr.u16.low + cpu->icount_extra));
}
/*
@@ -111,7 +110,7 @@
CPUState *cpu = current_cpu;
if (cpu && cpu->running) {
- if (!cpu->can_do_io) {
+ if (!cpu->neg.can_do_io) {
error_report("Bad icount read");
exit(1);
}
diff --git a/accel/tcg/internal-common.h b/accel/tcg/internal-common.h
new file mode 100644
index 0000000..3b2277e
--- /dev/null
+++ b/accel/tcg/internal-common.h
@@ -0,0 +1,28 @@
+/*
+ * Internal execution defines for qemu (target agnostic)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_INTERNAL_COMMON_H
+#define ACCEL_TCG_INTERNAL_COMMON_H
+
+#include "exec/translation-block.h"
+
+extern int64_t max_delay;
+extern int64_t max_advance;
+
+void dump_exec_info(GString *buf);
+
+/*
+ * Return true if CS is not running in parallel with other cpus, either
+ * because there are no other cpus or we are within an exclusive context.
+ */
+static inline bool cpu_in_serial_context(CPUState *cs)
+{
+ return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
+}
+
+#endif
diff --git a/accel/tcg/internal.h b/accel/tcg/internal-target.h
similarity index 88%
rename from accel/tcg/internal.h
rename to accel/tcg/internal-target.h
index e8cbbde..4e36cf8 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal-target.h
@@ -1,13 +1,13 @@
/*
- * Internal execution defines for qemu
+ * Internal execution defines for qemu (target specific)
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
-#ifndef ACCEL_TCG_INTERNAL_H
-#define ACCEL_TCG_INTERNAL_H
+#ifndef ACCEL_TCG_INTERNAL_TARGET_H
+#define ACCEL_TCG_INTERNAL_TARGET_H
#include "exec/exec-all.h"
#include "exec/translate-all.h"
@@ -80,6 +80,9 @@
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc);
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
+void tcg_exec_unrealizefn(CPUState *cpu);
+
/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
@@ -90,18 +93,6 @@
}
}
-/*
- * Return true if CS is not running in parallel with other cpus, either
- * because there are no other cpus or we are within an exclusive context.
- */
-static inline bool cpu_in_serial_context(CPUState *cs)
-{
- return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
-}
-
-extern int64_t max_delay;
-extern int64_t max_advance;
-
extern bool one_insn_per_tb;
/**
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index 1b793e6..1cf5b92 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -26,7 +26,7 @@
* If the operation must be split into two operations to be
* examined separately for atomicity, return -lg2.
*/
-static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
+static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
{
MemOp atom = memop & MO_ATOM_MASK;
MemOp size = memop & MO_SIZE;
@@ -93,7 +93,7 @@
* host atomicity in order to avoid racing. This reduction
* avoids looping with cpu_loop_exit_atomic.
*/
- if (cpu_in_serial_context(env_cpu(env))) {
+ if (cpu_in_serial_context(cpu)) {
return MO_8;
}
return atmax;
@@ -139,14 +139,14 @@
/**
* load_atomic8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @pv: host address
*
* Atomically load 8 aligned bytes from @pv.
* If this is not possible, longjmp out to restart serially.
*/
-static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
{
if (HAVE_al8) {
return load_atomic8(pv);
@@ -168,19 +168,19 @@
#endif
/* Ultimate fallback: re-execute in serial context. */
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
/**
* load_atomic16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @pv: host address
*
* Atomically load 16 aligned bytes from @pv.
* If this is not possible, longjmp out to restart serially.
*/
-static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
{
Int128 *p = __builtin_assume_aligned(pv, 16);
@@ -212,7 +212,7 @@
}
/* Ultimate fallback: re-execute in serial context. */
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
/**
@@ -263,7 +263,7 @@
/**
* load_atom_extract_al8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @pv: host address
* @s: object size in bytes, @s <= 4.
@@ -273,7 +273,7 @@
* 8-byte load and extract.
* The value is returned in the low bits of a uint32_t.
*/
-static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
void *pv, int s)
{
uintptr_t pi = (uintptr_t)pv;
@@ -281,12 +281,12 @@
int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;
pv = (void *)(pi & ~7);
- return load_atomic8_or_exit(env, ra, pv) >> shr;
+ return load_atomic8_or_exit(cpu, ra, pv) >> shr;
}
/**
* load_atom_extract_al16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @p: host address
* @s: object size in bytes, @s <= 8.
@@ -299,7 +299,7 @@
*
* If this is not possible, longjmp out to restart serially.
*/
-static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
void *pv, int s)
{
uintptr_t pi = (uintptr_t)pv;
@@ -312,7 +312,7 @@
* Provoke SIGBUS if possible otherwise.
*/
pv = (void *)(pi & ~7);
- r = load_atomic16_or_exit(env, ra, pv);
+ r = load_atomic16_or_exit(cpu, ra, pv);
r = int128_urshift(r, shr);
return int128_getlo(r);
@@ -394,7 +394,7 @@
*
* Load 2 bytes from @p, honoring the atomicity of @memop.
*/
-static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
+static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -410,7 +410,7 @@
}
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
return lduw_he_p(pv);
@@ -421,9 +421,9 @@
return load_atomic4(pv - 1) >> 8;
}
if ((pi & 15) != 7) {
- return load_atom_extract_al8_or_exit(env, ra, pv, 2);
+ return load_atom_extract_al8_or_exit(cpu, ra, pv, 2);
}
- return load_atom_extract_al16_or_exit(env, ra, pv, 2);
+ return load_atom_extract_al16_or_exit(cpu, ra, pv, 2);
default:
g_assert_not_reached();
}
@@ -436,7 +436,7 @@
*
* Load 4 bytes from @p, honoring the atomicity of @memop.
*/
-static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -452,7 +452,7 @@
}
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
case MO_16:
@@ -466,9 +466,9 @@
return load_atom_extract_al4x2(pv);
case MO_32:
if (!(pi & 4)) {
- return load_atom_extract_al8_or_exit(env, ra, pv, 4);
+ return load_atom_extract_al8_or_exit(cpu, ra, pv, 4);
}
- return load_atom_extract_al16_or_exit(env, ra, pv, 4);
+ return load_atom_extract_al16_or_exit(cpu, ra, pv, 4);
default:
g_assert_not_reached();
}
@@ -481,7 +481,7 @@
*
* Load 8 bytes from @p, honoring the atomicity of @memop.
*/
-static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -498,12 +498,12 @@
return load_atom_extract_al16_or_al8(pv, 8);
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
if (atmax == MO_64) {
if (!HAVE_al8 && (pi & 7) == 0) {
- load_atomic8_or_exit(env, ra, pv);
+ load_atomic8_or_exit(cpu, ra, pv);
}
- return load_atom_extract_al16_or_exit(env, ra, pv, 8);
+ return load_atom_extract_al16_or_exit(cpu, ra, pv, 8);
}
if (HAVE_al8_fast) {
return load_atom_extract_al8x2(pv);
@@ -519,7 +519,7 @@
if (HAVE_al8) {
return load_atom_extract_al8x2(pv);
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
default:
g_assert_not_reached();
}
@@ -532,7 +532,7 @@
*
* Load 16 bytes from @p, honoring the atomicity of @memop.
*/
-static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
+static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -548,7 +548,7 @@
return atomic16_read_ro(pv);
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
memcpy(&r, pv, 16);
@@ -563,20 +563,20 @@
break;
case MO_64:
if (!HAVE_al8) {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
a = load_atomic8(pv);
b = load_atomic8(pv + 8);
break;
case -MO_64:
if (!HAVE_al8) {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
a = load_atom_extract_al8x2(pv);
b = load_atom_extract_al8x2(pv + 8);
break;
case MO_128:
- return load_atomic16_or_exit(env, ra, pv);
+ return load_atomic16_or_exit(cpu, ra, pv);
default:
g_assert_not_reached();
}
@@ -857,7 +857,7 @@
*
* Store 2 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_2(CPUArchState *env, uintptr_t ra,
+static void store_atom_2(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, uint16_t val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -868,7 +868,7 @@
return;
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
if (atmax == MO_8) {
stw_he_p(pv, val);
return;
@@ -897,7 +897,7 @@
g_assert_not_reached();
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
/**
@@ -908,7 +908,7 @@
*
* Store 4 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_4(CPUArchState *env, uintptr_t ra,
+static void store_atom_4(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, uint32_t val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -919,7 +919,7 @@
return;
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
stl_he_p(pv, val);
@@ -961,7 +961,7 @@
return;
}
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
default:
g_assert_not_reached();
}
@@ -975,7 +975,7 @@
*
* Store 8 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_8(CPUArchState *env, uintptr_t ra,
+static void store_atom_8(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, uint64_t val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -986,7 +986,7 @@
return;
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
stq_he_p(pv, val);
@@ -1029,7 +1029,7 @@
default:
g_assert_not_reached();
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
/**
@@ -1040,7 +1040,7 @@
*
* Store 16 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_16(CPUArchState *env, uintptr_t ra,
+static void store_atom_16(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, Int128 val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -1052,7 +1052,7 @@
return;
}
- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
@@ -1111,5 +1111,5 @@
default:
g_assert_not_reached();
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
index 5f8144b..4483351 100644
--- a/accel/tcg/ldst_common.c.inc
+++ b/accel/tcg/ldst_common.c.inc
@@ -8,6 +8,231 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
+/*
+ * Load helpers for tcg-ldst.h
+ */
+
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+ return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+ return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+ return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+ return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+/*
+ * Provide signed versions of the load routines as well. We can of course
+ * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
+ */
+
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
+}
+
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+ return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
+}
+
+Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
+{
+ return helper_ld16_mmu(env, addr, oi, GETPC());
+}
+
+/*
+ * Store helpers for tcg-ldst.h
+ */
+
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+ do_st1_mmu(env_cpu(env), addr, val, oi, ra);
+}
+
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+ do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+ do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+ do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+ do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
+{
+ helper_st16_mmu(env, addr, val, oi, GETPC());
+}
+
+/*
+ * Load helpers for cpu_ldst.h
+ */
+
+static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+}
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
+{
+ uint8_t ret;
+
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
+ ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+ plugin_load_cb(env, addr, oi);
+ return ret;
+}
+
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ uint16_t ret;
+
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+ ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+ plugin_load_cb(env, addr, oi);
+ return ret;
+}
+
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ uint32_t ret;
+
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+ ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+ plugin_load_cb(env, addr, oi);
+ return ret;
+}
+
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ uint64_t ret;
+
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+ ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+ plugin_load_cb(env, addr, oi);
+ return ret;
+}
+
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ Int128 ret;
+
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+ ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
+ plugin_load_cb(env, addr, oi);
+ return ret;
+}
+
+/*
+ * Store helpers for cpu_ldst.h
+ */
+
+static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ helper_stb_mmu(env, addr, val, oi, retaddr);
+ plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+ do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+ plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+ do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+ plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+ do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+ plugin_store_cb(env, addr, oi);
+}
+
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+ do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+ plugin_store_cb(env, addr, oi);
+}
+
+/*
+ * Wrappers of the above
+ */
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
index 8ace783..8783edd 100644
--- a/accel/tcg/meson.build
+++ b/accel/tcg/meson.build
@@ -1,7 +1,9 @@
tcg_ss = ss.source_set()
+common_ss.add(when: 'CONFIG_TCG', if_true: files(
+ 'cpu-exec-common.c',
+))
tcg_ss.add(files(
'tcg-all.c',
- 'cpu-exec-common.c',
'cpu-exec.c',
'tb-maint.c',
'tcg-runtime-gvec.c',
@@ -20,6 +22,10 @@
specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
'cputlb.c',
+))
+
+system_ss.add(when: ['CONFIG_TCG'], if_true: files(
+ 'icount-common.c',
'monitor.c',
))
diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c
index d48de23..caf1189 100644
--- a/accel/tcg/monitor.c
+++ b/accel/tcg/monitor.c
@@ -16,7 +16,7 @@
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
-#include "internal.h"
+#include "internal-common.h"
static void dump_drift_info(GString *buf)
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index 5c13615..d31c999 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -104,7 +104,7 @@
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(udata, 0);
- tcg_gen_ld_i32(cpu_index, cpu_env,
+ tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
@@ -138,7 +138,7 @@
tcg_gen_movi_i32(meminfo, info);
tcg_gen_movi_ptr(udata, 0);
- tcg_gen_ld_i32(cpu_index, cpu_env,
+ tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
@@ -157,7 +157,7 @@
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(ptr, 0);
- tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
+ tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
}
@@ -581,7 +581,7 @@
if (!tcg_ctx->plugin_tb->mem_helper) {
return;
}
- tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
+ tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
}
@@ -849,7 +849,7 @@
} else {
if (ptb->vaddr2 == -1) {
ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
- get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
+ get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
}
pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
}
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 32ae8af..e678d20 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -29,7 +29,8 @@
#include "tcg/tcg.h"
#include "tb-hash.h"
#include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
/* List iterators for lists of tagged pointers in TranslationBlock. */
@@ -207,13 +208,12 @@
{
PageDesc *pd;
void **lp;
- int i;
/* Level 1. Always allocated. */
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
/* Level 2..N-1. */
- for (i = v_l2_levels; i > 0; i--) {
+ for (int i = v_l2_levels; i > 0; i--) {
void **p = qatomic_rcu_read(lp);
if (p == NULL) {
@@ -1083,7 +1083,8 @@
if (current_tb_modified) {
/* Force execution of one insn next time. */
CPUState *cpu = current_cpu;
- cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ cpu->cflags_next_tb =
+ 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
return true;
}
return false;
@@ -1153,7 +1154,8 @@
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ current_cpu->cflags_next_tb =
+ 1 | CF_LAST_IO | CF_NOIRQ | curr_cflags(current_cpu);
mmap_unlock();
cpu_loop_exit_noexc(current_cpu);
}
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
index 3d2cfbb..b25685f 100644
--- a/accel/tcg/tcg-accel-ops-icount.c
+++ b/accel/tcg/tcg-accel-ops-icount.c
@@ -111,14 +111,14 @@
* each vCPU execution. However u16.high can be raised
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
*/
- g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
+ g_assert(cpu->neg.icount_decr.u16.low == 0);
g_assert(cpu->icount_extra == 0);
replay_mutex_lock();
cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
insns_left = MIN(0xffff, cpu->icount_budget);
- cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+ cpu->neg.icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
if (cpu->icount_budget == 0) {
@@ -138,7 +138,7 @@
icount_update(cpu);
/* Reset the counters */
- cpu_neg(cpu)->icount_decr.u16.low = 0;
+ cpu->neg.icount_decr.u16.low = 0;
cpu->icount_extra = 0;
cpu->icount_budget = 0;
@@ -153,7 +153,7 @@
tcg_handle_interrupt(cpu, mask);
if (qemu_cpu_is_self(cpu) &&
- !cpu->can_do_io
+ !cpu->neg.can_do_io
&& (mask & ~old_mask) != 0) {
cpu_abort(cpu, "Raised interrupt while not in I/O function");
}
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index 4b0dfb4..fac8009 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -32,7 +32,7 @@
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
@@ -80,7 +80,7 @@
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
current_cpu = cpu;
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 2d52328..611932f 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -32,7 +32,7 @@
#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"
@@ -192,7 +192,7 @@
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = true;
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
@@ -334,7 +334,7 @@
cpu->thread = single_tcg_cpu_thread;
cpu->halt_cond = single_tcg_halt_cond;
cpu->thread_id = first_cpu->thread_id;
- cpu->can_do_io = 1;
+ cpu->neg.can_do_io = 1;
cpu->created = true;
}
}
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 3973591..d885cc1 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -91,7 +91,7 @@
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
} else {
- qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+ qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}
}
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 03dfd67..c6619f5 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -27,7 +27,7 @@
#include "sysemu/tcg.h"
#include "exec/replay-core.h"
#include "sysemu/cpu-timers.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
#include "tcg/oversized-guest.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -38,7 +38,7 @@
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
-#include "internal.h"
+#include "internal-target.h"
struct TCGState {
AccelState parent_obj;
@@ -121,7 +121,7 @@
* There's no guest base to take into account, so go ahead and
* initialize the prologue now.
*/
- tcg_prologue_init(tcg_ctx);
+ tcg_prologue_init();
#endif
return 0;
@@ -227,6 +227,8 @@
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "tcg";
ac->init_machine = tcg_init_machine;
+ ac->cpu_common_realize = tcg_exec_realizefn;
+ ac->cpu_common_unrealize = tcg_exec_unrealizefn;
ac->allowed = &tcg_allowed;
ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index b2d4e22..8cb6ad3 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -61,7 +61,8 @@
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
#include "perf.h"
#include "tcg/insn-start-words.h"
@@ -214,7 +215,7 @@
* Reset the cycle counter to the start of the block and
* shift if to the number of actually executed instructions.
*/
- cpu_neg(cpu)->icount_decr.u16.low += insns_left;
+ cpu->neg.icount_decr.u16.low += insns_left;
}
cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
@@ -288,7 +289,7 @@
vaddr pc, uint64_t cs_base,
uint32_t flags, int cflags)
{
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb, *existing_tb;
tb_page_addr_t phys_pc, phys_p2;
tcg_insn_unit *gen_code_buf;
@@ -344,8 +345,6 @@
tcg_ctx->page_bits = TARGET_PAGE_BITS;
tcg_ctx->page_mask = TARGET_PAGE_MASK;
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
- tcg_ctx->tlb_fast_offset =
- (int)offsetof(ArchCPU, neg.tlb.f) - (int)offsetof(ArchCPU, env);
#endif
tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
@@ -580,7 +579,7 @@
} else {
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
vaddr pc;
uint64_t cs_base;
tb_page_addr_t addr;
@@ -623,7 +622,7 @@
cc = CPU_GET_CLASS(cpu);
if (cc->tcg_ops->io_recompile_replay_branch &&
cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
- cpu_neg(cpu)->icount_decr.u16.low++;
+ cpu->neg.icount_decr.u16.low++;
n = 2;
}
@@ -779,7 +778,7 @@
{
g_assert(qemu_mutex_iothread_locked());
cpu->interrupt_request |= mask;
- qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+ qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}
#endif /* CONFIG_USER_ONLY */
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 1a6a544..e7abcd8 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -14,28 +14,23 @@
#include "exec/translator.h"
#include "exec/plugin-gen.h"
#include "tcg/tcg-op-common.h"
-#include "internal.h"
+#include "internal-target.h"
-static void gen_io_start(void)
+static void set_can_do_io(DisasContextBase *db, bool val)
{
- tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
- offsetof(ArchCPU, parent_obj.can_do_io) -
- offsetof(ArchCPU, env));
+ if (db->saved_can_do_io != val) {
+ db->saved_can_do_io = val;
+
+ QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
+ tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
+ offsetof(ArchCPU, parent_obj.neg.can_do_io) -
+ offsetof(ArchCPU, env));
+ }
}
bool translator_io_start(DisasContextBase *db)
{
- uint32_t cflags = tb_cflags(db->tb);
-
- if (!(cflags & CF_USE_ICOUNT)) {
- return false;
- }
- if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
- /* Already started in translator_loop. */
- return true;
- }
-
- gen_io_start();
+ set_can_do_io(db, true);
/*
* Ensure that this instruction will be the last in the TB.
@@ -47,14 +42,17 @@
return true;
}
-static TCGOp *gen_tb_start(uint32_t cflags)
+static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
{
- TCGv_i32 count = tcg_temp_new_i32();
+ TCGv_i32 count = NULL;
TCGOp *icount_start_insn = NULL;
- tcg_gen_ld_i32(count, cpu_env,
- offsetof(ArchCPU, neg.icount_decr.u32) -
- offsetof(ArchCPU, env));
+ if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
+ count = tcg_temp_new_i32();
+ tcg_gen_ld_i32(count, tcg_env,
+ offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
+ - offsetof(ArchCPU, env));
+ }
if (cflags & CF_USE_ICOUNT) {
/*
@@ -81,21 +79,18 @@
}
if (cflags & CF_USE_ICOUNT) {
- tcg_gen_st16_i32(count, cpu_env,
- offsetof(ArchCPU, neg.icount_decr.u16.low) -
- offsetof(ArchCPU, env));
- /*
- * cpu->can_do_io is cleared automatically here at the beginning of
- * each translation block. The cost is minimal and only paid for
- * -icount, plus it would be very easy to forget doing it in the
- * translator. Doing it here means we don't need a gen_io_end() to
- * go with gen_io_start().
- */
- tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
- offsetof(ArchCPU, parent_obj.can_do_io) -
- offsetof(ArchCPU, env));
+ tcg_gen_st16_i32(count, tcg_env,
+ offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
+ - offsetof(ArchCPU, env));
}
+ /*
+ * cpu->neg.can_do_io is set automatically here at the beginning of
+ * each translation block. The cost is minimal, plus it would be
+ * very easy to forget doing it in the translator.
+ */
+ set_can_do_io(db, db->max_insns == 1 && (cflags & CF_LAST_IO));
+
return icount_start_insn;
}
@@ -144,6 +139,7 @@
db->num_insns = 0;
db->max_insns = *max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
+ db->saved_can_do_io = -1;
db->host_addr[0] = host_pc;
db->host_addr[1] = NULL;
@@ -151,11 +147,17 @@
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
/* Start translating. */
- icount_start_insn = gen_tb_start(cflags);
+ icount_start_insn = gen_tb_start(db, cflags);
ops->tb_start(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
- plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
+ if (cflags & CF_MEMI_ONLY) {
+ /* We should only see CF_MEMI_ONLY for io_recompile. */
+ assert(cflags & CF_LAST_IO);
+ plugin_enabled = plugin_gen_tb_start(cpu, db, true);
+ } else {
+ plugin_enabled = plugin_gen_tb_start(cpu, db, false);
+ }
while (true) {
*max_insns = ++db->num_insns;
@@ -172,13 +174,9 @@
the next instruction. */
if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
/* Accept I/O on the last instruction. */
- gen_io_start();
- ops->translate_insn(db, cpu);
- } else {
- /* we should only see CF_MEMI_ONLY for io_recompile */
- tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
- ops->translate_insn(db, cpu);
+ set_can_do_io(db, true);
}
+ ops->translate_insn(db, cpu);
/*
* We can't instrument after instructions that change control
diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c
index 874e1f1..2dc6fd9 100644
--- a/accel/tcg/user-exec-stub.c
+++ b/accel/tcg/user-exec-stub.c
@@ -2,8 +2,6 @@
#include "hw/core/cpu.h"
#include "exec/replay-core.h"
-bool enable_cpu_pm = false;
-
void cpu_resume(CPUState *cpu)
{
}
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index ab48cb4..5bf2761 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -29,7 +29,8 @@
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
__thread uintptr_t helper_retaddr;
@@ -941,7 +942,7 @@
/* The softmmu versions of these helpers are in cputlb.c. */
-static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
+static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
int a_bits = get_alignment_bits(mop);
@@ -949,60 +950,39 @@
/* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) {
- cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
+ cpu_loop_exit_sigbus(cpu, addr, type, ra);
}
- ret = g2h(env_cpu(env), addr);
+ ret = g2h(cpu, addr);
set_helper_retaddr(ra);
return ret;
}
#include "ldst_atomicity.c.inc"
-static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+ uintptr_t ra, MMUAccessType access_type)
{
void *haddr;
uint8_t ret;
- tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
+ haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
}
-tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- return do_ld1_mmu(env, addr, get_memop(oi), ra);
-}
-
-tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return ret;
-}
-
-static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+ uintptr_t ra, MMUAccessType access_type)
{
void *haddr;
uint16_t ret;
+ MemOp mop = get_memop(oi);
- tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_2(env, ra, haddr, mop);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+ ret = load_atom_2(cpu, ra, haddr, mop);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
@@ -1011,36 +991,16 @@
return ret;
}
-tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- return do_ld2_mmu(env, addr, get_memop(oi), ra);
-}
-
-tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return ret;
-}
-
-static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+ uintptr_t ra, MMUAccessType access_type)
{
void *haddr;
uint32_t ret;
+ MemOp mop = get_memop(oi);
- tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_4(env, ra, haddr, mop);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+ ret = load_atom_4(cpu, ra, haddr, mop);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
@@ -1049,36 +1009,16 @@
return ret;
}
-tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- return do_ld4_mmu(env, addr, get_memop(oi), ra);
-}
-
-tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return ret;
-}
-
-static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+ uintptr_t ra, MMUAccessType access_type)
{
void *haddr;
uint64_t ret;
+ MemOp mop = get_memop(oi);
- tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_8(env, ra, haddr, mop);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+ ret = load_atom_8(cpu, ra, haddr, mop);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
@@ -1087,30 +1027,17 @@
return ret;
}
-uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- return do_ld8_mmu(env, addr, get_memop(oi), ra);
-}
-
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return ret;
-}
-
-static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
void *haddr;
Int128 ret;
+ MemOp mop = get_memop(oi);
tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_16(env, ra, haddr, mop);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
+ ret = load_atom_16(cpu, ra, haddr, mop);
clear_helper_retaddr();
if (mop & MO_BSWAP) {
@@ -1119,171 +1046,81 @@
return ret;
}
-Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
MemOpIdx oi, uintptr_t ra)
{
- return do_ld16_mmu(env, addr, get_memop(oi), ra);
-}
-
-Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
-{
- return helper_ld16_mmu(env, addr, oi, GETPC());
-}
-
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return ret;
-}
-
-static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
- MemOp mop, uintptr_t ra)
-{
void *haddr;
- tcg_debug_assert((mop & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
}
-void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st1_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st1_mmu(env, addr, val, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
- MemOp mop, uintptr_t ra)
+static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra)
{
void *haddr;
+ MemOp mop = get_memop(oi);
- tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap16(val);
}
- store_atom_2(env, ra, haddr, mop, val);
+ store_atom_2(cpu, ra, haddr, mop, val);
clear_helper_retaddr();
}
-void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st2_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st2_mmu(env, addr, val, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOp mop, uintptr_t ra)
+static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
{
void *haddr;
+ MemOp mop = get_memop(oi);
- tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap32(val);
}
- store_atom_4(env, ra, haddr, mop, val);
+ store_atom_4(cpu, ra, haddr, mop, val);
clear_helper_retaddr();
}
-void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st4_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st4_mmu(env, addr, val, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
- MemOp mop, uintptr_t ra)
+static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra)
{
void *haddr;
+ MemOp mop = get_memop(oi);
- tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap64(val);
}
- store_atom_8(env, ra, haddr, mop, val);
+ store_atom_8(cpu, ra, haddr, mop, val);
clear_helper_retaddr();
}
-void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st8_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st8_mmu(env, addr, val, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOp mop, uintptr_t ra)
+static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra)
{
void *haddr;
+    MemOp mop = get_memop(oi);
- tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
val = bswap128(val);
}
- store_atom_16(env, ra, haddr, mop, val);
+ store_atom_16(cpu, ra, haddr, mop, val);
clear_helper_retaddr();
}
-void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
- MemOpIdx oi, uintptr_t ra)
-{
- do_st16_mmu(env, addr, val, get_memop(oi), ra);
-}
-
-void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
-{
- helper_st16_mmu(env, addr, val, oi, GETPC());
-}
-
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
- Int128 val, MemOpIdx oi, uintptr_t ra)
-{
- do_st16_mmu(env, addr, val, get_memop(oi), ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
@@ -1330,7 +1167,7 @@
void *haddr;
uint8_t ret;
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+ haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
ret = ldub_p(haddr);
clear_helper_retaddr();
return ret;
@@ -1342,7 +1179,7 @@
void *haddr;
uint16_t ret;
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+ haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
ret = lduw_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
@@ -1357,7 +1194,7 @@
void *haddr;
uint32_t ret;
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
+ haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
ret = ldl_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
@@ -1372,7 +1209,7 @@
void *haddr;
uint64_t ret;
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+ haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_p(haddr);
clear_helper_retaddr();
if (get_memop(oi) & MO_BSWAP) {
@@ -1386,7 +1223,7 @@
/*
* Do not allow unaligned operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
@@ -1395,15 +1232,15 @@
/* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) {
- cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
+ cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
}
/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
- cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ cpu_loop_exit_atomic(cpu, retaddr);
}
- ret = g2h(env_cpu(env), addr);
+ ret = g2h(cpu, addr);
set_helper_retaddr(retaddr);
return ret;
}
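
The user-exec.c hunks above remove the per-size helper_*/cpu_* wrappers from this file and convert the do_ldN_mmu()/do_stN_mmu() workers to take a CPUState plus the packed MemOpIdx, decoding the MemOp internally via get_memop() instead of having every caller pre-decode it. A minimal standalone sketch of that shape follows; memop_idx_t, get_size() and do_load() are hypothetical stand-ins, not QEMU code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packed descriptor: low 2 bits encode log2(access size). */
typedef uint32_t memop_idx_t;

static unsigned get_size(memop_idx_t oi)
{
    return 1u << (oi & 3);                     /* 1, 2, 4 or 8 bytes */
}

/*
 * One worker covers every access size; callers hand down the packed
 * index and the worker decodes it, mirroring do_ldN_mmu(cpu, addr, oi, ...).
 */
static uint64_t do_load(const uint8_t *mem, memop_idx_t oi)
{
    unsigned size = get_size(oi);
    uint64_t val = 0;

    for (unsigned i = 0; i < size; i++) {
        val |= (uint64_t)mem[i] << (8 * i);    /* assemble little-endian */
    }
    return val;
}

int main(void)
{
    uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

    printf("1-byte load: 0x%llx\n", (unsigned long long)do_load(buf, 0));
    printf("4-byte load: 0x%llx\n", (unsigned long long)do_load(buf, 2));
    return 0;
}
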
diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c
index 057571d..cacae1e 100644
--- a/audio/alsaaudio.c
+++ b/audio/alsaaudio.c
@@ -904,7 +904,7 @@
}
}
-static void *alsa_audio_init(Audiodev *dev)
+static void *alsa_audio_init(Audiodev *dev, Error **errp)
{
AudiodevAlsaOptions *aopts;
assert(dev->driver == AUDIODEV_DRIVER_ALSA);
@@ -960,7 +960,6 @@
.init = alsa_audio_init,
.fini = alsa_audio_fini,
.pcm_ops = &alsa_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (ALSAVoiceOut),
diff --git a/audio/audio-hmp-cmds.c b/audio/audio-hmp-cmds.c
index 1237ce9..c9608b7 100644
--- a/audio/audio-hmp-cmds.c
+++ b/audio/audio-hmp-cmds.c
@@ -26,6 +26,7 @@
#include "audio/audio.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
+#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
static QLIST_HEAD (capture_list_head, CaptureState) capture_head;
@@ -65,10 +66,11 @@
int nchannels = qdict_get_try_int(qdict, "nchannels", 2);
const char *audiodev = qdict_get_str(qdict, "audiodev");
CaptureState *s;
- AudioState *as = audio_state_by_name(audiodev);
+ Error *local_err = NULL;
+ AudioState *as = audio_state_by_name(audiodev, &local_err);
if (!as) {
- monitor_printf(mon, "Audiodev '%s' not found\n", audiodev);
+ error_report_err(local_err);
return;
}
diff --git a/audio/audio.c b/audio/audio.c
index 90c7c49..730bf24 100644
--- a/audio/audio.c
+++ b/audio/audio.c
@@ -32,7 +32,9 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-commands-audio.h"
+#include "qapi/qmp/qdict.h"
#include "qemu/cutils.h"
+#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/help_option.h"
@@ -61,19 +63,22 @@
"spice",
CONFIG_AUDIO_DRIVERS
"none",
- "wav",
NULL
};
static QLIST_HEAD(, audio_driver) audio_drivers;
-static AudiodevListHead audiodevs = QSIMPLEQ_HEAD_INITIALIZER(audiodevs);
+static AudiodevListHead audiodevs =
+ QSIMPLEQ_HEAD_INITIALIZER(audiodevs);
+static AudiodevListHead default_audiodevs =
+ QSIMPLEQ_HEAD_INITIALIZER(default_audiodevs);
+
void audio_driver_register(audio_driver *drv)
{
QLIST_INSERT_HEAD(&audio_drivers, drv, next);
}
-audio_driver *audio_driver_lookup(const char *name)
+static audio_driver *audio_driver_lookup(const char *name)
{
struct audio_driver *d;
Error *local_err = NULL;
@@ -111,8 +116,6 @@
#endif
};
-static bool legacy_config = true;
-
int audio_bug (const char *funcname, int cond)
{
if (cond) {
@@ -1553,9 +1556,11 @@
}
static int audio_driver_init(AudioState *s, struct audio_driver *drv,
- bool msg, Audiodev *dev)
+ Audiodev *dev, Error **errp)
{
- s->drv_opaque = drv->init(dev);
+ Error *local_err = NULL;
+
+ s->drv_opaque = drv->init(dev, &local_err);
if (s->drv_opaque) {
if (!drv->pcm_ops->get_buffer_in) {
@@ -1567,13 +1572,15 @@
drv->pcm_ops->put_buffer_out = audio_generic_put_buffer_out;
}
- audio_init_nb_voices_out(s, drv);
- audio_init_nb_voices_in(s, drv);
+ audio_init_nb_voices_out(s, drv, 1);
+ audio_init_nb_voices_in(s, drv, 0);
s->drv = drv;
return 0;
} else {
- if (msg) {
- dolog("Could not init `%s' audio driver\n", drv->name);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ } else {
+ error_setg(errp, "Could not init `%s' audio driver", drv->name);
}
return -1;
}
@@ -1681,17 +1688,45 @@
static void audio_validate_opts(Audiodev *dev, Error **errp);
-static AudiodevListEntry *audiodev_find(
- AudiodevListHead *head, const char *drvname)
+static void audio_create_default_audiodevs(void)
{
- AudiodevListEntry *e;
- QSIMPLEQ_FOREACH(e, head, next) {
- if (strcmp(AudiodevDriver_str(e->dev->driver), drvname) == 0) {
- return e;
- }
+ const char *drvname = getenv("QEMU_AUDIO_DRV");
+
+ if (!defaults_enabled()) {
+ return;
}
- return NULL;
+ /* QEMU_AUDIO_DRV=none is used by libqtest. */
+ if (drvname && !g_str_equal(drvname, "none")) {
+ error_report("Please use -audiodev instead of QEMU_AUDIO_*");
+ exit(1);
+ }
+
+ for (int i = 0; audio_prio_list[i]; i++) {
+ if (drvname && !g_str_equal(drvname, audio_prio_list[i])) {
+ continue;
+ }
+
+ if (audio_driver_lookup(audio_prio_list[i])) {
+ QDict *dict = qdict_new();
+ Audiodev *dev = NULL;
+ AudiodevListEntry *e;
+ Visitor *v;
+
+ qdict_put_str(dict, "driver", audio_prio_list[i]);
+ qdict_put_str(dict, "id", "#default");
+
+ v = qobject_input_visitor_new_keyval(QOBJECT(dict));
+ qobject_unref(dict);
+ visit_type_Audiodev(v, NULL, &dev, &error_fatal);
+ visit_free(v);
+
+ audio_validate_opts(dev, &error_abort);
+ e = g_new0(AudiodevListEntry, 1);
+ e->dev = dev;
+ QSIMPLEQ_INSERT_TAIL(&default_audiodevs, e, next);
+ }
+ }
}
/*
@@ -1700,62 +1735,16 @@
* if dev == NULL => legacy implicit initialization, return the already created
* state or create a new one
*/
-static AudioState *audio_init(Audiodev *dev, const char *name)
+static AudioState *audio_init(Audiodev *dev, Error **errp)
{
static bool atexit_registered;
- size_t i;
int done = 0;
- const char *drvname = NULL;
- VMChangeStateEntry *e;
+ const char *drvname;
+ VMChangeStateEntry *vmse;
AudioState *s;
struct audio_driver *driver;
- /* silence gcc warning about uninitialized variable */
- AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);
-
- if (using_spice) {
- /*
- * When using spice allow the spice audio driver being picked
- * as default.
- *
- * Temporary hack. Using audio devices without explicit
- * audiodev= property is already deprecated. Same goes for
- * the -soundhw switch. Once this support gets finally
- * removed we can also drop the concept of a default audio
- * backend and this can go away.
- */
- driver = audio_driver_lookup("spice");
- if (driver) {
- driver->can_be_default = 1;
- }
- }
-
- if (dev) {
- /* -audiodev option */
- legacy_config = false;
- drvname = AudiodevDriver_str(dev->driver);
- } else if (!QTAILQ_EMPTY(&audio_states)) {
- if (!legacy_config) {
- dolog("Device %s: audiodev default parameter is deprecated, please "
- "specify audiodev=%s\n", name,
- QTAILQ_FIRST(&audio_states)->dev->id);
- }
- return QTAILQ_FIRST(&audio_states);
- } else {
- /* legacy implicit initialization */
- head = audio_handle_legacy_opts();
- /*
- * In case of legacy initialization, all Audiodevs in the list will have
- * the same configuration (except the driver), so it doesn't matter which
- * one we chose. We need an Audiodev to set up AudioState before we can
- * init a driver. Also note that dev at this point is still in the
- * list.
- */
- dev = QSIMPLEQ_FIRST(&head)->dev;
- audio_validate_opts(dev, &error_abort);
- }
s = g_new0(AudioState, 1);
- s->dev = dev;
QLIST_INIT (&s->hw_head_out);
QLIST_INIT (&s->hw_head_in);
@@ -1767,56 +1756,35 @@
s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s);
- s->nb_hw_voices_out = audio_get_pdo_out(dev)->voices;
- s->nb_hw_voices_in = audio_get_pdo_in(dev)->voices;
-
- if (s->nb_hw_voices_out < 1) {
- dolog ("Bogus number of playback voices %d, setting to 1\n",
- s->nb_hw_voices_out);
- s->nb_hw_voices_out = 1;
- }
-
- if (s->nb_hw_voices_in < 0) {
- dolog ("Bogus number of capture voices %d, setting to 0\n",
- s->nb_hw_voices_in);
- s->nb_hw_voices_in = 0;
- }
-
- if (drvname) {
+ if (dev) {
+ /* -audiodev option */
+ s->dev = dev;
+ drvname = AudiodevDriver_str(dev->driver);
driver = audio_driver_lookup(drvname);
if (driver) {
- done = !audio_driver_init(s, driver, true, dev);
+ done = !audio_driver_init(s, driver, dev, errp);
} else {
- dolog ("Unknown audio driver `%s'\n", drvname);
+            error_setg(errp, "Unknown audio driver `%s'", drvname);
}
if (!done) {
- free_audio_state(s);
- return NULL;
+ goto out;
}
} else {
- for (i = 0; audio_prio_list[i]; i++) {
- AudiodevListEntry *e = audiodev_find(&head, audio_prio_list[i]);
- driver = audio_driver_lookup(audio_prio_list[i]);
-
- if (e && driver) {
- s->dev = dev = e->dev;
- audio_validate_opts(dev, &error_abort);
- done = !audio_driver_init(s, driver, false, dev);
- if (done) {
- e->dev = NULL;
- break;
- }
+ for (;;) {
+ AudiodevListEntry *e = QSIMPLEQ_FIRST(&default_audiodevs);
+ if (!e) {
+ error_setg(errp, "no default audio driver available");
+ goto out;
}
+ s->dev = dev = e->dev;
+ drvname = AudiodevDriver_str(dev->driver);
+ driver = audio_driver_lookup(drvname);
+ if (!audio_driver_init(s, driver, dev, NULL)) {
+ break;
+ }
+ QSIMPLEQ_REMOVE_HEAD(&default_audiodevs, next);
}
}
- audio_free_audiodev_list(&head);
-
- if (!done) {
- driver = audio_driver_lookup("none");
- done = !audio_driver_init(s, driver, false, dev);
- assert(done);
- dolog("warning: Using timer based audio emulation\n");
- }
if (dev->timer_period <= 0) {
s->period_ticks = 1;
@@ -1824,8 +1792,8 @@
s->period_ticks = dev->timer_period * (int64_t)SCALE_US;
}
- e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
- if (!e) {
+ vmse = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
+ if (!vmse) {
dolog ("warning: Could not register change state handler\n"
"(Audio can continue looping even after stopping the VM)\n");
}
@@ -1834,27 +1802,50 @@
QLIST_INIT (&s->card_head);
vmstate_register (NULL, 0, &vmstate_audio, s);
return s;
+
+out:
+ free_audio_state(s);
+ return NULL;
}
-void audio_free_audiodev_list(AudiodevListHead *head)
-{
- AudiodevListEntry *e;
- while ((e = QSIMPLEQ_FIRST(head))) {
- QSIMPLEQ_REMOVE_HEAD(head, next);
- qapi_free_Audiodev(e->dev);
- g_free(e);
- }
-}
-
-void AUD_register_card (const char *name, QEMUSoundCard *card)
+bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp)
{
if (!card->state) {
- card->state = audio_init(NULL, name);
+ if (!QTAILQ_EMPTY(&audio_states)) {
+ /*
+ * FIXME: once it is possible to create an arbitrary
+ * default device via -audio DRIVER,OPT=VALUE (no "model"),
+ * replace this special case with the default AudioState*,
+ * storing it in a separate global. For now, keep the
+ * warning to encourage moving off magic use of the first
+ * -audiodev.
+ */
+ if (QSIMPLEQ_EMPTY(&default_audiodevs)) {
+ dolog("Device %s: audiodev default parameter is deprecated, please "
+ "specify audiodev=%s\n", name,
+ QTAILQ_FIRST(&audio_states)->dev->id);
+ }
+ card->state = QTAILQ_FIRST(&audio_states);
+ } else {
+ if (QSIMPLEQ_EMPTY(&default_audiodevs)) {
+ audio_create_default_audiodevs();
+ }
+ card->state = audio_init(NULL, errp);
+ if (!card->state) {
+ if (!QSIMPLEQ_EMPTY(&audiodevs)) {
+ error_append_hint(errp, "Perhaps you wanted to set audiodev=%s?",
+ QSIMPLEQ_FIRST(&audiodevs)->dev->id);
+ }
+ return false;
+ }
+ }
}
card->name = g_strdup (name);
memset (&card->entries, 0, sizeof (card->entries));
QLIST_INSERT_HEAD(&card->state->card_head, card, entries);
+
+ return true;
}
void AUD_remove_card (QEMUSoundCard *card)
@@ -1876,10 +1867,8 @@
struct capture_callback *cb;
if (!s) {
- if (!legacy_config) {
- dolog("Capturing without setting an audiodev is deprecated\n");
- }
- s = audio_init(NULL, NULL);
+ error_report("Capturing without setting an audiodev is not supported");
+ abort();
}
if (!audio_get_pdo_out(s->dev)->mixing_engine) {
@@ -1900,10 +1889,8 @@
cap = audio_pcm_capture_find_specific(s, as);
if (cap) {
QLIST_INSERT_HEAD (&cap->cb_head, cb, entries);
- return cap;
} else {
HWVoiceOut *hw;
- CaptureVoiceOut *cap;
cap = g_malloc0(sizeof(*cap));
@@ -1937,8 +1924,9 @@
QLIST_FOREACH(hw, &s->hw_head_out, entries) {
audio_attach_capture (hw);
}
- return cap;
}
+
+ return cap;
}
void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque)
@@ -2184,17 +2172,13 @@
QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next);
}
-bool audio_init_audiodevs(void)
+void audio_init_audiodevs(void)
{
AudiodevListEntry *e;
QSIMPLEQ_FOREACH(e, &audiodevs, next) {
- if (!audio_init(e->dev, NULL)) {
- return false;
- }
+ audio_init(e->dev, &error_fatal);
}
-
- return true;
}
audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo)
@@ -2256,7 +2240,7 @@
audioformat_bytes_per_sample(as->fmt);
}
-AudioState *audio_state_by_name(const char *name)
+AudioState *audio_state_by_name(const char *name, Error **errp)
{
AudioState *s;
QTAILQ_FOREACH(s, &audio_states, list) {
@@ -2265,6 +2249,7 @@
return s;
}
}
+ error_setg(errp, "audiodev '%s' not found", name);
return NULL;
}
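
With the legacy environment-variable path gone, audio_init() above resolves the implicit-audiodev case from the default_audiodevs list: it tries the entry at the head, drops it and retries with the next when that driver fails to initialize, and errors out only when the list is empty. A standalone sketch of that fallback loop is below; the driver names and try_init() are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the default audiodev list and driver init. */
static const char *defaults[] = { "pipewire", "pulseaudio", "sdl", "none", NULL };

static bool try_init(const char *name)
{
    /* Pretend only "sdl" and "none" can be brought up on this host. */
    return strcmp(name, "sdl") == 0 || strcmp(name, "none") == 0;
}

static const char *pick_default(void)
{
    for (int i = 0; defaults[i]; i++) {
        if (try_init(defaults[i])) {
            return defaults[i];   /* first driver that initializes wins */
        }
        /* failure: drop this entry and fall through to the next one */
    }
    return NULL;                  /* "no default audio driver available" */
}

int main(void)
{
    const char *drv = pick_default();

    printf("default audio driver: %s\n", drv ? drv : "(none found)");
    return drv ? 0 : 1;
}
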
diff --git a/audio/audio.h b/audio/audio.h
index 01bdc56..80f3f92 100644
--- a/audio/audio.h
+++ b/audio/audio.h
@@ -94,7 +94,7 @@
void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0);
void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
-void AUD_register_card (const char *name, QEMUSoundCard *card);
+bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp);
void AUD_remove_card (QEMUSoundCard *card);
CaptureVoiceOut *AUD_add_capture(
AudioState *s,
@@ -170,11 +170,10 @@
void audio_define(Audiodev *audio);
void audio_parse_option(const char *opt);
-bool audio_init_audiodevs(void);
+void audio_init_audiodevs(void);
void audio_help(void);
-void audio_legacy_help(void);
-AudioState *audio_state_by_name(const char *name);
+AudioState *audio_state_by_name(const char *name, Error **errp);
const char *audio_get_id(QEMUSoundCard *card);
#define DEFINE_AUDIO_PROPERTIES(_s, _f) \
diff --git a/audio/audio_int.h b/audio/audio_int.h
index e57ff50..2d079d0 100644
--- a/audio/audio_int.h
+++ b/audio/audio_int.h
@@ -140,13 +140,12 @@
struct audio_driver {
const char *name;
const char *descr;
- void *(*init) (Audiodev *);
+ void *(*init) (Audiodev *, Error **);
void (*fini) (void *);
#ifdef CONFIG_GIO
void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p);
#endif
struct audio_pcm_ops *pcm_ops;
- int can_be_default;
int max_voices_out;
int max_voices_in;
size_t voice_size_out;
@@ -243,7 +242,6 @@
extern const char *audio_prio_list[];
void audio_driver_register(audio_driver *drv);
-audio_driver *audio_driver_lookup(const char *name);
void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
@@ -297,9 +295,6 @@
} AudiodevListEntry;
typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead;
-AudiodevListHead audio_handle_legacy_opts(void);
-
-void audio_free_audiodev_list(AudiodevListHead *head);
void audio_create_pdos(Audiodev *dev);
AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev);
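
The audio_int.h change above switches the driver init callback to void *(*init)(Audiodev *, Error **): on failure a backend now returns NULL with the error set through the out-parameter instead of logging and returning NULL silently, which is what each per-driver conversion further down implements. A self-contained sketch of that convention follows; Err and err_set() are hypothetical stand-ins for QEMU's Error and error_setg().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical minimal error object standing in for QEMU's Error type. */
typedef struct Err { char msg[128]; } Err;

static void err_set(Err **errp, const char *msg)
{
    if (errp) {
        *errp = malloc(sizeof(**errp));
        snprintf((*errp)->msg, sizeof((*errp)->msg), "%s", msg);
    }
}

/* Driver init in the new style: return backend state, or NULL + set *errp. */
static void *demo_audio_init(const char *dev, Err **errp)
{
    static int backend_state;          /* stands in for real driver state */

    if (strcmp(dev, "ok") != 0) {
        err_set(errp, "could not open demo audio backend");
        return NULL;
    }
    return &backend_state;
}

int main(void)
{
    Err *err = NULL;
    void *state = demo_audio_init("missing", &err);

    if (!state) {
        fprintf(stderr, "init failed: %s\n", err->msg);
        free(err);
        return 1;
    }
    return 0;
}
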
diff --git a/audio/audio_legacy.c b/audio/audio_legacy.c
deleted file mode 100644
index dc72ba5..0000000
--- a/audio/audio_legacy.c
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * QEMU Audio subsystem: legacy configuration handling
- *
- * Copyright (c) 2015-2019 Zoltán Kővágó <DirtY.iCE.hu@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#include "qemu/osdep.h"
-#include "audio.h"
-#include "audio_int.h"
-#include "qemu/cutils.h"
-#include "qemu/timer.h"
-#include "qapi/error.h"
-#include "qapi/qapi-visit-audio.h"
-#include "qapi/visitor-impl.h"
-
-#define AUDIO_CAP "audio-legacy"
-#include "audio_int.h"
-
-static uint32_t toui32(const char *str)
-{
- uint64_t ret;
- if (parse_uint_full(str, 10, &ret) || ret > UINT32_MAX) {
- dolog("Invalid integer value `%s'\n", str);
- exit(1);
- }
- return ret;
-}
-
-/* helper functions to convert env variables */
-static void get_bool(const char *env, bool *dst, bool *has_dst)
-{
- const char *val = getenv(env);
- if (val) {
- *dst = toui32(val) != 0;
- *has_dst = true;
- }
-}
-
-static void get_int(const char *env, uint32_t *dst, bool *has_dst)
-{
- const char *val = getenv(env);
- if (val) {
- *dst = toui32(val);
- *has_dst = true;
- }
-}
-
-static void get_str(const char *env, char **dst)
-{
- const char *val = getenv(env);
- if (val) {
- g_free(*dst);
- *dst = g_strdup(val);
- }
-}
-
-static void get_fmt(const char *env, AudioFormat *dst, bool *has_dst)
-{
- const char *val = getenv(env);
- if (val) {
- size_t i;
- for (i = 0; AudioFormat_lookup.size; ++i) {
- if (strcasecmp(val, AudioFormat_lookup.array[i]) == 0) {
- *dst = i;
- *has_dst = true;
- return;
- }
- }
-
- dolog("Invalid audio format `%s'\n", val);
- exit(1);
- }
-}
-
-
-#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_DSOUND)
-static void get_millis_to_usecs(const char *env, uint32_t *dst, bool *has_dst)
-{
- const char *val = getenv(env);
- if (val) {
- *dst = toui32(val) * 1000;
- *has_dst = true;
- }
-}
-#endif
-
-#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_COREAUDIO) || \
- defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
- defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
-static uint32_t frames_to_usecs(uint32_t frames,
- AudiodevPerDirectionOptions *pdo)
-{
- uint32_t freq = pdo->has_frequency ? pdo->frequency : 44100;
- return (frames * 1000000 + freq / 2) / freq;
-}
-#endif
-
-#ifdef CONFIG_AUDIO_COREAUDIO
-static void get_frames_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
- AudiodevPerDirectionOptions *pdo)
-{
- const char *val = getenv(env);
- if (val) {
- *dst = frames_to_usecs(toui32(val), pdo);
- *has_dst = true;
- }
-}
-#endif
-
-#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
- defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
-static uint32_t samples_to_usecs(uint32_t samples,
- AudiodevPerDirectionOptions *pdo)
-{
- uint32_t channels = pdo->has_channels ? pdo->channels : 2;
- return frames_to_usecs(samples / channels, pdo);
-}
-#endif
-
-#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL)
-static void get_samples_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
- AudiodevPerDirectionOptions *pdo)
-{
- const char *val = getenv(env);
- if (val) {
- *dst = samples_to_usecs(toui32(val), pdo);
- *has_dst = true;
- }
-}
-#endif
-
-#if defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
-static uint32_t bytes_to_usecs(uint32_t bytes, AudiodevPerDirectionOptions *pdo)
-{
- AudioFormat fmt = pdo->has_format ? pdo->format : AUDIO_FORMAT_S16;
- uint32_t bytes_per_sample = audioformat_bytes_per_sample(fmt);
- return samples_to_usecs(bytes / bytes_per_sample, pdo);
-}
-
-static void get_bytes_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
- AudiodevPerDirectionOptions *pdo)
-{
- const char *val = getenv(env);
- if (val) {
- *dst = bytes_to_usecs(toui32(val), pdo);
- *has_dst = true;
- }
-}
-#endif
-
-/* backend specific functions */
-
-#ifdef CONFIG_AUDIO_ALSA
-/* ALSA */
-static void handle_alsa_per_direction(
- AudiodevAlsaPerDirectionOptions *apdo, const char *prefix)
-{
- char buf[64];
- size_t len = strlen(prefix);
- bool size_in_usecs = false;
- bool dummy;
-
- memcpy(buf, prefix, len);
- strcpy(buf + len, "TRY_POLL");
- get_bool(buf, &apdo->try_poll, &apdo->has_try_poll);
-
- strcpy(buf + len, "DEV");
- get_str(buf, &apdo->dev);
-
- strcpy(buf + len, "SIZE_IN_USEC");
- get_bool(buf, &size_in_usecs, &dummy);
-
- strcpy(buf + len, "PERIOD_SIZE");
- get_int(buf, &apdo->period_length, &apdo->has_period_length);
- if (apdo->has_period_length && !size_in_usecs) {
- apdo->period_length = frames_to_usecs(
- apdo->period_length,
- qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
- }
-
- strcpy(buf + len, "BUFFER_SIZE");
- get_int(buf, &apdo->buffer_length, &apdo->has_buffer_length);
- if (apdo->has_buffer_length && !size_in_usecs) {
- apdo->buffer_length = frames_to_usecs(
- apdo->buffer_length,
- qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
- }
-}
-
-static void handle_alsa(Audiodev *dev)
-{
- AudiodevAlsaOptions *aopt = &dev->u.alsa;
- handle_alsa_per_direction(aopt->in, "QEMU_ALSA_ADC_");
- handle_alsa_per_direction(aopt->out, "QEMU_ALSA_DAC_");
-
- get_millis_to_usecs("QEMU_ALSA_THRESHOLD",
- &aopt->threshold, &aopt->has_threshold);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_COREAUDIO
-/* coreaudio */
-static void handle_coreaudio(Audiodev *dev)
-{
- get_frames_to_usecs(
- "QEMU_COREAUDIO_BUFFER_SIZE",
- &dev->u.coreaudio.out->buffer_length,
- &dev->u.coreaudio.out->has_buffer_length,
- qapi_AudiodevCoreaudioPerDirectionOptions_base(dev->u.coreaudio.out));
- get_int("QEMU_COREAUDIO_BUFFER_COUNT",
- &dev->u.coreaudio.out->buffer_count,
- &dev->u.coreaudio.out->has_buffer_count);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_DSOUND
-/* dsound */
-static void handle_dsound(Audiodev *dev)
-{
- get_millis_to_usecs("QEMU_DSOUND_LATENCY_MILLIS",
- &dev->u.dsound.latency, &dev->u.dsound.has_latency);
- get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_OUT",
- &dev->u.dsound.out->buffer_length,
- &dev->u.dsound.out->has_buffer_length,
- dev->u.dsound.out);
- get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_IN",
- &dev->u.dsound.in->buffer_length,
- &dev->u.dsound.in->has_buffer_length,
- dev->u.dsound.in);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_OSS
-/* OSS */
-static void handle_oss_per_direction(
- AudiodevOssPerDirectionOptions *opdo, const char *try_poll_env,
- const char *dev_env)
-{
- get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll);
- get_str(dev_env, &opdo->dev);
-
- get_bytes_to_usecs("QEMU_OSS_FRAGSIZE",
- &opdo->buffer_length, &opdo->has_buffer_length,
- qapi_AudiodevOssPerDirectionOptions_base(opdo));
- get_int("QEMU_OSS_NFRAGS", &opdo->buffer_count,
- &opdo->has_buffer_count);
-}
-
-static void handle_oss(Audiodev *dev)
-{
- AudiodevOssOptions *oopt = &dev->u.oss;
- handle_oss_per_direction(oopt->in, "QEMU_AUDIO_ADC_TRY_POLL",
- "QEMU_OSS_ADC_DEV");
- handle_oss_per_direction(oopt->out, "QEMU_AUDIO_DAC_TRY_POLL",
- "QEMU_OSS_DAC_DEV");
-
- get_bool("QEMU_OSS_MMAP", &oopt->try_mmap, &oopt->has_try_mmap);
- get_bool("QEMU_OSS_EXCLUSIVE", &oopt->exclusive, &oopt->has_exclusive);
- get_int("QEMU_OSS_POLICY", &oopt->dsp_policy, &oopt->has_dsp_policy);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_PA
-/* pulseaudio */
-static void handle_pa_per_direction(
- AudiodevPaPerDirectionOptions *ppdo, const char *env)
-{
- get_str(env, &ppdo->name);
-}
-
-static void handle_pa(Audiodev *dev)
-{
- handle_pa_per_direction(dev->u.pa.in, "QEMU_PA_SOURCE");
- handle_pa_per_direction(dev->u.pa.out, "QEMU_PA_SINK");
-
- get_samples_to_usecs(
- "QEMU_PA_SAMPLES", &dev->u.pa.in->buffer_length,
- &dev->u.pa.in->has_buffer_length,
- qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.in));
- get_samples_to_usecs(
- "QEMU_PA_SAMPLES", &dev->u.pa.out->buffer_length,
- &dev->u.pa.out->has_buffer_length,
- qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out));
-
- get_str("QEMU_PA_SERVER", &dev->u.pa.server);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_SDL
-/* SDL */
-static void handle_sdl(Audiodev *dev)
-{
- /* SDL is output only */
- get_samples_to_usecs("QEMU_SDL_SAMPLES", &dev->u.sdl.out->buffer_length,
- &dev->u.sdl.out->has_buffer_length,
- qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.out));
-}
-#endif
-
-/* wav */
-static void handle_wav(Audiodev *dev)
-{
- get_int("QEMU_WAV_FREQUENCY",
- &dev->u.wav.out->frequency, &dev->u.wav.out->has_frequency);
- get_fmt("QEMU_WAV_FORMAT", &dev->u.wav.out->format,
- &dev->u.wav.out->has_format);
- get_int("QEMU_WAV_DAC_FIXED_CHANNELS",
- &dev->u.wav.out->channels, &dev->u.wav.out->has_channels);
- get_str("QEMU_WAV_PATH", &dev->u.wav.path);
-}
-
-/* general */
-static void handle_per_direction(
- AudiodevPerDirectionOptions *pdo, const char *prefix)
-{
- char buf[64];
- size_t len = strlen(prefix);
-
- memcpy(buf, prefix, len);
- strcpy(buf + len, "FIXED_SETTINGS");
- get_bool(buf, &pdo->fixed_settings, &pdo->has_fixed_settings);
-
- strcpy(buf + len, "FIXED_FREQ");
- get_int(buf, &pdo->frequency, &pdo->has_frequency);
-
- strcpy(buf + len, "FIXED_FMT");
- get_fmt(buf, &pdo->format, &pdo->has_format);
-
- strcpy(buf + len, "FIXED_CHANNELS");
- get_int(buf, &pdo->channels, &pdo->has_channels);
-
- strcpy(buf + len, "VOICES");
- get_int(buf, &pdo->voices, &pdo->has_voices);
-}
-
-static AudiodevListEntry *legacy_opt(const char *drvname)
-{
- AudiodevListEntry *e = g_new0(AudiodevListEntry, 1);
- e->dev = g_new0(Audiodev, 1);
- e->dev->id = g_strdup(drvname);
- e->dev->driver = qapi_enum_parse(
- &AudiodevDriver_lookup, drvname, -1, &error_abort);
-
- audio_create_pdos(e->dev);
-
- handle_per_direction(audio_get_pdo_in(e->dev), "QEMU_AUDIO_ADC_");
- handle_per_direction(audio_get_pdo_out(e->dev), "QEMU_AUDIO_DAC_");
-
- /* Original description: Timer period in HZ (0 - use lowest possible) */
- get_int("QEMU_AUDIO_TIMER_PERIOD",
- &e->dev->timer_period, &e->dev->has_timer_period);
- if (e->dev->has_timer_period && e->dev->timer_period) {
- e->dev->timer_period = NANOSECONDS_PER_SECOND / 1000 /
- e->dev->timer_period;
- }
-
- switch (e->dev->driver) {
-#ifdef CONFIG_AUDIO_ALSA
- case AUDIODEV_DRIVER_ALSA:
- handle_alsa(e->dev);
- break;
-#endif
-
-#ifdef CONFIG_AUDIO_COREAUDIO
- case AUDIODEV_DRIVER_COREAUDIO:
- handle_coreaudio(e->dev);
- break;
-#endif
-
-#ifdef CONFIG_AUDIO_DSOUND
- case AUDIODEV_DRIVER_DSOUND:
- handle_dsound(e->dev);
- break;
-#endif
-
-#ifdef CONFIG_AUDIO_OSS
- case AUDIODEV_DRIVER_OSS:
- handle_oss(e->dev);
- break;
-#endif
-
-#ifdef CONFIG_AUDIO_PA
- case AUDIODEV_DRIVER_PA:
- handle_pa(e->dev);
- break;
-#endif
-
-#ifdef CONFIG_AUDIO_SDL
- case AUDIODEV_DRIVER_SDL:
- handle_sdl(e->dev);
- break;
-#endif
-
- case AUDIODEV_DRIVER_WAV:
- handle_wav(e->dev);
- break;
-
- default:
- break;
- }
-
- return e;
-}
-
-AudiodevListHead audio_handle_legacy_opts(void)
-{
- const char *drvname = getenv("QEMU_AUDIO_DRV");
- AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);
-
- if (drvname) {
- AudiodevListEntry *e;
- audio_driver *driver = audio_driver_lookup(drvname);
- if (!driver) {
- dolog("Unknown audio driver `%s'\n", drvname);
- exit(1);
- }
- e = legacy_opt(drvname);
- QSIMPLEQ_INSERT_TAIL(&head, e, next);
- } else {
- for (int i = 0; audio_prio_list[i]; i++) {
- audio_driver *driver = audio_driver_lookup(audio_prio_list[i]);
- if (driver && driver->can_be_default) {
- AudiodevListEntry *e = legacy_opt(driver->name);
- QSIMPLEQ_INSERT_TAIL(&head, e, next);
- }
- }
- if (QSIMPLEQ_EMPTY(&head)) {
- dolog("Internal error: no default audio driver available\n");
- exit(1);
- }
- }
-
- return head;
-}
-
-/* visitor to print -audiodev option */
-typedef struct {
- Visitor visitor;
-
- bool comma;
- GList *path;
-} LegacyPrintVisitor;
-
-static bool lv_start_struct(Visitor *v, const char *name, void **obj,
- size_t size, Error **errp)
-{
- LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
- lv->path = g_list_append(lv->path, g_strdup(name));
- return true;
-}
-
-static void lv_end_struct(Visitor *v, void **obj)
-{
- LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
- lv->path = g_list_delete_link(lv->path, g_list_last(lv->path));
-}
-
-static void lv_print_key(Visitor *v, const char *name)
-{
- GList *e;
- LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
- if (lv->comma) {
- putchar(',');
- } else {
- lv->comma = true;
- }
-
- for (e = lv->path; e; e = e->next) {
- if (e->data) {
- printf("%s.", (const char *) e->data);
- }
- }
-
- printf("%s=", name);
-}
-
-static bool lv_type_int64(Visitor *v, const char *name, int64_t *obj,
- Error **errp)
-{
- lv_print_key(v, name);
- printf("%" PRIi64, *obj);
- return true;
-}
-
-static bool lv_type_uint64(Visitor *v, const char *name, uint64_t *obj,
- Error **errp)
-{
- lv_print_key(v, name);
- printf("%" PRIu64, *obj);
- return true;
-}
-
-static bool lv_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
-{
- lv_print_key(v, name);
- printf("%s", *obj ? "on" : "off");
- return true;
-}
-
-static bool lv_type_str(Visitor *v, const char *name, char **obj, Error **errp)
-{
- const char *str = *obj;
- lv_print_key(v, name);
-
- while (*str) {
- if (*str == ',') {
- putchar(',');
- }
- putchar(*str++);
- }
- return true;
-}
-
-static void lv_complete(Visitor *v, void *opaque)
-{
- LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
- assert(lv->path == NULL);
-}
-
-static void lv_free(Visitor *v)
-{
- LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
-
- g_list_free_full(lv->path, g_free);
- g_free(lv);
-}
-
-static Visitor *legacy_visitor_new(void)
-{
- LegacyPrintVisitor *lv = g_new0(LegacyPrintVisitor, 1);
-
- lv->visitor.start_struct = lv_start_struct;
- lv->visitor.end_struct = lv_end_struct;
- /* lists not supported */
- lv->visitor.type_int64 = lv_type_int64;
- lv->visitor.type_uint64 = lv_type_uint64;
- lv->visitor.type_bool = lv_type_bool;
- lv->visitor.type_str = lv_type_str;
-
- lv->visitor.type = VISITOR_OUTPUT;
- lv->visitor.complete = lv_complete;
- lv->visitor.free = lv_free;
-
- return &lv->visitor;
-}
-
-void audio_legacy_help(void)
-{
- AudiodevListHead head;
- AudiodevListEntry *e;
-
- printf("Environment variable based configuration deprecated.\n");
- printf("Please use the new -audiodev option.\n");
-
- head = audio_handle_legacy_opts();
- printf("\nEquivalent -audiodev to your current environment variables:\n");
- if (!getenv("QEMU_AUDIO_DRV")) {
- printf("(Since you didn't specify QEMU_AUDIO_DRV, I'll list all "
- "possibilities)\n");
- }
-
- QSIMPLEQ_FOREACH(e, &head, next) {
- Visitor *v;
- Audiodev *dev = e->dev;
- printf("-audiodev ");
-
- v = legacy_visitor_new();
- visit_type_Audiodev(v, NULL, &dev, &error_abort);
- visit_free(v);
-
- printf("\n");
- }
- audio_free_audiodev_list(&head);
-}
diff --git a/audio/audio_template.h b/audio/audio_template.h
index dc0c74a..7ccfec0 100644
--- a/audio/audio_template.h
+++ b/audio/audio_template.h
@@ -37,11 +37,12 @@
#endif
static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
- struct audio_driver *drv)
+ struct audio_driver *drv, int min_voices)
{
int max_voices = glue (drv->max_voices_, TYPE);
size_t voice_size = glue(drv->voice_size_, TYPE);
+ glue (s->nb_hw_voices_, TYPE) = glue(audio_get_pdo_, TYPE)(s->dev)->voices;
if (glue (s->nb_hw_voices_, TYPE) > max_voices) {
if (!max_voices) {
#ifdef DAC
@@ -56,6 +57,12 @@
glue (s->nb_hw_voices_, TYPE) = max_voices;
}
+ if (glue (s->nb_hw_voices_, TYPE) < min_voices) {
+ dolog ("Bogus number of " NAME " voices %d, setting to %d\n",
+ glue (s->nb_hw_voices_, TYPE),
+ min_voices);
+ }
+
if (audio_bug(__func__, !voice_size && max_voices)) {
dolog ("drv=`%s' voice_size=0 max_voices=%d\n",
drv->name, max_voices);
diff --git a/audio/coreaudio.m b/audio/coreaudio.m
index 4695291..8cd129a 100644
--- a/audio/coreaudio.m
+++ b/audio/coreaudio.m
@@ -644,7 +644,7 @@
update_device_playback_state(core);
}
-static void *coreaudio_audio_init(Audiodev *dev)
+static void *coreaudio_audio_init(Audiodev *dev, Error **errp)
{
return dev;
}
@@ -673,7 +673,6 @@
.init = coreaudio_audio_init,
.fini = coreaudio_audio_fini,
.pcm_ops = &coreaudio_pcm_ops,
- .can_be_default = 1,
.max_voices_out = 1,
.max_voices_in = 0,
.voice_size_out = sizeof (coreaudioVoiceOut),
diff --git a/audio/dbusaudio.c b/audio/dbusaudio.c
index 7a11fbf..60fcf64 100644
--- a/audio/dbusaudio.c
+++ b/audio/dbusaudio.c
@@ -395,7 +395,7 @@
}
static void *
-dbus_audio_init(Audiodev *dev)
+dbus_audio_init(Audiodev *dev, Error **errp)
{
DBusAudio *da = g_new0(DBusAudio, 1);
@@ -676,7 +676,6 @@
.fini = dbus_audio_fini,
.set_dbus_server = dbus_audio_set_server,
.pcm_ops = &dbus_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(DBusVoiceOut),
diff --git a/audio/dsoundaudio.c b/audio/dsoundaudio.c
index 3fb67ec..f3bb48d 100644
--- a/audio/dsoundaudio.c
+++ b/audio/dsoundaudio.c
@@ -619,7 +619,7 @@
g_free(s);
}
-static void *dsound_audio_init(Audiodev *dev)
+static void *dsound_audio_init(Audiodev *dev, Error **errp)
{
int err;
HRESULT hr;
@@ -721,7 +721,6 @@
.init = dsound_audio_init,
.fini = dsound_audio_fini,
.pcm_ops = &dsound_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = 1,
.voice_size_out = sizeof (DSoundVoiceOut),
diff --git a/audio/jackaudio.c b/audio/jackaudio.c
index e1eaa34..974a3ca 100644
--- a/audio/jackaudio.c
+++ b/audio/jackaudio.c
@@ -645,7 +645,7 @@
}
#endif
-static void *qjack_init(Audiodev *dev)
+static void *qjack_init(Audiodev *dev, Error **errp)
{
assert(dev->driver == AUDIODEV_DRIVER_JACK);
return dev;
@@ -676,7 +676,6 @@
.init = qjack_init,
.fini = qjack_fini,
.pcm_ops = &jack_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(QJackOut),
diff --git a/audio/meson.build b/audio/meson.build
index df4d968..c8f6586 100644
--- a/audio/meson.build
+++ b/audio/meson.build
@@ -1,7 +1,6 @@
system_ss.add([spice_headers, files('audio.c')])
system_ss.add(files(
'audio-hmp-cmds.c',
- 'audio_legacy.c',
'mixeng.c',
'noaudio.c',
'wavaudio.c',
diff --git a/audio/noaudio.c b/audio/noaudio.c
index 4fdee5a..1b60d85 100644
--- a/audio/noaudio.c
+++ b/audio/noaudio.c
@@ -104,7 +104,7 @@
}
}
-static void *no_audio_init(Audiodev *dev)
+static void *no_audio_init(Audiodev *dev, Error **errp)
{
return &no_audio_init;
}
@@ -135,7 +135,6 @@
.init = no_audio_init,
.fini = no_audio_fini,
.pcm_ops = &no_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (NoVoiceOut),
diff --git a/audio/ossaudio.c b/audio/ossaudio.c
index e8d732b..3f31852 100644
--- a/audio/ossaudio.c
+++ b/audio/ossaudio.c
@@ -28,6 +28,7 @@
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/host-utils.h"
+#include "qapi/error.h"
#include "audio.h"
#include "trace.h"
@@ -736,7 +737,7 @@
}
}
-static void *oss_audio_init(Audiodev *dev)
+static void *oss_audio_init(Audiodev *dev, Error **errp)
{
AudiodevOssOptions *oopts;
assert(dev->driver == AUDIODEV_DRIVER_OSS);
@@ -745,8 +746,12 @@
oss_init_per_direction(oopts->in);
oss_init_per_direction(oopts->out);
- if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0 ||
- access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
+ if (access(oopts->in->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
+ error_setg_errno(errp, errno, "%s not accessible", oopts->in->dev ?: "/dev/dsp");
+ return NULL;
+ }
+ if (access(oopts->out->dev ?: "/dev/dsp", R_OK | W_OK) < 0) {
+ error_setg_errno(errp, errno, "%s not accessible", oopts->out->dev ?: "/dev/dsp");
return NULL;
}
return dev;
@@ -779,7 +784,6 @@
.init = oss_audio_init,
.fini = oss_audio_fini,
.pcm_ops = &oss_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (OSSVoiceOut),
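
The OSS conversion above splits the combined access() test so the error can name exactly which device node failed and attach the errno. A minimal POSIX illustration of the same idea, with example paths only:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Check one device at a time so the message names the path that failed. */
static int check_dev(const char *path)
{
    if (access(path, R_OK | W_OK) < 0) {
        fprintf(stderr, "%s not accessible: %s\n", path, strerror(errno));
        return -1;
    }
    return 0;
}

int main(void)
{
    /* Example paths; on most modern systems /dev/dsp will not exist. */
    if (check_dev("/dev/dsp") < 0 || check_dev("/dev/null") < 0) {
        return 1;
    }
    return 0;
}
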
diff --git a/audio/paaudio.c b/audio/paaudio.c
index 529b39d..f3193b0 100644
--- a/audio/paaudio.c
+++ b/audio/paaudio.c
@@ -3,7 +3,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "audio.h"
-#include "qapi/opts-visitor.h"
+#include "qapi/error.h"
#include <pulse/pulseaudio.h>
@@ -818,7 +818,7 @@
return NULL;
}
-static void *qpa_audio_init(Audiodev *dev)
+static void *qpa_audio_init(Audiodev *dev, Error **errp)
{
paaudio *g;
AudiodevPaOptions *popts = &dev->u.pa;
@@ -834,10 +834,12 @@
runtime = getenv("XDG_RUNTIME_DIR");
if (!runtime) {
+ error_setg(errp, "XDG_RUNTIME_DIR not set");
return NULL;
}
snprintf(pidfile, sizeof(pidfile), "%s/pulse/pid", runtime);
if (stat(pidfile, &st) != 0) {
+ error_setg_errno(errp, errno, "could not stat pidfile %s", pidfile);
return NULL;
}
}
@@ -867,6 +869,7 @@
}
if (!g->conn) {
g_free(g);
+ error_setg(errp, "could not connect to PulseAudio server");
return NULL;
}
@@ -928,7 +931,6 @@
.init = qpa_audio_init,
.fini = qpa_audio_fini,
.pcm_ops = &qpa_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof (PAVoiceOut),
diff --git a/audio/pwaudio.c b/audio/pwaudio.c
index b6a3873..3ce5f65 100644
--- a/audio/pwaudio.c
+++ b/audio/pwaudio.c
@@ -13,6 +13,7 @@
#include "audio.h"
#include <errno.h>
#include "qemu/error-report.h"
+#include "qapi/error.h"
#include <spa/param/audio/format-utils.h>
#include <spa/utils/ringbuffer.h>
#include <spa/utils/result.h>
@@ -736,7 +737,7 @@
};
static void *
-qpw_audio_init(Audiodev *dev)
+qpw_audio_init(Audiodev *dev, Error **errp)
{
g_autofree pwaudio *pw = g_new0(pwaudio, 1);
@@ -748,19 +749,19 @@
pw->dev = dev;
pw->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
if (pw->thread_loop == NULL) {
- error_report("Could not create PipeWire loop: %s", g_strerror(errno));
+ error_setg_errno(errp, errno, "Could not create PipeWire loop");
goto fail;
}
pw->context =
pw_context_new(pw_thread_loop_get_loop(pw->thread_loop), NULL, 0);
if (pw->context == NULL) {
- error_report("Could not create PipeWire context: %s", g_strerror(errno));
+ error_setg_errno(errp, errno, "Could not create PipeWire context");
goto fail;
}
if (pw_thread_loop_start(pw->thread_loop) < 0) {
- error_report("Could not start PipeWire loop: %s", g_strerror(errno));
+ error_setg_errno(errp, errno, "Could not start PipeWire loop");
goto fail;
}
@@ -769,13 +770,13 @@
pw->core = pw_context_connect(pw->context, NULL, 0);
if (pw->core == NULL) {
pw_thread_loop_unlock(pw->thread_loop);
- goto fail;
+ goto fail_error;
}
if (pw_core_add_listener(pw->core, &pw->core_listener,
&core_events, pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
- goto fail;
+ goto fail_error;
}
if (wait_resync(pw) < 0) {
pw_thread_loop_unlock(pw->thread_loop);
@@ -785,8 +786,9 @@
return g_steal_pointer(&pw);
+fail_error:
+ error_setg(errp, "Failed to initialize PW context");
fail:
- AUD_log(AUDIO_CAP, "Failed to initialize PW context");
if (pw->thread_loop) {
pw_thread_loop_stop(pw->thread_loop);
}
@@ -841,7 +843,6 @@
.init = qpw_audio_init,
.fini = qpw_audio_fini,
.pcm_ops = &qpw_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(PWVoiceOut),
diff --git a/audio/sdlaudio.c b/audio/sdlaudio.c
index 68a237b..641357e 100644
--- a/audio/sdlaudio.c
+++ b/audio/sdlaudio.c
@@ -26,6 +26,7 @@
#include <SDL.h>
#include <SDL_thread.h>
#include "qemu/module.h"
+#include "qapi/error.h"
#include "audio.h"
#ifndef _WIN32
@@ -449,10 +450,10 @@
SDL_PauseAudioDevice(sdl->devid, !enable);
}
-static void *sdl_audio_init(Audiodev *dev)
+static void *sdl_audio_init(Audiodev *dev, Error **errp)
{
if (SDL_InitSubSystem (SDL_INIT_AUDIO)) {
- sdl_logerr ("SDL failed to initialize audio subsystem\n");
+ error_setg(errp, "SDL failed to initialize audio subsystem");
return NULL;
}
@@ -493,7 +494,6 @@
.init = sdl_audio_init,
.fini = sdl_audio_fini,
.pcm_ops = &sdl_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(SDLVoiceOut),
diff --git a/audio/sndioaudio.c b/audio/sndioaudio.c
index 3fde01f..8eb35e1 100644
--- a/audio/sndioaudio.c
+++ b/audio/sndioaudio.c
@@ -518,7 +518,7 @@
sndio_fini(self);
}
-static void *sndio_audio_init(Audiodev *dev)
+static void *sndio_audio_init(Audiodev *dev, Error **errp)
{
assert(dev->driver == AUDIODEV_DRIVER_SNDIO);
return dev;
@@ -550,7 +550,6 @@
.init = sndio_audio_init,
.fini = sndio_audio_fini,
.pcm_ops = &sndio_pcm_ops,
- .can_be_default = 1,
.max_voices_out = INT_MAX,
.max_voices_in = INT_MAX,
.voice_size_out = sizeof(SndioVoice),
diff --git a/audio/spiceaudio.c b/audio/spiceaudio.c
index d17ef1a..7f02f72 100644
--- a/audio/spiceaudio.c
+++ b/audio/spiceaudio.c
@@ -22,6 +22,7 @@
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
+#include "qapi/error.h"
#include "ui/qemu-spice.h"
#define AUDIO_CAP "spice"
@@ -71,11 +72,13 @@
.base.minor_version = SPICE_INTERFACE_RECORD_MINOR,
};
-static void *spice_audio_init(Audiodev *dev)
+static void *spice_audio_init(Audiodev *dev, Error **errp)
{
if (!using_spice) {
+ error_setg(errp, "Cannot use spice audio without -spice");
return NULL;
}
+
return &spice_audio_init;
}
diff --git a/audio/wavaudio.c b/audio/wavaudio.c
index 6445a2c..ea20fed 100644
--- a/audio/wavaudio.c
+++ b/audio/wavaudio.c
@@ -182,7 +182,7 @@
}
}
-static void *wav_audio_init(Audiodev *dev)
+static void *wav_audio_init(Audiodev *dev, Error **errp)
{
assert(dev->driver == AUDIODEV_DRIVER_WAV);
return dev;
@@ -208,7 +208,6 @@
.init = wav_audio_init,
.fini = wav_audio_fini,
.pcm_ops = &wav_pcm_ops,
- .can_be_default = 0,
.max_voices_out = 1,
.max_voices_in = 0,
.voice_size_out = sizeof (WAVVoiceOut),
diff --git a/block.c b/block.c
index e7f349b..af04c8a 100644
--- a/block.c
+++ b/block.c
@@ -3072,18 +3072,19 @@
&local_err);
if (ret < 0 && child_class->change_aio_ctx) {
- Transaction *tran = tran_new();
+ Transaction *aio_ctx_tran = tran_new();
GHashTable *visited = g_hash_table_new(NULL, NULL);
bool ret_child;
g_hash_table_add(visited, new_child);
ret_child = child_class->change_aio_ctx(new_child, child_ctx,
- visited, tran, NULL);
+ visited, aio_ctx_tran,
+ NULL);
if (ret_child == true) {
error_free(local_err);
ret = 0;
}
- tran_finalize(tran, ret_child == true ? 0 : -1);
+ tran_finalize(aio_ctx_tran, ret_child == true ? 0 : -1);
g_hash_table_destroy(visited);
}
@@ -6208,12 +6209,12 @@
QLIST_FOREACH(drv, &bdrv_drivers, list) {
if (drv->format_name) {
bool found = false;
- int i = count;
if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, read_only)) {
continue;
}
+ i = count;
while (formats && i && !found) {
found = !strcmp(formats[--i], drv->format_name);
}
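
The second block.c hunk above replaces an inner declaration that shadowed the surrounding loop counter with a plain assignment to the existing variable, moved past the whitelist check. A tiny standalone example of the shadowing pitfall being removed (variable names are illustrative):

#include <stdio.h>

int main(void)
{
    int i = 0;
    int count = 3;

    {
        /* Shadowing: this inner 'i' hides the outer one, so the outer
         * variable is never updated (the pattern the hunk removes). */
        int i = count;
        (void)i;
    }
    printf("after shadowed block: i = %d\n", i);   /* still 0 */

    {
        i = count;        /* the fix: assign to the existing variable */
    }
    printf("after assignment:    i = %d\n", i);    /* now 3 */
    return 0;
}
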
diff --git a/block/io.c b/block/io.c
index 209a6da..e7f9448 100644
--- a/block/io.c
+++ b/block/io.c
@@ -387,7 +387,8 @@
bdrv_do_drained_begin(bs, parent, false);
}
-void bdrv_drained_begin(BlockDriverState *bs)
+void coroutine_mixed_fn
+bdrv_drained_begin(BlockDriverState *bs)
{
IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, NULL, true);
@@ -506,7 +507,7 @@
}
}
-void bdrv_drain_all_begin(void)
+void coroutine_mixed_fn bdrv_drain_all_begin(void)
{
BlockDriverState *bs = NULL;
diff --git a/block/monitor/bitmap-qmp-cmds.c b/block/monitor/bitmap-qmp-cmds.c
index 55f778f..70d01a3 100644
--- a/block/monitor/bitmap-qmp-cmds.c
+++ b/block/monitor/bitmap-qmp-cmds.c
@@ -258,37 +258,38 @@
bdrv_disable_dirty_bitmap(bitmap);
}
-BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *node, const char *target,
+BdrvDirtyBitmap *block_dirty_bitmap_merge(const char *dst_node,
+ const char *dst_bitmap,
BlockDirtyBitmapOrStrList *bms,
HBitmap **backup, Error **errp)
{
BlockDriverState *bs;
BdrvDirtyBitmap *dst, *src;
BlockDirtyBitmapOrStrList *lst;
+ const char *src_node, *src_bitmap;
HBitmap *local_backup = NULL;
GLOBAL_STATE_CODE();
- dst = block_dirty_bitmap_lookup(node, target, &bs, errp);
+ dst = block_dirty_bitmap_lookup(dst_node, dst_bitmap, &bs, errp);
if (!dst) {
return NULL;
}
for (lst = bms; lst; lst = lst->next) {
switch (lst->value->type) {
- const char *name, *node;
case QTYPE_QSTRING:
- name = lst->value->u.local;
- src = bdrv_find_dirty_bitmap(bs, name);
+ src_bitmap = lst->value->u.local;
+ src = bdrv_find_dirty_bitmap(bs, src_bitmap);
if (!src) {
- error_setg(errp, "Dirty bitmap '%s' not found", name);
+ error_setg(errp, "Dirty bitmap '%s' not found", src_bitmap);
goto fail;
}
break;
case QTYPE_QDICT:
- node = lst->value->u.external.node;
- name = lst->value->u.external.name;
- src = block_dirty_bitmap_lookup(node, name, NULL, errp);
+ src_node = lst->value->u.external.node;
+ src_bitmap = lst->value->u.external.name;
+ src = block_dirty_bitmap_lookup(src_node, src_bitmap, NULL, errp);
if (!src) {
goto fail;
}
diff --git a/block/nbd.c b/block/nbd.c
index cc48580..4a7f37d 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -339,7 +339,7 @@
* We have connected, but must fail for other reasons.
* Send NBD_CMD_DISC as a courtesy to the server.
*/
- NBDRequest request = { .type = NBD_CMD_DISC };
+ NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };
nbd_send_request(s->ioc, &request);
@@ -463,7 +463,8 @@
nbd_channel_error(s, ret);
return ret;
}
- if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
+ if (nbd_reply_is_structured(&s->reply) &&
+ s->info.mode < NBD_MODE_STRUCTURED) {
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
@@ -519,6 +520,7 @@
qemu_co_mutex_lock(&s->send_mutex);
request->cookie = INDEX_TO_COOKIE(i);
+ request->mode = s->info.mode;
assert(s->ioc);
@@ -608,7 +610,7 @@
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
NBDStructuredReplyChunk *chunk,
uint8_t *payload, uint64_t orig_length,
- NBDExtent *extent, Error **errp)
+ NBDExtent32 *extent, Error **errp)
{
uint32_t context_id;
@@ -866,7 +868,7 @@
}
/* handle structured reply chunk */
- assert(s->info.structured_reply);
+ assert(s->info.mode >= NBD_MODE_STRUCTURED);
chunk = &s->reply.structured;
if (chunk->type == NBD_REPLY_TYPE_NONE) {
@@ -1070,7 +1072,8 @@
void *payload = NULL;
Error *local_err = NULL;
- NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, s->info.structured_reply,
+ NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
+ s->info.mode >= NBD_MODE_STRUCTURED,
qiov, &reply, &payload)
{
int ret;
@@ -1115,7 +1118,7 @@
static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
- uint64_t length, NBDExtent *extent,
+ uint64_t length, NBDExtent32 *extent,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
@@ -1302,10 +1305,11 @@
NBDRequest request = {
.type = NBD_CMD_WRITE_ZEROES,
.from = offset,
- .len = bytes, /* .len is uint32_t actually */
+ .len = bytes,
};
- assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */
+ /* rely on max_pwrite_zeroes */
+ assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
@@ -1352,10 +1356,11 @@
NBDRequest request = {
.type = NBD_CMD_TRIM,
.from = offset,
- .len = bytes, /* len is uint32_t */
+ .len = bytes,
};
- assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */
+ /* rely on max_pdiscard */
+ assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
@@ -1370,15 +1375,14 @@
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
int ret, request_ret;
- NBDExtent extent = { 0 };
+ NBDExtent32 extent = { 0 };
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
Error *local_err = NULL;
NBDRequest request = {
.type = NBD_CMD_BLOCK_STATUS,
.from = offset,
- .len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
- MIN(bytes, s->info.size - offset)),
+ .len = MIN(bytes, s->info.size - offset),
.flags = NBD_CMD_FLAG_REQ_ONE,
};
@@ -1388,6 +1392,10 @@
*file = bs;
return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}
+ if (s->info.mode < NBD_MODE_EXTENDED) {
+ request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
+ request.len);
+ }
/*
* Work around the fact that the block layer doesn't do
@@ -1463,7 +1471,7 @@
static void nbd_client_close(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
- NBDRequest request = { .type = NBD_CMD_DISC };
+ NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };
if (s->ioc) {
nbd_send_request(s->ioc, &request);
@@ -1952,6 +1960,14 @@
bs->bl.max_pwrite_zeroes = max;
bs->bl.max_transfer = max;
+ /*
+ * Assume that if the server supports extended headers, it also
+ * supports unlimited size zero and trim commands.
+ */
+ if (s->info.mode >= NBD_MODE_EXTENDED) {
+ bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
+ }
+
if (s->info.opt_block &&
s->info.opt_block > bs->bl.opt_transfer) {
bs->bl.opt_transfer = s->info.opt_block;
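
The NBD client changes above gate behaviour on an ordered negotiation mode (mode >= NBD_MODE_STRUCTURED, mode >= NBD_MODE_EXTENDED) rather than per-feature booleans, and only clamp block-status request lengths to 32 bits when extended headers were not negotiated. A standalone sketch of that pattern is below; the NegMode enum and clamp_request_len() are made-up illustrations, not QEMU's NBDMode.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical ordered negotiation modes; later modes imply earlier ones. */
typedef enum {
    MODE_OLDSTYLE,
    MODE_SIMPLE,
    MODE_STRUCTURED,
    MODE_EXTENDED,              /* 64-bit lengths allowed on the wire */
} NegMode;

static uint64_t clamp_request_len(NegMode mode, uint64_t len)
{
    if (mode < MODE_EXTENDED && len > UINT32_MAX) {
        return UINT32_MAX;      /* without extended headers, stay within 32 bits */
    }
    return len;
}

int main(void)
{
    uint64_t want = 5ULL * 1024 * 1024 * 1024;     /* a 5 GiB request */

    printf("structured: %llu\n",
           (unsigned long long)clamp_request_len(MODE_STRUCTURED, want));
    printf("extended:   %llu\n",
           (unsigned long long)clamp_request_len(MODE_EXTENDED, want));
    return 0;
}
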
diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c
index 037fa2d..ffd5cd3 100644
--- a/block/qcow2-bitmap.c
+++ b/block/qcow2-bitmap.c
@@ -1555,7 +1555,6 @@
FOR_EACH_DIRTY_BITMAP(bs, bitmap) {
const char *name = bdrv_dirty_bitmap_name(bitmap);
uint32_t granularity = bdrv_dirty_bitmap_granularity(bitmap);
- Qcow2Bitmap *bm;
if (!bdrv_dirty_bitmap_get_persistence(bitmap) ||
bdrv_dirty_bitmap_inconsistent(bitmap)) {
@@ -1625,7 +1624,7 @@
/* allocate clusters and store bitmaps */
QSIMPLEQ_FOREACH(bm, bm_list, entry) {
- BdrvDirtyBitmap *bitmap = bm->dirty_bitmap;
+ bitmap = bm->dirty_bitmap;
if (bitmap == NULL || bdrv_dirty_bitmap_readonly(bitmap)) {
continue;
diff --git a/block/qcow2.c b/block/qcow2.c
index af43d59d..5a3c537 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -5288,7 +5288,7 @@
return spec_info;
}
-static int qcow2_has_zero_init(BlockDriverState *bs)
+static int coroutine_mixed_fn qcow2_has_zero_init(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
bool preallocated;
diff --git a/block/qed.c b/block/qed.c
index b2604d9..45ae320 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -570,8 +570,8 @@
qemu_co_mutex_unlock(&s->table_lock);
}
-static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
- Error **errp)
+static int coroutine_mixed_fn bdrv_qed_open(BlockDriverState *bs, QDict *options,
+ int flags, Error **errp)
{
QEDOpenCo qoc = {
.bs = bs,
diff --git a/block/rbd.c b/block/rbd.c
index 9786714..472ca05 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -1290,7 +1290,7 @@
* operations that exceed the current size.
*/
if (offset + bytes > s->image_size) {
- int r = qemu_rbd_resize(bs, offset + bytes);
+ r = qemu_rbd_resize(bs, offset + bytes);
if (r < 0) {
return r;
}
diff --git a/block/stream.c b/block/stream.c
index e4da214..133cb72 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -292,7 +292,6 @@
/* Make sure that the image is opened in read-write mode */
bs_read_only = bdrv_is_read_only(bs);
if (bs_read_only) {
- int ret;
/* Hold the chain during reopen */
if (bdrv_freeze_backing_chain(bs, above_base, errp) < 0) {
return;
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 3eda4c4..f5c0fac 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -317,8 +317,8 @@
* @tgm: the current ThrottleGroupMember
* @direction: the ThrottleDirection
*/
-static void schedule_next_request(ThrottleGroupMember *tgm,
- ThrottleDirection direction)
+static void coroutine_mixed_fn schedule_next_request(ThrottleGroupMember *tgm,
+ ThrottleDirection direction)
{
ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
diff --git a/block/trace-events b/block/trace-events
index 6f121b7..925aa55 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -167,7 +167,7 @@
nbd_parse_blockstatus_compliance(const char *err) "ignoring extra data from non-compliant server: %s"
nbd_structured_read_compliance(const char *type) "server sent non-compliant unaligned read %s chunk"
nbd_read_reply_entry_fail(int ret, const char *err) "ret = %d, err: %s"
-nbd_co_request_fail(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name, int ret, const char *err) "Request failed { .from = %" PRIu64", .len = %" PRIu32 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s"
+nbd_co_request_fail(uint64_t from, uint64_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name, int ret, const char *err) "Request failed { .from = %" PRIu64", .len = %" PRIu64 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s"
nbd_client_handshake(const char *export_name) "export '%s'"
nbd_client_handshake_success(const char *export_name) "export '%s'"
nbd_reconnect_attempt(unsigned in_flight) "in_flight %u"
diff --git a/block/vdi.c b/block/vdi.c
index 6c35309..934e1b8 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -634,7 +634,6 @@
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
/* Allocate new block and write to it. */
- uint64_t data_offset;
qemu_co_rwlock_upgrade(&s->bmap_lock);
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (VDI_IS_ALLOCATED(bmap_entry)) {
@@ -700,7 +699,7 @@
/* One or more new blocks were allocated. */
VdiHeader *header;
uint8_t *base;
- uint64_t offset;
+ uint64_t bmap_offset;
uint32_t n_sectors;
g_free(block);
@@ -723,11 +722,11 @@
bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));
bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));
n_sectors = bmap_last - bmap_first + 1;
- offset = s->bmap_sector + bmap_first;
+ bmap_offset = s->bmap_sector + bmap_first;
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
- ret = bdrv_co_pwrite(bs->file, offset * SECTOR_SIZE,
+ ret = bdrv_co_pwrite(bs->file, bmap_offset * SECTOR_SIZE,
n_sectors * SECTOR_SIZE, base, 0);
}
diff --git a/block/vvfat.c b/block/vvfat.c
index 0ddc91f..856b479 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -777,7 +777,6 @@
while((entry=readdir(dir))) {
unsigned int length=strlen(dirname)+2+strlen(entry->d_name);
char* buffer;
- direntry_t* direntry;
struct stat st;
int is_dot=!strcmp(entry->d_name,".");
int is_dotdot=!strcmp(entry->d_name,"..");
@@ -857,7 +856,7 @@
/* fill with zeroes up to the end of the cluster */
while(s->directory.next%(0x10*s->sectors_per_cluster)) {
- direntry_t* direntry=array_get_next(&(s->directory));
+ direntry = array_get_next(&(s->directory));
memset(direntry,0,sizeof(direntry_t));
}
@@ -1962,24 +1961,24 @@
* This is horribly inefficient, but that is okay, since
* it is rarely executed, if at all.
*/
- int64_t offset = cluster2sector(s, cluster_num);
+ int64_t offs = cluster2sector(s, cluster_num);
vvfat_close_current_file(s);
for (i = 0; i < s->sectors_per_cluster; i++) {
int res;
res = bdrv_is_allocated(s->qcow->bs,
- (offset + i) * BDRV_SECTOR_SIZE,
+ (offs + i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
if (res < 0) {
return -1;
}
if (!res) {
- res = vvfat_read(s->bs, offset, s->cluster_buffer, 1);
+ res = vvfat_read(s->bs, offs, s->cluster_buffer, 1);
if (res) {
return -1;
}
- res = bdrv_co_pwrite(s->qcow, offset * BDRV_SECTOR_SIZE,
+ res = bdrv_co_pwrite(s->qcow, offs * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, s->cluster_buffer,
0);
if (res < 0) {
@@ -2467,8 +2466,9 @@
for (c = first_cluster; !fat_eof(s, c); c = modified_fat_get(s, c)) {
direntry_t *first_direntry;
- void* direntry = array_get(&(s->directory), current_dir_index);
- int ret = vvfat_read(s->bs, cluster2sector(s, c), direntry,
+
+ direntry = array_get(&(s->directory), current_dir_index);
+ ret = vvfat_read(s->bs, cluster2sector(s, c), (uint8_t *)direntry,
s->sectors_per_cluster);
if (ret)
return ret;
@@ -2690,12 +2690,12 @@
direntry_t* direntry = array_get(&(s->directory),
mapping->info.dir.first_dir_index);
uint32_t c = mapping->begin;
- int i = 0;
+ int j = 0;
/* recurse */
while (!fat_eof(s, c)) {
do {
- direntry_t* d = direntry + i;
+ direntry_t *d = direntry + j;
if (is_file(d) || (is_directory(d) && !is_dot(d))) {
int l;
@@ -2716,8 +2716,8 @@
schedule_rename(s, m->begin, new_path);
}
- i++;
- } while((i % (0x10 * s->sectors_per_cluster)) != 0);
+ j++;
+ } while (j % (0x10 * s->sectors_per_cluster) != 0);
c = fat_get(s, c);
}
}
@@ -2804,16 +2804,16 @@
int begin = commit->param.new_file.first_cluster;
mapping_t* mapping = find_mapping_for_cluster(s, begin);
direntry_t* entry;
- int i;
+ int j;
/* find direntry */
- for (i = 0; i < s->directory.next; i++) {
- entry = array_get(&(s->directory), i);
+ for (j = 0; j < s->directory.next; j++) {
+ entry = array_get(&(s->directory), j);
if (is_file(entry) && begin_of_direntry(entry) == begin)
break;
}
- if (i >= s->directory.next) {
+ if (j >= s->directory.next) {
fail = -6;
continue;
}
@@ -2833,8 +2833,9 @@
mapping->mode = MODE_NORMAL;
mapping->info.file.offset = 0;
- if (commit_one_file(s, i, 0))
+ if (commit_one_file(s, j, 0)) {
fail = -7;
+ }
break;
}
diff --git a/bsd-user/bsd-mem.c b/bsd-user/bsd-mem.c
new file mode 100644
index 0000000..2ab1334
--- /dev/null
+++ b/bsd-user/bsd-mem.c
@@ -0,0 +1,104 @@
+/*
+ * memory management system conversion routines
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "qemu-bsd.h"
+
+struct bsd_shm_regions bsd_shm_regions[N_BSD_SHM_REGIONS];
+
+abi_ulong target_brk;
+abi_ulong initial_target_brk;
+
+void target_set_brk(abi_ulong new_brk)
+{
+ target_brk = TARGET_PAGE_ALIGN(new_brk);
+ initial_target_brk = target_brk;
+}
+
+void target_to_host_ipc_perm__locked(struct ipc_perm *host_ip,
+ struct target_ipc_perm *target_ip)
+{
+ __get_user(host_ip->cuid, &target_ip->cuid);
+ __get_user(host_ip->cgid, &target_ip->cgid);
+ __get_user(host_ip->uid, &target_ip->uid);
+ __get_user(host_ip->gid, &target_ip->gid);
+ __get_user(host_ip->mode, &target_ip->mode);
+ __get_user(host_ip->seq, &target_ip->seq);
+ __get_user(host_ip->key, &target_ip->key);
+}
+
+abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
+ abi_ulong target_addr)
+{
+ struct target_shmid_ds *target_sd;
+
+ if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+
+ target_to_host_ipc_perm__locked(&(host_sd->shm_perm),
+ &(target_sd->shm_perm));
+
+ __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
+ __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
+ __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
+ __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
+ __get_user(host_sd->shm_atime, &target_sd->shm_atime);
+ __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
+ __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
+ unlock_user_struct(target_sd, target_addr, 0);
+
+ return 0;
+}
+
+void host_to_target_ipc_perm__locked(struct target_ipc_perm *target_ip,
+ struct ipc_perm *host_ip)
+{
+ __put_user(host_ip->cuid, &target_ip->cuid);
+ __put_user(host_ip->cgid, &target_ip->cgid);
+ __put_user(host_ip->uid, &target_ip->uid);
+ __put_user(host_ip->gid, &target_ip->gid);
+ __put_user(host_ip->mode, &target_ip->mode);
+ __put_user(host_ip->seq, &target_ip->seq);
+ __put_user(host_ip->key, &target_ip->key);
+}
+
+abi_long host_to_target_shmid_ds(abi_ulong target_addr,
+ struct shmid_ds *host_sd)
+{
+ struct target_shmid_ds *target_sd;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+
+ host_to_target_ipc_perm__locked(&(target_sd->shm_perm),
+ &(host_sd->shm_perm));
+
+ __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
+ __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
+ __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
+ __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
+ __put_user(host_sd->shm_atime, &target_sd->shm_atime);
+ __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
+ __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
+ unlock_user_struct(target_sd, target_addr, 1);
+
+ return 0;
+}
diff --git a/bsd-user/bsd-mem.h b/bsd-user/bsd-mem.h
new file mode 100644
index 0000000..c3e72e3
--- /dev/null
+++ b/bsd-user/bsd-mem.h
@@ -0,0 +1,452 @@
+/*
+ * memory management system call shims and definitions
+ *
+ * Copyright (c) 2013-15 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef BSD_USER_BSD_MEM_H
+#define BSD_USER_BSD_MEM_H
+
+#include <sys/types.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/shm.h>
+#include <fcntl.h>
+
+#include "qemu-bsd.h"
+
+extern struct bsd_shm_regions bsd_shm_regions[];
+extern abi_ulong target_brk;
+extern abi_ulong initial_target_brk;
+
+/* mmap(2) */
+static inline abi_long do_bsd_mmap(void *cpu_env, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7,
+ abi_long arg8)
+{
+ if (regpairs_aligned(cpu_env) != 0) {
+ arg6 = arg7;
+ arg7 = arg8;
+ }
+ return get_errno(target_mmap(arg1, arg2, arg3,
+ target_to_host_bitmask(arg4, mmap_flags_tbl),
+ arg5, target_arg64(arg6, arg7)));
+}
+
+/* munmap(2) */
+static inline abi_long do_bsd_munmap(abi_long arg1, abi_long arg2)
+{
+ return get_errno(target_munmap(arg1, arg2));
+}
+
+/* mprotect(2) */
+static inline abi_long do_bsd_mprotect(abi_long arg1, abi_long arg2,
+ abi_long arg3)
+{
+ return get_errno(target_mprotect(arg1, arg2, arg3));
+}
+
+/* msync(2) */
+static inline abi_long do_bsd_msync(abi_long addr, abi_long len, abi_long flags)
+{
+ if (!guest_range_valid_untagged(addr, len)) {
+ /* It seems odd, but POSIX wants this to be ENOMEM */
+ return -TARGET_ENOMEM;
+ }
+
+ return get_errno(msync(g2h_untagged(addr), len, flags));
+}
+
+/* mlock(2) */
+static inline abi_long do_bsd_mlock(abi_long arg1, abi_long arg2)
+{
+ if (!guest_range_valid_untagged(arg1, arg2)) {
+ return -TARGET_EINVAL;
+ }
+ return get_errno(mlock(g2h_untagged(arg1), arg2));
+}
+
+/* munlock(2) */
+static inline abi_long do_bsd_munlock(abi_long arg1, abi_long arg2)
+{
+ if (!guest_range_valid_untagged(arg1, arg2)) {
+ return -TARGET_EINVAL;
+ }
+ return get_errno(munlock(g2h_untagged(arg1), arg2));
+}
+
+/* mlockall(2) */
+static inline abi_long do_bsd_mlockall(abi_long arg1)
+{
+ return get_errno(mlockall(arg1));
+}
+
+/* munlockall(2) */
+static inline abi_long do_bsd_munlockall(void)
+{
+ return get_errno(munlockall());
+}
+
+/* madvise(2) */
+static inline abi_long do_bsd_madvise(abi_long arg1, abi_long arg2,
+ abi_long arg3)
+{
+ abi_ulong len;
+ int ret = 0;
+ abi_long start = arg1;
+ abi_long len_in = arg2;
+ abi_long advice = arg3;
+
+ if (start & ~TARGET_PAGE_MASK) {
+ return -TARGET_EINVAL;
+ }
+ if (len_in == 0) {
+ return 0;
+ }
+ len = TARGET_PAGE_ALIGN(len_in);
+ if (len == 0 || !guest_range_valid_untagged(start, len)) {
+ return -TARGET_EINVAL;
+ }
+
+ /*
+ * Most advice values are hints, so ignoring and returning success is ok.
+ *
+ * However, some advice values such as MADV_DONTNEED, are not hints and
+ * need to be emulated.
+ *
+ * A straight passthrough for those may not be safe because qemu sometimes
+ * turns private file-backed mappings into anonymous mappings.
+ * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
+ * same semantics for the host as for the guest.
+ *
+ * MADV_DONTNEED is passed through, if possible.
+ * If passthrough isn't possible, we nevertheless (wrongly!) return
+ * success, which is broken but some userspace programs fail to work
+ * otherwise. Completely implementing such emulation is quite complicated
+ * though.
+ */
+ mmap_lock();
+ switch (advice) {
+ case MADV_DONTNEED:
+ if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
+ ret = get_errno(madvise(g2h_untagged(start), len, advice));
+ if (ret == 0) {
+ page_reset_target_data(start, start + len - 1);
+ }
+ }
+ }
+ mmap_unlock();
+
+ return ret;
+}
+
+/* minherit(2) */
+static inline abi_long do_bsd_minherit(abi_long addr, abi_long len,
+ abi_long inherit)
+{
+ return get_errno(minherit(g2h_untagged(addr), len, inherit));
+}
+
+/* mincore(2) */
+static inline abi_long do_bsd_mincore(abi_ulong target_addr, abi_ulong len,
+ abi_ulong target_vec)
+{
+ abi_long ret;
+ void *p;
+ abi_ulong vec_len = DIV_ROUND_UP(len, TARGET_PAGE_SIZE);
+
+ if (!guest_range_valid_untagged(target_addr, len)
+ || !page_check_range(target_addr, len, PAGE_VALID)) {
+ return -TARGET_EFAULT;
+ }
+
+ p = lock_user(VERIFY_WRITE, target_vec, vec_len, 0);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(mincore(g2h_untagged(target_addr), len, p));
+ unlock_user(p, target_vec, vec_len);
+
+ return ret;
+}
+
+/* do_obreak() must return target values and target errnos. */
+static inline abi_long do_obreak(abi_ulong brk_val)
+{
+ abi_long mapped_addr;
+ abi_ulong new_brk;
+ abi_ulong old_brk;
+
+ /* brk pointers are always untagged */
+
+ /* do not allow the brk to shrink below its initial value */
+ if (brk_val < initial_target_brk) {
+ return target_brk;
+ }
+
+ new_brk = TARGET_PAGE_ALIGN(brk_val);
+ old_brk = TARGET_PAGE_ALIGN(target_brk);
+
+ /* new and old target_brk might be on the same page */
+ if (new_brk == old_brk) {
+ target_brk = brk_val;
+ return target_brk;
+ }
+
+ /* Release heap if necessary */
+ if (new_brk < old_brk) {
+ target_munmap(new_brk, old_brk - new_brk);
+
+ target_brk = brk_val;
+ return target_brk;
+ }
+
+ mapped_addr = target_mmap(old_brk, new_brk - old_brk,
+ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_EXCL | MAP_ANON | MAP_PRIVATE,
+ -1, 0);
+
+ if (mapped_addr == old_brk) {
+ target_brk = brk_val;
+ return target_brk;
+ }
+
+ /* For everything else, return the previous break. */
+ return target_brk;
+}
+
+/* shm_open(2) */
+static inline abi_long do_bsd_shm_open(abi_ulong arg1, abi_long arg2,
+ abi_long arg3)
+{
+ int ret;
+ void *p;
+
+ if (arg1 == (uintptr_t)SHM_ANON) {
+ p = SHM_ANON;
+ } else {
+ p = lock_user_string(arg1);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ }
+ ret = get_errno(shm_open(p, target_to_host_bitmask(arg2, fcntl_flags_tbl),
+ arg3));
+
+ if (p != SHM_ANON) {
+ unlock_user(p, arg1, 0);
+ }
+
+ return ret;
+}
+
+/* shm_unlink(2) */
+static inline abi_long do_bsd_shm_unlink(abi_ulong arg1)
+{
+ int ret;
+ void *p;
+
+ p = lock_user_string(arg1);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(shm_unlink(p)); /* XXX path(p)? */
+ unlock_user(p, arg1, 0);
+
+ return ret;
+}
+
+/* shmget(2) */
+static inline abi_long do_bsd_shmget(abi_long arg1, abi_ulong arg2,
+ abi_long arg3)
+{
+ return get_errno(shmget(arg1, arg2, arg3));
+}
+
+/* shmctl(2) */
+static inline abi_long do_bsd_shmctl(abi_long shmid, abi_long cmd,
+ abi_ulong buff)
+{
+ struct shmid_ds dsarg;
+ abi_long ret = -TARGET_EINVAL;
+
+ cmd &= 0xff;
+
+ switch (cmd) {
+ case IPC_STAT:
+ if (target_to_host_shmid_ds(&dsarg, buff)) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(shmctl(shmid, cmd, &dsarg));
+ if (host_to_target_shmid_ds(buff, &dsarg)) {
+ return -TARGET_EFAULT;
+ }
+ break;
+
+ case IPC_SET:
+ if (target_to_host_shmid_ds(&dsarg, buff)) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(shmctl(shmid, cmd, &dsarg));
+ break;
+
+ case IPC_RMID:
+ ret = get_errno(shmctl(shmid, cmd, NULL));
+ break;
+
+ default:
+ ret = -TARGET_EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* shmat(2) */
+static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
+{
+ abi_ulong raddr;
+ abi_long ret;
+ struct shmid_ds shm_info;
+
+ /* Find out the length of the shared memory segment. */
+ ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
+ if (is_error(ret)) {
+ /* Can't get the length */
+ return ret;
+ }
+
+ if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
+ return -TARGET_EINVAL;
+ }
+
+ WITH_MMAP_LOCK_GUARD() {
+ void *host_raddr;
+
+ if (shmaddr) {
+ host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
+ } else {
+ abi_ulong mmap_start;
+
+ mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
+
+ if (mmap_start == -1) {
+ return -TARGET_ENOMEM;
+ }
+ host_raddr = shmat(shmid, g2h_untagged(mmap_start),
+ shmflg | SHM_REMAP);
+ }
+
+ if (host_raddr == (void *)-1) {
+ return get_errno(-1);
+ }
+ raddr = h2g(host_raddr);
+
+ page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
+ PAGE_VALID | PAGE_RESET | PAGE_READ |
+ (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
+
+ for (int i = 0; i < N_BSD_SHM_REGIONS; i++) {
+ if (bsd_shm_regions[i].start == 0) {
+ bsd_shm_regions[i].start = raddr;
+ bsd_shm_regions[i].size = shm_info.shm_segsz;
+ break;
+ }
+ }
+ }
+
+ return raddr;
+}
+
+/* shmdt(2) */
+static inline abi_long do_bsd_shmdt(abi_ulong shmaddr)
+{
+ abi_long ret;
+
+ WITH_MMAP_LOCK_GUARD() {
+ int i;
+
+ for (i = 0; i < N_BSD_SHM_REGIONS; ++i) {
+ if (bsd_shm_regions[i].start == shmaddr) {
+ break;
+ }
+ }
+
+ if (i == N_BSD_SHM_REGIONS) {
+ return -TARGET_EINVAL;
+ }
+
+ ret = get_errno(shmdt(g2h_untagged(shmaddr)));
+ if (ret == 0) {
+ abi_ulong size = bsd_shm_regions[i].size;
+
+ bsd_shm_regions[i].start = 0;
+ page_set_flags(shmaddr, shmaddr + size - 1, 0);
+ mmap_reserve(shmaddr, size);
+ }
+ }
+
+ return ret;
+}
+
+static inline abi_long do_bsd_vadvise(void)
+{
+ /* See sys_ovadvise() in vm_unix.c */
+ return -TARGET_EINVAL;
+}
+
+static inline abi_long do_bsd_sbrk(void)
+{
+ /* see sys_sbrk() in vm_mmap.c */
+ return -TARGET_EOPNOTSUPP;
+}
+
+static inline abi_long do_bsd_sstk(void)
+{
+ /* see sys_sstk() in vm_mmap.c */
+ return -TARGET_EOPNOTSUPP;
+}
+
+#endif /* BSD_USER_BSD_MEM_H */
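
The do_obreak() shim above never maps or unmaps partial pages: both the requested and the current break are rounded up with TARGET_PAGE_ALIGN(), only the difference between the two rounded values is handed to target_mmap()/target_munmap(), and target_brk keeps the byte-granular value. A minimal arithmetic sketch of those rounding decisions, assuming a 4 KiB page size in place of TARGET_PAGE_SIZE (all names below are illustrative stand-ins, not QEMU APIs):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE     4096ULL                     /* assumed target page size */
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t target_brk = 0x10010;                /* current byte-granular break */
        uint64_t requests[] = { 0x10020, 0x11000, 0x13000 };

        for (int i = 0; i < 3; i++) {
            uint64_t new_brk = PAGE_ALIGN(requests[i]);
            uint64_t old_brk = PAGE_ALIGN(target_brk);

            if (new_brk == old_brk) {
                printf("brk 0x%llx: same page, just record the new value\n",
                       (unsigned long long)requests[i]);
            } else if (new_brk < old_brk) {
                printf("brk 0x%llx: unmap 0x%llx bytes\n",
                       (unsigned long long)requests[i],
                       (unsigned long long)(old_brk - new_brk));
            } else {
                printf("brk 0x%llx: map 0x%llx more bytes at 0x%llx\n",
                       (unsigned long long)requests[i],
                       (unsigned long long)(new_brk - old_brk),
                       (unsigned long long)old_brk);
            }
            target_brk = requests[i];                 /* assume the change succeeded */
        }
        return 0;
    }
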
diff --git a/bsd-user/bsd-proc.c b/bsd-user/bsd-proc.c
new file mode 100644
index 0000000..ca3c1bf
--- /dev/null
+++ b/bsd-user/bsd-proc.c
@@ -0,0 +1,145 @@
+/*
+ * BSD process related system call helpers
+ *
+ * Copyright (c) 2013-14 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/cpuset.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+
+#include "qemu.h"
+#include "qemu-bsd.h"
+#include "signal-common.h"
+
+#include "bsd-proc.h"
+
+/*
+ * resource/rusage conversion
+ */
+int target_to_host_resource(int code)
+{
+ return code;
+}
+
+rlim_t target_to_host_rlim(abi_llong target_rlim)
+{
+ return tswap64(target_rlim);
+}
+
+abi_llong host_to_target_rlim(rlim_t rlim)
+{
+ return tswap64(rlim);
+}
+
+void h2g_rusage(const struct rusage *rusage,
+ struct target_freebsd_rusage *target_rusage)
+{
+ __put_user(rusage->ru_utime.tv_sec, &target_rusage->ru_utime.tv_sec);
+ __put_user(rusage->ru_utime.tv_usec, &target_rusage->ru_utime.tv_usec);
+
+ __put_user(rusage->ru_stime.tv_sec, &target_rusage->ru_stime.tv_sec);
+ __put_user(rusage->ru_stime.tv_usec, &target_rusage->ru_stime.tv_usec);
+
+ __put_user(rusage->ru_maxrss, &target_rusage->ru_maxrss);
+ __put_user(rusage->ru_ixrss, &target_rusage->ru_ixrss);
+ __put_user(rusage->ru_idrss, &target_rusage->ru_idrss);
+ __put_user(rusage->ru_isrss, &target_rusage->ru_isrss);
+ __put_user(rusage->ru_minflt, &target_rusage->ru_minflt);
+ __put_user(rusage->ru_majflt, &target_rusage->ru_majflt);
+ __put_user(rusage->ru_nswap, &target_rusage->ru_nswap);
+ __put_user(rusage->ru_inblock, &target_rusage->ru_inblock);
+ __put_user(rusage->ru_oublock, &target_rusage->ru_oublock);
+ __put_user(rusage->ru_msgsnd, &target_rusage->ru_msgsnd);
+ __put_user(rusage->ru_msgrcv, &target_rusage->ru_msgrcv);
+ __put_user(rusage->ru_nsignals, &target_rusage->ru_nsignals);
+ __put_user(rusage->ru_nvcsw, &target_rusage->ru_nvcsw);
+ __put_user(rusage->ru_nivcsw, &target_rusage->ru_nivcsw);
+}
+
+abi_long host_to_target_rusage(abi_ulong target_addr,
+ const struct rusage *rusage)
+{
+ struct target_freebsd_rusage *target_rusage;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ h2g_rusage(rusage, target_rusage);
+ unlock_user_struct(target_rusage, target_addr, 1);
+
+ return 0;
+}
+
+abi_long host_to_target_wrusage(abi_ulong target_addr,
+ const struct __wrusage *wrusage)
+{
+ struct target_freebsd__wrusage *target_wrusage;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_wrusage, target_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ h2g_rusage(&wrusage->wru_self, &target_wrusage->wru_self);
+ h2g_rusage(&wrusage->wru_children, &target_wrusage->wru_children);
+ unlock_user_struct(target_wrusage, target_addr, 1);
+
+ return 0;
+}
+
+/*
+ * wait status conversion.
+ *
+ * Map host to target signal numbers for the wait family of syscalls.
+ * Assume all other status bits are the same.
+ */
+int host_to_target_waitstatus(int status)
+{
+ if (WIFSIGNALED(status)) {
+ return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
+ }
+ if (WIFSTOPPED(status)) {
+ return (host_to_target_signal(WSTOPSIG(status)) << 8) | (status & 0xff);
+ }
+ return status;
+}
+
+int bsd_get_ncpu(void)
+{
+ int ncpu = -1;
+ cpuset_t mask;
+
+ CPU_ZERO(&mask);
+
+ if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(mask),
+ &mask) == 0) {
+ ncpu = CPU_COUNT(&mask);
+ }
+
+ if (ncpu == -1) {
+ ncpu = sysconf(_SC_NPROCESSORS_ONLN);
+ }
+
+ if (ncpu == -1) {
+ gemu_log("XXX Missing bsd_get_ncpu() implementation\n");
+ ncpu = 1;
+ }
+
+ return ncpu;
+}
+
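
host_to_target_waitstatus() above only remaps the signal number packed into the status word and leaves the surrounding exit/stop bits untouched. A small host-only sketch of the same bit handling, with an identity mapping standing in for host_to_target_signal() (that stand-in, like the whole program, is illustrative only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Stand-in for host_to_target_signal(); assumes identical signal numbering. */
    static int stub_host_to_target_signal(int sig)
    {
        return sig;
    }

    static int convert_waitstatus(int status)
    {
        if (WIFSIGNALED(status)) {
            return stub_host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
        }
        if (WIFSTOPPED(status)) {
            return (stub_host_to_target_signal(WSTOPSIG(status)) << 8) |
                   (status & 0xff);
        }
        return status;
    }

    int main(void)
    {
        int status;
        pid_t pid = fork();

        if (pid < 0) {
            perror("fork");
            return 1;
        }
        if (pid == 0) {
            abort();                    /* child terminates with SIGABRT */
        }
        waitpid(pid, &status, 0);
        printf("raw status 0x%x -> converted 0x%x\n",
               status, convert_waitstatus(status));
        return 0;
    }
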
diff --git a/bsd-user/bsd-proc.h b/bsd-user/bsd-proc.h
index a1061bf..8b1c2de 100644
--- a/bsd-user/bsd-proc.h
+++ b/bsd-user/bsd-proc.h
@@ -22,12 +22,16 @@
#include <sys/resource.h>
+#include "qemu-bsd.h"
+#include "gdbstub/syscalls.h"
+#include "qemu/plugin.h"
+
+extern int _getlogin(char*, int);
+int bsd_get_ncpu(void);
+
/* exit(2) */
static inline abi_long do_bsd_exit(void *cpu_env, abi_long arg1)
{
-#ifdef TARGET_GPROF
- _mcleanup();
-#endif
gdb_exit(arg1);
qemu_plugin_user_exit();
_exit(arg1);
@@ -35,4 +39,376 @@
return 0;
}
+/* getgroups(2) */
+static inline abi_long do_bsd_getgroups(abi_long gidsetsize, abi_long arg2)
+{
+ abi_long ret;
+ uint32_t *target_grouplist;
+ g_autofree gid_t *grouplist;
+ int i;
+
+ grouplist = g_try_new(gid_t, gidsetsize);
+ ret = get_errno(getgroups(gidsetsize, grouplist));
+ if (gidsetsize != 0) {
+ if (!is_error(ret)) {
+ target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(uint32_t), 0);
+ if (!target_grouplist) {
+ return -TARGET_EFAULT;
+ }
+ for (i = 0; i < ret; i++) {
+ target_grouplist[i] = tswap32(grouplist[i]);
+ }
+ unlock_user(target_grouplist, arg2, gidsetsize * sizeof(uint32_t));
+ }
+ }
+ return ret;
+}
+
+/* setgroups(2) */
+static inline abi_long do_bsd_setgroups(abi_long gidsetsize, abi_long arg2)
+{
+ uint32_t *target_grouplist;
+ g_autofree gid_t *grouplist;
+ int i;
+
+ grouplist = g_try_new(gid_t, gidsetsize);
+ target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(uint32_t), 1);
+ if (!target_grouplist) {
+ return -TARGET_EFAULT;
+ }
+ for (i = 0; i < gidsetsize; i++) {
+ grouplist[i] = tswap32(target_grouplist[i]);
+ }
+ unlock_user(target_grouplist, arg2, 0);
+ return get_errno(setgroups(gidsetsize, grouplist));
+}
+
+/* umask(2) */
+static inline abi_long do_bsd_umask(abi_long arg1)
+{
+ return get_errno(umask(arg1));
+}
+
+/* setlogin(2) */
+static inline abi_long do_bsd_setlogin(abi_long arg1)
+{
+ abi_long ret;
+ void *p;
+
+ p = lock_user_string(arg1);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(setlogin(p));
+ unlock_user(p, arg1, 0);
+
+ return ret;
+}
+
+/* getlogin(2) */
+static inline abi_long do_bsd_getlogin(abi_long arg1, abi_long arg2)
+{
+ abi_long ret;
+ void *p;
+
+ p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(_getlogin(p, arg2));
+ unlock_user(p, arg1, arg2);
+
+ return ret;
+}
+
+/* getrusage(2) */
+static inline abi_long do_bsd_getrusage(abi_long who, abi_ulong target_addr)
+{
+ abi_long ret;
+ struct rusage rusage;
+
+ ret = get_errno(getrusage(who, &rusage));
+ if (!is_error(ret)) {
+ host_to_target_rusage(target_addr, &rusage);
+ }
+ return ret;
+}
+
+/* getrlimit(2) */
+static inline abi_long do_bsd_getrlimit(abi_long arg1, abi_ulong arg2)
+{
+ abi_long ret;
+ int resource = target_to_host_resource(arg1);
+ struct target_rlimit *target_rlim;
+ struct rlimit rlim;
+
+ switch (resource) {
+ case RLIMIT_STACK:
+ rlim.rlim_cur = target_dflssiz;
+ rlim.rlim_max = target_maxssiz;
+ ret = 0;
+ break;
+
+ case RLIMIT_DATA:
+ rlim.rlim_cur = target_dfldsiz;
+ rlim.rlim_max = target_maxdsiz;
+ ret = 0;
+ break;
+
+ default:
+ ret = get_errno(getrlimit(resource, &rlim));
+ break;
+ }
+ if (!is_error(ret)) {
+ if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
+ return -TARGET_EFAULT;
+ }
+ target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
+ target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
+ unlock_user_struct(target_rlim, arg2, 1);
+ }
+ return ret;
+}
+
+/* setrlimit(2) */
+static inline abi_long do_bsd_setrlimit(abi_long arg1, abi_ulong arg2)
+{
+ abi_long ret;
+ int resource = target_to_host_resource(arg1);
+ struct target_rlimit *target_rlim;
+ struct rlimit rlim;
+
+ if (RLIMIT_STACK == resource) {
+ /* XXX We should, maybe, allow the stack size to shrink */
+ ret = -TARGET_EPERM;
+ } else {
+ if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) {
+ return -TARGET_EFAULT;
+ }
+ rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
+ rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
+ unlock_user_struct(target_rlim, arg2, 0);
+ ret = get_errno(setrlimit(resource, &rlim));
+ }
+ return ret;
+}
+
+/* getpid(2) */
+static inline abi_long do_bsd_getpid(void)
+{
+ return get_errno(getpid());
+}
+
+/* getppid(2) */
+static inline abi_long do_bsd_getppid(void)
+{
+ return get_errno(getppid());
+}
+
+/* getuid(2) */
+static inline abi_long do_bsd_getuid(void)
+{
+ return get_errno(getuid());
+}
+
+/* geteuid(2) */
+static inline abi_long do_bsd_geteuid(void)
+{
+ return get_errno(geteuid());
+}
+
+/* getgid(2) */
+static inline abi_long do_bsd_getgid(void)
+{
+ return get_errno(getgid());
+}
+
+/* getegid(2) */
+static inline abi_long do_bsd_getegid(void)
+{
+ return get_errno(getegid());
+}
+
+/* setuid(2) */
+static inline abi_long do_bsd_setuid(abi_long arg1)
+{
+ return get_errno(setuid(arg1));
+}
+
+/* seteuid(2) */
+static inline abi_long do_bsd_seteuid(abi_long arg1)
+{
+ return get_errno(seteuid(arg1));
+}
+
+/* setgid(2) */
+static inline abi_long do_bsd_setgid(abi_long arg1)
+{
+ return get_errno(setgid(arg1));
+}
+
+/* setegid(2) */
+static inline abi_long do_bsd_setegid(abi_long arg1)
+{
+ return get_errno(setegid(arg1));
+}
+
+/* getpgid(2) */
+static inline abi_long do_bsd_getpgid(pid_t pid)
+{
+ return get_errno(getpgid(pid));
+}
+
+/* setpgid(2) */
+static inline abi_long do_bsd_setpgid(int pid, int pgrp)
+{
+ return get_errno(setpgid(pid, pgrp));
+}
+
+/* getpgrp(2) */
+static inline abi_long do_bsd_getpgrp(void)
+{
+ return get_errno(getpgrp());
+}
+
+/* setreuid(2) */
+static inline abi_long do_bsd_setreuid(abi_long arg1, abi_long arg2)
+{
+ return get_errno(setreuid(arg1, arg2));
+}
+
+/* setregid(2) */
+static inline abi_long do_bsd_setregid(abi_long arg1, abi_long arg2)
+{
+ return get_errno(setregid(arg1, arg2));
+}
+
+/* setresgid(2) */
+static inline abi_long do_bsd_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+{
+ return get_errno(setresgid(rgid, egid, sgid));
+}
+
+/* setresuid(2) */
+static inline abi_long do_bsd_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+{
+ return get_errno(setresuid(ruid, euid, suid));
+}
+
+/* getresuid(2) */
+static inline abi_long do_bsd_getresuid(abi_ulong arg1, abi_ulong arg2,
+ abi_ulong arg3)
+{
+ abi_long ret;
+ uid_t ruid, euid, suid;
+
+ ret = get_errno(getresuid(&ruid, &euid, &suid));
+ if (is_error(ret)) {
+ return ret;
+ }
+ if (put_user_s32(ruid, arg1)) {
+ return -TARGET_EFAULT;
+ }
+ if (put_user_s32(euid, arg2)) {
+ return -TARGET_EFAULT;
+ }
+ if (put_user_s32(suid, arg3)) {
+ return -TARGET_EFAULT;
+ }
+ return ret;
+}
+
+/* getresgid(2) */
+static inline abi_long do_bsd_getresgid(abi_ulong arg1, abi_ulong arg2,
+ abi_ulong arg3)
+{
+ abi_long ret;
+ gid_t rgid, egid, sgid;
+
+ ret = get_errno(getresgid(&rgid, &egid, &sgid));
+ if (is_error(ret)) {
+ return ret;
+ }
+ if (put_user_s32(rgid, arg1)) {
+ return -TARGET_EFAULT;
+ }
+ if (put_user_s32(egid, arg2)) {
+ return -TARGET_EFAULT;
+ }
+ if (put_user_s32(sgid, arg3)) {
+ return -TARGET_EFAULT;
+ }
+ return ret;
+}
+
+/* getsid(2) */
+static inline abi_long do_bsd_getsid(abi_long arg1)
+{
+ return get_errno(getsid(arg1));
+}
+
+/* setsid(2) */
+static inline abi_long do_bsd_setsid(void)
+{
+ return get_errno(setsid());
+}
+
+/* issetugid(2) */
+static inline abi_long do_bsd_issetugid(void)
+{
+ return get_errno(issetugid());
+}
+
+/* profil(2) */
+static inline abi_long do_bsd_profil(abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4)
+{
+ return -TARGET_ENOSYS;
+}
+
+/* ktrace(2) */
+static inline abi_long do_bsd_ktrace(abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4)
+{
+ return -TARGET_ENOSYS;
+}
+
+/* utrace(2) */
+static inline abi_long do_bsd_utrace(abi_long arg1, abi_long arg2)
+{
+ return -TARGET_ENOSYS;
+}
+
+
+/* ptrace(2) */
+static inline abi_long do_bsd_ptrace(abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4)
+{
+ return -TARGET_ENOSYS;
+}
+
+/* getpriority(2) */
+static inline abi_long do_bsd_getpriority(abi_long which, abi_long who)
+{
+ abi_long ret;
+ /*
+ * Note that negative values are valid for getpriority, so we must
+ * differentiate based on errno settings.
+ */
+ errno = 0;
+ ret = getpriority(which, who);
+ if (ret == -1 && errno != 0) {
+ return -host_to_target_errno(errno);
+ }
+
+ return ret;
+}
+
+/* setpriority(2) */
+static inline abi_long do_bsd_setpriority(abi_long which, abi_long who,
+ abi_long prio)
+{
+ return get_errno(setpriority(which, who, prio));
+}
+
#endif /* !BSD_PROC_H_ */
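
As the comment in do_bsd_getpriority() above points out, getpriority(2) may legitimately return -1, so errno has to be cleared before the call and inspected afterwards to tell a failure apart from a real nice value of -1. The same idiom, as a standalone host sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        int prio;

        errno = 0;
        prio = getpriority(PRIO_PROCESS, 0);   /* 0 selects the calling process */
        if (prio == -1 && errno != 0) {
            perror("getpriority");             /* genuine failure */
            return 1;
        }
        printf("nice value: %d\n", prio);      /* -1 is a perfectly valid answer */
        return 0;
    }
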
diff --git a/bsd-user/freebsd/meson.build b/bsd-user/freebsd/meson.build
index f2f047c..8fd6c7c 100644
--- a/bsd-user/freebsd/meson.build
+++ b/bsd-user/freebsd/meson.build
@@ -1,5 +1,6 @@
bsd_user_ss.add(files(
'os-stat.c',
+ 'os-proc.c',
'os-sys.c',
'os-syscall.c',
))
diff --git a/bsd-user/freebsd/os-misc.h b/bsd-user/freebsd/os-misc.h
new file mode 100644
index 0000000..7114576
--- /dev/null
+++ b/bsd-user/freebsd/os-misc.h
@@ -0,0 +1,98 @@
+/*
+ * miscellaneous FreeBSD system call shims
+ *
+ * Copyright (c) 2013-14 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OS_MISC_H
+#define OS_MISC_H
+
+#include <sys/cpuset.h>
+#include <sys/random.h>
+#include <sched.h>
+
+/*
+ * shm_open2 isn't exported, but the __sys_ alias is. We can use either for the
+ * static version, but to dynamically link we have to use the sys version.
+ */
+int __sys_shm_open2(const char *path, int flags, mode_t mode, int shmflags,
+ const char *);
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 1300048
+/* shm_open2(2) */
+static inline abi_long do_freebsd_shm_open2(abi_ulong pathptr, abi_ulong flags,
+ abi_long mode, abi_ulong shmflags, abi_ulong nameptr)
+{
+ int ret;
+ void *uname, *upath;
+
+ if (pathptr == (uintptr_t)SHM_ANON) {
+ upath = SHM_ANON;
+ } else {
+ upath = lock_user_string(pathptr);
+ if (upath == NULL) {
+ return -TARGET_EFAULT;
+ }
+ }
+
+ uname = NULL;
+ if (nameptr != 0) {
+ uname = lock_user_string(nameptr);
+ if (uname == NULL) {
+ unlock_user(upath, pathptr, 0);
+ return -TARGET_EFAULT;
+ }
+ }
+ ret = get_errno(__sys_shm_open2(upath,
+ target_to_host_bitmask(flags, fcntl_flags_tbl), mode,
+ target_to_host_bitmask(shmflags, shmflag_flags_tbl), uname));
+
+ if (upath != SHM_ANON) {
+ unlock_user(upath, pathptr, 0);
+ }
+ if (uname != NULL) {
+ unlock_user(uname, nameptr, 0);
+ }
+ return ret;
+}
+#endif /* __FreeBSD_version >= 1300048 */
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 1300049
+/* shm_rename(2) */
+static inline abi_long do_freebsd_shm_rename(abi_ulong fromptr, abi_ulong toptr,
+ abi_ulong flags)
+{
+ int ret;
+ void *ufrom, *uto;
+
+ ufrom = lock_user_string(fromptr);
+ if (ufrom == NULL) {
+ return -TARGET_EFAULT;
+ }
+ uto = lock_user_string(toptr);
+ if (uto == NULL) {
+ unlock_user(ufrom, fromptr, 0);
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(shm_rename(ufrom, uto, flags));
+ unlock_user(ufrom, fromptr, 0);
+ unlock_user(uto, toptr, 0);
+
+ return ret;
+}
+#endif /* __FreeBSD_version >= 1300049 */
+
+#endif /* OS_MISC_H */
diff --git a/bsd-user/freebsd/os-proc.c b/bsd-user/freebsd/os-proc.c
new file mode 100644
index 0000000..4e67ae4
--- /dev/null
+++ b/bsd-user/freebsd/os-proc.c
@@ -0,0 +1,480 @@
+/*
+ * FreeBSD process related emulation code
+ *
+ * Copyright (c) 2013-15 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+struct kinfo_proc;
+#include <libprocstat.h>
+
+#include "qemu.h"
+
+/*
+ * Get the filename for the given file descriptor.
+ * Note that this may return NULL (fail) if the path is no longer cached in the kernel.
+ */
+static char *
+get_filename_from_fd(pid_t pid, int fd, char *filename, size_t len)
+{
+ char *ret = NULL;
+ unsigned int cnt;
+ struct procstat *procstat = NULL;
+ struct kinfo_proc *kp = NULL;
+ struct filestat_list *head = NULL;
+ struct filestat *fst;
+
+ procstat = procstat_open_sysctl();
+ if (procstat == NULL) {
+ goto out;
+ }
+
+ kp = procstat_getprocs(procstat, KERN_PROC_PID, pid, &cnt);
+ if (kp == NULL) {
+ goto out;
+ }
+
+ head = procstat_getfiles(procstat, kp, 0);
+ if (head == NULL) {
+ goto out;
+ }
+
+ STAILQ_FOREACH(fst, head, next) {
+ if (fd == fst->fs_fd) {
+ if (fst->fs_path != NULL) {
+ (void)strlcpy(filename, fst->fs_path, len);
+ ret = filename;
+ }
+ break;
+ }
+ }
+
+out:
+ if (head != NULL) {
+ procstat_freefiles(procstat, head);
+ }
+ if (kp != NULL) {
+ procstat_freeprocs(procstat, kp);
+ }
+ if (procstat != NULL) {
+ procstat_close(procstat);
+ }
+ return ret;
+}
+
+/*
+ * execve/fexecve
+ */
+abi_long freebsd_exec_common(abi_ulong path_or_fd, abi_ulong guest_argp,
+ abi_ulong guest_envp, int do_fexec)
+{
+ char **argp, **envp, **qargp, **qarg1, **qarg0, **qargend;
+ int argc, envc;
+ abi_ulong gp;
+ abi_ulong addr;
+ char **q;
+ int total_size = 0;
+ void *p;
+ abi_long ret;
+
+ argc = 0;
+ for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
+ if (get_user_ual(addr, gp)) {
+ return -TARGET_EFAULT;
+ }
+ if (!addr) {
+ break;
+ }
+ argc++;
+ }
+ envc = 0;
+ for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
+ if (get_user_ual(addr, gp)) {
+ return -TARGET_EFAULT;
+ }
+ if (!addr) {
+ break;
+ }
+ envc++;
+ }
+
+ qarg0 = argp = g_new0(char *, argc + 9);
+ /* save the first argument for the emulator */
+ *argp++ = (char *)getprogname();
+ qargp = argp;
+ *argp++ = (char *)getprogname();
+ qarg1 = argp;
+ envp = g_new0(char *, envc + 1);
+ for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp)) {
+ ret = -TARGET_EFAULT;
+ goto execve_end;
+ }
+ if (!addr) {
+ break;
+ }
+ *q = lock_user_string(addr);
+ if (*q == NULL) {
+ ret = -TARGET_EFAULT;
+ goto execve_end;
+ }
+ total_size += strlen(*q) + 1;
+ }
+ *q++ = NULL;
+ qargend = q;
+
+ for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp)) {
+ ret = -TARGET_EFAULT;
+ goto execve_end;
+ }
+ if (!addr) {
+ break;
+ }
+ *q = lock_user_string(addr);
+ if (*q == NULL) {
+ ret = -TARGET_EFAULT;
+ goto execve_end;
+ }
+ total_size += strlen(*q) + 1;
+ }
+ *q = NULL;
+
+ /*
+ * This case will not be caught by the host's execve() if its
+ * page size is bigger than the target's.
+ */
+ if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
+ ret = -TARGET_E2BIG;
+ goto execve_end;
+ }
+
+ if (do_fexec) {
+ if (((int)path_or_fd > 0 &&
+ is_target_elf_binary((int)path_or_fd)) == 1) {
+ char execpath[PATH_MAX];
+
+ /*
+ * The executable is an elf binary for the target
+ * arch. execve() it using the emulator if we can
+ * determine the filename path from the fd.
+ */
+ if (get_filename_from_fd(getpid(), (int)path_or_fd, execpath,
+ sizeof(execpath)) != NULL) {
+ memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
+ qarg1[1] = qarg1[0];
+ qarg1[0] = (char *)"-0";
+ qarg1 += 2;
+ qargend += 2;
+ *qarg1 = execpath;
+#ifndef DONT_INHERIT_INTERP_PREFIX
+ memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
+ *qarg1++ = (char *)"-L";
+ *qarg1++ = (char *)interp_prefix;
+#endif
+ ret = get_errno(execve(qemu_proc_pathname, qargp, envp));
+ } else {
+ /* Getting the filename path failed. */
+ ret = -TARGET_EBADF;
+ goto execve_end;
+ }
+ } else {
+ ret = get_errno(fexecve((int)path_or_fd, argp, envp));
+ }
+ } else {
+ int fd;
+
+ p = lock_user_string(path_or_fd);
+ if (p == NULL) {
+ ret = -TARGET_EFAULT;
+ goto execve_end;
+ }
+
+ /*
+ * Check the header and see if it is a target elf binary. If so
+ * then execute using qemu user mode emulator.
+ */
+ fd = open(p, O_RDONLY | O_CLOEXEC);
+ if (fd > 0 && is_target_elf_binary(fd) == 1) {
+ close(fd);
+ /* execve() as a target binary using emulator. */
+ memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
+ qarg1[1] = qarg1[0];
+ qarg1[0] = (char *)"-0";
+ qarg1 += 2;
+ qargend += 2;
+ *qarg1 = (char *)p;
+#ifndef DONT_INHERIT_INTERP_PREFIX
+ memmove(qarg1 + 2, qarg1, (qargend - qarg1) * sizeof(*qarg1));
+ *qarg1++ = (char *)"-L";
+ *qarg1++ = (char *)interp_prefix;
+#endif
+ ret = get_errno(execve(qemu_proc_pathname, qargp, envp));
+ } else {
+ close(fd);
+ /* Execve() as a host native binary. */
+ ret = get_errno(execve(p, argp, envp));
+ }
+ unlock_user(p, path_or_fd, 0);
+ }
+
+execve_end:
+ for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp) || !addr) {
+ break;
+ }
+ unlock_user(*q, addr, 0);
+ }
+
+ for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
+ if (get_user_ual(addr, gp) || !addr) {
+ break;
+ }
+ unlock_user(*q, addr, 0);
+ }
+
+ g_free(qarg0);
+ g_free(envp);
+
+ return ret;
+}
+
+#include <sys/procctl.h>
+
+static abi_long
+t2h_procctl_cmd(int target_cmd, int *host_cmd)
+{
+ switch (target_cmd) {
+ case TARGET_PROC_SPROTECT:
+ *host_cmd = PROC_SPROTECT;
+ break;
+
+ case TARGET_PROC_REAP_ACQUIRE:
+ *host_cmd = PROC_REAP_ACQUIRE;
+ break;
+
+ case TARGET_PROC_REAP_RELEASE:
+ *host_cmd = PROC_REAP_RELEASE;
+ break;
+
+ case TARGET_PROC_REAP_STATUS:
+ *host_cmd = PROC_REAP_STATUS;
+ break;
+
+ case TARGET_PROC_REAP_KILL:
+ *host_cmd = PROC_REAP_KILL;
+ break;
+
+ default:
+ return -TARGET_EINVAL;
+ }
+
+ return 0;
+}
+
+static abi_long
+h2t_reaper_status(struct procctl_reaper_status *host_rs,
+ abi_ulong target_rs_addr)
+{
+ struct target_procctl_reaper_status *target_rs;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_rs, target_rs_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ __put_user(host_rs->rs_flags, &target_rs->rs_flags);
+ __put_user(host_rs->rs_children, &target_rs->rs_children);
+ __put_user(host_rs->rs_descendants, &target_rs->rs_descendants);
+ __put_user(host_rs->rs_reaper, &target_rs->rs_reaper);
+ __put_user(host_rs->rs_pid, &target_rs->rs_pid);
+ unlock_user_struct(target_rs, target_rs_addr, 1);
+
+ return 0;
+}
+
+static abi_long
+t2h_reaper_kill(abi_ulong target_rk_addr, struct procctl_reaper_kill *host_rk)
+{
+ struct target_procctl_reaper_kill *target_rk;
+
+ if (!lock_user_struct(VERIFY_READ, target_rk, target_rk_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+ __get_user(host_rk->rk_sig, &target_rk->rk_sig);
+ __get_user(host_rk->rk_flags, &target_rk->rk_flags);
+ __get_user(host_rk->rk_subtree, &target_rk->rk_subtree);
+ __get_user(host_rk->rk_killed, &target_rk->rk_killed);
+ __get_user(host_rk->rk_fpid, &target_rk->rk_fpid);
+ unlock_user_struct(target_rk, target_rk_addr, 0);
+
+ return 0;
+}
+
+static abi_long
+h2t_reaper_kill(struct procctl_reaper_kill *host_rk, abi_ulong target_rk_addr)
+{
+ struct target_procctl_reaper_kill *target_rk;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_rk, target_rk_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ __put_user(host_rk->rk_sig, &target_rk->rk_sig);
+ __put_user(host_rk->rk_flags, &target_rk->rk_flags);
+ __put_user(host_rk->rk_subtree, &target_rk->rk_subtree);
+ __put_user(host_rk->rk_killed, &target_rk->rk_killed);
+ __put_user(host_rk->rk_fpid, &target_rk->rk_fpid);
+ unlock_user_struct(target_rk, target_rk_addr, 1);
+
+ return 0;
+}
+
+static abi_long
+h2t_procctl_reaper_pidinfo(struct procctl_reaper_pidinfo *host_pi,
+ abi_ulong target_pi_addr)
+{
+ struct target_procctl_reaper_pidinfo *target_pi;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_pi, target_pi_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+ __put_user(host_pi->pi_pid, &target_pi->pi_pid);
+ __put_user(host_pi->pi_subtree, &target_pi->pi_subtree);
+ __put_user(host_pi->pi_flags, &target_pi->pi_flags);
+ unlock_user_struct(target_pi, target_pi_addr, 1);
+
+ return 0;
+}
+
+abi_long
+do_freebsd_procctl(void *cpu_env, int idtype, abi_ulong arg2, abi_ulong arg3,
+ abi_ulong arg4, abi_ulong arg5, abi_ulong arg6)
+{
+ abi_long error = 0, target_rp_pids;
+ void *data;
+ int host_cmd, flags;
+ uint32_t u, target_rp_count;
+ g_autofree union {
+ struct procctl_reaper_status rs;
+ struct procctl_reaper_pids rp;
+ struct procctl_reaper_kill rk;
+ } host;
+ struct target_procctl_reaper_pids *target_rp;
+ id_t id; /* 64-bit */
+ int target_cmd;
+ abi_ulong target_arg;
+
+#if TARGET_ABI_BITS == 32
+ /* See if we need to align the register pairs. */
+ if (regpairs_aligned(cpu_env)) {
+ id = (id_t)target_arg64(arg3, arg4);
+ target_cmd = (int)arg5;
+ target_arg = arg6;
+ } else {
+ id = (id_t)target_arg64(arg2, arg3);
+ target_cmd = (int)arg4;
+ target_arg = arg5;
+ }
+#else
+ id = (id_t)arg2;
+ target_cmd = (int)arg3;
+ target_arg = arg4;
+#endif
+
+ error = t2h_procctl_cmd(target_cmd, &host_cmd);
+ if (error) {
+ return error;
+ }
+ switch (host_cmd) {
+ case PROC_SPROTECT:
+ data = &flags;
+ break;
+
+ case PROC_REAP_ACQUIRE:
+ case PROC_REAP_RELEASE:
+ if (target_arg == 0) {
+ data = NULL;
+ } else {
+ error = -TARGET_EINVAL;
+ }
+ break;
+
+ case PROC_REAP_STATUS:
+ data = &host.rs;
+ break;
+
+ case PROC_REAP_GETPIDS:
+ if (!lock_user_struct(VERIFY_READ, target_rp, target_arg, 1)) {
+ return -TARGET_EFAULT;
+ }
+ __get_user(target_rp_count, &target_rp->rp_count);
+ __get_user(target_rp_pids, &target_rp->rp_pids);
+ unlock_user_struct(target_rp, target_arg, 0);
+ host.rp.rp_count = target_rp_count;
+ host.rp.rp_pids = g_try_new(struct procctl_reaper_pidinfo,
+ target_rp_count);
+
+ if (host.rp.rp_pids == NULL) {
+ error = -TARGET_ENOMEM;
+ } else {
+ data = &host.rp;
+ }
+ break;
+
+ case PROC_REAP_KILL:
+ error = t2h_reaper_kill(target_arg, &host.rk);
+ break;
+ }
+
+ if (error) {
+ return error;
+ }
+ error = get_errno(procctl(idtype, id, host_cmd, data));
+
+ if (error) {
+ return error;
+ }
+ switch (host_cmd) {
+ case PROC_SPROTECT:
+ if (put_user_s32(flags, target_arg)) {
+ return -TARGET_EFAULT;
+ }
+ break;
+
+ case PROC_REAP_STATUS:
+ error = h2t_reaper_status(&host.rs, target_arg);
+ break;
+
+ case PROC_REAP_GETPIDS:
+ /* copyout reaper pidinfo */
+ for (u = 0; u < target_rp_count; u++) {
+ error = h2t_procctl_reaper_pidinfo(&host.rp.rp_pids[u],
+ target_rp_pids +
+ (u * sizeof(struct target_procctl_reaper_pidinfo)));
+ if (error) {
+ break;
+ }
+ }
+ break;
+
+ case PROC_REAP_KILL:
+ error = h2t_reaper_kill(&host.rk, target_arg);
+ break;
+ }
+
+ return error;
+}
diff --git a/bsd-user/freebsd/os-proc.h b/bsd-user/freebsd/os-proc.h
new file mode 100644
index 0000000..d641878
--- /dev/null
+++ b/bsd-user/freebsd/os-proc.h
@@ -0,0 +1,293 @@
+/*
+ * process related system call shims and definitions
+ *
+ * Copyright (c) 2013-14 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef BSD_USER_FREEBSD_OS_PROC_H
+#define BSD_USER_FREEBSD_OS_PROC_H
+
+#include <sys/param.h>
+#include <sys/procctl.h>
+#include <sys/signal.h>
+#include <sys/types.h>
+#include <sys/procdesc.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "target_arch_cpu.h"
+
+pid_t safe_wait4(pid_t wpid, int *status, int options, struct rusage *rusage);
+pid_t safe_wait6(idtype_t idtype, id_t id, int *status, int options,
+ struct __wrusage *wrusage, siginfo_t *infop);
+
+extern int __setugid(int flag);
+
+/* execve(2) */
+static inline abi_long do_freebsd_execve(abi_ulong path_or_fd, abi_ulong argp,
+ abi_ulong envp)
+{
+
+ return freebsd_exec_common(path_or_fd, argp, envp, 0);
+}
+
+/* fexecve(2) */
+static inline abi_long do_freebsd_fexecve(abi_ulong path_or_fd, abi_ulong argp,
+ abi_ulong envp)
+{
+
+ return freebsd_exec_common(path_or_fd, argp, envp, 1);
+}
+
+/* wait4(2) */
+static inline abi_long do_freebsd_wait4(abi_long arg1, abi_ulong target_status,
+ abi_long arg3, abi_ulong target_rusage)
+{
+ abi_long ret;
+ int status;
+ struct rusage rusage, *rusage_ptr = NULL;
+
+ if (target_rusage) {
+ rusage_ptr = &rusage;
+ }
+ ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
+
+ if (ret < 0) {
+ return ret;
+ }
+ if (target_status != 0) {
+ status = host_to_target_waitstatus(status);
+ if (put_user_s32(status, target_status) != 0) {
+ return -TARGET_EFAULT;
+ }
+ }
+ if (target_rusage != 0) {
+ host_to_target_rusage(target_rusage, &rusage);
+ }
+ return ret;
+}
+
+/* wait6(2) */
+static inline abi_long do_freebsd_wait6(void *cpu_env, abi_long idtype,
+ abi_long id1, abi_long id2,
+ abi_ulong target_status, abi_long options, abi_ulong target_wrusage,
+ abi_ulong target_infop, abi_ulong pad1)
+{
+ abi_long ret;
+ int status;
+ struct __wrusage wrusage, *wrusage_ptr = NULL;
+ siginfo_t info;
+ void *p;
+
+ if (regpairs_aligned(cpu_env) != 0) {
+ /* printf("shifting args\n"); */
+ /* 64-bit id is aligned, so shift all the arguments over by one */
+ id1 = id2;
+ id2 = target_status;
+ target_status = options;
+ options = target_wrusage;
+ target_wrusage = target_infop;
+ target_infop = pad1;
+ }
+
+ if (target_wrusage) {
+ wrusage_ptr = &wrusage;
+ }
+ ret = get_errno(safe_wait6(idtype, target_arg64(id1, id2),
+ &status, options, wrusage_ptr, &info));
+
+ if (ret < 0) {
+ return ret;
+ }
+ if (target_status != 0) {
+ status = host_to_target_waitstatus(status);
+ if (put_user_s32(status, target_status) != 0) {
+ return -TARGET_EFAULT;
+ }
+ }
+ if (target_wrusage != 0) {
+ host_to_target_wrusage(target_wrusage, &wrusage);
+ }
+ if (target_infop != 0) {
+ p = lock_user(VERIFY_WRITE, target_infop, sizeof(target_siginfo_t), 0);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ host_to_target_siginfo(p, &info);
+ unlock_user(p, target_infop, sizeof(target_siginfo_t));
+ }
+ return ret;
+}
+
+/* setloginclass(2) */
+static inline abi_long do_freebsd_setloginclass(abi_ulong arg1)
+{
+ abi_long ret;
+ void *p;
+
+ p = lock_user_string(arg1);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(setloginclass(p));
+ unlock_user(p, arg1, 0);
+
+ return ret;
+}
+
+/* getloginclass(2) */
+static inline abi_long do_freebsd_getloginclass(abi_ulong arg1, abi_ulong arg2)
+{
+ abi_long ret;
+ void *p;
+
+ p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
+ if (p == NULL) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(getloginclass(p, arg2));
+ unlock_user(p, arg1, arg2);
+
+ return ret;
+}
+
+/* pdgetpid(2) */
+static inline abi_long do_freebsd_pdgetpid(abi_long fd, abi_ulong target_pidp)
+{
+ abi_long ret;
+ pid_t pid;
+
+ ret = get_errno(pdgetpid(fd, &pid));
+ if (!is_error(ret)) {
+ if (put_user_u32(pid, target_pidp)) {
+ return -TARGET_EFAULT;
+ }
+ }
+ return ret;
+}
+
+/* undocumented __setugid */
+static inline abi_long do_freebsd___setugid(abi_long arg1)
+{
+ return -TARGET_ENOSYS;
+}
+
+/* fork(2) */
+static inline abi_long do_freebsd_fork(void *cpu_env)
+{
+ abi_long ret;
+ abi_ulong child_flag;
+
+ fork_start();
+ ret = fork();
+ if (ret == 0) {
+ /* child */
+ child_flag = 1;
+ target_cpu_clone_regs(cpu_env, 0);
+ } else {
+ /* parent */
+ child_flag = 0;
+ }
+
+ /*
+ * The fork system call sets a child flag in the second return
+ * value: 0 for parent process, 1 for child process.
+ */
+ set_second_rval(cpu_env, child_flag);
+
+ fork_end(child_flag);
+
+ return ret;
+}
+
+/* vfork(2) */
+static inline abi_long do_freebsd_vfork(void *cpu_env)
+{
+ return do_freebsd_fork(cpu_env);
+}
+
+/* rfork(2) */
+static inline abi_long do_freebsd_rfork(void *cpu_env, abi_long flags)
+{
+ abi_long ret;
+ abi_ulong child_flag;
+
+ /*
+ * XXX We need to handle RFMEM here, as well. Neither are safe to execute
+ * as-is on x86 hosts because they'll split memory but not the stack,
+ * wreaking havoc on host architectures that use the stack to store the
+ * return address as both threads try to pop it off. Rejecting RFSPAWN
+ * entirely for now is ok, the only consumer at the moment is posix_spawn
+ * and it will fall back to classic vfork(2) if we return EINVAL.
+ */
+ if ((flags & TARGET_RFSPAWN) != 0) {
+ return -TARGET_EINVAL;
+ }
+ fork_start();
+ ret = rfork(flags);
+ if (ret == 0) {
+ /* child */
+ child_flag = 1;
+ target_cpu_clone_regs(cpu_env, 0);
+ } else {
+ /* parent */
+ child_flag = 0;
+ }
+
+ /*
+ * The fork system call sets a child flag in the second return
+ * value: 0 for parent process, 1 for child process.
+ */
+ set_second_rval(cpu_env, child_flag);
+ fork_end(child_flag);
+
+ return ret;
+}
+
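A hedged illustration of the fallback path the comment counts on: a posix_spawn()-style caller first asks for rfork() with RFSPAWN and drops back to vfork(2) when the emulator answers EINVAL (the flag mix and helper name are illustrative):

    #include <errno.h>
    #include <unistd.h>

    static pid_t spawn_sketch(char *const argv[], char *const envp[])
    {
        pid_t pid = rfork(RFPROC | RFFDG | RFSPAWN);

        if (pid == -1 && errno == EINVAL) {
            pid = vfork();                   /* emulator rejected RFSPAWN */
        }
        if (pid == 0) {
            execve(argv[0], argv, envp);     /* child: exec or bail out */
            _exit(127);
        }
        return pid;
    }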
+/* pdfork(2) */
+static inline abi_long do_freebsd_pdfork(void *cpu_env, abi_ulong target_fdp,
+ abi_long flags)
+{
+ abi_long ret;
+ abi_ulong child_flag;
+ int fd;
+
+ fork_start();
+ ret = pdfork(&fd, flags);
+ if (ret == 0) {
+ /* child */
+ child_flag = 1;
+ target_cpu_clone_regs(cpu_env, 0);
+ } else {
+ /* parent */
+ child_flag = 0;
+ if (put_user_s32(fd, target_fdp)) {
+ return -TARGET_EFAULT;
+ }
+ }
+
+ /*
+ * The fork system call sets a child flag in the second return
+ * value: 0 for parent process, 1 for child process.
+ */
+ set_second_rval(cpu_env, child_flag);
+ fork_end(child_flag);
+
+ return ret;
+}
+
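Guest-side view of the process-descriptor calls wired up here and in do_freebsd_pdgetpid() earlier, as a hedged sketch: pdfork(2) hands the parent a descriptor, pdgetpid(2) recovers the pid, and pdkill(2) signals the child through the descriptor.

    #include <sys/types.h>
    #include <sys/procdesc.h>
    #include <signal.h>
    #include <unistd.h>

    static void pdfork_usage_sketch(void)
    {
        int pd;
        pid_t pid = pdfork(&pd, 0);

        if (pid == 0) {
            _exit(0);                  /* child */
        } else if (pid > 0) {
            pid_t child;

            pdgetpid(pd, &child);      /* same pid pdfork() just returned */
            pdkill(pd, SIGTERM);       /* signal the child via its descriptor */
            close(pd);
        }
    }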
+#endif /* BSD_USER_FREEBSD_OS_PROC_H */
diff --git a/bsd-user/freebsd/os-syscall.c b/bsd-user/freebsd/os-syscall.c
index fa60df5..ca2f6fd 100644
--- a/bsd-user/freebsd/os-syscall.c
+++ b/bsd-user/freebsd/os-syscall.c
@@ -33,11 +33,15 @@
#include "signal-common.h"
#include "user/syscall-trace.h"
+/* BSD independent syscall shims */
#include "bsd-file.h"
+#include "bsd-mem.h"
#include "bsd-proc.h"
-/* *BSD dependent syscall shims */
+/* BSD dependent syscall shims */
#include "os-stat.h"
+#include "os-proc.h"
+#include "os-misc.h"
/* I/O */
safe_syscall3(int, open, const char *, path, int, flags, mode_t, mode);
@@ -58,9 +62,11 @@
safe_syscall4(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
off_t, offset);
-void target_set_brk(abi_ulong new_brk)
-{
-}
+/* used in os-proc */
+safe_syscall4(pid_t, wait4, pid_t, wpid, int *, status, int, options,
+ struct rusage *, rusage);
+safe_syscall6(pid_t, wait6, idtype_t, idtype, id_t, id, int *, status, int,
+ options, struct __wrusage *, wrusage, siginfo_t *, infop);
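Roughly the entry points these two lines expand to, shown only as prototypes (a sketch; the bodies come from the safe_syscallN() machinery so blocking calls cooperate with QEMU's signal handling):

    pid_t safe_wait4(pid_t wpid, int *status, int options, struct rusage *rusage);
    pid_t safe_wait6(idtype_t idtype, id_t id, int *status, int options,
                     struct __wrusage *wrusage, siginfo_t *infop);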
/*
* errno conversion.
@@ -219,10 +225,207 @@
/*
* process system calls
*/
+ case TARGET_FREEBSD_NR_fork: /* fork(2) */
+ ret = do_freebsd_fork(cpu_env);
+ break;
+
+ case TARGET_FREEBSD_NR_vfork: /* vfork(2) */
+ ret = do_freebsd_vfork(cpu_env);
+ break;
+
+ case TARGET_FREEBSD_NR_rfork: /* rfork(2) */
+ ret = do_freebsd_rfork(cpu_env, arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_pdfork: /* pdfork(2) */
+ ret = do_freebsd_pdfork(cpu_env, arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_execve: /* execve(2) */
+ ret = do_freebsd_execve(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_fexecve: /* fexecve(2) */
+ ret = do_freebsd_fexecve(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_wait4: /* wait4(2) */
+ ret = do_freebsd_wait4(arg1, arg2, arg3, arg4);
+ break;
+
+ case TARGET_FREEBSD_NR_wait6: /* wait6(2) */
+ ret = do_freebsd_wait6(cpu_env, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7, arg8);
+ break;
+
case TARGET_FREEBSD_NR_exit: /* exit(2) */
ret = do_bsd_exit(cpu_env, arg1);
break;
+ case TARGET_FREEBSD_NR_getgroups: /* getgroups(2) */
+ ret = do_bsd_getgroups(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_setgroups: /* setgroups(2) */
+ ret = do_bsd_setgroups(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_umask: /* umask(2) */
+ ret = do_bsd_umask(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_setlogin: /* setlogin(2) */
+ ret = do_bsd_setlogin(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_getlogin: /* getlogin(2) */
+ ret = do_bsd_getlogin(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_getrusage: /* getrusage(2) */
+ ret = do_bsd_getrusage(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_getrlimit: /* getrlimit(2) */
+ ret = do_bsd_getrlimit(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_setrlimit: /* setrlimit(2) */
+ ret = do_bsd_setrlimit(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_getpid: /* getpid(2) */
+ ret = do_bsd_getpid();
+ break;
+
+ case TARGET_FREEBSD_NR_getppid: /* getppid(2) */
+ ret = do_bsd_getppid();
+ break;
+
+ case TARGET_FREEBSD_NR_getuid: /* getuid(2) */
+ ret = do_bsd_getuid();
+ break;
+
+ case TARGET_FREEBSD_NR_geteuid: /* geteuid(2) */
+ ret = do_bsd_geteuid();
+ break;
+
+ case TARGET_FREEBSD_NR_getgid: /* getgid(2) */
+ ret = do_bsd_getgid();
+ break;
+
+ case TARGET_FREEBSD_NR_getegid: /* getegid(2) */
+ ret = do_bsd_getegid();
+ break;
+
+ case TARGET_FREEBSD_NR_setuid: /* setuid(2) */
+ ret = do_bsd_setuid(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_seteuid: /* seteuid(2) */
+ ret = do_bsd_seteuid(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_setgid: /* setgid(2) */
+ ret = do_bsd_setgid(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_setegid: /* setegid(2) */
+ ret = do_bsd_setegid(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_getpgrp: /* getpgrp(2) */
+ ret = do_bsd_getpgrp();
+ break;
+
+ case TARGET_FREEBSD_NR_getpgid: /* getpgid(2) */
+ ret = do_bsd_getpgid(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_setpgid: /* setpgid(2) */
+ ret = do_bsd_setpgid(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_setreuid: /* setreuid(2) */
+ ret = do_bsd_setreuid(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_setregid: /* setregid(2) */
+ ret = do_bsd_setregid(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_getresuid: /* getresuid(2) */
+ ret = do_bsd_getresuid(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_getresgid: /* getresgid(2) */
+ ret = do_bsd_getresgid(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_setresuid: /* setresuid(2) */
+ ret = do_bsd_setresuid(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_setresgid: /* setresgid(2) */
+ ret = do_bsd_setresgid(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_getsid: /* getsid(2) */
+ ret = do_bsd_getsid(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_setsid: /* setsid(2) */
+ ret = do_bsd_setsid();
+ break;
+
+ case TARGET_FREEBSD_NR_issetugid: /* issetugid(2) */
+ ret = do_bsd_issetugid();
+ break;
+
+ case TARGET_FREEBSD_NR_profil: /* profil(2) */
+ ret = do_bsd_profil(arg1, arg2, arg3, arg4);
+ break;
+
+ case TARGET_FREEBSD_NR_ktrace: /* ktrace(2) */
+ ret = do_bsd_ktrace(arg1, arg2, arg3, arg4);
+ break;
+
+ case TARGET_FREEBSD_NR_setloginclass: /* setloginclass(2) */
+ ret = do_freebsd_setloginclass(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_getloginclass: /* getloginclass(2) */
+ ret = do_freebsd_getloginclass(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_pdgetpid: /* pdgetpid(2) */
+ ret = do_freebsd_pdgetpid(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR___setugid: /* undocumented */
+ ret = do_freebsd___setugid(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_utrace: /* utrace(2) */
+ ret = do_bsd_utrace(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_ptrace: /* ptrace(2) */
+ ret = do_bsd_ptrace(arg1, arg2, arg3, arg4);
+ break;
+
+ case TARGET_FREEBSD_NR_getpriority: /* getpriority(2) */
+ ret = do_bsd_getpriority(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_setpriority: /* setpriority(2) */
+ ret = do_bsd_setpriority(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_procctl: /* procctl(2) */
+ ret = do_freebsd_procctl(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
+ break;
+
/*
* File system calls.
*/
@@ -592,6 +795,108 @@
ret = do_freebsd_fcntl(arg1, arg2, arg3);
break;
+ /*
+ * Memory management system calls.
+ */
+ case TARGET_FREEBSD_NR_mmap: /* mmap(2) */
+ ret = do_bsd_mmap(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
+ arg8);
+ break;
+
+ case TARGET_FREEBSD_NR_munmap: /* munmap(2) */
+ ret = do_bsd_munmap(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_mprotect: /* mprotect(2) */
+ ret = do_bsd_mprotect(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_msync: /* msync(2) */
+ ret = do_bsd_msync(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_mlock: /* mlock(2) */
+ ret = do_bsd_mlock(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_munlock: /* munlock(2) */
+ ret = do_bsd_munlock(arg1, arg2);
+ break;
+
+ case TARGET_FREEBSD_NR_mlockall: /* mlockall(2) */
+ ret = do_bsd_mlockall(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_munlockall: /* munlockall(2) */
+ ret = do_bsd_munlockall();
+ break;
+
+ case TARGET_FREEBSD_NR_madvise: /* madvise(2) */
+ ret = do_bsd_madvise(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_minherit: /* minherit(2) */
+ ret = do_bsd_minherit(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_mincore: /* mincore(2) */
+ ret = do_bsd_mincore(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_freebsd12_shm_open: /* shm_open(2) */
+ ret = do_bsd_shm_open(arg1, arg2, arg3);
+ break;
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 1300048
+ case TARGET_FREEBSD_NR_shm_open2: /* shm_open2(2) */
+ ret = do_freebsd_shm_open2(arg1, arg2, arg3, arg4, arg5);
+ break;
+#endif
+
+#if defined(__FreeBSD_version) && __FreeBSD_version >= 1300049
+ case TARGET_FREEBSD_NR_shm_rename: /* shm_rename(2) */
+ ret = do_freebsd_shm_rename(arg1, arg2, arg3);
+ break;
+#endif
+
+ case TARGET_FREEBSD_NR_shm_unlink: /* shm_unlink(2) */
+ ret = do_bsd_shm_unlink(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_shmget: /* shmget(2) */
+ ret = do_bsd_shmget(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_shmctl: /* shmctl(2) */
+ ret = do_bsd_shmctl(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_shmat: /* shmat(2) */
+ ret = do_bsd_shmat(arg1, arg2, arg3);
+ break;
+
+ case TARGET_FREEBSD_NR_shmdt: /* shmdt(2) */
+ ret = do_bsd_shmdt(arg1);
+ break;
+
+ case TARGET_FREEBSD_NR_freebsd11_vadvise:
+ ret = do_bsd_vadvise();
+ break;
+
+ case TARGET_FREEBSD_NR_sbrk:
+ ret = do_bsd_sbrk();
+ break;
+
+ case TARGET_FREEBSD_NR_sstk:
+ ret = do_bsd_sstk();
+ break;
+
+ /*
+ * Misc
+ */
+ case TARGET_FREEBSD_NR_break:
+ ret = do_obreak(arg1);
+ break;
/*
* sys{ctl, arch, call}
diff --git a/bsd-user/main.c b/bsd-user/main.c
index f913cb5..c402fad 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -36,7 +36,7 @@
#include "qemu/help_option.h"
#include "qemu/module.h"
#include "exec/exec-all.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "qemu/cutils.h"
@@ -88,7 +88,7 @@
unsigned long reserved_va;
#endif
-static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
+const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
char qemu_proc_pathname[PATH_MAX]; /* full path to executable */
@@ -462,7 +462,7 @@
ac->init_machine(NULL);
}
cpu = cpu_create(cpu_type);
- env = cpu->env_ptr;
+ env = cpu_env(cpu);
cpu_reset(cpu);
thread_cpu = cpu;
@@ -586,7 +586,7 @@
* generating the prologue until now so that the prologue can take
* the real value of GUEST_BASE into account.
*/
- tcg_prologue_init(tcg_ctx);
+ tcg_prologue_init();
target_cpu_init(env, regs);
diff --git a/bsd-user/meson.build b/bsd-user/meson.build
index 5243122..c6bfd3b 100644
--- a/bsd-user/meson.build
+++ b/bsd-user/meson.build
@@ -7,6 +7,8 @@
common_user_inc += include_directories('include')
bsd_user_ss.add(files(
+ 'bsd-mem.c',
+ 'bsd-proc.c',
'bsdload.c',
'elfload.c',
'main.c',
@@ -16,6 +18,11 @@
'uaccess.c',
))
+elf = cc.find_library('elf', required: true)
+procstat = cc.find_library('procstat', required: true)
+kvm = cc.find_library('kvm', required: true)
+bsd_user_ss.add(elf, procstat, kvm)
+
# Pull in the OS-specific build glue, if any
subdir(targetos)
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index 8e148a2..3ef11b2 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -636,7 +636,7 @@
return -1;
}
-static void mmap_reserve(abi_ulong start, abi_ulong size)
+void mmap_reserve(abi_ulong start, abi_ulong size)
{
abi_ulong real_start;
abi_ulong real_end;
diff --git a/bsd-user/qemu-bsd.h b/bsd-user/qemu-bsd.h
new file mode 100644
index 0000000..ffc64bb
--- /dev/null
+++ b/bsd-user/qemu-bsd.h
@@ -0,0 +1,58 @@
+/*
+ * BSD conversion extern declarations
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_BSD_H
+#define QEMU_BSD_H
+
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/ipc.h>
+#include <sys/msg.h>
+#include <sys/resource.h>
+#include <sys/sem.h>
+#include <sys/shm.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/uuid.h>
+#include <sys/wait.h>
+#include <netinet/in.h>
+
+/* bsd-proc.c */
+int target_to_host_resource(int code);
+rlim_t target_to_host_rlim(abi_llong target_rlim);
+abi_llong host_to_target_rlim(rlim_t rlim);
+abi_long host_to_target_rusage(abi_ulong target_addr,
+ const struct rusage *rusage);
+abi_long host_to_target_wrusage(abi_ulong target_addr,
+ const struct __wrusage *wrusage);
+int host_to_target_waitstatus(int status);
+void h2g_rusage(const struct rusage *rusage,
+ struct target_freebsd_rusage *target_rusage);
+
+/* bsd-mem.c */
+void target_to_host_ipc_perm__locked(struct ipc_perm *host_ip,
+ struct target_ipc_perm *target_ip);
+void host_to_target_ipc_perm__locked(struct target_ipc_perm *target_ip,
+ struct ipc_perm *host_ip);
+abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
+ abi_ulong target_addr);
+abi_long host_to_target_shmid_ds(abi_ulong target_addr,
+ struct shmid_ds *host_sd);
+
+#endif /* QEMU_BSD_H */
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
index d950713..dc842ff 100644
--- a/bsd-user/qemu.h
+++ b/bsd-user/qemu.h
@@ -111,6 +111,7 @@
} __attribute__((aligned(16))) TaskState;
void stop_all_tasks(void);
+extern const char *interp_prefix;
extern const char *qemu_uname_release;
/*
@@ -232,6 +233,7 @@
int target_msync(abi_ulong start, abi_ulong len, int flags);
extern abi_ulong mmap_next_start;
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
+void mmap_reserve(abi_ulong start, abi_ulong size);
void TSA_NO_TSA mmap_fork_start(void);
void TSA_NO_TSA mmap_fork_end(int child);
@@ -249,6 +251,12 @@
bool is_error(abi_long ret);
int host_to_target_errno(int err);
+/* os-proc.c */
+abi_long freebsd_exec_common(abi_ulong path_or_fd, abi_ulong guest_argp,
+ abi_ulong guest_envp, int do_fexec);
+abi_long do_freebsd_procctl(void *cpu_env, int idtype, abi_ulong arg2,
+ abi_ulong arg3, abi_ulong arg4, abi_ulong arg5, abi_ulong arg6);
+
/* os-sys.c */
abi_long do_freebsd_sysctl(CPUArchState *env, abi_ulong namep, int32_t namelen,
abi_ulong oldp, abi_ulong oldlenp, abi_ulong newp, abi_ulong newlen);
diff --git a/bsd-user/signal-common.h b/bsd-user/signal-common.h
index c044e81..77d7c7a 100644
--- a/bsd-user/signal-common.h
+++ b/bsd-user/signal-common.h
@@ -35,6 +35,7 @@
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
long do_sigreturn(CPUArchState *env, abi_ulong addr);
void force_sig_fault(int sig, int code, abi_ulong addr);
+void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
int host_to_target_signal(int sig);
void host_to_target_sigset(target_sigset_t *d, const sigset_t *s);
void process_pending_signals(CPUArchState *env);
diff --git a/bsd-user/signal.c b/bsd-user/signal.c
index b6beab6..ca31470 100644
--- a/bsd-user/signal.c
+++ b/bsd-user/signal.c
@@ -311,6 +311,12 @@
}
}
+void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
+{
+ host_to_target_siginfo_noswap(tinfo, info);
+ tswap_siginfo(tinfo, tinfo);
+}
+
int block_signals(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
@@ -351,8 +357,8 @@
static G_NORETURN
void dump_core_and_abort(int target_sig)
{
- CPUArchState *env = thread_cpu->env_ptr;
- CPUState *cpu = env_cpu(env);
+ CPUState *cpu = thread_cpu;
+ CPUArchState *env = cpu_env(cpu);
TaskState *ts = cpu->opaque;
int core_dumped = 0;
int host_sig;
@@ -457,7 +463,7 @@
void force_sig_fault(int sig, int code, abi_ulong addr)
{
CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
target_siginfo_t info = {};
info.si_signo = sig;
@@ -469,8 +475,7 @@
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
- CPUArchState *env = thread_cpu->env_ptr;
- CPUState *cpu = env_cpu(env);
+ CPUState *cpu = thread_cpu;
TaskState *ts = cpu->opaque;
target_siginfo_t tinfo;
ucontext_t *uc = puc;
@@ -848,11 +853,6 @@
act.sa_flags = SA_SIGINFO;
for (i = 1; i <= TARGET_NSIG; i++) {
-#ifdef CONFIG_GPROF
- if (i == TARGET_SIGPROF) {
- continue;
- }
-#endif
host_sig = target_to_host_signal(i);
sigaction(host_sig, NULL, &oact);
if (oact.sa_sigaction == (void *)SIG_IGN) {
diff --git a/bsd-user/syscall_defs.h b/bsd-user/syscall_defs.h
index 9c90616..52f84d5 100644
--- a/bsd-user/syscall_defs.h
+++ b/bsd-user/syscall_defs.h
@@ -56,8 +56,47 @@
};
/*
+ * sys/ipc.h
+ */
+struct target_ipc_perm {
+ uint32_t cuid; /* creator user id */
+ uint32_t cgid; /* creator group id */
+ uint32_t uid; /* user id */
+ uint32_t gid; /* group id */
+ uint16_t mode; /* r/w permission */
+ uint16_t seq; /* sequence # */
+ abi_long key; /* user specified msg/sem/shm key */
+};
+
+#define TARGET_IPC_RMID 0 /* remove identifier */
+#define TARGET_IPC_SET 1 /* set options */
+#define TARGET_IPC_STAT 2 /* get options */
+
+/*
+ * sys/shm.h
+ */
+struct target_shmid_ds {
+ struct target_ipc_perm shm_perm; /* operation permission structure */
+ abi_ulong shm_segsz; /* size of segment in bytes */
+ int32_t shm_lpid; /* process ID of last shared memory op */
+ int32_t shm_cpid; /* process ID of creator */
+ int32_t shm_nattch; /* number of current attaches */
+ target_time_t shm_atime; /* time of last shmat() */
+ target_time_t shm_dtime; /* time of last shmdt() */
+ target_time_t shm_ctime; /* time of last change by shmctl() */
+};
+
+#define N_BSD_SHM_REGIONS 32
+struct bsd_shm_regions {
+ abi_long start;
+ abi_long size;
+};
+
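These layouts exist to be converted field by field between host and guest. A minimal sketch of the host-to-guest direction, assuming the usual __put_user() swapping helpers and the host struct ipc_perm from <sys/ipc.h> (the real converters live in bsd-user/bsd-mem.c):

    static void sketch_host_to_target_ipc_perm(struct target_ipc_perm *t,
                                               const struct ipc_perm *h)
    {
        __put_user(h->cuid, &t->cuid);
        __put_user(h->cgid, &t->cgid);
        __put_user(h->uid,  &t->uid);
        __put_user(h->gid,  &t->gid);
        __put_user(h->mode, &t->mode);
        __put_user(h->seq,  &t->seq);
        __put_user(h->key,  &t->key);
    }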
+/*
* sys/mman.h
*/
+#define TARGET_MADV_DONTNEED 4 /* don't need these pages */
+
#define TARGET_FREEBSD_MAP_RESERVED0080 0x0080 /* previously misimplemented */
/* MAP_INHERIT */
#define TARGET_FREEBSD_MAP_RESERVED0100 0x0100 /* previously unimplemented */
@@ -130,11 +169,7 @@
/*
* sys/resource.h
*/
-#if defined(__FreeBSD__)
#define TARGET_RLIM_INFINITY RLIM_INFINITY
-#else
-#define TARGET_RLIM_INFINITY ((abi_ulong)-1)
-#endif
#define TARGET_RLIMIT_CPU 0
#define TARGET_RLIMIT_FSIZE 1
@@ -390,6 +425,52 @@
int32_t l_sysid;
} QEMU_PACKED;
+/* sys/unistd.h */
+/* user: vfork(2) semantics, clear signals */
+#define TARGET_RFSPAWN (1U << 31)
+
+/*
+ * from sys/procctl.h
+ */
+#define TARGET_PROC_SPROTECT 1
+#define TARGET_PROC_REAP_ACQUIRE 2
+#define TARGET_PROC_REAP_RELEASE 3
+#define TARGET_PROC_REAP_STATUS 4
+#define TARGET_PROC_REAP_GETPIDS 5
+#define TARGET_PROC_REAP_KILL 6
+
+struct target_procctl_reaper_status {
+ uint32_t rs_flags;
+ uint32_t rs_children;
+ uint32_t rs_descendants;
+ uint32_t rs_reaper;
+ uint32_t rs_pid;
+ uint32_t rs_pad0[15];
+};
+
+struct target_procctl_reaper_pidinfo {
+ uint32_t pi_pid;
+ uint32_t pi_subtree;
+ uint32_t pi_flags;
+ uint32_t pi_pad0[15];
+};
+
+struct target_procctl_reaper_pids {
+ uint32_t rp_count;
+ uint32_t rp_pad0[15];
+ abi_ulong rp_pids;
+};
+
+struct target_procctl_reaper_kill {
+ int32_t rk_sig;
+ uint32_t rk_flags;
+ uint32_t rk_subtree;
+ uint32_t rk_killed;
+ uint32_t rk_fpid;
+ uint32_t rk_pad0[15];
+};
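These structures mirror FreeBSD's reaper interface one to one; for reference, a hedged guest-side sketch of the calls a program under emulation would make against them:

    #include <sys/procctl.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static int reaper_sketch(void)
    {
        struct procctl_reaper_status st;

        if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1) {
            return -1;                        /* become a reaper first */
        }
        return procctl(P_PID, getpid(), PROC_REAP_STATUS, &st);
    }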
+
#define safe_syscall0(type, name) \
type safe_##name(void) \
{ \
diff --git a/chardev/char-pty.c b/chardev/char-pty.c
index 4e5deac..cc2f761 100644
--- a/chardev/char-pty.c
+++ b/chardev/char-pty.c
@@ -106,11 +106,27 @@
static int char_pty_chr_write(Chardev *chr, const uint8_t *buf, int len)
{
PtyChardev *s = PTY_CHARDEV(chr);
+ GPollFD pfd;
+ int rc;
- if (!s->connected) {
- return len;
+ if (s->connected) {
+ return io_channel_send(s->ioc, buf, len);
}
- return io_channel_send(s->ioc, buf, len);
+
+ /*
+ * The other side might already be re-connected, but the timer might
+ * not have fired yet. So let's check here whether we can write again:
+ */
+ pfd.fd = QIO_CHANNEL_FILE(s->ioc)->fd;
+ pfd.events = G_IO_OUT;
+ pfd.revents = 0;
+ rc = RETRY_ON_EINTR(g_poll(&pfd, 1, 0));
+ g_assert(rc >= 0);
+ if (!(pfd.revents & G_IO_HUP) && (pfd.revents & G_IO_OUT)) {
+ io_channel_send(s->ioc, buf, len);
+ }
+
+ return len;
}
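The same probe in isolation, as a hedged standalone sketch (helper name hypothetical): "writable" means the zero-timeout poll reports G_IO_OUT without G_IO_HUP, i.e. the peer has actually (re)opened its side of the pty.

    #include <errno.h>
    #include <glib.h>
    #include <stdbool.h>

    static bool pty_peer_writable_sketch(int fd)
    {
        GPollFD pfd = { .fd = fd, .events = G_IO_OUT, .revents = 0 };
        int rc;

        do {
            rc = g_poll(&pfd, 1, 0);        /* zero timeout: just a probe */
        } while (rc < 0 && errno == EINTR);

        return rc > 0 && !(pfd.revents & G_IO_HUP) && (pfd.revents & G_IO_OUT);
    }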
static GSource *pty_chr_add_watch(Chardev *chr, GIOCondition cond)
diff --git a/cpus-common.c b/cpu-common.c
similarity index 100%
rename from cpus-common.c
rename to cpu-common.c
diff --git a/cpu.c b/cpu-target.c
similarity index 97%
rename from cpu.c
rename to cpu-target.c
index 0769b0b..658d179 100644
--- a/cpu.c
+++ b/cpu-target.c
@@ -136,15 +136,10 @@
/* cache the cpu class for the hotpath */
cpu->cc = CPU_GET_CLASS(cpu);
- if (!accel_cpu_realizefn(cpu, errp)) {
+ if (!accel_cpu_common_realize(cpu, errp)) {
return;
}
- /* NB: errp parameter is unused currently */
- if (tcg_enabled()) {
- tcg_exec_realizefn(cpu, errp);
- }
-
/* Wait until cpu initialization complete before exposing cpu. */
cpu_list_add(cpu);
@@ -187,11 +182,9 @@
cpu_list_remove(cpu);
/*
* Now that the vCPU has been removed from the RCU list, we can call
- * tcg_exec_unrealizefn, which may free fields using call_rcu.
+ * accel_cpu_common_unrealize, which may free fields using call_rcu.
*/
- if (tcg_enabled()) {
- tcg_exec_unrealizefn(cpu);
- }
+ accel_cpu_common_unrealize(cpu);
}
/*
diff --git a/crypto/cipher-gnutls.c.inc b/crypto/cipher-gnutls.c.inc
index 501e4e0..d3e231c 100644
--- a/crypto/cipher-gnutls.c.inc
+++ b/crypto/cipher-gnutls.c.inc
@@ -113,7 +113,7 @@
while (len) {
gnutls_cipher_hd_t handle;
gnutls_datum_t gkey = { (unsigned char *)ctx->key, ctx->nkey };
- int err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
+ err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
if (err != 0) {
error_setg(errp, "Cannot initialize cipher: %s",
gnutls_strerror(err));
@@ -174,7 +174,7 @@
while (len) {
gnutls_cipher_hd_t handle;
gnutls_datum_t gkey = { (unsigned char *)ctx->key, ctx->nkey };
- int err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
+ err = gnutls_cipher_init(&handle, ctx->galg, &gkey, NULL);
if (err != 0) {
error_setg(errp, "Cannot initialize cipher: %s",
gnutls_strerror(err));
diff --git a/crypto/meson.build b/crypto/meson.build
index 9ac1a89..c46f9c2 100644
--- a/crypto/meson.build
+++ b/crypto/meson.build
@@ -46,7 +46,8 @@
if have_afalg
crypto_ss.add(if_true: files('afalg.c', 'cipher-afalg.c', 'hash-afalg.c'))
endif
-crypto_ss.add(when: gnutls, if_true: files('tls-cipher-suites.c'))
+
+system_ss.add(when: gnutls, if_true: files('tls-cipher-suites.c'))
util_ss.add(files(
'aes.c',
diff --git a/crypto/tls-cipher-suites.c b/crypto/tls-cipher-suites.c
index 5e4f597..d0df4ba 100644
--- a/crypto/tls-cipher-suites.c
+++ b/crypto/tls-cipher-suites.c
@@ -52,7 +52,6 @@
byte_array = g_byte_array_new();
for (i = 0;; i++) {
- int ret;
unsigned idx;
const char *name;
IANA_TLS_CIPHER cipher;
diff --git a/disas/m68k.c b/disas/m68k.c
index aefaecf..1f16e29 100644
--- a/disas/m68k.c
+++ b/disas/m68k.c
@@ -1632,10 +1632,10 @@
case '2':
case '3':
{
- int val = fetch_arg (buffer, place, 5, info);
+ int reg = fetch_arg (buffer, place, 5, info);
const char *name = 0;
- switch (val)
+ switch (reg)
{
case 2: name = "%tt0"; break;
case 3: name = "%tt1"; break;
@@ -1655,12 +1655,12 @@
int break_reg = ((buffer[3] >> 2) & 7);
(*info->fprintf_func)
- (info->stream, val == 0x1c ? "%%bad%d" : "%%bac%d",
+ (info->stream, reg == 0x1c ? "%%bad%d" : "%%bac%d",
break_reg);
}
break;
default:
- (*info->fprintf_func) (info->stream, "<mmu register %d>", val);
+ (*info->fprintf_func) (info->stream, "<mmu register %d>", reg);
}
if (name)
(*info->fprintf_func) (info->stream, "%s", name);
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index dc4da95..3b074b9 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -20,38 +20,14 @@
What follows is a list of all features currently marked as
deprecated.
-Build options
--------------
-
-``gprof`` builds (since 8.0)
-''''''''''''''''''''''''''''
-
-The ``--enable-gprof`` configure setting relies on compiler
-instrumentation to gather its data which can distort the generated
-profile. As other non-instrumenting tools are available that give a
-more holistic view of the system with non-instrumented binaries we are
-deprecating the build option and no longer defend it in CI. The
-``--enable-gcov`` build option remains for analysis test case
-coverage.
-
System emulator command line arguments
--------------------------------------
-``QEMU_AUDIO_`` environment variables and ``-audio-help`` (since 4.0)
-'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-The ``-audiodev`` argument is now the preferred way to specify audio
-backend settings instead of environment variables. To ease migration to
-the new format, the ``-audiodev-help`` option can be used to convert
-the current values of the environment variables to ``-audiodev`` options.
-
-Creating sound card devices and vnc without ``audiodev=`` property (since 4.2)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+Creating sound card devices without ``audiodev=`` property (since 4.2)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
When not using the deprecated legacy audio config, each sound card
-should specify an ``audiodev=`` property. Additionally, when using
-vnc, you should specify an ``audiodev=`` property if you plan to
-transmit audio through the VNC protocol.
+should specify an ``audiodev=`` property.
Short-form boolean options (since 6.0)
''''''''''''''''''''''''''''''''''''''
@@ -277,14 +253,6 @@
better reflects the way this property affects all random data within
the device tree blob, not just the ``kaslr-seed`` node.
-``pc-i440fx-1.4`` up to ``pc-i440fx-1.7`` (since 7.0)
-'''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-These old machine types are quite neglected nowadays and thus might have
-various pitfalls with regards to live migration. Use a newer machine type
-instead.
-
-
Backend options
---------------
diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst
index c2043fd..e83ed08 100644
--- a/docs/about/removed-features.rst
+++ b/docs/about/removed-features.rst
@@ -436,6 +436,18 @@
option which lets the password be securely provided on the command
line using a ``secret`` object instance.
+``QEMU_AUDIO_`` environment variables and ``-audio-help`` (removed in 8.2)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+The ``-audiodev`` and ``-audio`` command line options are now the only
+way to specify audio backend settings.
+
+Creating vnc without ``audiodev=`` property (removed in 8.2)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+When using vnc, you should specify an ``audiodev=`` property if
+you plan to transmit audio through the VNC protocol.
+
QEMU Machine Protocol (QMP) commands
------------------------------------
@@ -715,8 +727,8 @@
This machine has been renamed ``fuloong2e``.
-``pc-0.10`` up to ``pc-1.3`` (removed in 4.0 up to 6.0)
-'''''''''''''''''''''''''''''''''''''''''''''''''''''''
+``pc-0.10`` up to ``pc-i440fx-1.7`` (removed in 4.0 up to 8.2)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
These machine types were very old and likely could not be used for live
migration from old QEMU versions anymore. Use a newer machine type instead.
diff --git a/gdbstub/gdbstub.c b/gdbstub/gdbstub.c
index 349d348..8eea214 100644
--- a/gdbstub/gdbstub.c
+++ b/gdbstub/gdbstub.c
@@ -423,7 +423,7 @@
static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
GDBRegisterState *r;
if (reg < cc->gdb_num_core_regs) {
@@ -441,7 +441,7 @@
static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
GDBRegisterState *r;
if (reg < cc->gdb_num_core_regs) {
diff --git a/gdbstub/meson.build b/gdbstub/meson.build
index 9500b9d..a5a1f4e 100644
--- a/gdbstub/meson.build
+++ b/gdbstub/meson.build
@@ -21,12 +21,12 @@
gdb_user_ss.sources() + genh,
name_suffix: 'fa',
c_args: '-DCONFIG_USER_ONLY',
- build_by_default: have_user)
+ build_by_default: false)
libgdb_softmmu = static_library('gdb_softmmu',
gdb_system_ss.sources() + genh,
name_suffix: 'fa',
- build_by_default: have_system)
+ build_by_default: false)
gdb_user = declare_dependency(link_whole: libgdb_user)
user_ss.add(gdb_user)
diff --git a/gdbstub/user-target.c b/gdbstub/user-target.c
index 6e21c31..c4bba4c 100644
--- a/gdbstub/user-target.c
+++ b/gdbstub/user-target.c
@@ -310,7 +310,7 @@
uint64_t mode = get_param(params, 2)->val_ull;
#ifdef CONFIG_LINUX
- int fd = do_guest_openat(gdbserver_state.g_cpu->env_ptr, 0, filename,
+ int fd = do_guest_openat(cpu_env(gdbserver_state.g_cpu), 0, filename,
flags, mode, false);
#else
int fd = open(filename, flags, mode);
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c
index ff14c3f..634bbec 100644
--- a/hw/acpi/cpu_hotplug.c
+++ b/hw/acpi/cpu_hotplug.c
@@ -265,26 +265,27 @@
/* build Processor object for each processor */
for (i = 0; i < apic_ids->len; i++) {
- int apic_id = apic_ids->cpus[i].arch_id;
+ int cpu_apic_id = apic_ids->cpus[i].arch_id;
- assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
+ assert(cpu_apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
- dev = aml_processor(i, 0, 0, "CP%.02X", apic_id);
+ dev = aml_processor(i, 0, 0, "CP%.02X", cpu_apic_id);
method = aml_method("_MAT", 0, AML_NOTSERIALIZED);
aml_append(method,
- aml_return(aml_call2(CPU_MAT_METHOD, aml_int(apic_id), aml_int(i))
+ aml_return(aml_call2(CPU_MAT_METHOD,
+ aml_int(cpu_apic_id), aml_int(i))
));
aml_append(dev, method);
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
aml_append(method,
- aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(apic_id))));
+ aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(cpu_apic_id))));
aml_append(dev, method);
method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
aml_append(method,
- aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(apic_id),
+ aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(cpu_apic_id),
aml_arg(0)))
);
aml_append(dev, method);
@@ -298,11 +299,11 @@
/* Arg0 = APIC ID */
method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
for (i = 0; i < apic_ids->len; i++) {
- int apic_id = apic_ids->cpus[i].arch_id;
+ int cpu_apic_id = apic_ids->cpus[i].arch_id;
- if_ctx = aml_if(aml_equal(aml_arg(0), aml_int(apic_id)));
+ if_ctx = aml_if(aml_equal(aml_arg(0), aml_int(cpu_apic_id)));
aml_append(if_ctx,
- aml_notify(aml_name("CP%.02X", apic_id), aml_arg(1))
+ aml_notify(aml_name("CP%.02X", cpu_apic_id), aml_arg(1))
);
aml_append(method, if_ctx);
}
@@ -319,13 +320,13 @@
aml_varpackage(x86ms->apic_id_limit);
for (i = 0, apic_idx = 0; i < apic_ids->len; i++) {
- int apic_id = apic_ids->cpus[i].arch_id;
+ int cpu_apic_id = apic_ids->cpus[i].arch_id;
- for (; apic_idx < apic_id; apic_idx++) {
+ for (; apic_idx < cpu_apic_id; apic_idx++) {
aml_append(pkg, aml_int(0));
}
aml_append(pkg, aml_int(apic_ids->cpus[i].cpu ? 1 : 0));
- apic_idx = apic_id + 1;
+ apic_idx = cpu_apic_id + 1;
}
aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
aml_append(ctx, sb_scope);
diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c
index 7d29eb2..a0d367c 100644
--- a/hw/arm/allwinner-r40.c
+++ b/hw/arm/allwinner-r40.c
@@ -296,10 +296,9 @@
{
const char *r40_nic_models[] = { "gmac", "emac", NULL };
AwR40State *s = AW_R40(dev);
- unsigned i;
/* CPUs */
- for (i = 0; i < AW_R40_NUM_CPUS; i++) {
+ for (unsigned i = 0; i < AW_R40_NUM_CPUS; i++) {
/*
* Disable secondary CPUs. Guest EL3 firmware will start
@@ -335,7 +334,7 @@
* maintenance interrupt signal to the appropriate GIC PPI inputs,
* and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
*/
- for (i = 0; i < AW_R40_NUM_CPUS; i++) {
+ for (unsigned i = 0; i < AW_R40_NUM_CPUS; i++) {
DeviceState *cpudev = DEVICE(&s->cpus[i]);
int ppibase = AW_R40_GIC_NUM_SPI + i * GIC_INTERNAL + GIC_NR_SGIS;
int irq;
@@ -494,7 +493,7 @@
qdev_get_gpio_in(DEVICE(&s->gic), AW_R40_GIC_SPI_EMAC));
/* Unimplemented devices */
- for (i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) {
+ for (unsigned i = 0; i < ARRAY_SIZE(r40_unimplemented); i++) {
create_unimplemented_device(r40_unimplemented[i].device_name,
r40_unimplemented[i].base,
r40_unimplemented[i].size);
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
index 11cd08b..31acbf7 100644
--- a/hw/arm/armsse.c
+++ b/hw/arm/armsse.c
@@ -1468,7 +1468,6 @@
if (info->has_cachectrl) {
for (i = 0; i < info->num_cpus; i++) {
char *name = g_strdup_printf("cachectrl%d", i);
- MemoryRegion *mr;
qdev_prop_set_string(DEVICE(&s->cachectrl[i]), "name", name);
g_free(name);
@@ -1484,7 +1483,6 @@
if (info->has_cpusecctrl) {
for (i = 0; i < info->num_cpus; i++) {
char *name = g_strdup_printf("CPUSECCTRL%d", i);
- MemoryRegion *mr;
qdev_prop_set_string(DEVICE(&s->cpusecctrl[i]), "name", name);
g_free(name);
@@ -1499,7 +1497,6 @@
}
if (info->has_cpuid) {
for (i = 0; i < info->num_cpus; i++) {
- MemoryRegion *mr;
qdev_prop_set_uint32(DEVICE(&s->cpuid[i]), "CPUID", i);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpuid[i]), errp)) {
@@ -1512,7 +1509,6 @@
}
if (info->has_cpu_pwrctrl) {
for (i = 0; i < info->num_cpus; i++) {
- MemoryRegion *mr;
if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpu_pwrctrl[i]), errp)) {
return;
@@ -1605,7 +1601,7 @@
/* Wire up the splitters for the MPC IRQs */
for (i = 0; i < IOTS_NUM_EXP_MPC + info->sram_banks; i++) {
SplitIRQ *splitter = &s->mpc_irq_splitter[i];
- DeviceState *dev_splitter = DEVICE(splitter);
+ DeviceState *devs = DEVICE(splitter);
if (!object_property_set_int(OBJECT(splitter), "num-lines", 2,
errp)) {
@@ -1617,22 +1613,22 @@
if (i < IOTS_NUM_EXP_MPC) {
/* Splitter input is from GPIO input line */
- s->mpcexp_status_in[i] = qdev_get_gpio_in(dev_splitter, 0);
- qdev_connect_gpio_out(dev_splitter, 0,
+ s->mpcexp_status_in[i] = qdev_get_gpio_in(devs, 0);
+ qdev_connect_gpio_out(devs, 0,
qdev_get_gpio_in_named(dev_secctl,
"mpcexp_status", i));
} else {
/* Splitter input is from our own MPC */
qdev_connect_gpio_out_named(DEVICE(&s->mpc[i - IOTS_NUM_EXP_MPC]),
"irq", 0,
- qdev_get_gpio_in(dev_splitter, 0));
- qdev_connect_gpio_out(dev_splitter, 0,
+ qdev_get_gpio_in(devs, 0));
+ qdev_connect_gpio_out(devs, 0,
qdev_get_gpio_in_named(dev_secctl,
"mpc_status",
i - IOTS_NUM_EXP_MPC));
}
- qdev_connect_gpio_out(dev_splitter, 1,
+ qdev_connect_gpio_out(devs, 1,
qdev_get_gpio_in(DEVICE(&s->mpc_irq_orgate), i));
}
/* Create GPIO inputs which will pass the line state for our
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index bf173b1..1f78e18 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -517,7 +517,7 @@
for (i = 0; i < ARRAY_SIZE(s->bitband); i++) {
if (s->enable_bitband) {
Object *obj = OBJECT(&s->bitband[i]);
- SysBusDevice *sbd = SYS_BUS_DEVICE(&s->bitband[i]);
+ sbd = SYS_BUS_DEVICE(&s->bitband[i]);
if (!object_property_set_int(obj, "base",
bitband_input_addr[i], errp)) {
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
index a8b3a80..e122e1c 100644
--- a/hw/arm/aspeed_ast2600.c
+++ b/hw/arm/aspeed_ast2600.c
@@ -388,7 +388,7 @@
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0,
sc->memmap[ASPEED_DEV_TIMER1]);
for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
- qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i);
+ irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
}
@@ -413,8 +413,8 @@
}
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]);
for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) {
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- sc->irqmap[ASPEED_DEV_I2C] + i);
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ sc->irqmap[ASPEED_DEV_I2C] + i);
/* The AST2600 I2C controller has one IRQ per bus. */
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq);
}
@@ -611,8 +611,8 @@
}
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]);
for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) {
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
- sc->irqmap[ASPEED_DEV_I3C] + i);
+ irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ sc->irqmap[ASPEED_DEV_I3C] + i);
/* The AST2600 I3C controller has one IRQ per bus. */
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq);
}
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
index b109ece..d176e9a 100644
--- a/hw/arm/integratorcp.c
+++ b/hw/arm/integratorcp.c
@@ -27,6 +27,7 @@
#include "hw/irq.h"
#include "hw/sd/sd.h"
#include "qom/object.h"
+#include "audio/audio.h"
#define TYPE_INTEGRATOR_CM "integrator_core"
OBJECT_DECLARE_SIMPLE_TYPE(IntegratorCMState, INTEGRATOR_CM)
@@ -660,7 +661,13 @@
&error_fatal);
}
- sysbus_create_varargs("pl041", 0x1d000000, pic[25], NULL);
+ dev = qdev_new("pl041");
+ if (machine->audiodev) {
+ qdev_prop_set_string(dev, "audiodev", machine->audiodev);
+ }
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x1d000000);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[25]);
if (nd_table[0].used)
smc91c111_init(&nd_table[0], 0xc8000000, pic[27]);
@@ -678,6 +685,8 @@
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
mc->default_ram_id = "integrator.ram";
+
+ machine_add_audiodev_property(mc);
}
DEFINE_MACHINE("integratorcp", integratorcp_machine_init)
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
index dc4e43e..9703bfb 100644
--- a/hw/arm/musicpal.c
+++ b/hw/arm/musicpal.c
@@ -37,9 +37,9 @@
#include "qemu/cutils.h"
#include "qom/object.h"
#include "hw/net/mv88w8618_eth.h"
+#include "audio/audio.h"
#include "qemu/error-report.h"
-
#define MP_MISC_BASE 0x80002000
#define MP_MISC_SIZE 0x00001000
@@ -1326,7 +1326,12 @@
qdev_connect_gpio_out(key_dev, i, qdev_get_gpio_in(dev, i + 15));
}
- wm8750_dev = i2c_slave_create_simple(i2c, TYPE_WM8750, MP_WM_ADDR);
+ wm8750_dev = i2c_slave_new(TYPE_WM8750, MP_WM_ADDR);
+ if (machine->audiodev) {
+ qdev_prop_set_string(DEVICE(wm8750_dev), "audiodev", machine->audiodev);
+ }
+ i2c_slave_realize_and_unref(wm8750_dev, i2c, &error_abort);
+
dev = qdev_new(TYPE_MV88W8618_AUDIO);
s = SYS_BUS_DEVICE(dev);
object_property_set_link(OBJECT(dev), "wm8750", OBJECT(wm8750_dev),
@@ -1347,6 +1352,8 @@
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
mc->default_ram_size = MP_RAM_DEFAULT_SIZE;
mc->default_ram_id = "musicpal.ram";
+
+ machine_add_audiodev_property(mc);
}
DEFINE_MACHINE("musicpal", musicpal_machine_init)
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index 9e49e9e..35aff46 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -1432,6 +1432,8 @@
/* Actually two chips of 0x4000000 bytes each */
mc->default_ram_size = 0x08000000;
mc->default_ram_id = "omap2.dram";
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo n800_type = {
@@ -1452,6 +1454,8 @@
/* Actually two chips of 0x4000000 bytes each */
mc->default_ram_size = 0x08000000;
mc->default_ram_id = "omap2.dram";
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo n810_type = {
diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c
index d5a2ae7..f170728 100644
--- a/hw/arm/omap2.c
+++ b/hw/arm/omap2.c
@@ -37,6 +37,7 @@
#include "hw/block/flash.h"
#include "hw/arm/soc_dma.h"
#include "hw/sysbus.h"
+#include "hw/boards.h"
#include "audio/audio.h"
/* Enhanced Audio Controller (CODEC only) */
@@ -609,7 +610,11 @@
s->codec.txdrq = *drq;
omap_eac_reset(s);
- AUD_register_card("OMAP EAC", &s->codec.card);
+ if (current_machine->audiodev) {
+ s->codec.card.name = g_strdup(current_machine->audiodev);
+ s->codec.card.state = audio_state_by_name(s->codec.card.name, &error_fatal);
+ }
+ AUD_register_card("OMAP EAC", &s->codec.card, &error_fatal);
memory_region_init_io(&s->iomem, NULL, &omap_eac_ops, s, "omap.eac",
omap_l4_region_size(ta, 0));
diff --git a/hw/arm/palm.c b/hw/arm/palm.c
index 17c11ac..b86f2c3 100644
--- a/hw/arm/palm.c
+++ b/hw/arm/palm.c
@@ -310,6 +310,8 @@
mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t");
mc->default_ram_size = 0x02000000;
mc->default_ram_id = "omap1.dram";
+
+ machine_add_audiodev_property(mc);
}
DEFINE_MACHINE("cheetah", palmte_machine_init)
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
index a5aa2f0..8f89526 100644
--- a/hw/arm/realview.c
+++ b/hw/arm/realview.c
@@ -29,6 +29,7 @@
#include "hw/irq.h"
#include "hw/i2c/arm_sbcon_i2c.h"
#include "hw/sd/sd.h"
+#include "audio/audio.h"
#define SMP_BOOT_ADDR 0xe0000000
#define SMP_BOOTREG_ADDR 0x10000030
@@ -207,6 +208,9 @@
pl041 = qdev_new("pl041");
qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512);
+ if (machine->audiodev) {
+ qdev_prop_set_string(pl041, "audiodev", machine->audiodev);
+ }
sysbus_realize_and_unref(SYS_BUS_DEVICE(pl041), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, 0x10004000);
sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, pic[19]);
@@ -412,6 +416,8 @@
mc->block_default_type = IF_SCSI;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo realview_eb_type = {
@@ -430,6 +436,8 @@
mc->max_cpus = 4;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm11mpcore");
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo realview_eb_mpcore_type = {
@@ -446,6 +454,8 @@
mc->init = realview_pb_a8_init;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a8");
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo realview_pb_a8_type = {
@@ -463,6 +473,8 @@
mc->max_cpus = 4;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9");
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo realview_pbx_a9_type = {
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index 6d1c1ed..648c2e3 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -328,12 +328,9 @@
#define CMD_TTL(x) extract32((x)->word[2], 8 , 2)
#define CMD_TG(x) extract32((x)->word[2], 10, 2)
#define CMD_STE_RANGE(x) extract32((x)->word[2], 0 , 5)
-#define CMD_ADDR(x) ({ \
- uint64_t high = (uint64_t)(x)->word[3]; \
- uint64_t low = extract32((x)->word[2], 12, 20); \
- uint64_t addr = high << 32 | (low << 12); \
- addr; \
- })
+#define CMD_ADDR(x) \
+ (((uint64_t)((x)->word[3]) << 32) | \
+ ((extract64((x)->word[2], 12, 20)) << 12))
#define SMMU_FEATURE_2LVL_STE (1 << 0)
@@ -533,21 +530,13 @@
#define STE_S2S(x) extract32((x)->word[5], 25, 1)
#define STE_S2R(x) extract32((x)->word[5], 26, 1)
-#define STE_CTXPTR(x) \
- ({ \
- unsigned long addr; \
- addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32; \
- addr |= (uint64_t)((x)->word[0] & 0xffffffc0); \
- addr; \
- })
+#define STE_CTXPTR(x) \
+ ((extract64((x)->word[1], 0, 16) << 32) | \
+ ((x)->word[0] & 0xffffffc0))
-#define STE_S2TTB(x) \
- ({ \
- unsigned long addr; \
- addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32; \
- addr |= (uint64_t)((x)->word[6] & 0xfffffff0); \
- addr; \
- })
+#define STE_S2TTB(x) \
+ ((extract64((x)->word[7], 0, 16) << 32) | \
+ ((x)->word[6] & 0xfffffff0))
static inline int oas2bits(int oas_field)
{
@@ -585,14 +574,10 @@
#define CD_VALID(x) extract32((x)->word[0], 31, 1)
#define CD_ASID(x) extract32((x)->word[1], 16, 16)
-#define CD_TTB(x, sel) \
- ({ \
- uint64_t hi, lo; \
- hi = extract32((x)->word[(sel) * 2 + 3], 0, 19); \
- hi <<= 32; \
- lo = (x)->word[(sel) * 2 + 2] & ~0xfULL; \
- hi | lo; \
- })
+#define CD_TTB(x, sel) \
+ ((extract64((x)->word[(sel) * 2 + 3], 0, 19) << 32) | \
+ ((x)->word[(sel) * 2 + 2] & ~0xfULL))
+
#define CD_HAD(x, sel) extract32((x)->word[(sel) * 2 + 2], 1, 1)
#define CD_TSZ(x, sel) extract32((x)->word[0], (16 * (sel)) + 0, 6)
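A worked example of the extract64() pattern the rewritten macros rely on (values made up for illustration): extract64(v, 12, 20) selects bits [31:12], so shifting the result back left by 12 recovers the 4KiB-aligned low half before it is OR-ed with the high word.

    uint64_t low  = extract64(0xabcd1234, 12, 20) << 12;   /* 0xabcd1 << 12 == 0xabcd1000 */
    uint64_t addr = ((uint64_t)0x12 << 32) | low;          /* 0x12abcd1000 */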
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 1e9be8e..6f2b2bd 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1040,8 +1040,8 @@
SMMUv3State *s = sdev->smmu;
if (!tg) {
- SMMUEventInfo event = {.inval_ste_allowed = true};
- SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
+ SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
+ SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
SMMUTransTableInfo *tt;
if (!cfg) {
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index f732fe0..cc268c6 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -35,6 +35,7 @@
#include "exec/address-spaces.h"
#include "cpu.h"
#include "qom/object.h"
+#include "audio/audio.h"
enum spitz_model_e { spitz, akita, borzoi, terrier };
@@ -774,15 +775,19 @@
i2c_slave_set_address(wm, SPITZ_WM_ADDRL);
}
-static void spitz_i2c_setup(PXA2xxState *cpu)
+static void spitz_i2c_setup(MachineState *machine, PXA2xxState *cpu)
{
/* Attach the CPU on one end of our I2C bus. */
I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]);
- DeviceState *wm;
-
/* Attach a WM8750 to the bus */
- wm = DEVICE(i2c_slave_create_simple(bus, TYPE_WM8750, 0));
+ I2CSlave *i2c_dev = i2c_slave_new(TYPE_WM8750, 0);
+ DeviceState *wm = DEVICE(i2c_dev);
+
+ if (machine->audiodev) {
+ qdev_prop_set_string(wm, "audiodev", machine->audiodev);
+ }
+ i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort);
spitz_wm8750_addr(wm, 0, 0);
qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_WM,
@@ -1013,7 +1018,7 @@
spitz_gpio_setup(mpu, (model == akita) ? 1 : 2);
- spitz_i2c_setup(mpu);
+ spitz_i2c_setup(machine, mpu);
if (model == akita)
spitz_akita_i2c_setup(mpu);
@@ -1037,6 +1042,8 @@
mc->block_default_type = IF_IDE;
mc->ignore_memory_transaction_failures = true;
mc->init = spitz_common_init;
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo spitz_common_info = {
diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c
index 05b9462..2f22dc8 100644
--- a/hw/arm/versatilepb.c
+++ b/hw/arm/versatilepb.c
@@ -26,6 +26,7 @@
#include "hw/char/pl011.h"
#include "hw/sd/sd.h"
#include "qom/object.h"
+#include "audio/audio.h"
#define VERSATILE_FLASH_ADDR 0x34000000
#define VERSATILE_FLASH_SIZE (64 * 1024 * 1024)
@@ -343,6 +344,9 @@
/* Add PL041 AACI Interface to the LM4549 codec */
pl041 = qdev_new("pl041");
qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512);
+ if (machine->audiodev) {
+ qdev_prop_set_string(pl041, "audiodev", machine->audiodev);
+ }
sysbus_realize_and_unref(SYS_BUS_DEVICE(pl041), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, 0x10004000);
sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, sic[24]);
@@ -416,6 +420,8 @@
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
mc->default_ram_id = "versatile.ram";
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo versatilepb_type = {
@@ -434,6 +440,8 @@
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926");
mc->default_ram_id = "versatile.ram";
+
+ machine_add_audiodev_property(mc);
}
static const TypeInfo versatileab_type = {
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
index 56abadd..8ff37f5 100644
--- a/hw/arm/vexpress.c
+++ b/hw/arm/vexpress.c
@@ -44,6 +44,7 @@
#include "hw/i2c/arm_sbcon_i2c.h"
#include "hw/sd/sd.h"
#include "qom/object.h"
+#include "audio/audio.h"
#define VEXPRESS_BOARD_ID 0x8e0
#define VEXPRESS_FLASH_SIZE (64 * 1024 * 1024)
@@ -613,6 +614,9 @@
pl041 = qdev_new("pl041");
qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512);
+ if (machine->audiodev) {
+ qdev_prop_set_string(pl041, "audiodev", machine->audiodev);
+ }
sysbus_realize_and_unref(SYS_BUS_DEVICE(pl041), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, map[VE_PL041]);
sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, pic[11]);
@@ -776,6 +780,7 @@
mc->ignore_memory_transaction_failures = true;
mc->default_ram_id = "vexpress.highmem";
+ machine_add_audiodev_property(mc);
object_class_property_add_bool(oc, "secure", vexpress_get_secure,
vexpress_set_secure);
object_class_property_set_description(oc, "secure",
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 8ad78b2..15e7424 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -801,7 +801,6 @@
for (i = 0; i < smp_cpus; i++) {
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
- int irq;
/* Mapping from the output timer irq lines from the CPU to the
* GIC PPI inputs we use for the virt board.
*/
@@ -812,7 +811,7 @@
[GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
};
- for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
+ for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
qdev_connect_gpio_out(cpudev, irq,
qdev_get_gpio_in(vms->gic,
ppibase + timer_irq[irq]));
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
index 21483f7..c5a07cf 100644
--- a/hw/arm/xlnx-zcu102.c
+++ b/hw/arm/xlnx-zcu102.c
@@ -24,6 +24,7 @@
#include "sysemu/device_tree.h"
#include "qom/object.h"
#include "net/can_emu.h"
+#include "audio/audio.h"
struct XlnxZCU102 {
MachineState parent_obj;
@@ -143,6 +144,10 @@
object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_XLNX_ZYNQMP);
+ if (machine->audiodev) {
+ qdev_prop_set_string(DEVICE(&s->soc.dp), "audiodev", machine->audiodev);
+ }
+
object_property_set_link(OBJECT(&s->soc), "ddr-ram", OBJECT(machine->ram),
&error_abort);
object_property_set_bool(OBJECT(&s->soc), "secure", s->secure,
@@ -275,6 +280,7 @@
mc->default_cpus = XLNX_ZYNQMP_NUM_APU_CPUS;
mc->default_ram_id = "ddr-ram";
+ machine_add_audiodev_property(mc);
object_class_property_add_bool(oc, "secure", zcu102_get_secure,
zcu102_set_secure);
object_class_property_set_description(oc, "secure",
diff --git a/hw/arm/z2.c b/hw/arm/z2.c
index dc25304..d9a08fa 100644
--- a/hw/arm/z2.c
+++ b/hw/arm/z2.c
@@ -27,6 +27,7 @@
#include "exec/address-spaces.h"
#include "cpu.h"
#include "qom/object.h"
+#include "qapi/error.h"
#ifdef DEBUG_Z2
#define DPRINTF(fmt, ...) \
@@ -307,6 +308,7 @@
void *z2_lcd;
I2CBus *bus;
DeviceState *wm;
+ I2CSlave *i2c_dev;
/* Setup CPU & memory */
mpu = pxa270_init(z2_binfo.ram_size, machine->cpu_type);
@@ -328,8 +330,17 @@
type_register_static(&aer915_info);
z2_lcd = ssi_create_peripheral(mpu->ssp[1], TYPE_ZIPIT_LCD);
bus = pxa2xx_i2c_bus(mpu->i2c[0]);
+
i2c_slave_create_simple(bus, TYPE_AER915, 0x55);
- wm = DEVICE(i2c_slave_create_simple(bus, TYPE_WM8750, 0x1b));
+
+ i2c_dev = i2c_slave_new(TYPE_WM8750, 0x1b);
+ wm = DEVICE(i2c_dev);
+
+ if (machine->audiodev) {
+ qdev_prop_set_string(wm, "audiodev", machine->audiodev);
+ }
+ i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort);
+
mpu->i2s->opaque = wm;
mpu->i2s->codec_out = wm8750_dac_dat;
mpu->i2s->codec_in = wm8750_adc_dat;
@@ -348,6 +359,8 @@
mc->init = z2_init;
mc->ignore_memory_transaction_failures = true;
mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5");
+
+ machine_add_audiodev_property(mc);
}
DEFINE_MACHINE("z2", z2_machine_init)
diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c
index c2a5ce0..6a7a2dc 100644
--- a/hw/audio/ac97.c
+++ b/hw/audio/ac97.c
@@ -1273,6 +1273,10 @@
AC97LinkState *s = AC97(dev);
uint8_t *c = s->dev.config;
+ if (!AUD_register_card ("ac97", &s->card, errp)) {
+ return;
+ }
+
/* TODO: no need to override */
c[PCI_COMMAND] = 0x00; /* pcicmd pci command rw, ro */
c[PCI_COMMAND + 1] = 0x00;
@@ -1306,7 +1310,7 @@
"ac97-nabm", 256);
pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nam);
pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nabm);
- AUD_register_card("ac97", &s->card);
+
ac97_on_reset(DEVICE(s));
}
diff --git a/hw/audio/adlib.c b/hw/audio/adlib.c
index 5f979b1..bd73806 100644
--- a/hw/audio/adlib.c
+++ b/hw/audio/adlib.c
@@ -255,6 +255,10 @@
AdlibState *s = ADLIB(dev);
struct audsettings as;
+ if (!AUD_register_card ("adlib", &s->card, errp)) {
+ return;
+ }
+
s->opl = OPLCreate (3579545, s->freq);
if (!s->opl) {
error_setg (errp, "OPLCreate %d failed", s->freq);
@@ -270,8 +274,6 @@
as.fmt = AUDIO_FORMAT_S16;
as.endianness = AUDIO_HOST_ENDIANNESS;
- AUD_register_card ("adlib", &s->card);
-
s->voice = AUD_open_out (
&s->card,
s->voice,
diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c
index 5c6d643..3aa1057 100644
--- a/hw/audio/cs4231a.c
+++ b/hw/audio/cs4231a.c
@@ -678,13 +678,15 @@
return;
}
+ if (!AUD_register_card ("cs4231a", &s->card, errp)) {
+ return;
+ }
+
s->pic = isa_bus_get_irq(bus, s->irq);
k = ISADMA_GET_CLASS(s->isa_dma);
k->register_channel(s->isa_dma, s->dma, cs_dma_read, s);
isa_register_ioport (d, &s->ioports, s->port);
-
- AUD_register_card ("cs4231a", &s->card);
}
static Property cs4231a_properties[] = {
diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c
index 4f738a0..90f73d4 100644
--- a/hw/audio/es1370.c
+++ b/hw/audio/es1370.c
@@ -853,6 +853,10 @@
ES1370State *s = ES1370(dev);
uint8_t *c = s->dev.config;
+ if (!AUD_register_card ("es1370", &s->card, errp)) {
+ return;
+ }
+
c[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_SLOW >> 8;
#if 0
@@ -868,7 +872,6 @@
memory_region_init_io (&s->io, OBJECT(s), &es1370_io_ops, s, "es1370", 256);
pci_register_bar (&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io);
- AUD_register_card ("es1370", &s->card);
es1370_reset (s);
}
diff --git a/hw/audio/gus.c b/hw/audio/gus.c
index 787345c..6c2b586 100644
--- a/hw/audio/gus.c
+++ b/hw/audio/gus.c
@@ -241,14 +241,16 @@
IsaDmaClass *k;
struct audsettings as;
+ if (!AUD_register_card ("gus", &s->card, errp)) {
+ return;
+ }
+
s->isa_dma = isa_bus_get_dma(bus, s->emu.gusdma);
if (!s->isa_dma) {
error_setg(errp, "ISA controller does not support DMA");
return;
}
- AUD_register_card ("gus", &s->card);
-
as.freq = s->freq;
as.nchannels = 2;
as.fmt = AUDIO_FORMAT_S16;
diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c
index a26048c..b9ad1f4 100644
--- a/hw/audio/hda-codec.c
+++ b/hw/audio/hda-codec.c
@@ -685,11 +685,14 @@
const desc_param *param;
uint32_t i, type;
+ if (!AUD_register_card("hda", &a->card, errp)) {
+ return;
+ }
+
a->desc = desc;
a->name = object_get_typename(OBJECT(a));
dprint(a, 1, "%s: cad %d\n", __func__, a->hda.cad);
- AUD_register_card("hda", &a->card);
for (i = 0; i < a->desc->nnodes; i++) {
node = a->desc->nodes + i;
param = hda_codec_find_param(node, AC_PAR_AUDIO_WIDGET_CAP);
diff --git a/hw/audio/lm4549.c b/hw/audio/lm4549.c
index 418041b..e7bfcc4 100644
--- a/hw/audio/lm4549.c
+++ b/hw/audio/lm4549.c
@@ -281,6 +281,11 @@
{
struct audsettings as;
+ /* Register an audio card */
+ if (!AUD_register_card("lm4549", &s->card, errp)) {
+ return;
+ }
+
/* Store the callback and opaque pointer */
s->data_req_cb = data_req_cb;
s->opaque = opaque;
@@ -288,9 +293,6 @@
/* Init the registers */
lm4549_reset(s);
- /* Register an audio card */
- AUD_register_card("lm4549", &s->card);
-
/* Open a default voice */
as.freq = 48000;
as.nchannels = 2;
diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c
index daf92a4..fe7f07c 100644
--- a/hw/audio/pcspk.c
+++ b/hw/audio/pcspk.c
@@ -123,8 +123,6 @@
return 0;
}
- AUD_register_card(s_spk, &s->card);
-
s->voice = AUD_open_out(&s->card, s->voice, s_spk, s, pcspk_callback, &as);
if (!s->voice) {
AUD_log(s_spk, "Could not open voice\n");
@@ -191,7 +189,7 @@
isa_register_ioport(isadev, &s->ioport, s->iobase);
- if (s->card.state) {
+ if (s->card.state && AUD_register_card(s_spk, &s->card, errp)) {
pcspk_audio_init(s);
}
diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c
index 535cccc..18f6d25 100644
--- a/hw/audio/sb16.c
+++ b/hw/audio/sb16.c
@@ -1402,6 +1402,10 @@
SB16State *s = SB16 (dev);
IsaDmaClass *k;
+ if (!AUD_register_card ("sb16", &s->card, errp)) {
+ return;
+ }
+
s->isa_hdma = isa_bus_get_dma(bus, s->hdma);
s->isa_dma = isa_bus_get_dma(bus, s->dma);
if (!s->isa_dma || !s->isa_hdma) {
@@ -1434,8 +1438,6 @@
k->register_channel(s->isa_dma, s->dma, SB_read_DMA, s);
s->can_write = 1;
-
- AUD_register_card ("sb16", &s->card);
}
static Property sb16_properties[] = {
diff --git a/hw/audio/via-ac97.c b/hw/audio/via-ac97.c
index 676254b..30095a4 100644
--- a/hw/audio/via-ac97.c
+++ b/hw/audio/via-ac97.c
@@ -426,6 +426,10 @@
ViaAC97State *s = VIA_AC97(pci_dev);
Object *o = OBJECT(s);
+ if (!AUD_register_card ("via-ac97", &s->card, errp)) {
+ return;
+ }
+
/*
* Command register Bus Master bit is documented to be fixed at 0 but it's
* needed for PCI DMA to work in QEMU. The pegasos2 firmware writes 0 here
@@ -445,8 +449,6 @@
pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->fm);
memory_region_init_io(&s->midi, o, &midi_ops, s, "via-ac97.midi", 4);
pci_register_bar(pci_dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &s->midi);
-
- AUD_register_card ("via-ac97", &s->card);
}
static void via_ac97_exit(PCIDevice *dev)
diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c
index b5722b3..57954a63 100644
--- a/hw/audio/wm8750.c
+++ b/hw/audio/wm8750.c
@@ -624,7 +624,10 @@
{
WM8750State *s = WM8750(dev);
- AUD_register_card(CODEC, &s->card);
+ if (!AUD_register_card(CODEC, &s->card, errp)) {
+ return;
+ }
+
wm8750_reset(I2C_SLAVE(s));
}
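All of the hw/audio conversions above follow the same shape, summarized in this hedged sketch (device names hypothetical): AUD_register_card() now takes an Error ** and returns false when the configured audiodev cannot be resolved, so realize bails out before touching any device state.

    static void mydev_realizefn(DeviceState *dev, Error **errp)
    {
        MyDevState *s = MYDEV(dev);              /* hypothetical device type */

        if (!AUD_register_card("mydev", &s->card, errp)) {
            return;                              /* error already set in errp */
        }
        /* ... register MMIO/IO regions, open voices with AUD_open_out() ... */
    }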
diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c
index 3906b90..a07cd7e 100644
--- a/hw/block/xen-block.c
+++ b/hw/block/xen-block.c
@@ -369,7 +369,7 @@
case XEN_BLOCK_VDEV_TYPE_XVD:
case XEN_BLOCK_VDEV_TYPE_HD:
case XEN_BLOCK_VDEV_TYPE_SD: {
- char *name = disk_to_vbd_name(vdev->disk);
+ char *vbd_name = disk_to_vbd_name(vdev->disk);
str = g_strdup_printf("%s%s%lu",
(vdev->type == XEN_BLOCK_VDEV_TYPE_XVD) ?
@@ -377,8 +377,8 @@
(vdev->type == XEN_BLOCK_VDEV_TYPE_HD) ?
"hd" :
"sd",
- name, vdev->partition);
- g_free(name);
+ vbd_name, vdev->partition);
+ g_free(vbd_name);
break;
}
default:
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index ced66c2..4d40699 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -86,7 +86,7 @@
qatomic_set(&cpu->exit_request, 1);
/* Ensure cpu_exec will see the exit request after TCG has exited. */
smp_wmb();
- qatomic_set(&cpu->icount_decr_ptr->u16.high, -1);
+ qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}
static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
@@ -130,8 +130,8 @@
cpu->halted = cpu->start_powered_off;
cpu->mem_io_pc = 0;
cpu->icount_extra = 0;
- qatomic_set(&cpu->icount_decr_ptr->u32, 0);
- cpu->can_do_io = 1;
+ qatomic_set(&cpu->neg.icount_decr.u32, 0);
+ cpu->neg.can_do_io = true;
cpu->exception_index = -1;
cpu->crash_occurred = false;
cpu->cflags_next_tb = -1;
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 9ae8f79..cfd1edf 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -11,35 +11,26 @@
*/
#include "qemu/osdep.h"
-#include "qemu/option.h"
#include "qemu/accel.h"
#include "sysemu/replay.h"
-#include "qemu/units.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "qapi/error.h"
-#include "qapi/qapi-visit-common.h"
#include "qapi/qapi-visit-machine.h"
-#include "qapi/visitor.h"
#include "qom/object_interfaces.h"
-#include "hw/sysbus.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
-#include "sysemu/numa.h"
#include "sysemu/xen.h"
-#include "qemu/error-report.h"
#include "sysemu/qtest.h"
-#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/mem/nvdimm.h"
#include "migration/global_state.h"
-#include "migration/vmstate.h"
#include "exec/confidential-guest-support.h"
-#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-net.h"
+#include "audio/audio.h"
GlobalProperty hw_compat_8_1[] = {
{ TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" },
@@ -689,6 +680,26 @@
return allowed;
}
+static char *machine_get_audiodev(Object *obj, Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+
+ return g_strdup(ms->audiodev);
+}
+
+static void machine_set_audiodev(Object *obj, const char *value,
+ Error **errp)
+{
+ MachineState *ms = MACHINE(obj);
+
+ if (!audio_state_by_name(value, errp)) {
+ return;
+ }
+
+ g_free(ms->audiodev);
+ ms->audiodev = g_strdup(value);
+}
+
HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine)
{
int i;
@@ -934,6 +945,17 @@
qapi_free_BootConfiguration(config);
}
+void machine_add_audiodev_property(MachineClass *mc)
+{
+ ObjectClass *oc = OBJECT_CLASS(mc);
+
+ object_class_property_add_str(oc, "audiodev",
+ machine_get_audiodev,
+ machine_set_audiodev);
+ object_class_property_set_description(oc, "audiodev",
+ "Audiodev to use for default machine devices");
+}
+
static void machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1085,8 +1107,6 @@
ms->maxram_size = mc->default_ram_size;
if (mc->nvdimm_supported) {
- Object *obj = OBJECT(ms);
-
ms->nvdimms_state = g_new0(NVDIMMState, 1);
object_property_add_bool(obj, "nvdimm",
machine_get_nvdimm, machine_set_nvdimm);
@@ -1139,6 +1159,7 @@
g_free(ms->device_memory);
g_free(ms->nvdimms_state);
g_free(ms->numa_state);
+ g_free(ms->audiodev);
}
bool machine_usb(MachineState *machine)
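
machine_add_audiodev_property() is opt-in per board: a machine class that calls it gains a validated "audiodev" string property, and machine_set_audiodev() rejects names that do not resolve via audio_state_by_name(). A hedged sketch of a board opting in (the class name is made up):

    static void mymachine_class_init(ObjectClass *oc, void *data)
    {
        MachineClass *mc = MACHINE_CLASS(oc);

        mc->desc = "example board with onboard audio";
        machine_add_audiodev_property(mc);  /* enables -machine ...,audiodev= */
    }

On the command line this pairs with an -audiodev backend, e.g. "-machine mymachine,audiodev=snd0 -audiodev pa,id=snd0"; an unknown id fails machine creation rather than being ignored.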
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index 41b7e68..6883406 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -480,24 +480,16 @@
Property *prop = opaque;
QEMUSoundCard *card = object_field_prop_ptr(obj, prop);
AudioState *state;
- int err = 0;
- char *str;
+ g_autofree char *str = NULL;
if (!visit_type_str(v, name, &str, errp)) {
return;
}
- state = audio_state_by_name(str);
-
- if (!state) {
- err = -ENOENT;
- goto out;
+ state = audio_state_by_name(str, errp);
+ if (state) {
+ card->state = state;
}
- card->state = state;
-
-out:
- error_set_from_qdev_prop_error(errp, err, obj, name, str);
- g_free(str);
}
const PropertyInfo qdev_prop_audiodev = {
diff --git a/hw/display/ramfb.c b/hw/display/ramfb.c
index 79b9754..c2b002d 100644
--- a/hw/display/ramfb.c
+++ b/hw/display/ramfb.c
@@ -97,6 +97,7 @@
s->width = width;
s->height = height;
+ qemu_free_displaysurface(s->ds);
s->ds = surface;
}
diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c
index 341e91e..eee8f33 100644
--- a/hw/display/xlnx_dp.c
+++ b/hw/display/xlnx_dp.c
@@ -1302,6 +1302,10 @@
DisplaySurface *surface;
struct audsettings as;
+ if (!AUD_register_card("xlnx_dp.audio", &s->aud_card, errp)) {
+ return;
+ }
+
aux_bus_realize(s->aux_bus);
qdev_realize(DEVICE(s->dpcd), BUS(s->aux_bus), &error_fatal);
@@ -1320,8 +1324,6 @@
as.fmt = AUDIO_FORMAT_S16;
as.endianness = 0;
- AUD_register_card("xlnx_dp.audio", &s->aud_card);
-
s->amixer_output_stream = AUD_open_out(&s->aud_card,
s->amixer_output_stream,
"xlnx_dp.audio.out",
diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c
index 7275d40..1037c22 100644
--- a/hw/i2c/aspeed_i2c.c
+++ b/hw/i2c/aspeed_i2c.c
@@ -312,7 +312,6 @@
SHARED_ARRAY_FIELD_DP32(bus->regs, reg_pool_ctrl, RX_COUNT, i & 0xff);
SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, RX_BUFF_EN, 0);
} else if (SHARED_ARRAY_FIELD_EX32(bus->regs, reg_cmd, RX_DMA_EN)) {
- uint8_t data;
/* In new mode, clear how many bytes we RXed */
if (aspeed_i2c_is_new_mode(bus->controller)) {
ARRAY_FIELD_DP32(bus->regs, I2CM_DMA_LEN_STS, RX_LEN, 0);
diff --git a/hw/i2c/pm_smbus.c b/hw/i2c/pm_smbus.c
index 9ad6a47..4e1b8a5 100644
--- a/hw/i2c/pm_smbus.c
+++ b/hw/i2c/pm_smbus.c
@@ -279,7 +279,7 @@
if (!read && s->smb_index == s->smb_data0) {
uint8_t prot = (s->smb_ctl >> 2) & 0x07;
uint8_t cmd = s->smb_cmd;
- uint8_t addr = s->smb_addr >> 1;
+ uint8_t smb_addr = s->smb_addr >> 1;
int ret;
if (prot == PROT_I2C_BLOCK_READ) {
@@ -287,7 +287,7 @@
goto out;
}
- ret = smbus_write_block(s->smbus, addr, cmd, s->smb_data,
+ ret = smbus_write_block(s->smbus, smb_addr, cmd, s->smb_data,
s->smb_data0, !s->i2c_enable);
if (ret < 0) {
s->smb_stat |= STS_DEV_ERR;
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 863a939..3f2b27c 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -1585,12 +1585,12 @@
aml_append(dev, aml_name_decl("_UID", aml_int(bus_num)));
aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num)));
if (pci_bus_is_cxl(bus)) {
- struct Aml *pkg = aml_package(2);
+ struct Aml *aml_pkg = aml_package(2);
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0016")));
- aml_append(pkg, aml_eisaid("PNP0A08"));
- aml_append(pkg, aml_eisaid("PNP0A03"));
- aml_append(dev, aml_name_decl("_CID", pkg));
+ aml_append(aml_pkg, aml_eisaid("PNP0A08"));
+ aml_append(aml_pkg, aml_eisaid("PNP0A03"));
+ aml_append(dev, aml_name_decl("_CID", aml_pkg));
build_cxl_osc_method(dev);
} else if (pci_bus_is_express(bus)) {
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
@@ -1783,14 +1783,14 @@
Object *pci_host = acpi_get_i386_pci_host();
if (pci_host) {
- PCIBus *bus = PCI_HOST_BRIDGE(pci_host)->bus;
- Aml *scope = aml_scope("PCI0");
+ PCIBus *pbus = PCI_HOST_BRIDGE(pci_host)->bus;
+ Aml *ascope = aml_scope("PCI0");
/* Scan all PCI buses. Generate tables to support hotplug. */
- build_append_pci_bus_devices(scope, bus);
- if (object_property_find(OBJECT(bus), ACPI_PCIHP_PROP_BSEL)) {
- build_append_pcihp_slots(scope, bus);
+ build_append_pci_bus_devices(ascope, pbus);
+ if (object_property_find(OBJECT(pbus), ACPI_PCIHP_PROP_BSEL)) {
+ build_append_pcihp_slots(ascope, pbus);
}
- aml_append(sb_scope, scope);
+ aml_append(sb_scope, ascope);
}
}
@@ -1842,10 +1842,10 @@
bool has_pcnt;
Object *pci_host = acpi_get_i386_pci_host();
- PCIBus *bus = PCI_HOST_BRIDGE(pci_host)->bus;
+ PCIBus *b = PCI_HOST_BRIDGE(pci_host)->bus;
scope = aml_scope("\\_SB.PCI0");
- has_pcnt = build_append_notfication_callback(scope, bus);
+ has_pcnt = build_append_notfication_callback(scope, b);
if (has_pcnt) {
aml_append(dsdt, scope);
}
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index c0ce896..2c832ab 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -3744,7 +3744,7 @@
/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
- hwaddr size, remain;
+ hwaddr total, remain;
hwaddr start = n->start;
hwaddr end = n->end;
IntelIOMMUState *s = as->iommu_state;
@@ -3765,7 +3765,7 @@
}
assert(start <= end);
- size = remain = end - start + 1;
+ total = remain = end - start + 1;
while (remain >= VTD_PAGE_SIZE) {
IOMMUTLBEvent event;
@@ -3793,10 +3793,10 @@
trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
VTD_PCI_SLOT(as->devfn),
VTD_PCI_FUNC(as->devfn),
- n->start, size);
+ n->start, total);
map.iova = n->start;
- map.size = size - 1; /* Inclusive */
+ map.size = total - 1; /* Inclusive */
iova_tree_remove(as->iova_tree, map);
}
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index 34348a3..f25977d 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -66,7 +66,7 @@
static uint64_t kvmclock_current_nsec(KVMClockState *s)
{
CPUState *cpu = first_cpu;
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
hwaddr kvmclock_struct_pa;
uint64_t migration_tsc = env->tsc;
struct pvclock_vcpu_time_info time;
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index a532d42..aad7e8c 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -24,79 +24,40 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
-#include "hw/i386/x86.h"
#include "hw/i386/pc.h"
#include "hw/char/serial.h"
#include "hw/char/parallel.h"
-#include "hw/i386/topology.h"
#include "hw/i386/fw_cfg.h"
#include "hw/i386/vmport.h"
#include "sysemu/cpus.h"
-#include "hw/block/fdc.h"
#include "hw/ide/internal.h"
-#include "hw/ide/isa.h"
-#include "hw/pci/pci.h"
-#include "hw/pci/pci_bus.h"
-#include "hw/pci-bridge/pci_expander_bridge.h"
-#include "hw/nvram/fw_cfg.h"
#include "hw/timer/hpet.h"
-#include "hw/firmware/smbios.h"
#include "hw/loader.h"
-#include "elf.h"
-#include "migration/vmstate.h"
-#include "multiboot.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/intc/i8259.h"
-#include "hw/intc/ioapic.h"
#include "hw/timer/i8254.h"
#include "hw/input/i8042.h"
-#include "hw/irq.h"
#include "hw/audio/pcspk.h"
-#include "hw/pci/msi.h"
-#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
-#include "sysemu/tcg.h"
-#include "sysemu/numa.h"
-#include "sysemu/kvm.h"
#include "sysemu/xen.h"
#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
#include "kvm/kvm_i386.h"
#include "hw/xen/xen.h"
-#include "hw/xen/start_info.h"
-#include "ui/qemu-spice.h"
-#include "exec/memory.h"
-#include "qemu/bitmap.h"
-#include "qemu/config-file.h"
#include "qemu/error-report.h"
-#include "qemu/option.h"
-#include "qemu/cutils.h"
-#include "hw/acpi/acpi.h"
#include "hw/acpi/cpu_hotplug.h"
#include "acpi-build.h"
-#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
-#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_host.h"
-#include "qapi/error.h"
-#include "qapi/qapi-visit-common.h"
-#include "qapi/qapi-visit-machine.h"
-#include "qapi/visitor.h"
-#include "hw/core/cpu.h"
#include "hw/usb.h"
#include "hw/i386/intel_iommu.h"
#include "hw/net/ne2000-isa.h"
-#include "standard-headers/asm-x86/bootparam.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/virtio/virtio-md-pci.h"
#include "hw/i386/kvm/xen_overlay.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/kvm/xen_gnttab.h"
#include "hw/i386/kvm/xen_xenstore.h"
-#include "sysemu/replay.h"
-#include "target/i386/cpu.h"
#include "e820_memory_layout.h"
-#include "fw_cfg.h"
#include "trace.h"
#include CONFIG_DEVICES
@@ -359,60 +320,6 @@
};
const size_t pc_compat_2_0_len = G_N_ELEMENTS(pc_compat_2_0);
-GlobalProperty pc_compat_1_7[] = {
- PC_CPU_MODEL_IDS("1.7.0")
- { TYPE_USB_DEVICE, "msos-desc", "no" },
- { "PIIX4_PM", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" },
- { "hpet", HPET_INTCAP, "4" },
-};
-const size_t pc_compat_1_7_len = G_N_ELEMENTS(pc_compat_1_7);
-
-GlobalProperty pc_compat_1_6[] = {
- PC_CPU_MODEL_IDS("1.6.0")
- { "e1000", "mitigation", "off" },
- { "qemu64-" TYPE_X86_CPU, "model", "2" },
- { "qemu32-" TYPE_X86_CPU, "model", "3" },
- { "i440FX-pcihost", "short_root_bus", "1" },
- { "q35-pcihost", "short_root_bus", "1" },
-};
-const size_t pc_compat_1_6_len = G_N_ELEMENTS(pc_compat_1_6);
-
-GlobalProperty pc_compat_1_5[] = {
- PC_CPU_MODEL_IDS("1.5.0")
- { "Conroe-" TYPE_X86_CPU, "model", "2" },
- { "Conroe-" TYPE_X86_CPU, "min-level", "2" },
- { "Penryn-" TYPE_X86_CPU, "model", "2" },
- { "Penryn-" TYPE_X86_CPU, "min-level", "2" },
- { "Nehalem-" TYPE_X86_CPU, "model", "2" },
- { "Nehalem-" TYPE_X86_CPU, "min-level", "2" },
- { "virtio-net-pci", "any_layout", "off" },
- { TYPE_X86_CPU, "pmu", "on" },
- { "i440FX-pcihost", "short_root_bus", "0" },
- { "q35-pcihost", "short_root_bus", "0" },
-};
-const size_t pc_compat_1_5_len = G_N_ELEMENTS(pc_compat_1_5);
-
-GlobalProperty pc_compat_1_4[] = {
- PC_CPU_MODEL_IDS("1.4.0")
- { "scsi-hd", "discard_granularity", "0" },
- { "scsi-cd", "discard_granularity", "0" },
- { "ide-hd", "discard_granularity", "0" },
- { "ide-cd", "discard_granularity", "0" },
- { "virtio-blk-pci", "discard_granularity", "0" },
- /* DEV_NVECTORS_UNSPECIFIED as a uint32_t string: */
- { "virtio-serial-pci", "vectors", "0xFFFFFFFF" },
- { "virtio-net-pci", "ctrl_guest_offloads", "off" },
- { "e1000", "romfile", "pxe-e1000.rom" },
- { "ne2k_pci", "romfile", "pxe-ne2k_pci.rom" },
- { "pcnet", "romfile", "pxe-pcnet.rom" },
- { "rtl8139", "romfile", "pxe-rtl8139.rom" },
- { "virtio-net-pci", "romfile", "pxe-virtio.rom" },
- { "486-" TYPE_X86_CPU, "model", "0" },
- { "n270" "-" TYPE_X86_CPU, "movbe", "off" },
- { "Westmere" "-" TYPE_X86_CPU, "pclmulqdq", "off" },
-};
-const size_t pc_compat_1_4_len = G_N_ELEMENTS(pc_compat_1_4);
-
GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled)
{
GSIState *s;
@@ -1319,9 +1226,9 @@
exit(1);
}
/*
- * For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-1.7 and
- * earlier, use IRQ2 for compat. Otherwise, use IRQ16~23, IRQ8 and
- * IRQ2.
+ * For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-*,
+ * use IRQ16~23, IRQ8 and IRQ2. If the user has already set
+ * the property, use whatever mask they specified.
*/
uint8_t compat = object_property_get_uint(OBJECT(hpet),
HPET_INTCAP, NULL);
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 7100375..e36a326 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -423,27 +423,6 @@
pc_compat_2_1_fn(machine);
}
-static void pc_compat_1_7_fn(MachineState *machine)
-{
- pc_compat_2_0_fn(machine);
- x86_cpu_change_kvm_default("x2apic", NULL);
-}
-
-static void pc_compat_1_6_fn(MachineState *machine)
-{
- pc_compat_1_7_fn(machine);
-}
-
-static void pc_compat_1_5_fn(MachineState *machine)
-{
- pc_compat_1_6_fn(machine);
-}
-
-static void pc_compat_1_4_fn(MachineState *machine)
-{
- pc_compat_1_5_fn(machine);
-}
-
#ifdef CONFIG_ISAPC
static void pc_init_isa(MachineState *machine)
{
@@ -880,58 +859,6 @@
DEFINE_I440FX_MACHINE(v2_0, "pc-i440fx-2.0", pc_compat_2_0_fn,
pc_i440fx_2_0_machine_options);
-static void pc_i440fx_1_7_machine_options(MachineClass *m)
-{
- PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-
- pc_i440fx_2_0_machine_options(m);
- m->hw_version = "1.7.0";
- m->default_machine_opts = NULL;
- m->option_rom_has_mr = true;
- m->deprecation_reason = "old and unattended - use a newer version instead";
- compat_props_add(m->compat_props, pc_compat_1_7, pc_compat_1_7_len);
- pcmc->smbios_defaults = false;
- pcmc->gigabyte_align = false;
- pcmc->legacy_acpi_table_size = 6414;
-}
-
-DEFINE_I440FX_MACHINE(v1_7, "pc-i440fx-1.7", pc_compat_1_7_fn,
- pc_i440fx_1_7_machine_options);
-
-static void pc_i440fx_1_6_machine_options(MachineClass *m)
-{
- PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-
- pc_i440fx_1_7_machine_options(m);
- m->hw_version = "1.6.0";
- m->rom_file_has_mr = false;
- compat_props_add(m->compat_props, pc_compat_1_6, pc_compat_1_6_len);
- pcmc->has_acpi_build = false;
-}
-
-DEFINE_I440FX_MACHINE(v1_6, "pc-i440fx-1.6", pc_compat_1_6_fn,
- pc_i440fx_1_6_machine_options);
-
-static void pc_i440fx_1_5_machine_options(MachineClass *m)
-{
- pc_i440fx_1_6_machine_options(m);
- m->hw_version = "1.5.0";
- compat_props_add(m->compat_props, pc_compat_1_5, pc_compat_1_5_len);
-}
-
-DEFINE_I440FX_MACHINE(v1_5, "pc-i440fx-1.5", pc_compat_1_5_fn,
- pc_i440fx_1_5_machine_options);
-
-static void pc_i440fx_1_4_machine_options(MachineClass *m)
-{
- pc_i440fx_1_5_machine_options(m);
- m->hw_version = "1.4.0";
- compat_props_add(m->compat_props, pc_compat_1_4, pc_compat_1_4_len);
-}
-
-DEFINE_I440FX_MACHINE(v1_4, "pc-i440fx-1.4", pc_compat_1_4_fn,
- pc_i440fx_1_4_machine_options);
-
#ifdef CONFIG_ISAPC
static void isapc_machine_options(MachineClass *m)
{
diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c
index f568759..950506f 100644
--- a/hw/input/tsc210x.c
+++ b/hw/input/tsc210x.c
@@ -27,6 +27,7 @@
#include "sysemu/reset.h"
#include "ui/console.h"
#include "hw/arm/omap.h" /* For I2SCodec */
+#include "hw/boards.h" /* for current_machine */
#include "hw/input/tsc2xxx.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
@@ -1097,7 +1098,11 @@
qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1, name);
- AUD_register_card(s->name, &s->card);
+ if (current_machine->audiodev) {
+ s->card.name = g_strdup(current_machine->audiodev);
+ s->card.state = audio_state_by_name(s->card.name, &error_fatal);
+ }
+ AUD_register_card(s->name, &s->card, &error_fatal);
qemu_register_reset((void *) tsc210x_reset, s);
vmstate_register(NULL, 0, vmsd, s);
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
index 5f552b4..52e9aca 100644
--- a/hw/intc/arm_gicv3_its.c
+++ b/hw/intc/arm_gicv3_its.c
@@ -545,10 +545,10 @@
}
if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) {
- ITEntry ite = {};
+ ITEntry i = {};
/* remove mapping from interrupt translation table */
- ite.valid = false;
- return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
+ i.valid = false;
+ return update_ite(s, eventid, &dte, &i) ? CMD_CONTINUE_OK : CMD_STALL;
}
return CMD_CONTINUE_OK;
}
diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c
index 4bdc3b1..77ba734 100644
--- a/hw/intc/mips_gic.c
+++ b/hw/intc/mips_gic.c
@@ -423,7 +423,7 @@
/* Register the env for all VPs with the GIC */
for (i = 0; i < s->num_vps; i++) {
if (cs != NULL) {
- s->vps[i].env = cs->env_ptr;
+ s->vps[i].env = cpu_env(cs);
cs = CPU_NEXT(cs);
} else {
error_setg(errp,
diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c
index c757adb..a6f91d4 100644
--- a/hw/intc/openpic.c
+++ b/hw/intc/openpic.c
@@ -610,11 +610,8 @@
case 0x10B0:
case 0x10C0:
case 0x10D0:
- {
- int idx;
- idx = (addr - 0x10A0) >> 4;
- write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
- }
+ idx = (addr - 0x10A0) >> 4;
+ write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
break;
case 0x10E0: /* SPVE */
opp->spve = val & opp->vector_mask;
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
index 25cf7a5..ab1a0b4 100644
--- a/hw/intc/riscv_aclint.c
+++ b/hw/intc/riscv_aclint.c
@@ -131,7 +131,7 @@
size_t hartid = mtimer->hartid_base +
((addr - mtimer->timecmp_base) >> 3);
CPUState *cpu = cpu_by_arch_id(hartid);
- CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-mtimer: invalid hartid: %zu", hartid);
@@ -174,7 +174,7 @@
size_t hartid = mtimer->hartid_base +
((addr - mtimer->timecmp_base) >> 3);
CPUState *cpu = cpu_by_arch_id(hartid);
- CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-mtimer: invalid hartid: %zu", hartid);
@@ -233,7 +233,7 @@
/* Check if timer interrupt is triggered for each hart. */
for (i = 0; i < mtimer->num_harts; i++) {
CPUState *cpu = cpu_by_arch_id(mtimer->hartid_base + i);
- CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
if (!env) {
continue;
}
@@ -375,7 +375,7 @@
for (i = 0; i < num_harts; i++) {
CPUState *cpu = cpu_by_arch_id(hartid_base + i);
RISCVCPU *rvcpu = RISCV_CPU(cpu);
- CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
riscv_aclint_mtimer_callback *cb =
g_new0(riscv_aclint_mtimer_callback, 1);
@@ -409,7 +409,7 @@
if (addr < (swi->num_harts << 2)) {
size_t hartid = swi->hartid_base + (addr >> 2);
CPUState *cpu = cpu_by_arch_id(hartid);
- CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-swi: invalid hartid: %zu", hartid);
@@ -432,7 +432,7 @@
if (addr < (swi->num_harts << 2)) {
size_t hartid = swi->hartid_base + (addr >> 2);
CPUState *cpu = cpu_by_arch_id(hartid);
- CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-swi: invalid hartid: %zu", hartid);
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
index 760dbdd..b31d079 100644
--- a/hw/intc/riscv_imsic.c
+++ b/hw/intc/riscv_imsic.c
@@ -333,7 +333,7 @@
RISCVIMSICState *imsic = RISCV_IMSIC(dev);
RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid));
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
- CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+ CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
if (!kvm_irqchip_in_kernel()) {
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
diff --git a/hw/m68k/bootinfo.h b/hw/m68k/bootinfo.h
index a3d37e3..0e6e3ee 100644
--- a/hw/m68k/bootinfo.h
+++ b/hw/m68k/bootinfo.h
@@ -44,15 +44,14 @@
#define BOOTINFOSTR(base, id, string) \
do { \
- int i; \
stw_p(base, id); \
base += 2; \
stw_p(base, \
(sizeof(struct bi_record) + strlen(string) + \
1 /* null termination */ + 3 /* padding */) & ~3); \
base += 2; \
- for (i = 0; string[i]; i++) { \
- stb_p(base++, string[i]); \
+ for (unsigned i_ = 0; string[i_]; i_++) { \
+ stb_p(base++, string[i_]); \
} \
stb_p(base++, 0); \
base = QEMU_ALIGN_PTR_UP(base, 4); \
@@ -60,7 +59,6 @@
#define BOOTINFODATA(base, id, data, len) \
do { \
- int i; \
stw_p(base, id); \
base += 2; \
stw_p(base, \
@@ -69,8 +67,8 @@
base += 2; \
stw_p(base, len); \
base += 2; \
- for (i = 0; i < len; ++i) { \
- stb_p(base++, data[i]); \
+ for (unsigned i_ = 0; i_ < len; ++i_) { \
+ stb_p(base++, data[i_]); \
} \
base = QEMU_ALIGN_PTR_UP(base, 4); \
} while (0)
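
Moving the counter into the for statement (and giving it a trailing underscore) keeps these macros from declaring an int i that shadows, or collides with, an i already in scope at the expansion site. A stand-alone illustration of the problem being avoided (not QEMU code):

    #include <stdio.h>

    /* Old shape: the macro body declared its own 'int i;', which shadows a
     * caller's 'i' and trips -Wshadow.  New shape: the counter exists only
     * inside the for statement and uses an unlikely name. */
    #define COUNT_CHARS(str, out)                         \
        do {                                              \
            for (unsigned i_ = 0; (str)[i_]; i_++) {      \
                (out)++;                                  \
            }                                             \
        } while (0)

    int main(void)
    {
        int i = 42;        /* untouched by the macro expansion below */
        int len = 0;

        COUNT_CHARS("bootinfo", len);
        printf("i=%d len=%d\n", i, len);   /* prints i=42 len=8 */
        return 0;
    }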
diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c
index ea0fb68..fb7889c 100644
--- a/hw/microblaze/petalogix_ml605_mmu.c
+++ b/hw/microblaze/petalogix_ml605_mmu.c
@@ -183,7 +183,7 @@
spi = (SSIBus *)qdev_get_child_bus(dev, "spi");
for (i = 0; i < NUM_SPI_FLASHES; i++) {
- DriveInfo *dinfo = drive_get(IF_MTD, 0, i);
+ dinfo = drive_get(IF_MTD, 0, i);
qemu_irq cs_line;
dev = qdev_new("n25q128");
diff --git a/hw/mips/fuloong2e.c b/hw/mips/fuloong2e.c
index c827f61..c610963 100644
--- a/hw/mips/fuloong2e.c
+++ b/hw/mips/fuloong2e.c
@@ -295,9 +295,17 @@
pci_bus = bonito_init((qemu_irq *)&(env->irq[2]));
/* South bridge -> IP5 */
- pci_dev = pci_create_simple_multifunction(pci_bus,
- PCI_DEVFN(FULOONG2E_VIA_SLOT, 0),
- TYPE_VT82C686B_ISA);
+ pci_dev = pci_new_multifunction(PCI_DEVFN(FULOONG2E_VIA_SLOT, 0),
+ TYPE_VT82C686B_ISA);
+
+ /* Set properties on individual devices before realizing the south bridge */
+ if (machine->audiodev) {
+ dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ac97"));
+ qdev_prop_set_string(dev, "audiodev", machine->audiodev);
+ }
+
+ pci_realize_and_unref(pci_dev, pci_bus, &error_abort);
+
object_property_add_alias(OBJECT(machine), "rtc-time",
object_resolve_path_component(OBJECT(pci_dev),
"rtc"),
@@ -337,6 +345,7 @@
mc->default_ram_size = 256 * MiB;
mc->default_ram_id = "fuloong2e.ram";
mc->minimum_page_bits = 14;
+ machine_add_audiodev_property(mc);
}
DEFINE_MACHINE("fuloong2e", mips_fuloong2e_machine_init)
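
Both board conversions in this series (fuloong2e above, pegasos2 below) replace pci_create_simple_multifunction(), which realizes the device on the spot, with the split new/configure/realize sequence, because qdev properties such as the AC97 function's "audiodev" can only be set while the device is still unrealized. Condensed, the pattern used in the hunk above is:

    /* Sketch of the new/configure/realize sequence; names as in the hunk. */
    pci_dev = pci_new_multifunction(PCI_DEVFN(FULOONG2E_VIA_SLOT, 0),
                                    TYPE_VT82C686B_ISA);

    if (machine->audiodev) {
        /* must happen before realize */
        dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ac97"));
        qdev_prop_set_string(dev, "audiodev", machine->audiodev);
    }

    pci_realize_and_unref(pci_dev, pci_bus, &error_abort);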
diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c
index 42d4693..3e4f4b0 100644
--- a/hw/misc/arm_sysctl.c
+++ b/hw/misc/arm_sysctl.c
@@ -534,12 +534,12 @@
s->sys_cfgstat |= 2; /* error */
}
} else {
- uint32_t val;
+ uint32_t data;
if (!vexpress_cfgctrl_read(s, dcc, function, site, position,
- device, &val)) {
+ device, &data)) {
s->sys_cfgstat |= 2; /* error */
} else {
- s->sys_cfgdata = val;
+ s->sys_cfgdata = data;
}
}
}
diff --git a/hw/misc/aspeed_i3c.c b/hw/misc/aspeed_i3c.c
index f54f5da..d1ff617 100644
--- a/hw/misc/aspeed_i3c.c
+++ b/hw/misc/aspeed_i3c.c
@@ -296,13 +296,13 @@
memory_region_add_subregion(&s->iomem_container, 0x0, &s->iomem);
for (i = 0; i < ASPEED_I3C_NR_DEVICES; ++i) {
- Object *dev = OBJECT(&s->devices[i]);
+ Object *i3c_dev = OBJECT(&s->devices[i]);
- if (!object_property_set_uint(dev, "device-id", i, errp)) {
+ if (!object_property_set_uint(i3c_dev, "device-id", i, errp)) {
return;
}
- if (!sysbus_realize(SYS_BUS_DEVICE(dev), errp)) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(i3c_dev), errp)) {
return;
}
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 093c2d4..548bcab 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -127,13 +127,9 @@
QEMUTimer *flush_queue_timer;
/* Compatibility flags for migration to/from qemu 1.3.0 and older */
-#define E1000_FLAG_AUTONEG_BIT 0
-#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_TSO_BIT 3
#define E1000_FLAG_VET_BIT 4
-#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
-#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
#define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT)
#define E1000_FLAG_VET (1 << E1000_FLAG_VET_BIT)
@@ -180,7 +176,7 @@
static bool
have_autoneg(E1000State *s)
{
- return chkflag(AUTONEG) && (s->phy_reg[MII_BMCR] & MII_BMCR_AUTOEN);
+ return (s->phy_reg[MII_BMCR] & MII_BMCR_AUTOEN);
}
static void
@@ -308,35 +304,34 @@
if (s->mit_timer_on) {
return;
}
- if (chkflag(MIT)) {
- /* Compute the next mitigation delay according to pending
- * interrupts and the current values of RADV (provided
- * RDTR!=0), TADV and ITR.
- * Then rearm the timer.
- */
- mit_delay = 0;
- if (s->mit_ide &&
- (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
- mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
- }
- if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
- mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
- }
- mit_update_delay(&mit_delay, s->mac_reg[ITR]);
- /*
- * According to e1000 SPEC, the Ethernet controller guarantees
- * a maximum observable interrupt rate of 7813 interrupts/sec.
- * Thus if mit_delay < 500 then the delay should be set to the
- * minimum delay possible which is 500.
- */
- mit_delay = (mit_delay < 500) ? 500 : mit_delay;
-
- s->mit_timer_on = 1;
- timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- mit_delay * 256);
- s->mit_ide = 0;
+ /* Compute the next mitigation delay according to pending
+ * interrupts and the current values of RADV (provided
+ * RDTR!=0), TADV and ITR.
+ * Then rearm the timer.
+ */
+ mit_delay = 0;
+ if (s->mit_ide &&
+ (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
+ mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
}
+ if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
+ mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
+ }
+ mit_update_delay(&mit_delay, s->mac_reg[ITR]);
+
+ /*
+ * According to e1000 SPEC, the Ethernet controller guarantees
+ * a maximum observable interrupt rate of 7813 interrupts/sec.
+ * Thus if mit_delay < 500 then the delay should be set to the
+ * minimum delay possible which is 500.
+ */
+ mit_delay = (mit_delay < 500) ? 500 : mit_delay;
+
+ s->mit_timer_on = 1;
+ timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ mit_delay * 256);
+ s->mit_ide = 0;
}
s->mit_irq_level = (pending_ints != 0);
@@ -1223,9 +1218,6 @@
* n - flag needed
* p - partially implemented */
static const uint8_t mac_reg_access[0x8000] = {
- [RDTR] = markflag(MIT), [TADV] = markflag(MIT),
- [RADV] = markflag(MIT), [ITR] = markflag(MIT),
-
[IPAV] = markflag(MAC), [WUC] = markflag(MAC),
[IP6AT] = markflag(MAC), [IP4AT] = markflag(MAC),
[FFVT] = markflag(MAC), [WUPM] = markflag(MAC),
@@ -1394,11 +1386,6 @@
E1000State *s = opaque;
NetClientState *nc = qemu_get_queue(s->nic);
- if (!chkflag(MIT)) {
- s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
- s->mac_reg[TADV] = 0;
- s->mit_irq_level = false;
- }
s->mit_ide = 0;
s->mit_timer_on = true;
timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1);
@@ -1432,13 +1419,6 @@
return 0;
}
-static bool e1000_mit_state_needed(void *opaque)
-{
- E1000State *s = opaque;
-
- return chkflag(MIT);
-}
-
static bool e1000_full_mac_needed(void *opaque)
{
E1000State *s = opaque;
@@ -1457,7 +1437,6 @@
.name = "e1000/mit_state",
.version_id = 1,
.minimum_version_id = 1,
- .needed = e1000_mit_state_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT32(mac_reg[RDTR], E1000State),
VMSTATE_UINT32(mac_reg[RADV], E1000State),
@@ -1699,10 +1678,6 @@
static Property e1000_properties[] = {
DEFINE_NIC_PROPERTIES(E1000State, conf),
- DEFINE_PROP_BIT("autonegotiation", E1000State,
- compat_flags, E1000_FLAG_AUTONEG_BIT, true),
- DEFINE_PROP_BIT("mitigation", E1000State,
- compat_flags, E1000_FLAG_MIT_BIT, true),
DEFINE_PROP_BIT("extra_mac_registers", E1000State,
compat_flags, E1000_FLAG_MAC_BIT, true),
DEFINE_PROP_BIT("migrate_tso_props", E1000State,
diff --git a/hw/nios2/10m50_devboard.c b/hw/nios2/10m50_devboard.c
index 91383fb..952a0dc 100644
--- a/hw/nios2/10m50_devboard.c
+++ b/hw/nios2/10m50_devboard.c
@@ -98,7 +98,7 @@
qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal);
if (nms->vic) {
- DeviceState *dev = qdev_new(TYPE_NIOS2_VIC);
+ dev = qdev_new(TYPE_NIOS2_VIC);
MemoryRegion *dev_mr;
qemu_irq cpu_irq;
@@ -107,7 +107,7 @@
cpu_irq = qdev_get_gpio_in_named(DEVICE(cpu), "EIC", 0);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq);
- for (int i = 0; i < 32; i++) {
+ for (i = 0; i < 32; i++) {
irq[i] = qdev_get_gpio_in(dev, i);
}
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 44aba8f..0eabcf5 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -107,7 +107,7 @@
ns->pif = ns->params.pif;
- static const NvmeLBAF lbaf[16] = {
+ static const NvmeLBAF defaults[16] = {
[0] = { .ds = 9 },
[1] = { .ds = 9, .ms = 8 },
[2] = { .ds = 9, .ms = 16 },
@@ -120,7 +120,7 @@
ns->nlbaf = 8;
- memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf));
+ memcpy(&id_ns->lbaf, &defaults, sizeof(defaults));
for (i = 0; i < ns->nlbaf; i++) {
NvmeLBAF *lbaf = &id_ns->lbaf[i];
diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build
index 988dff6..75e415b 100644
--- a/hw/nvram/meson.build
+++ b/hw/nvram/meson.build
@@ -1,8 +1,4 @@
-if have_system or have_tools
- # QOM interfaces must be available anytime QOM is used.
- qom_ss.add(files('fw_cfg-interface.c'))
-endif
-
+system_ss.add(files('fw_cfg-interface.c'))
system_ss.add(files('fw_cfg.c'))
system_ss.add(when: 'CONFIG_CHRP_NVRAM', if_true: files('chrp_nvram.c'))
system_ss.add(when: 'CONFIG_DS1225Y', if_true: files('ds1225y.c'))
diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c
index 62d6287..653cc3f 100644
--- a/hw/pci-host/i440fx.c
+++ b/hw/pci-host/i440fx.c
@@ -56,7 +56,6 @@
uint64_t above_4g_mem_size;
uint64_t pci_hole64_size;
bool pci_hole64_fix;
- uint32_t short_root_bus;
char *pci_type;
};
@@ -351,19 +350,12 @@
static const char *i440fx_pcihost_root_bus_path(PCIHostState *host_bridge,
PCIBus *rootbus)
{
- I440FXState *s = I440FX_PCI_HOST_BRIDGE(host_bridge);
-
- /* For backwards compat with old device paths */
- if (s->short_root_bus) {
- return "0000";
- }
return "0000:00";
}
static Property i440fx_props[] = {
DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, I440FXState,
pci_hole64_size, I440FX_PCI_HOST_HOLE64_SIZE_DEFAULT),
- DEFINE_PROP_UINT32("short_root_bus", I440FXState, short_root_bus, 0),
DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, I440FXState,
below_4g_mem_size, 0),
DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, I440FXState,
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index 91c46df..08534bc 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -73,12 +73,6 @@
static const char *q35_host_root_bus_path(PCIHostState *host_bridge,
PCIBus *rootbus)
{
- Q35PCIHost *s = Q35_HOST_DEVICE(host_bridge);
-
- /* For backwards compat with old device paths */
- if (s->mch.short_root_bus) {
- return "0000";
- }
return "0000:00";
}
@@ -181,7 +175,6 @@
MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT),
DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, Q35PCIHost,
mch.pci_hole64_size, Q35_PCI_HOST_HOLE64_SIZE_DEFAULT),
- DEFINE_PROP_UINT32("short_root_bus", Q35PCIHost, mch.short_root_bus, 0),
DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, Q35PCIHost,
mch.below_4g_mem_size, 0),
DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, Q35PCIHost,
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index d5b6820..e04114f 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -373,7 +373,7 @@
MachineState *machine = MACHINE(pms);
unsigned int smp_cpus = machine->smp.cpus;
const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms);
- CPUPPCState *env = first_cpu->env_ptr;
+ CPUPPCState *env = cpu_env(first_cpu);
int ret = -1;
uint64_t mem_reg_property[] = { 0, cpu_to_be64(machine->ram_size) };
int fdt_size;
@@ -499,7 +499,7 @@
if (cpu == NULL) {
continue;
}
- env = cpu->env_ptr;
+ env = cpu_env(cpu);
cpu_name = g_strdup_printf("/cpus/PowerPC,8544@%x", i);
qemu_fdt_add_subnode(fdt, cpu_name);
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
index bd397cf..3203a4a 100644
--- a/hw/ppc/pegasos2.c
+++ b/hw/ppc/pegasos2.c
@@ -180,8 +180,15 @@
pci_bus_irqs(pci_bus, pegasos2_pci_irq, pm, PCI_NUM_PINS);
/* VIA VT8231 South Bridge (multifunction PCI device) */
- via = OBJECT(pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0),
- TYPE_VT8231_ISA));
+ via = OBJECT(pci_new_multifunction(PCI_DEVFN(12, 0), TYPE_VT8231_ISA));
+
+ /* Set properties on individual devices before realizing the south bridge */
+ if (machine->audiodev) {
+ dev = PCI_DEVICE(object_resolve_path_component(via, "ac97"));
+ qdev_prop_set_string(DEVICE(dev), "audiodev", machine->audiodev);
+ }
+
+ pci_realize_and_unref(PCI_DEVICE(via), pci_bus, &error_abort);
for (i = 0; i < PCI_NUM_PINS; i++) {
pm->via_pirq[i] = qdev_get_gpio_in_named(DEVICE(via), "pirq", i);
}
@@ -556,6 +563,7 @@
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("7457_v1.2");
mc->default_ram_id = "pegasos2.ram";
mc->default_ram_size = 512 * MiB;
+ machine_add_audiodev_property(mc);
vhc->cpu_in_nested = pegasos2_cpu_in_nested;
vhc->hypercall = pegasos2_hypercall;
diff --git a/hw/ppc/pnv_psi.c b/hw/ppc/pnv_psi.c
index daaa2f0..26460d2 100644
--- a/hw/ppc/pnv_psi.c
+++ b/hw/ppc/pnv_psi.c
@@ -738,8 +738,9 @@
}
} else {
if (!(psi->regs[reg] & PSIHB9_ESB_CI_VALID)) {
- hwaddr addr = val & ~(PSIHB9_ESB_CI_VALID | PSIHB10_ESB_CI_64K);
- memory_region_add_subregion(sysmem, addr,
+ hwaddr esb_addr =
+ val & ~(PSIHB9_ESB_CI_VALID | PSIHB10_ESB_CI_64K);
+ memory_region_add_subregion(sysmem, esb_addr,
&psi9->source.esb_mmio);
}
}
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index f6fd35f..137276b 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -45,6 +45,7 @@
#include "trace.h"
#include "elf.h"
#include "qemu/units.h"
+#include "audio/audio.h"
/* SMP is not enabled, for now */
#define MAX_CPUS 1
@@ -310,6 +311,10 @@
dev = DEVICE(isa_dev);
qdev_prop_set_uint32(dev, "iobase", 0x830);
qdev_prop_set_uint32(dev, "irq", 10);
+
+ if (machine->audiodev) {
+ qdev_prop_set_string(dev, "audiodev", machine->audiodev);
+ }
isa_realize_and_unref(isa_dev, isa_bus, &error_fatal);
isa_dev = isa_new("pc87312");
@@ -426,6 +431,8 @@
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("604");
mc->default_display = "std";
mc->default_nic = "pcnet";
+
+ machine_add_audiodev_property(mc);
}
DEFINE_MACHINE("40p", ibm_40p_machine_init)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 1f1aa2a..cb84067 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -780,6 +780,26 @@
pcc->lrg_decr_bits)));
}
+static void spapr_dt_one_cpu(void *fdt, SpaprMachineState *spapr, CPUState *cs,
+ int cpus_offset)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ int index = spapr_get_vcpu_id(cpu);
+ DeviceClass *dc = DEVICE_GET_CLASS(cs);
+ g_autofree char *nodename = NULL;
+ int offset;
+
+ if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
+ return;
+ }
+
+ nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
+ offset = fdt_add_subnode(fdt, cpus_offset, nodename);
+ _FDT(offset);
+ spapr_dt_cpu(cs, fdt, offset, spapr);
+}
+
+
static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
{
CPUState **rev;
@@ -809,21 +829,7 @@
}
for (i = n_cpus - 1; i >= 0; i--) {
- CPUState *cs = rev[i];
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- int index = spapr_get_vcpu_id(cpu);
- DeviceClass *dc = DEVICE_GET_CLASS(cs);
- g_autofree char *nodename = NULL;
- int offset;
-
- if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
- continue;
- }
-
- nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
- offset = fdt_add_subnode(fdt, cpus_offset, nodename);
- _FDT(offset);
- spapr_dt_cpu(cs, fdt, offset, spapr);
+ spapr_dt_one_cpu(fdt, spapr, rev[i], cpus_offset);
}
g_free(rev);
@@ -1119,7 +1125,7 @@
* Older KVM versions with older guest kernels were broken
* with the magic page, don't allow the guest to map it.
*/
- if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
+ if (!kvmppc_get_hypercall(cpu_env(first_cpu), hypercall,
sizeof(hypercall))) {
_FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
hypercall, sizeof(hypercall)));
@@ -2659,8 +2665,6 @@
}
if (smc->pre_2_10_has_unused_icps) {
- int i;
-
for (i = 0; i < spapr_max_server_number(spapr); i++) {
/* Dummy entries get deregistered when real ICPState objects
* are registered during CPU core hotplug.
@@ -3210,8 +3214,8 @@
if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
/* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
- PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
- return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
+ PCIDevice *pdev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
+ return g_strdup_printf("pci@%x", PCI_SLOT(pdev->devfn));
}
if (pcidev) {
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index b5c400a..2b99d3b 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -341,7 +341,7 @@
fdt_depth = 0;
do {
- const char *name = NULL;
+ const char *dt_name = NULL;
const struct fdt_property *prop = NULL;
int prop_len = 0, name_len = 0;
uint32_t tag;
@@ -351,8 +351,8 @@
switch (tag) {
case FDT_BEGIN_NODE:
fdt_depth++;
- name = fdt_get_name(fdt, fdt_offset, &name_len);
- if (!visit_start_struct(v, name, NULL, 0, errp)) {
+ dt_name = fdt_get_name(fdt, fdt_offset, &name_len);
+ if (!visit_start_struct(v, dt_name, NULL, 0, errp)) {
return;
}
break;
@@ -369,8 +369,8 @@
case FDT_PROP: {
int i;
prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len);
- name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
- if (!visit_start_list(v, name, NULL, 0, errp)) {
+ dt_name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+ if (!visit_start_list(v, dt_name, NULL, 0, errp)) {
return;
}
for (i = 0; i < prop_len; i++) {
@@ -1237,8 +1237,6 @@
case FDT_END_NODE:
drc->ccs_depth--;
if (drc->ccs_depth == 0) {
- uint32_t drc_index = spapr_drc_index(drc);
-
/* done sending the device tree, move to configured state */
trace_spapr_drc_set_configured(drc_index);
drc->state = drck->ready_state;
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index ce14959..370c5a9 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -1826,9 +1826,9 @@
(SpaprMachineState *) object_dynamic_cast(qdev_get_machine(),
TYPE_SPAPR_MACHINE);
SpaprMachineClass *smc = spapr ? SPAPR_MACHINE_GET_CLASS(spapr) : NULL;
- SysBusDevice *s = SYS_BUS_DEVICE(dev);
- SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
- PCIHostState *phb = PCI_HOST_BRIDGE(s);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(sbd);
+ PCIHostState *phb = PCI_HOST_BRIDGE(sbd);
MachineState *ms = MACHINE(spapr);
char *namebuf;
int i;
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index 6a2fcc4..436503f 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -227,7 +227,7 @@
IRQ_M_TIMER));
/* SPI-Hosts */
- for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; ++i) {
+ for (i = 0; i < OPENTITAN_NUM_SPI_HOSTS; ++i) {
dev = DEVICE(&(s->spi_host[i]));
if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi_host[i]), errp)) {
return;
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index e52188d..9b11d8c 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -759,7 +759,8 @@
}
if (to_device) {
- len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
+ len = MIN(s->async_len, ESP_FIFO_SZ);
+ len = MIN(len, fifo8_num_used(&s->fifo));
esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
s->async_buf += len;
s->async_len -= len;
@@ -1395,7 +1396,7 @@
parent_esp_reset(s, irq, level);
break;
case 1:
- esp_dma_enable(opaque, irq, level);
+ esp_dma_enable(s, irq, level);
break;
}
}
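
A concrete case makes the first esp.c hunk clearer: ESP_FIFO_SZ is 16, so with, say, 4 bytes of space left in the SCSI layer's buffer (async_len == 4) and 10 bytes queued in the FIFO, the old single MIN popped 10 bytes into a 4-byte destination; clamping against async_len first limits the pop to 4 bytes, and the remaining 6 stay in the FIFO for the next round.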
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
index 3de288b..75d3ab8 100644
--- a/hw/scsi/mptsas.c
+++ b/hw/scsi/mptsas.c
@@ -192,7 +192,7 @@
return addr;
}
-static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
+static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr req_addr)
{
PCIDevice *pci = (PCIDevice *) s;
hwaddr next_chain_addr;
@@ -201,8 +201,8 @@
uint32_t chain_offset;
chain_offset = req->scsi_io.ChainOffset;
- next_chain_addr = addr + chain_offset * sizeof(uint32_t);
- sgaddr = addr + sizeof(MPIMsgSCSIIORequest);
+ next_chain_addr = req_addr + chain_offset * sizeof(uint32_t);
+ sgaddr = req_addr + sizeof(MPIMsgSCSIIORequest);
pci_dma_sglist_init(&req->qsg, pci, 4);
left = req->scsi_io.DataLength;
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index e0d79c7..6691f5e 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -1628,9 +1628,10 @@
* Since the existing code only checks/updates bits 8-15 of the block
* size, restrict ourselves to the same requirement for now to ensure
* that a block size set by a block descriptor and then read back by
- * a subsequent SCSI command will be the same
+ * a subsequent SCSI command will be the same. Also disallow a block
+ * size of 256 since we cannot handle anything below BDRV_SECTOR_SIZE.
*/
- if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
+ if (bs && !(bs & ~0xfe00) && bs != s->qdev.blocksize) {
s->qdev.blocksize = bs;
trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
}
@@ -1958,6 +1959,10 @@
scsi_disk_emulate_write_same(r, r->iov.iov_base);
break;
+ case FORMAT_UNIT:
+ scsi_req_complete(&r->req, GOOD);
+ break;
+
default:
abort();
}
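
The effect of tightening the mask is easiest to see with numbers: ~0xff00 tolerated any block size built from bits 8-15, while ~0xfe00 also requires bit 8 to be clear, so 256 is now rejected while 512, 1024 and 4096 still pass (provided they differ from the current blocksize). A standalone check of just the arithmetic, mirroring the predicate in the hunk above:

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A block size from the descriptor is taken over only if it is non-zero
     * and all of its set bits fall inside bits 9-15, i.e. multiples of 512
     * up to 0xfe00. */
    static int blocksize_acceptable(uint32_t bs)
    {
        return bs && !(bs & ~0xfe00);
    }

    int main(void)
    {
        uint32_t candidates[] = { 256, 512, 520, 1024, 4096 };

        for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
            printf("%" PRIu32 " -> %s\n", candidates[i],
                   blocksize_acceptable(candidates[i]) ? "accepted" : "rejected");
        }
        return 0;
    }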
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index b753705..2a90601 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -1423,13 +1423,14 @@
if (!qemu_opts_validate(opts, qemu_smbios_type8_opts, errp)) {
return;
}
- struct type8_instance *t;
- t = g_new0(struct type8_instance, 1);
- save_opt(&t->internal_reference, opts, "internal_reference");
- save_opt(&t->external_reference, opts, "external_reference");
- t->connector_type = qemu_opt_get_number(opts, "connector_type", 0);
- t->port_type = qemu_opt_get_number(opts, "port_type", 0);
- QTAILQ_INSERT_TAIL(&type8, t, next);
+ struct type8_instance *t8_i;
+ t8_i = g_new0(struct type8_instance, 1);
+ save_opt(&t8_i->internal_reference, opts, "internal_reference");
+ save_opt(&t8_i->external_reference, opts, "external_reference");
+ t8_i->connector_type = qemu_opt_get_number(opts,
+ "connector_type", 0);
+ t8_i->port_type = qemu_opt_get_number(opts, "port_type", 0);
+ QTAILQ_INSERT_TAIL(&type8, t8_i, next);
return;
case 11:
if (!qemu_opts_validate(opts, qemu_smbios_type11_opts, errp)) {
@@ -1452,27 +1453,27 @@
type17.speed = qemu_opt_get_number(opts, "speed", 0);
return;
case 41: {
- struct type41_instance *t;
+ struct type41_instance *t41_i;
Error *local_err = NULL;
if (!qemu_opts_validate(opts, qemu_smbios_type41_opts, errp)) {
return;
}
- t = g_new0(struct type41_instance, 1);
- save_opt(&t->designation, opts, "designation");
- t->kind = qapi_enum_parse(&type41_kind_lookup,
- qemu_opt_get(opts, "kind"),
- 0, &local_err) + 1;
- t->kind |= 0x80; /* enabled */
+ t41_i = g_new0(struct type41_instance, 1);
+ save_opt(&t41_i->designation, opts, "designation");
+ t41_i->kind = qapi_enum_parse(&type41_kind_lookup,
+ qemu_opt_get(opts, "kind"),
+ 0, &local_err) + 1;
+ t41_i->kind |= 0x80; /* enabled */
if (local_err != NULL) {
error_propagate(errp, local_err);
- g_free(t);
+ g_free(t41_i);
return;
}
- t->instance = qemu_opt_get_number(opts, "instance", 1);
- save_opt(&t->pcidev, opts, "pcidev");
+ t41_i->instance = qemu_opt_get_number(opts, "instance", 1);
+ save_opt(&t41_i->pcidev, opts, "pcidev");
- QTAILQ_INSERT_TAIL(&type41, t, next);
+ QTAILQ_INSERT_TAIL(&type41, t41_i, next);
return;
}
default:
diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c
index 9c20b3d..72161f0 100644
--- a/hw/timer/aspeed_timer.c
+++ b/hw/timer/aspeed_timer.c
@@ -167,7 +167,7 @@
qemu_set_irq(t->irq, t->level);
}
- next = MAX(MAX(calculate_match(t, 0), calculate_match(t, 1)), 0);
+ next = MAX(calculate_match(t, 0), calculate_match(t, 1));
t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
return calculate_time(t, next);
diff --git a/hw/tricore/tricore_testdevice.c b/hw/tricore/tricore_testdevice.c
index a1563aa..9028d97 100644
--- a/hw/tricore/tricore_testdevice.c
+++ b/hw/tricore/tricore_testdevice.c
@@ -16,6 +16,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "hw/sysbus.h"
#include "hw/qdev-properties.h"
#include "hw/tricore/tricore_testdevice.h"
@@ -23,6 +24,9 @@
static void tricore_testdevice_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
+ if (value != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Test %" PRIu64 " failed!\n", value);
+ }
exit(value);
}
diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c
index 8748c1b..d5ac1f8 100644
--- a/hw/usb/dev-audio.c
+++ b/hw/usb/dev-audio.c
@@ -944,12 +944,15 @@
USBAudioState *s = USB_AUDIO(dev);
int i;
+ if (!AUD_register_card(TYPE_USB_AUDIO, &s->card, errp)) {
+ return;
+ }
+
dev->usb_desc = s->multi ? &desc_audio_multi : &desc_audio;
usb_desc_create_serial(dev);
usb_desc_init(dev);
s->dev.opaque = s;
- AUD_register_card(TYPE_USB_AUDIO, &s->card);
s->out.altset = ALTSET_OFF;
s->out.vol.mute = false;
diff --git a/include/block/nbd.h b/include/block/nbd.h
index f672b76..8a765e7 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -60,20 +60,22 @@
NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
NBD_MODE_SIMPLE, /* newstyle but only simple replies */
NBD_MODE_STRUCTURED, /* newstyle, structured replies enabled */
- /* TODO add NBD_MODE_EXTENDED */
+ NBD_MODE_EXTENDED, /* newstyle, extended headers enabled */
} NBDMode;
-/* Transmission phase structs
- *
- * Note: these are _NOT_ the same as the network representation of an NBD
- * request and reply!
+/* Transmission phase structs */
+
+/*
+ * Note: NBDRequest is _NOT_ the same as the network representation of an NBD
+ * request!
*/
typedef struct NBDRequest {
uint64_t cookie;
- uint64_t from;
- uint32_t len;
+ uint64_t from; /* Offset touched by the command */
+ uint64_t len; /* Effect length; 32 bit limit without extended headers */
uint16_t flags; /* NBD_CMD_FLAG_* */
- uint16_t type; /* NBD_CMD_* */
+ uint16_t type; /* NBD_CMD_* */
+ NBDMode mode; /* Determines which network representation to use */
} NBDRequest;
typedef struct NBDSimpleReply {
@@ -91,20 +93,36 @@
uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
+typedef struct NBDExtendedReplyChunk {
+ uint32_t magic; /* NBD_EXTENDED_REPLY_MAGIC */
+ uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
+ uint16_t type; /* NBD_REPLY_TYPE_* */
+ uint64_t cookie; /* request handle */
+ uint64_t offset; /* request offset */
+ uint64_t length; /* length of payload */
+} QEMU_PACKED NBDExtendedReplyChunk;
+
typedef union NBDReply {
NBDSimpleReply simple;
NBDStructuredReplyChunk structured;
+ NBDExtendedReplyChunk extended;
struct {
/*
- * @magic and @cookie fields have the same offset and size both in
- * simple reply and structured reply chunk, so let them be accessible
- * without ".simple." or ".structured." specification
+ * @magic and @cookie fields have the same offset and size in all
+ * forms of replies, so let them be accessible without ".simple.",
+ * ".structured.", or ".extended." specifications.
*/
uint32_t magic;
uint32_t _skip;
uint64_t cookie;
- } QEMU_PACKED;
+ };
} NBDReply;
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, simple.cookie) !=
+ offsetof(NBDReply, cookie));
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, structured.cookie) !=
+ offsetof(NBDReply, cookie));
+QEMU_BUILD_BUG_ON(offsetof(NBDReply, extended.cookie) !=
+ offsetof(NBDReply, cookie));
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
@@ -131,14 +149,34 @@
typedef struct NBDStructuredMeta {
/* header's length >= 12 (at least one extent) */
uint32_t context_id;
- /* extents follows */
+ /* NBDExtent32 extents[] follows, array length implied by header */
} QEMU_PACKED NBDStructuredMeta;
-/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
-typedef struct NBDExtent {
+/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS */
+typedef struct NBDExtent32 {
uint32_t length;
uint32_t flags; /* NBD_STATE_* */
-} QEMU_PACKED NBDExtent;
+} QEMU_PACKED NBDExtent32;
+
+/* Header of NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
+typedef struct NBDExtendedMeta {
+ /* header's length >= 24 (at least one extent) */
+ uint32_t context_id;
+ uint32_t count; /* header length must be count * 16 + 8 */
+ /* NBDExtent64 extents[count] follows */
+} QEMU_PACKED NBDExtendedMeta;
+
+/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
+typedef struct NBDExtent64 {
+ uint64_t length;
+ uint64_t flags; /* NBD_STATE_* */
+} QEMU_PACKED NBDExtent64;
+
+/* Client payload for limiting NBD_CMD_BLOCK_STATUS reply */
+typedef struct NBDBlockStatusPayload {
+ uint64_t effect_length;
+ /* uint32_t ids[] follows, array length implied by header */
+} QEMU_PACKED NBDBlockStatusPayload;
/* Transmission (export) flags: sent from server to client during handshake,
but describe what will happen during transmission */
@@ -156,20 +194,22 @@
NBD_FLAG_SEND_RESIZE_BIT = 9, /* Send resize */
NBD_FLAG_SEND_CACHE_BIT = 10, /* Send CACHE (prefetch) */
NBD_FLAG_SEND_FAST_ZERO_BIT = 11, /* FAST_ZERO flag for WRITE_ZEROES */
+ NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT = 12, /* PAYLOAD flag for BLOCK_STATUS */
};
-#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
-#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
-#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
-#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
-#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
-#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
-#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
-#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
-#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
-#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
-#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
-#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
+#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
+#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
+#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
+#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
+#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
+#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
+#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
+#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
+#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
+#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
+#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
+#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
+#define NBD_FLAG_BLOCK_STAT_PAYLOAD (1 << NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT)
/* New-style handshake (global) flags, sent from server to client, and
control what will happen during handshake phase. */
@@ -192,6 +232,7 @@
#define NBD_OPT_STRUCTURED_REPLY (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT (10)
+#define NBD_OPT_EXTENDED_HEADERS (11)
/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))
@@ -209,6 +250,8 @@
#define NBD_REP_ERR_UNKNOWN NBD_REP_ERR(6) /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN NBD_REP_ERR(7) /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8) /* Need INFO_BLOCK_SIZE */
+#define NBD_REP_ERR_TOO_BIG NBD_REP_ERR(9) /* Payload size overflow */
+#define NBD_REP_ERR_EXT_HEADER_REQD NBD_REP_ERR(10) /* Need extended headers */
/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT 0
@@ -217,12 +260,14 @@
#define NBD_INFO_BLOCK_SIZE 3
/* Request flags, sent from client to server during transmission phase */
-#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
-#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
-#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
-#define NBD_CMD_FLAG_REQ_ONE (1 << 3) /* only one extent in BLOCK_STATUS
- * reply chunk */
-#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
+#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
+#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
+#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
+#define NBD_CMD_FLAG_REQ_ONE (1 << 3) \
+ /* only one extent in BLOCK_STATUS reply chunk */
+#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
+#define NBD_CMD_FLAG_PAYLOAD_LEN (1 << 5) \
+ /* length describes payload, not effect; only with ext header */
/* Supported request types */
enum {
@@ -248,22 +293,31 @@
*/
#define NBD_MAX_STRING_SIZE 4096
-/* Two types of reply structures */
+/* Two types of request structures, a given client will only use 1 */
+#define NBD_REQUEST_MAGIC 0x25609513
+#define NBD_EXTENDED_REQUEST_MAGIC 0x21e41c71
+
+/*
+ * Three types of reply structures, but what a client expects depends
+ * on NBD_OPT_STRUCTURED_REPLY and NBD_OPT_EXTENDED_HEADERS.
+ */
#define NBD_SIMPLE_REPLY_MAGIC 0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef
+#define NBD_EXTENDED_REPLY_MAGIC 0x6e8a278c
-/* Structured reply flags */
+/* Chunk reply flags (for structured and extended replies) */
#define NBD_REPLY_FLAG_DONE (1 << 0) /* This reply-chunk is last */
-/* Structured reply types */
+/* Chunk reply types */
#define NBD_REPLY_ERR(value) ((1 << 15) | (value))
-#define NBD_REPLY_TYPE_NONE 0
-#define NBD_REPLY_TYPE_OFFSET_DATA 1
-#define NBD_REPLY_TYPE_OFFSET_HOLE 2
-#define NBD_REPLY_TYPE_BLOCK_STATUS 5
-#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
-#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
+#define NBD_REPLY_TYPE_NONE 0
+#define NBD_REPLY_TYPE_OFFSET_DATA 1
+#define NBD_REPLY_TYPE_OFFSET_HOLE 2
+#define NBD_REPLY_TYPE_BLOCK_STATUS 5
+#define NBD_REPLY_TYPE_BLOCK_STATUS_EXT 6
+#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
+#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
@@ -305,7 +359,7 @@
/* In-out fields, set by client before nbd_receive_negotiate() and
* updated by server results during nbd_receive_negotiate() */
- bool structured_reply;
+ NBDMode mode; /* input maximum mode tolerated; output actual mode chosen */
bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */
/* Set by server results during nbd_receive_negotiate() and
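
With extended headers negotiated, a client now has to cope with three reply framings that share only the leading magic and the cookie position, which is exactly what the anonymous struct and the QEMU_BUILD_BUG_ON checks above guarantee. A rough standalone sketch of the resulting dispatch, using the sizes implied by the structs in this header (the 16-byte simple reply is assumed from the NBD spec, as it is not shown in the hunk):

    #include <stddef.h>
    #include <stdint.h>

    #define NBD_SIMPLE_REPLY_MAGIC     0x67446698
    #define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef
    #define NBD_EXTENDED_REPLY_MAGIC   0x6e8a278c

    /* On-the-wire header size a client must read once it has peeked at the
     * 4-byte magic; 0 means protocol error. */
    static size_t nbd_reply_header_size(uint32_t magic)
    {
        switch (magic) {
        case NBD_SIMPLE_REPLY_MAGIC:
            return 16;   /* magic + error + cookie */
        case NBD_STRUCTURED_REPLY_MAGIC:
            return 20;   /* adds flags/type and a 32-bit payload length */
        case NBD_EXTENDED_REPLY_MAGIC:
            return 32;   /* adds a 64-bit offset and 64-bit payload length */
        default:
            return 0;
        }
    }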
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index c2c6216..5340907 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -26,13 +26,6 @@
#include "hw/core/cpu.h"
#include "qemu/rcu.h"
-#define EXCP_INTERRUPT 0x10000 /* async interruption */
-#define EXCP_HLT 0x10001 /* hlt instruction reached */
-#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
-#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
-#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
-
/* some important defines:
*
* HOST_BIG_ENDIAN : whether the host cpu is big endian and
@@ -413,29 +406,14 @@
return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
-#ifdef CONFIG_TCG
-/* accel/tcg/translate-all.c */
-void dump_exec_info(GString *buf);
-#endif /* CONFIG_TCG */
-
#endif /* !CONFIG_USER_ONLY */
/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);
-void tcg_exec_realizefn(CPUState *cpu, Error **errp);
-void tcg_exec_unrealizefn(CPUState *cpu);
-/**
- * cpu_set_cpustate_pointers(cpu)
- * @cpu: The cpu object
- *
- * Set the generic pointers in CPUState into the outer object.
- */
-static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
-{
- cpu->parent_obj.env_ptr = &cpu->env;
- cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
-}
+/* Validate correct placement of CPUArchState. */
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
+QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
/**
* env_archcpu(env)
@@ -445,7 +423,7 @@
*/
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
- return container_of(env, ArchCPU, env);
+ return (void *)env - sizeof(CPUState);
}
/**
@@ -456,42 +434,7 @@
*/
static inline CPUState *env_cpu(CPUArchState *env)
{
- return &env_archcpu(env)->parent_obj;
-}
-
-/**
- * env_neg(env)
- * @env: The architecture environment
- *
- * Return the CPUNegativeOffsetState associated with the environment.
- */
-static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
-{
- ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
- return &arch_cpu->neg;
-}
-
-/**
- * cpu_neg(cpu)
- * @cpu: The generic CPUState
- *
- * Return the CPUNegativeOffsetState associated with the cpu.
- */
-static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
-{
- ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
- return &arch_cpu->neg;
-}
-
-/**
- * env_tlb(env)
- * @env: The architecture environment
- *
- * Return the CPUTLB state associated with the environment.
- */
-static inline CPUTLB *env_tlb(CPUArchState *env)
-{
- return &env_neg(env)->tlb;
+ return (void *)env - sizeof(CPUState);
}
#endif /* CPU_ALL_H */
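The two build asserts and the pointer arithmetic above only work because every target's ArchCPU is laid out the same way; a minimal sketch of that assumed shape (illustrative, target-specific members omitted):

    struct ArchCPU {
        CPUState parent_obj;   /* must be first: offsetof(ArchCPU, parent_obj) == 0 */
        CPUArchState env;      /* must start at exactly sizeof(CPUState)            */
        /* target-specific members may follow */
    };

With that layout, env_archcpu() and env_cpu() reduce to subtracting sizeof(CPUState), and cpu_env() in hw/core/cpu.h can simply return (CPUArchState *)(cpu + 1).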
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 41788c0..605b160 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -7,6 +7,13 @@
#include "exec/hwaddr.h"
#endif
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
+
/**
* vaddr:
* Type wide enough to contain any #target_ulong virtual address.
@@ -166,4 +173,36 @@
/* vl.c */
void list_cpus(void);
+#ifdef CONFIG_TCG
+/**
+ * cpu_unwind_state_data:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @data: output data
+ *
+ * Attempt to load the unwind state for a host pc occurring in
+ * translated code. If @host_pc is not in translated code, the
+ * function returns false; otherwise @data is loaded.
+ * This is the same unwind info as given to restore_state_to_opc.
+ */
+bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
+
+/**
+ * cpu_restore_state:
+ * @cpu: the cpu context
+ * @host_pc: the host pc within the translation
+ * @return: true if state was restored, false otherwise
+ *
+ * Attempt to restore the state for a fault occurring in translated
+ * code. If @host_pc is not in translated code no state is
+ * restored and the function returns false.
+ */
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
+
+G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+#endif /* CONFIG_TCG */
+G_NORETURN void cpu_loop_exit(CPUState *cpu);
+G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
+
#endif /* CPU_COMMON_H */
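A usage sketch for the relocated declarations (the function name below is made up): a fault path in an accelerator helper resynchronizes guest state from the faulting host return address before leaving the CPU loop, which is roughly what cpu_loop_exit_restore() bundles up.

    static void report_fault_and_exit(CPUState *cpu, uintptr_t retaddr)
    {
        if (retaddr && cpu_restore_state(cpu, retaddr)) {
            /* retaddr was inside a TB: guest PC and flags are now up to date */
        }
        cpu_loop_exit(cpu);    /* longjmp back into cpu_exec() */
    }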
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 3502878..3915438 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -54,18 +54,7 @@
#include "exec/target_long.h"
-/*
- * Fix the number of mmu modes to 16, which is also the maximum
- * supported by the softmmu tlb api.
- */
-#define NB_MMU_MODES 16
-
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-#include "exec/tlb-common.h"
-
-/* use a fully associative victim tlb of 8 entries */
-#define CPU_VTLB_SIZE 8
-
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8
@@ -91,131 +80,4 @@
#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
-#if defined(CONFIG_SOFTMMU)
-/*
- * The full TLB entry, which is not accessed by generated TCG code,
- * so the layout is not as critical as that of CPUTLBEntry. This is
- * also why we don't want to combine the two structs.
- */
-typedef struct CPUTLBEntryFull {
- /*
- * @xlat_section contains:
- * - For ram, an offset which must be added to the virtual address
- * to obtain the ram_addr_t of the target RAM
- * - For other memory regions,
- * + in the lower TARGET_PAGE_BITS, the physical section number
- * + with the TARGET_PAGE_BITS masked off, the offset within
- * the target MemoryRegion
- */
- hwaddr xlat_section;
-
- /*
- * @phys_addr contains the physical address in the address space
- * given by cpu_asidx_from_attrs(cpu, @attrs).
- */
- hwaddr phys_addr;
-
- /* @attrs contains the memory transaction attributes for the page. */
- MemTxAttrs attrs;
-
- /* @prot contains the complete protections for the page. */
- uint8_t prot;
-
- /* @lg_page_size contains the log2 of the page size. */
- uint8_t lg_page_size;
-
- /*
- * Additional tlb flags for use by the slow path. If non-zero,
- * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
- */
- uint8_t slow_flags[MMU_ACCESS_COUNT];
-
- /*
- * Allow target-specific additions to this structure.
- * This may be used to cache items from the guest cpu
- * page tables for later use by the implementation.
- */
-#ifdef TARGET_PAGE_ENTRY_EXTRA
- TARGET_PAGE_ENTRY_EXTRA
-#endif
-} CPUTLBEntryFull;
-#endif /* CONFIG_SOFTMMU */
-
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-/*
- * Data elements that are per MMU mode, minus the bits accessed by
- * the TCG fast path.
- */
-typedef struct CPUTLBDesc {
- /*
- * Describe a region covering all of the large pages allocated
- * into the tlb. When any page within this region is flushed,
- * we must flush the entire tlb. The region is matched if
- * (addr & large_page_mask) == large_page_addr.
- */
- vaddr large_page_addr;
- vaddr large_page_mask;
- /* host time (in ns) at the beginning of the time window */
- int64_t window_begin_ns;
- /* maximum number of entries observed in the window */
- size_t window_max_entries;
- size_t n_used_entries;
- /* The next index to use in the tlb victim table. */
- size_t vindex;
- /* The tlb victim table, in two parts. */
- CPUTLBEntry vtable[CPU_VTLB_SIZE];
- CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
- CPUTLBEntryFull *fulltlb;
-} CPUTLBDesc;
-
-/*
- * Data elements that are shared between all MMU modes.
- */
-typedef struct CPUTLBCommon {
- /* Serialize updates to f.table and d.vtable, and others as noted. */
- QemuSpin lock;
- /*
- * Within dirty, for each bit N, modifications have been made to
- * mmu_idx N since the last time that mmu_idx was flushed.
- * Protected by tlb_c.lock.
- */
- uint16_t dirty;
- /*
- * Statistics. These are not lock protected, but are read and
- * written atomically. This allows the monitor to print a snapshot
- * of the stats without interfering with the cpu.
- */
- size_t full_flush_count;
- size_t part_flush_count;
- size_t elide_flush_count;
-} CPUTLBCommon;
-
-/*
- * The entire softmmu tlb, for all MMU modes.
- * The meaning of each of the MMU modes is defined in the target code.
- * Since this is placed within CPUNegativeOffsetState, the smallest
- * negative offsets are at the end of the struct.
- */
-
-typedef struct CPUTLB {
- CPUTLBCommon c;
- CPUTLBDesc d[NB_MMU_MODES];
- CPUTLBDescFast f[NB_MMU_MODES];
-} CPUTLB;
-
-#else
-
-typedef struct CPUTLB { } CPUTLB;
-
-#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
-
-/*
- * This structure must be placed in ArchCPU immediately
- * before CPUArchState, as a field named "neg".
- */
-typedef struct CPUNegativeOffsetState {
- CPUTLB tlb;
- IcountDecr icount_decr;
-} CPUNegativeOffsetState;
-
#endif
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index da10ba1..6061e33 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -361,19 +361,19 @@
}
/* Find the TLB index corresponding to the mmu_idx + address pair. */
-static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
+static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
vaddr addr)
{
- uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+ uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
return (addr >> TARGET_PAGE_BITS) & size_mask;
}
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
-static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
+static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
vaddr addr)
{
- return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
+ return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
}
#endif /* defined(CONFIG_USER_ONLY) */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index b2f5cd4..ee90ef1 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -28,36 +28,6 @@
#include "qemu/clang-tsa.h"
/**
- * cpu_unwind_state_data:
- * @cpu: the cpu context
- * @host_pc: the host pc within the translation
- * @data: output data
- *
- * Attempt to load the the unwind state for a host pc occurring in
- * translated code. If @host_pc is not in translated code, the
- * function returns false; otherwise @data is loaded.
- * This is the same unwind info as given to restore_state_to_opc.
- */
-bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
-
-/**
- * cpu_restore_state:
- * @cpu: the cpu context
- * @host_pc: the host pc within the translation
- * @return: true if state was restored, false otherwise
- *
- * Attempt to restore the state for a fault occurring in translated
- * code. If @host_pc is not in translated code no state is
- * restored and the function returns false.
- */
-bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
-
-G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
-G_NORETURN void cpu_loop_exit(CPUState *cpu);
-G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
-G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
-
-/**
* cpu_loop_exit_requested:
* @cpu: The CPU state to be tested
*
@@ -71,7 +41,7 @@
*/
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
- return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
+ return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
diff --git a/include/exec/translator.h b/include/exec/translator.h
index 4e17c4f..9d9e980 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -72,6 +72,7 @@
* @num_insns: Number of translated instructions (including current).
* @max_insns: Maximum number of instructions to be translated in this TB.
* @singlestep_enabled: "Hardware" single stepping enabled.
+ * @saved_can_do_io: Known value of cpu->neg.can_do_io, or -1 for unknown.
*
* Architecture-agnostic disassembly context.
*/
@@ -83,6 +84,7 @@
int num_insns;
int max_insns;
bool singlestep_enabled;
+ int8_t saved_can_do_io;
void *host_addr[2];
} DisasContextBase;
diff --git a/include/hw/boards.h b/include/hw/boards.h
index 6c67af1..55a64a1 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -24,6 +24,7 @@
extern MachineState *current_machine;
+void machine_add_audiodev_property(MachineClass *mc);
void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp);
bool machine_usb(MachineState *machine);
int machine_phandle_start(MachineState *machine);
@@ -358,6 +359,14 @@
MemoryRegion *ram;
DeviceMemoryState *device_memory;
+ /*
+ * Included in MachineState for simplicity, but not supported
+ * unless machine_add_audiodev_property is called. Boards
+ * that have embedded audio devices can call it from the
+ * machine init function and forward the property to the device.
+ */
+ char *audiodev;
+
ram_addr_t ram_size;
ram_addr_t maxram_size;
uint64_t ram_slots;
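A sketch of the intended opt-in (the board and device names here are hypothetical): the board class advertises the property, and its init code forwards whatever the user configured to the embedded audio device.

    static void my_board_class_init(ObjectClass *oc, void *data)
    {
        MachineClass *mc = MACHINE_CLASS(oc);

        machine_add_audiodev_property(mc);   /* exposes -machine audiodev=... */
    }

    static void my_board_init(MachineState *machine)
    {
        DeviceState *audio = qdev_new("my-audio-device");

        if (machine->audiodev) {
            qdev_prop_set_string(audio, "audiodev", machine->audiodev);
        }
        /* ... realize and wire up the device ... */
    }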
diff --git a/include/hw/core/accel-cpu.h b/include/hw/core/accel-cpu.h
index 5dbfd79..24dad45 100644
--- a/include/hw/core/accel-cpu.h
+++ b/include/hw/core/accel-cpu.h
@@ -32,7 +32,7 @@
void (*cpu_class_init)(CPUClass *cc);
void (*cpu_instance_init)(CPUState *cpu);
- bool (*cpu_realizefn)(CPUState *cpu, Error **errp);
+ bool (*cpu_target_realize)(CPUState *cpu, Error **errp);
} AccelCPUClass;
#endif /* ACCEL_CPU_H */
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 648b5b3..e02bc59 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -25,6 +25,7 @@
#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
+#include "exec/tlb-common.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"
@@ -193,6 +194,137 @@
};
/*
+ * Fix the number of mmu modes to 16, which is also the maximum
+ * supported by the softmmu tlb api.
+ */
+#define NB_MMU_MODES 16
+
+/* Use a fully associative victim tlb of 8 entries. */
+#define CPU_VTLB_SIZE 8
+
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry. This is
+ * also why we don't want to combine the two structs.
+ */
+typedef struct CPUTLBEntryFull {
+ /*
+ * @xlat_section contains:
+ * - in the lower TARGET_PAGE_BITS, a physical section number
+ * - with the lower TARGET_PAGE_BITS masked off, an offset which
+ * must be added to the virtual address to obtain:
+ * + the ram_addr_t of the target RAM (if the physical section
+ * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+ * + the offset within the target MemoryRegion (otherwise)
+ */
+ hwaddr xlat_section;
+
+ /*
+ * @phys_addr contains the physical address in the address space
+ * given by cpu_asidx_from_attrs(cpu, @attrs).
+ */
+ hwaddr phys_addr;
+
+ /* @attrs contains the memory transaction attributes for the page. */
+ MemTxAttrs attrs;
+
+ /* @prot contains the complete protections for the page. */
+ uint8_t prot;
+
+ /* @lg_page_size contains the log2 of the page size. */
+ uint8_t lg_page_size;
+
+ /*
+ * Additional tlb flags for use by the slow path. If non-zero,
+ * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+ */
+ uint8_t slow_flags[MMU_ACCESS_COUNT];
+
+ /*
+ * Allow target-specific additions to this structure.
+ * This may be used to cache items from the guest cpu
+ * page tables for later use by the implementation.
+ */
+ union {
+ /*
+ * Cache the attrs and shareability fields from the page table entry.
+ *
+ * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+ * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+ * For shareability and guarded, as in the SH and GP fields respectively
+ * of the VMSAv8-64 PTEs.
+ */
+ struct {
+ uint8_t pte_attrs;
+ uint8_t shareability;
+ bool guarded;
+ } arm;
+ } extra;
+} CPUTLBEntryFull;
+
+/*
+ * Data elements that are per MMU mode, minus the bits accessed by
+ * the TCG fast path.
+ */
+typedef struct CPUTLBDesc {
+ /*
+ * Describe a region covering all of the large pages allocated
+ * into the tlb. When any page within this region is flushed,
+ * we must flush the entire tlb. The region is matched if
+ * (addr & large_page_mask) == large_page_addr.
+ */
+ vaddr large_page_addr;
+ vaddr large_page_mask;
+ /* host time (in ns) at the beginning of the time window */
+ int64_t window_begin_ns;
+ /* maximum number of entries observed in the window */
+ size_t window_max_entries;
+ size_t n_used_entries;
+ /* The next index to use in the tlb victim table. */
+ size_t vindex;
+ /* The tlb victim table, in two parts. */
+ CPUTLBEntry vtable[CPU_VTLB_SIZE];
+ CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
+ CPUTLBEntryFull *fulltlb;
+} CPUTLBDesc;
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+ /* Serialize updates to f.table and d.vtable, and others as noted. */
+ QemuSpin lock;
+ /*
+ * Within dirty, for each bit N, modifications have been made to
+ * mmu_idx N since the last time that mmu_idx was flushed.
+ * Protected by tlb_c.lock.
+ */
+ uint16_t dirty;
+ /*
+ * Statistics. These are not lock protected, but are read and
+ * written atomically. This allows the monitor to print a snapshot
+ * of the stats without interfering with the cpu.
+ */
+ size_t full_flush_count;
+ size_t part_flush_count;
+ size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The entire softmmu tlb, for all MMU modes.
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Since this is placed within CPUNegativeOffsetState, the smallest
+ * negative offsets are at the end of the struct.
+ */
+typedef struct CPUTLB {
+#ifdef CONFIG_TCG
+ CPUTLBCommon c;
+ CPUTLBDesc d[NB_MMU_MODES];
+ CPUTLBDescFast f[NB_MMU_MODES];
+#endif
+} CPUTLB;
+
+/*
* Low 16 bits: number of cycles left, used only in icount mode.
* High 16 bits: Set to -1 to force TCG to stop executing linked TBs
* for this CPU and return to its top level loop (even in non-icount mode).
@@ -212,6 +344,16 @@
} u16;
} IcountDecr;
+/*
+ * Elements of CPUState most efficiently accessed from CPUArchState,
+ * via small negative offsets.
+ */
+typedef struct CPUNegativeOffsetState {
+ CPUTLB tlb;
+ IcountDecr icount_decr;
+ bool can_do_io;
+} CPUNegativeOffsetState;
+
typedef struct CPUBreakpoint {
vaddr pc;
int flags; /* BP_* */
@@ -279,16 +421,12 @@
* @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
* @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
- * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
- * requires that IO only be performed on the last instruction of a TB
- * so that interrupts take effect immediately.
+ * @neg.can_do_io: True if memory-mapped IO is allowed.
* @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
* AddressSpaces this CPU has)
* @num_ases: number of CPUAddressSpaces in @cpu_ases
* @as: Pointer to the first AddressSpace, for the convenience of targets which
* only have a single AddressSpace
- * @env_ptr: Pointer to subclass-specific CPUArchState field.
- * @icount_decr_ptr: Pointer to IcountDecr field within subclass.
* @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets.
@@ -312,6 +450,9 @@
* dirty ring structure.
*
* State of one CPU core or thread.
+ *
+ * Align, in order to match possible alignment required by CPUArchState,
+ * and eliminate a hole between CPUState and CPUArchState within ArchCPU.
*/
struct CPUState {
/*< private >*/
@@ -359,9 +500,6 @@
AddressSpace *as;
MemoryRegion *memory;
- CPUArchState *env_ptr;
- IcountDecr *icount_decr_ptr;
-
CPUJumpCache *tb_jmp_cache;
struct GDBRegisterState *gdb_regs;
@@ -405,7 +543,6 @@
int cluster_index;
uint32_t tcg_cflags;
uint32_t halted;
- uint32_t can_do_io;
int32_t exception_index;
AccelCPUState *accel;
@@ -430,8 +567,24 @@
/* track IOMMUs whose translations we've cached in the TCG TLB */
GArray *iommu_notifiers;
+
+ /*
+ * MUST BE LAST in order to minimize the displacement to CPUArchState.
+ */
+ char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
+ CPUNegativeOffsetState neg;
};
+/* Validate placement of CPUNegativeOffsetState. */
+QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
+ sizeof(CPUState) - sizeof(CPUNegativeOffsetState));
+
+static inline CPUArchState *cpu_env(CPUState *cpu)
+{
+ /* We validate that CPUArchState follows CPUState in cpu-all.h. */
+ return (CPUArchState *)(cpu + 1);
+}
+
typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
extern CPUTailQ cpus;
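The neg_align trick above is easier to see with concrete numbers (illustrative only; the real size depends on configuration):

    /*
     * Suppose sizeof(CPUNegativeOffsetState) == 40.  In unsigned (size_t)
     * arithmetic, -40 % 16 == 8, so neg_align occupies 8 bytes and
     * neg_align + neg together span 48 bytes, a multiple of 16.  Because
     * neg_align is 16-byte aligned, "neg" therefore ends exactly at
     * sizeof(CPUState) with no tail padding, which is what the
     * QEMU_BUILD_BUG_ON checks and what lets cpu_env() return
     * (CPUArchState *)(cpu + 1), with the TLB and icount_decr reachable
     * at small negative offsets from the architecture state.
     */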
diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
index 1d98bbf..bafcbe6 100644
--- a/include/hw/pci-host/q35.h
+++ b/include/hw/pci-host/q35.h
@@ -54,7 +54,6 @@
uint64_t below_4g_mem_size;
uint64_t above_4g_mem_size;
uint64_t pci_hole64_size;
- uint32_t short_root_bus;
uint16_t ext_tseg_mbytes;
};
diff --git a/include/hw/ppc/fdt.h b/include/hw/ppc/fdt.h
index a8cd850..b56ac2a 100644
--- a/include/hw/ppc/fdt.h
+++ b/include/hw/ppc/fdt.h
@@ -15,10 +15,10 @@
#define _FDT(exp) \
do { \
- int ret = (exp); \
- if (ret < 0) { \
- error_report("error creating device tree: %s: %s", \
- #exp, fdt_strerror(ret)); \
+ int _ret = (exp); \
+ if (_ret < 0) { \
+ error_report("error creating device tree: %s: %s", \
+ #exp, fdt_strerror(_ret)); \
exit(1); \
} \
} while (0)
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index e4db910..1a31fb7 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -1196,9 +1196,11 @@
void *opaque, int version_id);
int vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, JSONWriter *vmdesc);
+int vmstate_save_state_with_err(QEMUFile *f, const VMStateDescription *vmsd,
+ void *opaque, JSONWriter *vmdesc, Error **errp);
int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, JSONWriter *vmdesc,
- int version_id);
+ int version_id, Error **errp);
bool vmstate_save_needed(const VMStateDescription *vmsd, void *opaque);
diff --git a/include/qapi/qmp/qobject.h b/include/qapi/qmp/qobject.h
index 9003b71..89b97d8 100644
--- a/include/qapi/qmp/qobject.h
+++ b/include/qapi/qmp/qobject.h
@@ -45,10 +45,16 @@
struct QObjectBase_ base;
};
-#define QOBJECT(obj) ({ \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define QOBJECT_INTERNAL(obj, _obj) ({ \
typeof(obj) _obj = (obj); \
- _obj ? container_of(&(_obj)->base, QObject, base) : NULL; \
+ _obj ? container_of(&_obj->base, QObject, base) : NULL; \
})
+#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTFIER(_obj))
/* Required for qobject_to() */
#define QTYPE_CAST_TO_QNull QTYPE_QNULL
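The shadowing hazard that motivates the two-level macro is easiest to see with a nested expansion; the wrapper below is hypothetical and only illustrates the failure mode of the old definition.

    /*
     * Old QOBJECT, nested inside another macro that also names its
     * temporary "_obj":
     *
     *   #define WRAP(x) ({ typeof(x) _obj = (x); QOBJECT(_obj); })
     *
     * expands to "typeof(_obj) _obj = (_obj); ...", where the initializer
     * already refers to the freshly declared inner _obj (like "int x = x;")
     * rather than the outer one.  MAKE_IDENTFIER (added in qemu/compiler.h
     * below) pastes __COUNTER__ onto the name, so each expansion declares a
     * distinct temporary and nesting works as expected.
     */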
diff --git a/include/qemu/accel.h b/include/qemu/accel.h
index e84db2e..972a849 100644
--- a/include/qemu/accel.h
+++ b/include/qemu/accel.h
@@ -43,6 +43,8 @@
bool (*has_memory)(MachineState *ms, AddressSpace *as,
hwaddr start_addr, hwaddr size);
#endif
+ bool (*cpu_common_realize)(CPUState *cpu, Error **errp);
+ void (*cpu_common_unrealize)(CPUState *cpu);
/* gdbstub related hooks */
int (*gdbstub_supported_sstep_flags)(void);
@@ -90,11 +92,17 @@
void accel_cpu_instance_init(CPUState *cpu);
/**
- * accel_cpu_realizefn:
+ * accel_cpu_common_realize:
* @cpu: The CPU that needs to call accel-specific cpu realization.
* @errp: currently unused.
*/
-bool accel_cpu_realizefn(CPUState *cpu, Error **errp);
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp);
+
+/**
+ * accel_cpu_common_unrealize:
+ * @cpu: The CPU that needs to call accel-specific cpu unrealization.
+ */
+void accel_cpu_common_unrealize(CPUState *cpu);
/**
* accel_supported_gdbstub_sstep_flags:
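A sketch of how an accelerator is expected to use the new hooks (the accelerator and callback names are made up):

    static bool foo_cpu_common_realize(CPUState *cpu, Error **errp);
    static void foo_cpu_common_unrealize(CPUState *cpu);

    static void foo_accel_class_init(ObjectClass *oc, void *data)
    {
        AccelClass *ac = ACCEL_CLASS(oc);

        /* Per-CPU setup/teardown, run via accel_cpu_common_realize/unrealize */
        ac->cpu_common_realize = foo_cpu_common_realize;
        ac->cpu_common_unrealize = foo_cpu_common_unrealize;
    }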
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index d95612f..f1d3d17 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -157,13 +157,20 @@
smp_read_barrier_depends();
#endif
-#define qatomic_rcu_read(ptr) \
- ({ \
+/*
+ * Preprocessor sorcery ahead: use a different identifier for the
+ * local variable in each expansion, so we can nest macro calls
+ * without shadowing variables.
+ */
+#define qatomic_rcu_read_internal(ptr, _val) \
+ ({ \
qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
- typeof_strip_qual(*ptr) _val; \
- qatomic_rcu_read__nocheck(ptr, &_val); \
- _val; \
+ typeof_strip_qual(*ptr) _val; \
+ qatomic_rcu_read__nocheck(ptr, &_val); \
+ _val; \
})
+#define qatomic_rcu_read(ptr) \
+ qatomic_rcu_read_internal((ptr), MAKE_IDENTFIER(_val))
#define qatomic_rcu_set(ptr, i) do { \
qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index a309f90..1109482 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -37,6 +37,9 @@
#define tostring(s) #s
#endif
+/* Expands into an identifier stemN, where N is another number each time */
+#define MAKE_IDENTFIER(stem) glue(stem, __COUNTER__)
+
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
@@ -197,4 +200,16 @@
#define BUILTIN_SUBCLL_BROKEN
#endif
+#if __has_attribute(annotate)
+#define QEMU_ANNOTATE(x) __attribute__((annotate(x)))
+#else
+#define QEMU_ANNOTATE(x)
+#endif
+
+#if __has_attribute(used)
+# define QEMU_USED __attribute__((used))
+#else
+# define QEMU_USED
+#endif
+
#endif /* COMPILER_H */
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 2897720..475a1c6 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -27,6 +27,10 @@
#ifndef QEMU_OSDEP_H
#define QEMU_OSDEP_H
+#if !defined _FORTIFY_SOURCE && defined __OPTIMIZE__ && __OPTIMIZE__ && defined __linux__
+# define _FORTIFY_SOURCE 2
+#endif
+
#include "config-host.h"
#ifdef NEED_CPU_H
#include CONFIG_TARGET
@@ -185,7 +189,7 @@
* }
*/
#ifdef __clang__
-#define coroutine_fn __attribute__((__annotate__("coroutine_fn")))
+#define coroutine_fn QEMU_ANNOTATE("coroutine_fn")
#else
#define coroutine_fn
#endif
@@ -195,7 +199,7 @@
* but can handle running in non-coroutine context too.
*/
#ifdef __clang__
-#define coroutine_mixed_fn __attribute__((__annotate__("coroutine_mixed_fn")))
+#define coroutine_mixed_fn QEMU_ANNOTATE("coroutine_mixed_fn")
#else
#define coroutine_mixed_fn
#endif
@@ -224,7 +228,7 @@
* }
*/
#ifdef __clang__
-#define no_coroutine_fn __attribute__((__annotate__("no_coroutine_fn")))
+#define no_coroutine_fn QEMU_ANNOTATE("no_coroutine_fn")
#else
#define no_coroutine_fn
#endif
@@ -383,19 +387,28 @@
* determined by the pre-processor instead of the compiler, you'll
* have to open-code it. Sadly, Coverity is severely confused by the
* constant variants, so we have to dumb things down there.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
*/
-#undef MIN
-#define MIN(a, b) \
+#define MIN_INTERNAL(a, b, _a, _b) \
({ \
typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
_a < _b ? _a : _b; \
})
-#undef MAX
-#define MAX(a, b) \
+#undef MIN
+#define MIN(a, b) \
+ MIN_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
+
+#define MAX_INTERNAL(a, b, _a, _b) \
({ \
typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
_a > _b ? _a : _b; \
})
+#undef MAX
+#define MAX(a, b) \
+ MAX_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
#ifdef __COVERITY__
# define MIN_CONST(a, b) ((a) < (b) ? (a) : (b))
@@ -416,14 +429,18 @@
/*
* Minimum function that returns zero only if both values are zero.
* Intended for use with unsigned values only.
+ *
+ * Preprocessor sorcery ahead: use different identifiers for the local
+ * variables in each expansion, so we can nest macro calls without
+ * shadowing variables.
*/
-#ifndef MIN_NON_ZERO
-#define MIN_NON_ZERO(a, b) \
+#define MIN_NON_ZERO_INTERNAL(a, b, _a, _b) \
({ \
typeof(1 ? (a) : (b)) _a = (a), _b = (b); \
_a == 0 ? _b : (_b == 0 || _b > _a) ? _a : _b; \
})
-#endif
+#define MIN_NON_ZERO(a, b) \
+ MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b))
/*
* Round number down to multiple. Safe when m is not a power of 2 (see
diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h
index ca5339b..8eab395 100644
--- a/include/sysemu/device_tree.h
+++ b/include/sysemu/device_tree.h
@@ -126,10 +126,8 @@
#define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \
do { \
uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(qdt_tmp); i++) { \
- qdt_tmp[i] = cpu_to_be32(qdt_tmp[i]); \
+ for (unsigned i_ = 0; i_ < ARRAY_SIZE(qdt_tmp); i_++) { \
+ qdt_tmp[i_] = cpu_to_be32(qdt_tmp[i_]); \
} \
qemu_fdt_setprop(fdt, node_path, property, qdt_tmp, \
sizeof(qdt_tmp)); \
diff --git a/include/tcg/startup.h b/include/tcg/startup.h
new file mode 100644
index 0000000..f713057
--- /dev/null
+++ b/include/tcg/startup.h
@@ -0,0 +1,58 @@
+/*
+ * Tiny Code Generator for QEMU: definitions used by runtime startup
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_STARTUP_H
+#define TCG_STARTUP_H
+
+/**
+ * tcg_init: Initialize the TCG runtime
+ * @tb_size: translation buffer size
+ * @splitwx: use separate rw and rx mappings
+ * @max_cpus: number of vcpus in system mode
+ *
+ * Allocate and initialize TCG resources, especially the JIT buffer.
+ * In user-only mode, @max_cpus is unused.
+ */
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
+
+/**
+ * tcg_register_thread: Register this thread with the TCG runtime
+ *
+ * All TCG threads except the parent (i.e. the one that called the TCG
+ * accelerator's init_machine() method) must register with this
+ * function before initiating translation.
+ */
+void tcg_register_thread(void);
+
+/**
+ * tcg_prologue_init(): Generate the code for the TCG prologue
+ *
+ * In softmmu this is done automatically as part of the TCG
+ * accelerator's init_machine() method, but for user-mode, the
+ * user-mode code must call this function after it has loaded
+ * the guest binary and the value of guest_base is known.
+ */
+void tcg_prologue_init(void);
+
+#endif
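A usage sketch for the thread-registration half of this header (the thread function is illustrative, loosely modelled on the vCPU threads):

    static void *vcpu_thread_fn(void *arg)
    {
        CPUState *cpu = arg;

        rcu_register_thread();
        tcg_register_thread();      /* required before any translation */

        /* ... cpu_exec(cpu) loop, interrupt handling, etc. ... */

        rcu_unregister_thread();
        return NULL;
    }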
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index c9c6d77..680ff00 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -489,7 +489,6 @@
TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
#ifdef CONFIG_SOFTMMU
- int tlb_fast_offset;
int page_mask;
uint8_t page_bits;
uint8_t tlb_dyn_max_bits;
@@ -577,7 +576,7 @@
extern __thread TCGContext *tcg_ctx;
extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
-extern TCGv_env cpu_env;
+extern TCGv_env tcg_env;
bool in_code_gen_buffer(const void *p);
@@ -783,9 +782,6 @@
}
}
-void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
-void tcg_register_thread(void);
-void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start);
diff --git a/include/ui/console.h b/include/ui/console.h
index 28882f1..acb61a7 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -422,7 +422,6 @@
bool qemu_console_is_graphic(QemuConsole *con);
bool qemu_console_is_fixedsize(QemuConsole *con);
bool qemu_console_is_gl_blocked(QemuConsole *con);
-bool qemu_console_is_multihead(DeviceState *dev);
char *qemu_console_get_label(QemuConsole *con);
int qemu_console_get_index(QemuConsole *con);
uint32_t qemu_console_get_head(QemuConsole *con);
diff --git a/include/ui/input.h b/include/ui/input.h
index c29a730..24d8e45 100644
--- a/include/ui/input.h
+++ b/include/ui/input.h
@@ -57,7 +57,7 @@
void qemu_input_update_buttons(QemuConsole *src, uint32_t *button_map,
uint32_t button_old, uint32_t button_new);
-bool qemu_input_is_absolute(void);
+bool qemu_input_is_absolute(QemuConsole *con);
int qemu_input_scale_axis(int value,
int min_in, int max_in,
int min_out, int max_out);
diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h
index 51f8709..e587c48 100644
--- a/include/ui/qemu-pixman.h
+++ b/include/ui/qemu-pixman.h
@@ -32,6 +32,8 @@
# define PIXMAN_LE_r8g8b8 PIXMAN_b8g8r8
# define PIXMAN_LE_a8r8g8b8 PIXMAN_b8g8r8a8
# define PIXMAN_LE_x8r8g8b8 PIXMAN_b8g8r8x8
+# define PIXMAN_LE_a8b8g8r8 PIXMAN_r8g8b8a8
+# define PIXMAN_LE_x8b8g8r8 PIXMAN_r8g8b8x8
#else
# define PIXMAN_BE_r8g8b8 PIXMAN_b8g8r8
# define PIXMAN_BE_x8r8g8b8 PIXMAN_b8g8r8x8
@@ -45,6 +47,8 @@
# define PIXMAN_LE_r8g8b8 PIXMAN_r8g8b8
# define PIXMAN_LE_a8r8g8b8 PIXMAN_a8r8g8b8
# define PIXMAN_LE_x8r8g8b8 PIXMAN_x8r8g8b8
+# define PIXMAN_LE_a8b8g8r8 PIXMAN_a8b8g8r8
+# define PIXMAN_LE_x8b8g8r8 PIXMAN_x8b8g8r8
#endif
#define QEMU_PIXMAN_COLOR(r, g, b) \
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index db75cd4..f21e2e0 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -593,7 +593,7 @@
static const char *get_elf_platform(void)
{
- CPUARMState *env = thread_cpu->env_ptr;
+ CPUARMState *env = cpu_env(thread_cpu);
#if TARGET_BIG_ENDIAN
# define END "b"
@@ -4430,7 +4430,7 @@
if (cpu == thread_cpu) {
continue;
}
- fill_thread_info(info, cpu->env_ptr);
+ fill_thread_info(info, cpu_env(cpu));
}
}
diff --git a/linux-user/exit.c b/linux-user/exit.c
index 3017d28..5026631 100644
--- a/linux-user/exit.c
+++ b/linux-user/exit.c
@@ -22,9 +22,6 @@
#include "qemu.h"
#include "user-internals.h"
#include "qemu/plugin.h"
-#ifdef CONFIG_GPROF
-#include <sys/gmon.h>
-#endif
#ifdef CONFIG_GCOV
extern void __gcov_dump(void);
@@ -32,9 +29,6 @@
void preexit_cleanup(CPUArchState *env, int code)
{
-#ifdef CONFIG_GPROF
- _mcleanup();
-#endif
#ifdef CONFIG_GCOV
__gcov_dump();
#endif
diff --git a/linux-user/hppa/signal.c b/linux-user/hppa/signal.c
index bda6e54..ec5f541 100644
--- a/linux-user/hppa/signal.c
+++ b/linux-user/hppa/signal.c
@@ -25,7 +25,7 @@
struct target_sigcontext {
abi_ulong sc_flags;
abi_ulong sc_gr[32];
- uint64_t sc_fr[32];
+ abi_ullong sc_fr[32];
abi_ulong sc_iasq[2];
abi_ulong sc_iaoq[2];
abi_ulong sc_sar;
diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c
index ef2dcb3..42ecb4b 100644
--- a/linux-user/i386/cpu_loop.c
+++ b/linux-user/i386/cpu_loop.c
@@ -323,7 +323,7 @@
static void target_cpu_free(void *obj)
{
- CPUArchState *env = ((CPUState *)obj)->env_ptr;
+ CPUArchState *env = cpu_env(obj);
target_munmap(env->gdt.base, sizeof(uint64_t) * TARGET_GDT_ENTRIES);
g_free(obj);
}
diff --git a/linux-user/main.c b/linux-user/main.c
index 96be354..0c23584 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -41,7 +41,7 @@
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "gdbstub/user.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "qemu/guest-random.h"
@@ -229,7 +229,7 @@
{
CPUState *cpu = env_cpu(env);
CPUState *new_cpu = cpu_create(cpu_type);
- CPUArchState *new_env = new_cpu->env_ptr;
+ CPUArchState *new_env = cpu_env(new_cpu);
CPUBreakpoint *bp;
/* Reset non arch specific state */
@@ -794,7 +794,7 @@
ac->init_machine(NULL);
}
cpu = cpu_create(cpu_type);
- env = cpu->env_ptr;
+ env = cpu_env(cpu);
cpu_reset(cpu);
thread_cpu = cpu;
@@ -994,7 +994,7 @@
/* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
generating the prologue until now so that the prologue can take
the real value of GUEST_BASE into account. */
- tcg_prologue_init(tcg_ctx);
+ tcg_prologue_init();
target_cpu_copy_regs(env, regs);
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 748a98f..a67ab47 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -588,11 +588,6 @@
act.sa_flags = SA_SIGINFO;
act.sa_sigaction = host_signal_handler;
for(i = 1; i <= TARGET_NSIG; i++) {
-#ifdef CONFIG_GPROF
- if (i == TARGET_SIGPROF) {
- continue;
- }
-#endif
host_sig = target_to_host_signal(i);
sigaction(host_sig, NULL, &oact);
if (oact.sa_sigaction == (void *)SIG_IGN) {
@@ -618,7 +613,7 @@
void force_sig(int sig)
{
CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
target_siginfo_t info = {};
info.si_signo = sig;
@@ -636,7 +631,7 @@
void force_sig_fault(int sig, int code, abi_ulong addr)
{
CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
+ CPUArchState *env = cpu_env(cpu);
target_siginfo_t info = {};
info.si_signo = sig;
@@ -695,10 +690,9 @@
/* abort execution with signal */
static G_NORETURN
-void dump_core_and_abort(CPUArchState *cpu_env, int target_sig)
+void dump_core_and_abort(CPUArchState *env, int target_sig)
{
- CPUState *cpu = thread_cpu;
- CPUArchState *env = cpu->env_ptr;
+ CPUState *cpu = env_cpu(env);
TaskState *ts = (TaskState *)cpu->opaque;
int host_sig, core_dumped = 0;
struct sigaction act;
@@ -724,7 +718,7 @@
target_sig, strsignal(host_sig), "core dumped" );
}
- preexit_cleanup(cpu_env, 128 + target_sig);
+ preexit_cleanup(env, 128 + target_sig);
/* The proper exit code for dying from an uncaught signal is
* -<signal>. The kernel doesn't allow exit() or _exit() to pass
@@ -783,8 +777,8 @@
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
- CPUArchState *env = thread_cpu->env_ptr;
- CPUState *cpu = env_cpu(env);
+ CPUState *cpu = thread_cpu;
+ CPUArchState *env = cpu_env(cpu);
TaskState *ts = cpu->opaque;
target_siginfo_t tinfo;
host_sigcontext *uc = puc;
diff --git a/linux-user/strace.c b/linux-user/strace.c
index e0ab804..cf26e55 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -367,7 +367,6 @@
switch (sa_family) {
case AF_UNIX: {
struct target_sockaddr_un *un = (struct target_sockaddr_un *)sa;
- int i;
qemu_log("{sun_family=AF_UNIX,sun_path=\"");
for (i = 0; i < addrlen -
offsetof(struct target_sockaddr_un, sun_path) &&
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 3521a2d..c6ffadd 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -23,6 +23,7 @@
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
+#include "tcg/startup.h"
#include "target_mman.h"
#include <elf.h>
#include <endian.h>
@@ -141,7 +142,6 @@
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
-#include "tcg/tcg.h"
#include "cpu_loop-common.h"
#ifndef CLONE_IO
diff --git a/meson.build b/meson.build
index 5139db2..3bb64b5 100644
--- a/meson.build
+++ b/meson.build
@@ -254,11 +254,6 @@
qemu_common_flags = ['-march=i486'] + qemu_common_flags
endif
-if get_option('gprof')
- qemu_common_flags += ['-p']
- qemu_ldflags += ['-p']
-endif
-
if get_option('prefer_static')
qemu_ldflags += get_option('b_pie') ? '-static-pie' : '-static'
endif
@@ -479,16 +474,6 @@
qemu_cxxflags = ['-D__STDC_LIMIT_MACROS', '-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS'] + qemu_cflags
endif
-# clang does not support glibc + FORTIFY_SOURCE (is it still true?)
-if get_option('optimization') != '0' and targetos == 'linux'
- if cc.get_id() == 'gcc'
- qemu_cflags += ['-U_FORTIFY_SOURCE', '-D_FORTIFY_SOURCE=2']
- endif
- if 'cpp' in all_languages and cxx.get_id() == 'gcc'
- qemu_cxxflags += ['-U_FORTIFY_SOURCE', '-D_FORTIFY_SOURCE=2']
- endif
-endif
-
add_project_arguments(qemu_cflags, native: false, language: 'c')
add_project_arguments(cc.get_supported_arguments(warn_flags), native: false, language: 'c')
if 'cpp' in all_languages
@@ -2214,7 +2199,6 @@
config_host_data.set('CONFIG_DEBUG_MUTEX', get_option('debug_mutex'))
config_host_data.set('CONFIG_DEBUG_STACK_USAGE', get_option('debug_stack_usage'))
config_host_data.set('CONFIG_DEBUG_TCG', get_option('debug_tcg'))
-config_host_data.set('CONFIG_GPROF', get_option('gprof'))
config_host_data.set('CONFIG_LIVE_BLOCK_MIGRATION', get_option('live_block_migration').allowed())
config_host_data.set('CONFIG_QOM_CAST_DEBUG', get_option('qom_cast_debug'))
config_host_data.set('CONFIG_REPLICATION', get_option('replication').allowed())
@@ -3070,6 +3054,9 @@
endif
if fdt_opt in ['enabled', 'auto', 'system']
+ if get_option('wrap_mode') == 'nodownload'
+ fdt_opt = 'system'
+ endif
fdt = cc.find_library('fdt', required: fdt_opt == 'system')
if fdt.found() and cc.links('''
#include <libfdt.h>
@@ -3177,7 +3164,6 @@
input: files(d[0]),
output: d[1],
capture: true,
- build_by_default: true, # to be removed when added to a target
command: [hxtool, '-h', '@INPUT0@'])
endforeach
genh += hxdep
@@ -3363,12 +3349,15 @@
qom_ss = qom_ss.apply(config_targetos, strict: false)
libqom = static_library('qom', qom_ss.sources() + genh,
dependencies: [qom_ss.dependencies()],
- name_suffix: 'fa')
+ name_suffix: 'fa',
+ build_by_default: false)
qom = declare_dependency(link_whole: libqom)
event_loop_base = files('event-loop-base.c')
-event_loop_base = static_library('event-loop-base', sources: event_loop_base + genh,
- build_by_default: true)
+event_loop_base = static_library('event-loop-base',
+ sources: event_loop_base + genh,
+ name_suffix: 'fa',
+ build_by_default: false)
event_loop_base = declare_dependency(link_whole: event_loop_base,
dependencies: [qom])
@@ -3377,6 +3366,7 @@
util_ss.add_all(trace_ss)
util_ss = util_ss.apply(config_all, strict: false)
libqemuutil = static_library('qemuutil',
+ build_by_default: false,
sources: util_ss.sources() + stub_ss.sources() + genh,
dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc, pixman])
qemuutil = declare_dependency(link_with: libqemuutil,
@@ -3425,8 +3415,8 @@
system_ss.add(when: 'CONFIG_WIN32', if_true: [files('os-win32.c')])
endif
-common_ss.add(files('cpus-common.c'))
-specific_ss.add(files('cpu.c'))
+common_ss.add(files('cpu-common.c'))
+specific_ss.add(files('cpu-target.c'))
subdir('softmmu')
@@ -3448,7 +3438,7 @@
pagevary = declare_dependency(link_with: pagevary)
endif
common_ss.add(pagevary)
-specific_ss.add(files('page-vary.c'))
+specific_ss.add(files('page-vary-target.c'))
subdir('backends')
subdir('disas')
@@ -4122,12 +4112,6 @@
summary_info += {'avx2 optimization': config_host_data.get('CONFIG_AVX2_OPT')}
summary_info += {'avx512bw optimization': config_host_data.get('CONFIG_AVX512BW_OPT')}
summary_info += {'avx512f optimization': config_host_data.get('CONFIG_AVX512F_OPT')}
-if get_option('gprof')
- gprof_info = 'YES (deprecated)'
-else
- gprof_info = get_option('gprof')
-endif
-summary_info += {'gprof': gprof_info}
summary_info += {'gcov': get_option('b_coverage')}
summary_info += {'thread sanitizer': get_option('tsan')}
summary_info += {'CFI support': get_option('cfi')}
diff --git a/meson_options.txt b/meson_options.txt
index 57e265c..6a17b90 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -348,9 +348,6 @@
description: 'measure coroutine stack usage')
option('qom_cast_debug', type: 'boolean', value: true,
description: 'cast debugging support')
-option('gprof', type: 'boolean', value: false,
- description: 'QEMU profiling with gprof',
- deprecated: true)
option('slirp_smbd', type : 'feature', value : 'auto',
description: 'use smbd (at path --smbd=*) in slirp networking')
diff --git a/migration/block.c b/migration/block.c
index 86c2256..5f93087 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -440,8 +440,8 @@
/* Can only insert new BDSes now because doing so while iterating block
* devices may end up in a deadlock (iterating the new BDSes, too). */
for (i = 0; i < num_bs; i++) {
- BlkMigDevState *bmds = bmds_bs[i].bmds;
- BlockDriverState *bs = bmds_bs[i].bs;
+ bmds = bmds_bs[i].bmds;
+ bs = bmds_bs[i].bs;
if (bmds) {
ret = blk_insert_bs(bmds->blk, bs, &local_err);
@@ -755,7 +755,7 @@
static int block_save_iterate(QEMUFile *f, void *opaque)
{
int ret;
- uint64_t last_bytes = qemu_file_transferred(f);
+ uint64_t last_bytes = qemu_file_transferred_noflush(f);
trace_migration_block_save("iterate", block_mig_state.submitted,
block_mig_state.transferred);
@@ -807,7 +807,7 @@
}
qemu_put_be64(f, BLK_MIG_FLAG_EOS);
- uint64_t delta_bytes = qemu_file_transferred(f) - last_bytes;
+ uint64_t delta_bytes = qemu_file_transferred_noflush(f) - last_bytes;
return (delta_bytes > 0);
}
diff --git a/migration/file.c b/migration/file.c
new file mode 100644
index 0000000..cf5b1bf
--- /dev/null
+++ b/migration/file.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2021-2023 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qapi/error.h"
+#include "channel.h"
+#include "file.h"
+#include "migration.h"
+#include "io/channel-file.h"
+#include "io/channel-util.h"
+#include "trace.h"
+
+#define OFFSET_OPTION ",offset="
+
+/* Remove the offset option from @filespec and return it in @offsetp. */
+
+static int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp)
+{
+ char *option = strstr(filespec, OFFSET_OPTION);
+ int ret;
+
+ if (option) {
+ *option = 0;
+ option += sizeof(OFFSET_OPTION) - 1;
+ ret = qemu_strtosz(option, NULL, offsetp);
+ if (ret) {
+ error_setg_errno(errp, -ret, "file URI has bad offset %s", option);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+void file_start_outgoing_migration(MigrationState *s, const char *filespec,
+ Error **errp)
+{
+ g_autofree char *filename = g_strdup(filespec);
+ g_autoptr(QIOChannelFile) fioc = NULL;
+ uint64_t offset = 0;
+ QIOChannel *ioc;
+
+ trace_migration_file_outgoing(filename);
+
+ if (file_parse_offset(filename, &offset, errp)) {
+ return;
+ }
+
+ fioc = qio_channel_file_new_path(filename, O_CREAT | O_WRONLY | O_TRUNC,
+ 0600, errp);
+ if (!fioc) {
+ return;
+ }
+
+ ioc = QIO_CHANNEL(fioc);
+ if (offset && qio_channel_io_seek(ioc, offset, SEEK_SET, errp) < 0) {
+ return;
+ }
+ qio_channel_set_name(ioc, "migration-file-outgoing");
+ migration_channel_connect(s, ioc, NULL, NULL);
+}
+
+static gboolean file_accept_incoming_migration(QIOChannel *ioc,
+ GIOCondition condition,
+ gpointer opaque)
+{
+ migration_channel_process_incoming(ioc);
+ object_unref(OBJECT(ioc));
+ return G_SOURCE_REMOVE;
+}
+
+void file_start_incoming_migration(const char *filespec, Error **errp)
+{
+ g_autofree char *filename = g_strdup(filespec);
+ QIOChannelFile *fioc = NULL;
+ uint64_t offset = 0;
+ QIOChannel *ioc;
+
+ trace_migration_file_incoming(filename);
+
+ if (file_parse_offset(filename, &offset, errp)) {
+ return;
+ }
+
+ fioc = qio_channel_file_new_path(filename, O_RDONLY, 0, errp);
+ if (!fioc) {
+ return;
+ }
+
+ ioc = QIO_CHANNEL(fioc);
+ if (offset && qio_channel_io_seek(ioc, offset, SEEK_SET, errp) < 0) {
+ return;
+ }
+ qio_channel_set_name(QIO_CHANNEL(ioc), "migration-file-incoming");
+ qio_channel_add_watch_full(ioc, G_IO_IN,
+ file_accept_incoming_migration,
+ NULL, NULL,
+ g_main_context_get_thread_default());
+}
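A usage sketch for the new transport (paths and sizes are illustrative): the optional ",offset=" suffix is parsed with qemu_strtosz(), stripped from the filename, and applied with a seek before any data is written or read.

    /*
     * Source:       (qemu) migrate file:/var/tmp/vm.state,offset=1M
     * Destination:  qemu-system-x86_64 ... -incoming file:/var/tmp/vm.state,offset=1M
     */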
diff --git a/migration/file.h b/migration/file.h
new file mode 100644
index 0000000..90fa484
--- /dev/null
+++ b/migration/file.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021-2023 Oracle and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_MIGRATION_FILE_H
+#define QEMU_MIGRATION_FILE_H
+void file_start_incoming_migration(const char *filename, Error **errp);
+
+void file_start_outgoing_migration(MigrationState *s, const char *filename,
+ Error **errp);
+#endif
diff --git a/migration/meson.build b/migration/meson.build
index 1ae2852..92b1cc4 100644
--- a/migration/meson.build
+++ b/migration/meson.build
@@ -16,6 +16,7 @@
'dirtyrate.c',
'exec.c',
'fd.c',
+ 'file.c',
'global_state.c',
'migration-hmp-cmds.c',
'migration.c',
diff --git a/migration/migration-stats.c b/migration/migration-stats.c
index 095d6d7..84e11e6 100644
--- a/migration/migration-stats.c
+++ b/migration/migration-stats.c
@@ -61,8 +61,9 @@
uint64_t migration_transferred_bytes(QEMUFile *f)
{
uint64_t multifd = stat64_get(&mig_stats.multifd_bytes);
+ uint64_t rdma = stat64_get(&mig_stats.rdma_bytes);
uint64_t qemu_file = qemu_file_transferred(f);
- trace_migration_transferred_bytes(qemu_file, multifd);
- return qemu_file + multifd;
+ trace_migration_transferred_bytes(qemu_file, multifd, rdma);
+ return qemu_file + multifd + rdma;
}
diff --git a/migration/migration-stats.h b/migration/migration-stats.h
index ac2260e..2358caa 100644
--- a/migration/migration-stats.h
+++ b/migration/migration-stats.h
@@ -90,6 +90,10 @@
*/
Stat64 rate_limit_max;
/*
+ * Number of bytes sent through RDMA.
+ */
+ Stat64 rdma_bytes;
+ /*
* Total number of bytes transferred.
*/
Stat64 transferred;
diff --git a/migration/migration.c b/migration/migration.c
index d61e572..585d3c8 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -20,6 +20,7 @@
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
+#include "file.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
@@ -98,6 +99,7 @@
int *current_active_state,
int new_state);
static void migrate_fd_cancel(MigrationState *s);
+static int await_return_path_close_on_source(MigrationState *s);
static bool migration_needs_multiple_sockets(void)
{
@@ -153,6 +155,7 @@
qemu_sem_init(¤t_incoming->postcopy_qemufile_dst_done, 0);
qemu_mutex_init(¤t_incoming->page_request_mutex);
+ qemu_cond_init(¤t_incoming->page_request_cond);
current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
migration_object_check(current_migration, &error_fatal);
@@ -367,7 +370,7 @@
* things like g_tree_lookup() will return TRUE (1) when found.
*/
g_tree_insert(mis->page_requested, aligned, (gpointer)1);
- mis->page_requested_count++;
+ qatomic_inc(&mis->page_requested_count);
trace_postcopy_page_req_add(aligned, mis->page_requested_count);
}
}
@@ -447,6 +450,8 @@
exec_start_incoming_migration(p, errp);
} else if (strstart(uri, "fd:", &p)) {
fd_start_incoming_migration(p, errp);
+ } else if (strstart(uri, "file:", &p)) {
+ file_start_incoming_migration(p, errp);
} else {
error_setg(errp, "unknown migration protocol: %s", uri);
}
@@ -1177,11 +1182,11 @@
qemu_fclose(tmp);
}
- if (s->postcopy_qemufile_src) {
- migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src);
- qemu_fclose(s->postcopy_qemufile_src);
- s->postcopy_qemufile_src = NULL;
- }
+ /*
+ * We already cleaned up to_dst_file, so errors from the return
+ * path might be due to that; ignore them.
+ */
+ await_return_path_close_on_source(s);
assert(!migration_is_active(s));
@@ -1245,7 +1250,7 @@
static void migrate_fd_cancel(MigrationState *s)
{
int old_state ;
- QEMUFile *f = migrate_get_current()->to_dst_file;
+
trace_migrate_fd_cancel();
WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
@@ -1271,11 +1276,13 @@
* If we're unlucky the migration code might be stuck somewhere in a
* send/write while the network has failed and is waiting to timeout;
* if we've got shutdown(2) available then we can force it to quit.
- * The outgoing qemu file gets closed in migrate_fd_cleanup that is
- * called in a bh, so there is no race against this cancel.
*/
- if (s->state == MIGRATION_STATUS_CANCELLING && f) {
- qemu_file_shutdown(f);
+ if (s->state == MIGRATION_STATUS_CANCELLING) {
+ WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
+ if (s->to_dst_file) {
+ qemu_file_shutdown(s->to_dst_file);
+ }
+ }
}
if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
Error *local_err = NULL;
@@ -1535,12 +1542,14 @@
{
MigrationState *ms = migrate_get_current();
MigrationIncomingState *mis = migration_incoming_get_current();
- int ret;
+ int ret = 0;
if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
/* Source side, during postcopy */
qemu_mutex_lock(&ms->qemu_file_lock);
- ret = qemu_file_shutdown(ms->to_dst_file);
+ if (ms->to_dst_file) {
+ ret = qemu_file_shutdown(ms->to_dst_file);
+ }
qemu_mutex_unlock(&ms->qemu_file_lock);
if (ret) {
error_setg(errp, "Failed to pause source migration");
@@ -1696,16 +1705,14 @@
exec_start_outgoing_migration(s, p, &local_err);
} else if (strstart(uri, "fd:", &p)) {
fd_start_outgoing_migration(s, p, &local_err);
+ } else if (strstart(uri, "file:", &p)) {
+ file_start_outgoing_migration(s, p, &local_err);
} else {
- if (!resume_requested) {
- yank_unregister_instance(MIGRATION_YANK_INSTANCE);
- }
error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri",
"a valid migration protocol");
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
MIGRATION_STATUS_FAILED);
block_cleanup_parameters();
- return;
}
if (local_err) {
@@ -1788,18 +1795,6 @@
}
}
-/* Return true to retry, false to quit */
-static bool postcopy_pause_return_path_thread(MigrationState *s)
-{
- trace_postcopy_pause_return_path();
-
- qemu_sem_wait(&s->postcopy_pause_rp_sem);
-
- trace_postcopy_pause_return_path_continued();
-
- return true;
-}
-
static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
{
RAMBlock *block = qemu_ram_block_by_name(block_name);
@@ -1883,7 +1878,6 @@
trace_source_return_path_thread_entry();
rcu_register_thread();
-retry:
while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
migration_is_setup_or_active(ms->state)) {
trace_source_return_path_thread_loop_top();
@@ -2005,38 +1999,17 @@
}
out:
- res = qemu_file_get_error(rp);
- if (res) {
- if (res && migration_in_postcopy()) {
- /*
- * Maybe there is something we can do: it looks like a
- * network down issue, and we pause for a recovery.
- */
- migration_release_dst_files(ms);
- rp = NULL;
- if (postcopy_pause_return_path_thread(ms)) {
- /*
- * Reload rp, reset the rest. Referencing it is safe since
- * it's reset only by us above, or when migration completes
- */
- rp = ms->rp_state.from_dst_file;
- ms->rp_state.error = false;
- goto retry;
- }
- }
-
+ if (qemu_file_get_error(rp)) {
trace_source_return_path_thread_bad_end();
mark_source_rp_bad(ms);
}
trace_source_return_path_thread_end();
- migration_release_dst_files(ms);
rcu_unregister_thread();
return NULL;
}
-static int open_return_path_on_source(MigrationState *ms,
- bool create_thread)
+static int open_return_path_on_source(MigrationState *ms)
{
ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
if (!ms->rp_state.from_dst_file) {
@@ -2045,11 +2018,6 @@
trace_open_return_path_on_source();
- if (!create_thread) {
- /* We're done */
- return 0;
- }
-
qemu_thread_create(&ms->rp_state.rp_thread, "return path",
source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
ms->rp_state.rp_thread_created = true;
@@ -2062,24 +2030,39 @@
/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
- /*
- * If this is a normal exit then the destination will send a SHUT and the
- * rp_thread will exit, however if there's an error we need to cause
- * it to exit.
- */
- if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
- /*
- * shutdown(2), if we have it, will cause it to unblock if it's stuck
- * waiting for the destination.
- */
- qemu_file_shutdown(ms->rp_state.from_dst_file);
- mark_source_rp_bad(ms);
+ int ret;
+
+ if (!ms->rp_state.rp_thread_created) {
+ return 0;
}
+
+ trace_migration_return_path_end_before();
+
+ /*
+ * If this is a normal exit then the destination will send a SHUT
+ * and the rp_thread will exit; however, if there's an error we
+ * need to cause it to exit. shutdown(2), if we have it, will
+ * cause it to unblock if it's stuck waiting for the destination.
+ */
+ WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
+ if (ms->to_dst_file && ms->rp_state.from_dst_file &&
+ qemu_file_get_error(ms->to_dst_file)) {
+ qemu_file_shutdown(ms->rp_state.from_dst_file);
+ }
+ }
+
trace_await_return_path_close_on_source_joining();
qemu_thread_join(&ms->rp_state.rp_thread);
ms->rp_state.rp_thread_created = false;
trace_await_return_path_close_on_source_close();
- return ms->rp_state.error;
+
+ ret = ms->rp_state.error;
+ ms->rp_state.error = false;
+
+ migration_release_dst_files(ms);
+
+ trace_migration_return_path_end_after(ret);
+ return ret;
}
static inline void
@@ -2375,20 +2358,8 @@
goto fail;
}
- /*
- * If rp was opened we must clean up the thread before
- * cleaning everything else up (since if there are no failures
- * it will wait for the destination to send it's status in
- * a SHUT command).
- */
- if (s->rp_state.rp_thread_created) {
- int rp_error;
- trace_migration_return_path_end_before();
- rp_error = await_return_path_close_on_source(s);
- trace_migration_return_path_end_after(rp_error);
- if (rp_error) {
- goto fail;
- }
+ if (await_return_path_close_on_source(s)) {
+ goto fail;
}
if (qemu_file_get_error(s->to_dst_file)) {
@@ -2565,6 +2536,13 @@
qemu_file_shutdown(file);
qemu_fclose(file);
+ /*
+ * We're already pausing, so ignore any errors on the return
+ * path and just wait for the thread to finish. It will be
+ * re-created when we resume.
+ */
+ await_return_path_close_on_source(s);
+
migrate_set_state(&s->state, s->state,
MIGRATION_STATUS_POSTCOPY_PAUSED);
@@ -2582,12 +2560,6 @@
if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
/* Woken up by a recover procedure. Give it a shot */
- /*
- * Firstly, let's wake up the return path now, with a new
- * return path channel.
- */
- qemu_sem_post(&s->postcopy_pause_rp_sem);
-
/* Do the resume logic */
if (postcopy_do_resume(s) == 0) {
/* Let's continue! */
@@ -3277,7 +3249,7 @@
* QEMU uses the return path.
*/
if (migrate_postcopy_ram() || migrate_return_path()) {
- if (open_return_path_on_source(s, !resume)) {
+ if (open_return_path_on_source(s)) {
error_setg(&local_err, "Unable to open return-path for postcopy");
migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
migrate_set_error(s, local_err);
@@ -3341,7 +3313,6 @@
qemu_sem_destroy(&ms->rate_limit_sem);
qemu_sem_destroy(&ms->pause_sem);
qemu_sem_destroy(&ms->postcopy_pause_sem);
- qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
qemu_sem_destroy(&ms->rp_state.rp_sem);
qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
@@ -3361,7 +3332,6 @@
migrate_params_init(&ms->parameters);
qemu_sem_init(&ms->postcopy_pause_sem, 0);
- qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
qemu_sem_init(&ms->rp_state.rp_sem, 0);
qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
qemu_sem_init(&ms->rate_limit_sem, 0);
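The reworked await_return_path_close_on_source() above now owns the whole
teardown: kick the return-path channel if the outgoing file already failed,
join the thread, then consume and clear the recorded error. The following is
only a minimal sketch of that shutdown-then-join pattern, using plain pthreads
and hypothetical simplified types rather than the real MigrationState/QEMUFile:

    /* Sketch only: simplified stand-ins for the migration structures. */
    #include <pthread.h>
    #include <stdbool.h>

    struct rp_state {
        pthread_t thread;
        bool thread_created;
        int error;                    /* set by the return-path thread */
    };

    /* Hypothetical helper mirroring the new await/close flow. */
    static int rp_close(struct rp_state *rp, bool outgoing_failed,
                        void (*shutdown_channel)(void))
    {
        int ret;

        if (!rp->thread_created) {
            return 0;
        }
        if (outgoing_failed) {
            /* Unblock the thread if it is stuck reading from the channel. */
            shutdown_channel();
        }
        pthread_join(rp->thread, NULL);
        rp->thread_created = false;

        ret = rp->error;              /* consume the error exactly once */
        rp->error = 0;
        return ret;
    }

Consuming the error inside the join helper is what lets the callers collapse
to a single "if (await_return_path_close_on_source(s)) goto fail;".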
diff --git a/migration/migration.h b/migration/migration.h
index c390500..972597f 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -196,7 +196,10 @@
/* A tree of pages that we requested to the source VM */
GTree *page_requested;
- /* For debugging purpose only, but would be nice to keep */
+ /*
+ * For postcopy only, count the number of requested page faults that
+ * still haven't been resolved.
+ */
int page_requested_count;
/*
* The mutex helps to maintain the requested pages that we sent to the
@@ -210,6 +213,14 @@
* contains valid information.
*/
QemuMutex page_request_mutex;
+ /*
+     * If postcopy preempt is enabled, there is a chance that the main
+     * thread finishes loading its data before the preempt channel has
+     * finished loading the urgent pages.  If that happens, the two threads
+     * use this condvar to synchronize, so the main thread always waits
+     * until all requested pages have been received.
+ */
+ QemuCond page_request_cond;
/*
* Number of devices that have yet to approve switchover. When this reaches
@@ -382,7 +393,6 @@
/* Needed by postcopy-pause state */
QemuSemaphore postcopy_pause_sem;
- QemuSemaphore postcopy_pause_rp_sem;
/*
* Whether we abort the migration if decompression errors are
* detected at the destination. It is left at false for qemu
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 29aea94..5408e02 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -599,6 +599,30 @@
if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
/* Notify the fast load thread to quit */
mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
+ /*
+         * Update preempt_thread_status before reading count.  Note: the
+         * mutex lock only provides ACQUIRE semantics, so it doesn't stop
+         * this write from being reordered after the read of the count.
+ */
+ smp_mb();
+ /*
+ * It's possible that the preempt thread is still handling the last
+ * pages to arrive which were requested by guest page faults.
+         * Wait on the condvar to make sure nothing is left behind in that
+         * unlikely case.
+ */
+ WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
+ if (qatomic_read(&mis->page_requested_count)) {
+ /*
+                 * We are guaranteed to receive a signal later, because
+                 * count > 0 now and the preempt thread is bound to drop
+                 * it to zero very soon.
+ */
+ qemu_cond_wait(&mis->page_request_cond,
+ &mis->page_request_mutex);
+ }
+ }
+        /* Kick the fast load thread out of any blocking read so it can quit */
if (mis->postcopy_qemufile_dst) {
qemu_file_shutdown(mis->postcopy_qemufile_dst);
}
@@ -1277,8 +1301,20 @@
*/
if (g_tree_lookup(mis->page_requested, host_addr)) {
g_tree_remove(mis->page_requested, host_addr);
- mis->page_requested_count--;
+ int left_pages = qatomic_dec_fetch(&mis->page_requested_count);
+
trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
+ /* Order the update of count and read of preempt status */
+ smp_mb();
+ if (mis->preempt_thread_status == PREEMPT_THREAD_QUIT &&
+ left_pages == 0) {
+ /*
+ * This probably means the main thread is waiting for us.
+ * Notify that we've finished receiving the last requested
+ * page.
+ */
+ qemu_cond_signal(&mis->page_request_cond);
+ }
}
qemu_mutex_unlock(&mis->page_request_mutex);
mark_postcopy_blocktime_end((uintptr_t)host_addr);
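The pairing above is a counter-plus-condvar handshake: the fault handler
decrements the outstanding-page count under page_request_mutex and signals
when it reaches zero while the other side is quitting; the cleanup path flips
the quit flag and then waits on the condvar as long as the count is non-zero.
A self-contained sketch of the same pattern with plain pthreads and C11
atomics, hypothetical names only (not the QEMU primitives):

    /* Sketch only: waiter/worker handshake for outstanding page requests. */
    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static atomic_int      outstanding;   /* requested but unresolved pages */
    static atomic_bool     quitting;

    /* Worker: called once per resolved page request. */
    static void page_resolved(void)
    {
        pthread_mutex_lock(&lock);
        int left = atomic_fetch_sub(&outstanding, 1) - 1;
        if (atomic_load(&quitting) && left == 0) {
            pthread_cond_signal(&done);   /* the waiter may be blocked on us */
        }
        pthread_mutex_unlock(&lock);
    }

    /* Shutdown path: wait until every outstanding request is resolved. */
    static void wait_for_outstanding(void)
    {
        atomic_store(&quitting, true);
        pthread_mutex_lock(&lock);
        while (atomic_load(&outstanding) > 0) {
            pthread_cond_wait(&done, &lock);
        }
        pthread_mutex_unlock(&lock);
    }

The sketch loops around the wait, which also covers spurious wakeups; the QEMU
code can rely on a single signal because the count is checked under the mutex.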
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index 19c33c9..5e8207d 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -322,23 +322,20 @@
}
}
-size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
- ram_addr_t offset, size_t size,
- uint64_t *bytes_sent)
+int ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
+ ram_addr_t offset, size_t size)
{
if (f->hooks && f->hooks->save_page) {
- int ret = f->hooks->save_page(f, block_offset,
- offset, size, bytes_sent);
-
+ int ret = f->hooks->save_page(f, block_offset, offset, size);
+ /*
+ * RAM_SAVE_CONTROL_* are negative values
+ */
if (ret != RAM_SAVE_CONTROL_DELAYED &&
ret != RAM_SAVE_CONTROL_NOT_SUPP) {
- if (bytes_sent && *bytes_sent > 0) {
- qemu_file_credit_transfer(f, *bytes_sent);
- } else if (ret < 0) {
+ if (ret < 0) {
qemu_file_set_error(f, ret);
}
}
-
return ret;
}
@@ -400,11 +397,6 @@
return len;
}
-void qemu_file_credit_transfer(QEMUFile *f, size_t size)
-{
- f->total_transferred += size;
-}
-
/** Closes the file
*
* Returns negative error value if any error happened on previous operations or
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index 47015f5..03e718c 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -49,11 +49,10 @@
* This function allows override of where the RAM page
* is saved (such as RDMA, for example.)
*/
-typedef size_t (QEMURamSaveFunc)(QEMUFile *f,
- ram_addr_t block_offset,
- ram_addr_t offset,
- size_t size,
- uint64_t *bytes_sent);
+typedef int (QEMURamSaveFunc)(QEMUFile *f,
+ ram_addr_t block_offset,
+ ram_addr_t offset,
+ size_t size);
typedef struct QEMUFileHooks {
QEMURamHookFunc *before_ram_iterate;
@@ -119,14 +118,6 @@
*/
int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset);
void qemu_file_skip(QEMUFile *f, int size);
-/*
- * qemu_file_credit_transfer:
- *
- * Report on a number of bytes that have been transferred
- * out of band from the main file object I/O methods. This
- * accounting information tracks the total migration traffic.
- */
-void qemu_file_credit_transfer(QEMUFile *f, size_t size);
int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp);
void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err);
void qemu_file_set_error(QEMUFile *f, int ret);
@@ -150,9 +141,8 @@
#define RAM_SAVE_CONTROL_NOT_SUPP -1000
#define RAM_SAVE_CONTROL_DELAYED -2000
-size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
- ram_addr_t offset, size_t size,
- uint64_t *bytes_sent);
+int ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
+ ram_addr_t offset, size_t size);
QIOChannel *qemu_file_get_ioc(QEMUFile *file);
#endif
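With bytes_sent gone, the hook now reports everything through the int return
value: one of the negative RAM_SAVE_CONTROL_* sentinels, a negative error, or
a non-negative number of pages sent. A hedged sketch of how a caller can
branch on that contract, with simplified names rather than the exact ram.c
code:

    /* Sketch only: interpreting the new int return contract. */
    #define RAM_SAVE_CONTROL_NOT_SUPP -1000
    #define RAM_SAVE_CONTROL_DELAYED  -2000

    /* Hypothetical stand-in for ram_control_save_page(). */
    extern int hook_save_page(void *file, long block_offset, long offset,
                              long size);

    static int try_control_save(void *file, long block_offset, long offset,
                                long size, int *pages)
    {
        int ret = hook_save_page(file, block_offset, offset, size);

        if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
            return 0;                 /* fall back to the normal save path */
        }
        if (ret == RAM_SAVE_CONTROL_DELAYED) {
            *pages = 1;               /* sent asynchronously, count one page */
            return 1;
        }
        *pages = ret;                 /* >= 0: pages written, < 0: error */
        return 1;
    }

This mirrors control_save_page() in the ram.c hunk below, where *pages carries
the hook's result straight back to the caller.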
diff --git a/migration/ram.c b/migration/ram.c
index 9040d66..e4bfd39 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1186,31 +1186,19 @@
static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
ram_addr_t offset, int *pages)
{
- uint64_t bytes_xmit = 0;
int ret;
- *pages = -1;
ret = ram_control_save_page(pss->pss_channel, block->offset, offset,
- TARGET_PAGE_SIZE, &bytes_xmit);
+ TARGET_PAGE_SIZE);
if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
return false;
}
- if (bytes_xmit) {
- ram_transferred_add(bytes_xmit);
- *pages = 1;
- }
-
if (ret == RAM_SAVE_CONTROL_DELAYED) {
+ *pages = 1;
return true;
}
-
- if (bytes_xmit > 0) {
- stat64_add(&mig_stats.normal_pages, 1);
- } else if (bytes_xmit == 0) {
- stat64_add(&mig_stats.zero_pages, 1);
- }
-
+ *pages = ret;
return true;
}
@@ -3517,8 +3505,6 @@
* we use the same name 'ram_bitmap' as for migration.
*/
if (ram_bytes_total()) {
- RAMBlock *block;
-
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
block->bmap = bitmap_new(pages);
@@ -3998,12 +3984,12 @@
}
}
if (migrate_ignore_shared()) {
- hwaddr addr = qemu_get_be64(f);
+ hwaddr addr2 = qemu_get_be64(f);
if (migrate_ram_is_ignored(block) &&
- block->mr->addr != addr) {
+ block->mr->addr != addr2) {
error_report("Mismatched GPAs for block %s "
"%" PRId64 "!= %" PRId64,
- id, (uint64_t)addr,
+ id, (uint64_t)addr2,
(uint64_t)block->mr->addr);
ret = -EINVAL;
}
diff --git a/migration/rdma.c b/migration/rdma.c
index a2a3db3..cd5e1af 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1902,9 +1902,11 @@
* by waiting for a READY message.
*/
if (rdma->control_ready_expected) {
- RDMAControlHeader resp;
- ret = qemu_rdma_exchange_get_response(rdma,
- &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
+ RDMAControlHeader resp_ignored;
+
+ ret = qemu_rdma_exchange_get_response(rdma, &resp_ignored,
+ RDMA_CONTROL_READY,
+ RDMA_WRID_READY);
if (ret < 0) {
return ret;
}
@@ -2027,7 +2029,7 @@
* If we're using dynamic registration on the dest-side, we have to
* send a registration command first.
*/
-static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
+static int qemu_rdma_write_one(RDMAContext *rdma,
int current_index, uint64_t current_addr,
uint64_t length)
{
@@ -2122,9 +2124,18 @@
return -EIO;
}
+ /*
+     * TODO: Here we are sending something, but we are not
+     * accounting for anything transferred.  The following is wrong:
+     *
+     *     stat64_add(&mig_stats.rdma_bytes, sge.length);
+     *
+     * because we are using some kind of compression.  head.len
+     * would probably be closer to the correct value than
+     * sge.length here.
+ */
stat64_add(&mig_stats.zero_pages,
sge.length / qemu_target_page_size());
-
return 1;
}
@@ -2232,8 +2243,17 @@
set_bit(chunk, block->transit_bitmap);
stat64_add(&mig_stats.normal_pages, sge.length / qemu_target_page_size());
+ /*
+     * We add the amount of data written to the transferred total,
+     * but no overhead at all.  This assumes that RDMA is magical
+     * and doesn't need to transfer (at least) the addresses where
+     * it wants to write the pages.  It looks like it should be
+     * something like:
+     *     sizeof(send_wr) + sge.length
+     * but this being RDMA, who knows.
+ */
+ stat64_add(&mig_stats.rdma_bytes, sge.length);
ram_transferred_add(sge.length);
- qemu_file_credit_transfer(f, sge.length);
rdma->total_writes++;
return 0;
@@ -2245,7 +2265,7 @@
* We support sending out multiple chunks at the same time.
* Not all of them need to get signaled in the completion queue.
*/
-static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
+static int qemu_rdma_write_flush(RDMAContext *rdma)
{
int ret;
@@ -2253,7 +2273,7 @@
return 0;
}
- ret = qemu_rdma_write_one(f, rdma,
+ ret = qemu_rdma_write_one(rdma,
rdma->current_index, rdma->current_addr, rdma->current_length);
if (ret < 0) {
@@ -2326,7 +2346,7 @@
* and only require that a batch gets acknowledged in the completion
* queue instead of each individual chunk.
*/
-static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
+static int qemu_rdma_write(RDMAContext *rdma,
uint64_t block_offset, uint64_t offset,
uint64_t len)
{
@@ -2337,7 +2357,7 @@
/* If we cannot merge it, we flush the current buffer first. */
if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
- ret = qemu_rdma_write_flush(f, rdma);
+ ret = qemu_rdma_write_flush(rdma);
if (ret) {
return ret;
}
@@ -2359,7 +2379,7 @@
/* flush it if buffer is too large */
if (rdma->current_length >= RDMA_MERGE_MAX) {
- return qemu_rdma_write_flush(f, rdma);
+ return qemu_rdma_write_flush(rdma);
}
return 0;
@@ -2780,7 +2800,6 @@
Error **errp)
{
QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
- QEMUFile *f = rioc->file;
RDMAContext *rdma;
int ret;
ssize_t done = 0;
@@ -2801,7 +2820,7 @@
* Push out any writes that
* we're queued up for VM's ram.
*/
- ret = qemu_rdma_write_flush(f, rdma);
+ ret = qemu_rdma_write_flush(rdma);
if (ret < 0) {
rdma->error_state = ret;
error_setg(errp, "qemu_rdma_write_flush returned %d", ret);
@@ -2812,7 +2831,7 @@
size_t remaining = iov[i].iov_len;
uint8_t * data = (void *)iov[i].iov_base;
while (remaining) {
- RDMAControlHeader head;
+ RDMAControlHeader head = {};
len = MIN(remaining, RDMA_SEND_INCREMENT);
remaining -= len;
@@ -2940,11 +2959,11 @@
/*
* Block until all the outstanding chunks have been delivered by the hardware.
*/
-static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
+static int qemu_rdma_drain_cq(RDMAContext *rdma)
{
int ret;
- if (qemu_rdma_write_flush(f, rdma) < 0) {
+ if (qemu_rdma_write_flush(rdma) < 0) {
return -EIO;
}
@@ -3223,13 +3242,12 @@
*
* @size : Number of bytes to transfer
*
- * @bytes_sent : User-specificed pointer to indicate how many bytes were
+ * @pages_sent : User-specified pointer to indicate how many pages were
* sent. Usually, this will not be more than a few bytes of
* the protocol because most transfers are sent asynchronously.
*/
-static size_t qemu_rdma_save_page(QEMUFile *f,
- ram_addr_t block_offset, ram_addr_t offset,
- size_t size, uint64_t *bytes_sent)
+static int qemu_rdma_save_page(QEMUFile *f, ram_addr_t block_offset,
+ ram_addr_t offset, size_t size)
{
QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
RDMAContext *rdma;
@@ -3255,25 +3273,13 @@
* is full, or the page doesn't belong to the current chunk,
* an actual RDMA write will occur and a new chunk will be formed.
*/
- ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
+ ret = qemu_rdma_write(rdma, block_offset, offset, size);
if (ret < 0) {
error_report("rdma migration: write error! %d", ret);
goto err;
}
/*
- * We always return 1 bytes because the RDMA
- * protocol is completely asynchronous. We do not yet know
- * whether an identified chunk is zero or not because we're
- * waiting for other pages to potentially be merged with
- * the current chunk. So, we have to call qemu_update_position()
- * later on when the actual write occurs.
- */
- if (bytes_sent) {
- *bytes_sent = 1;
- }
-
- /*
* Drain the Completion Queue if possible, but do not block,
* just poll.
*
@@ -3282,7 +3288,8 @@
*/
while (1) {
uint64_t wr_id, wr_id_in;
- int ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL);
+ ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL);
+
if (ret < 0) {
error_report("rdma migration: polling error! %d", ret);
goto err;
@@ -3297,7 +3304,8 @@
while (1) {
uint64_t wr_id, wr_id_in;
- int ret = qemu_rdma_poll(rdma, rdma->send_cq, &wr_id_in, NULL);
+ ret = qemu_rdma_poll(rdma, rdma->send_cq, &wr_id_in, NULL);
+
if (ret < 0) {
error_report("rdma migration: polling error! %d", ret);
goto err;
@@ -3910,7 +3918,7 @@
CHECK_ERROR_STATE();
qemu_fflush(f);
- ret = qemu_rdma_drain_cq(f, rdma);
+ ret = qemu_rdma_drain_cq(rdma);
if (ret < 0) {
goto err;
diff --git a/migration/savevm.c b/migration/savevm.c
index bb3e991..60eec7c 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -979,6 +979,8 @@
static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc)
{
int ret;
+ Error *local_err = NULL;
+ MigrationState *s = migrate_get_current();
if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
return 0;
@@ -1000,8 +1002,10 @@
if (!se->vmsd) {
vmstate_save_old_style(f, se, vmdesc);
} else {
- ret = vmstate_save_state(f, se->vmsd, se->opaque, vmdesc);
+ ret = vmstate_save_state_with_err(f, se->vmsd, se->opaque, vmdesc, &local_err);
if (ret) {
+ migrate_set_error(s, local_err);
+ error_report_err(local_err);
return ret;
}
}
@@ -1068,10 +1072,14 @@
int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len)
{
uint32_t tmp;
+ MigrationState *ms = migrate_get_current();
+ Error *local_err = NULL;
if (len > MAX_VM_CMD_PACKAGED_SIZE) {
- error_report("%s: Unreasonably large packaged state: %zu",
+ error_setg(&local_err, "%s: Unreasonably large packaged state: %zu",
__func__, len);
+ migrate_set_error(ms, local_err);
+ error_report_err(local_err);
return -1;
}
@@ -1499,8 +1507,11 @@
* bdrv_activate_all() on the other end won't fail. */
ret = bdrv_inactivate_all();
if (ret) {
- error_report("%s: bdrv_inactivate_all() failed (%d)",
- __func__, ret);
+ Error *local_err = NULL;
+ error_setg(&local_err, "%s: bdrv_inactivate_all() failed (%d)",
+ __func__, ret);
+ migrate_set_error(ms, local_err);
+ error_report_err(local_err);
qemu_file_set_error(f, ret);
return ret;
}
diff --git a/migration/trace-events b/migration/trace-events
index 4666f19..002abe3 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -66,6 +66,7 @@
vmstate_save_state_top(const char *idstr) "%s"
vmstate_subsection_save_loop(const char *name, const char *sub) "%s/%s"
vmstate_subsection_save_top(const char *idstr) "%s"
+vmstate_field_exists(const char *vmsd, const char *name, int field_version, int version, int result) "%s:%s field_version %d version %d result %d"
# vmstate-types.c
get_qtailq(const char *name, int version_id) "%s v%d"
@@ -191,7 +192,7 @@
postcopy_preempt_enabled(bool value) "%d"
# migration-stats
-migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd) "qemu_file %" PRIu64 " multifd %" PRIu64
+migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd, uint64_t rdma) "qemu_file %" PRIu64 " multifd %" PRIu64 " RDMA %" PRIu64
# channel.c
migration_set_incoming_channel(void *ioc, const char *ioctype) "ioc=%p ioctype=%s"
@@ -311,6 +312,10 @@
migration_fd_outgoing(int fd) "fd=%d"
migration_fd_incoming(int fd) "fd=%d"
+# file.c
+migration_file_outgoing(const char *filename) "filename=%s"
+migration_file_incoming(const char *filename) "filename=%s"
+
# socket.c
migration_socket_incoming_accepted(void) ""
migration_socket_outgoing_connected(const char *hostname) "hostname=%s"
diff --git a/migration/vmstate.c b/migration/vmstate.c
index 31842c3..1cf9e45 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -14,6 +14,7 @@
#include "migration.h"
#include "migration/vmstate.h"
#include "savevm.h"
+#include "qapi/error.h"
#include "qapi/qmp/json-writer.h"
#include "qemu-file.h"
#include "qemu/bitops.h"
@@ -25,6 +26,30 @@
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
+/* Should this field exist when saving or loading the VM? */
+static bool
+vmstate_field_exists(const VMStateDescription *vmsd, const VMStateField *field,
+ void *opaque, int version_id)
+{
+ bool result;
+
+ if (field->field_exists) {
+        /* If there's a checker function, it is the sole authority */
+ result = field->field_exists(opaque, version_id);
+ trace_vmstate_field_exists(vmsd->name, field->name, field->version_id,
+ version_id, result);
+ } else {
+ /*
+         * Otherwise, we only save/load the field if its version is the same
+         * or older.  For example, when loading from an old binary with an
+         * old version, we ignore new fields with newer version_ids.
+ */
+ result = field->version_id <= version_id;
+ }
+
+ return result;
+}
+
static int vmstate_n_elems(void *opaque, const VMStateField *field)
{
int n_elems = 1;
@@ -97,17 +122,14 @@
return -EINVAL;
}
if (vmsd->pre_load) {
- int ret = vmsd->pre_load(opaque);
+ ret = vmsd->pre_load(opaque);
if (ret) {
return ret;
}
}
while (field->name) {
trace_vmstate_load_state_field(vmsd->name, field->name);
- if ((field->field_exists &&
- field->field_exists(opaque, version_id)) ||
- (!field->field_exists &&
- field->version_id <= version_id)) {
+ if (vmstate_field_exists(vmsd, field, opaque, version_id)) {
void *first_elem = opaque + field->offset;
int i, n_elems = vmstate_n_elems(opaque, field);
int size = vmstate_size(opaque, field);
@@ -315,11 +337,17 @@
int vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, JSONWriter *vmdesc_id)
{
- return vmstate_save_state_v(f, vmsd, opaque, vmdesc_id, vmsd->version_id);
+ return vmstate_save_state_v(f, vmsd, opaque, vmdesc_id, vmsd->version_id, NULL);
+}
+
+int vmstate_save_state_with_err(QEMUFile *f, const VMStateDescription *vmsd,
+ void *opaque, JSONWriter *vmdesc_id, Error **errp)
+{
+ return vmstate_save_state_v(f, vmsd, opaque, vmdesc_id, vmsd->version_id, errp);
}
int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
- void *opaque, JSONWriter *vmdesc, int version_id)
+ void *opaque, JSONWriter *vmdesc, int version_id, Error **errp)
{
int ret = 0;
const VMStateField *field = vmsd->fields;
@@ -330,7 +358,7 @@
ret = vmsd->pre_save(opaque);
trace_vmstate_save_state_pre_save_res(vmsd->name, ret);
if (ret) {
- error_report("pre-save failed: %s", vmsd->name);
+ error_setg(errp, "pre-save failed: %s", vmsd->name);
return ret;
}
}
@@ -342,10 +370,7 @@
}
while (field->name) {
- if ((field->field_exists &&
- field->field_exists(opaque, version_id)) ||
- (!field->field_exists &&
- field->version_id <= version_id)) {
+ if (vmstate_field_exists(vmsd, field, opaque, version_id)) {
void *first_elem = opaque + field->offset;
int i, n_elems = vmstate_n_elems(opaque, field);
int size = vmstate_size(opaque, field);
@@ -377,14 +402,14 @@
} else if (field->flags & VMS_VSTRUCT) {
ret = vmstate_save_state_v(f, field->vmsd, curr_elem,
vmdesc_loop,
- field->struct_version_id);
+ field->struct_version_id, errp);
} else {
ret = field->info->put(f, curr_elem, size, field,
vmdesc_loop);
}
if (ret) {
- error_report("Save of field %s/%s failed",
- vmsd->name, field->name);
+ error_setg(errp, "Save of field %s/%s failed",
+ vmsd->name, field->name);
if (vmsd->post_save) {
vmsd->post_save(opaque);
}
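vmstate_field_exists() above makes the rule explicit: a field_exists()
callback, when present, is the sole authority; otherwise the field is
saved/loaded only when its version_id is not newer than the negotiated
version. As an illustration, a device description can mix both styles roughly
as below; the device, its state struct and the particular VMSTATE_* helper
macros used are assumptions for the example, not code from this series:

    /* Sketch only: one always-present field, one versioned, one callback-gated. */
    typedef struct MyDevState {
        uint32_t level;
        uint32_t feature_bits;
        uint32_t extra;
        bool extra_enabled;
    } MyDevState;

    static bool mydev_has_extra_state(void *opaque, int version_id)
    {
        MyDevState *s = opaque;
        return s->extra_enabled;        /* authoritative, overrides version_id */
    }

    static const VMStateDescription vmstate_mydev = {
        .name = "mydev",
        .version_id = 2,
        .minimum_version_id = 1,
        .fields = (const VMStateField[]) {
            VMSTATE_UINT32(level, MyDevState),              /* always */
            VMSTATE_UINT32_V(feature_bits, MyDevState, 2),  /* v2+ streams only */
            VMSTATE_UINT32_TEST(extra, MyDevState, mydev_has_extra_state),
            VMSTATE_END_OF_LIST()
        }
    };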
diff --git a/monitor/hmp-cmds-target.c b/monitor/hmp-cmds-target.c
index 0d3e84d..d9fbcac 100644
--- a/monitor/hmp-cmds-target.c
+++ b/monitor/hmp-cmds-target.c
@@ -81,7 +81,7 @@
{
CPUState *cs = mon_get_cpu(mon);
- return cs ? cs->env_ptr : NULL;
+ return cs ? cpu_env(cs) : NULL;
}
int monitor_get_cpu_index(Monitor *mon)
diff --git a/nbd/client-connection.c b/nbd/client-connection.c
index 53a6549..aa0201b 100644
--- a/nbd/client-connection.c
+++ b/nbd/client-connection.c
@@ -1,5 +1,5 @@
/*
- * QEMU Block driver for NBD
+ * QEMU Block driver for NBD
*
* Copyright (c) 2021 Virtuozzo International GmbH.
*
@@ -93,7 +93,7 @@
.do_negotiation = do_negotiation,
.initial_info.request_sizes = true,
- .initial_info.structured_reply = true,
+ .initial_info.mode = NBD_MODE_STRUCTURED,
.initial_info.base_allocation = true,
.initial_info.x_dirty_bitmap = g_strdup(x_dirty_bitmap),
.initial_info.name = g_strdup(export_name ?: "")
diff --git a/nbd/client.c b/nbd/client.c
index bd7e200..cecb0f0 100644
--- a/nbd/client.c
+++ b/nbd/client.c
@@ -879,7 +879,7 @@
*/
static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
const char *hostname, QIOChannel **outioc,
- bool structured_reply, bool *zeroes,
+ NBDMode max_mode, bool *zeroes,
Error **errp)
{
ERRP_GUARD();
@@ -953,7 +953,7 @@
if (fixedNewStyle) {
int result = 0;
- if (structured_reply) {
+ if (max_mode >= NBD_MODE_STRUCTURED) {
result = nbd_request_simple_option(ioc,
NBD_OPT_STRUCTURED_REPLY,
false, errp);
@@ -1022,20 +1022,19 @@
trace_nbd_receive_negotiate_name(info->name);
result = nbd_start_negotiate(ioc, tlscreds, hostname, outioc,
- info->structured_reply, &zeroes, errp);
+ info->mode, &zeroes, errp);
if (result < 0) {
return result;
}
- info->structured_reply = false;
+ info->mode = result;
info->base_allocation = false;
if (tlscreds && *outioc) {
ioc = *outioc;
}
- switch ((NBDMode)result) {
+ switch (info->mode) {
case NBD_MODE_STRUCTURED:
- info->structured_reply = true;
if (base_allocation) {
result = nbd_negotiate_simple_meta_context(ioc, info, errp);
if (result < 0) {
@@ -1144,8 +1143,8 @@
QIOChannel *sioc = NULL;
*info = NULL;
- result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc, true,
- NULL, errp);
+ result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc,
+ NBD_MODE_STRUCTURED, NULL, errp);
if (tlscreds && sioc) {
ioc = sioc;
}
@@ -1176,7 +1175,7 @@
memset(&array[count - 1], 0, sizeof(*array));
array[count - 1].name = name;
array[count - 1].description = desc;
- array[count - 1].structured_reply = result == NBD_MODE_STRUCTURED;
+ array[count - 1].mode = result;
}
for (i = 0; i < count; i++) {
@@ -1209,6 +1208,7 @@
/* Lone export name is implied, but we can parse length and flags */
array = g_new0(NBDExportInfo, 1);
array->name = g_strdup("");
+ array->mode = NBD_MODE_OLDSTYLE;
count = 1;
if (nbd_negotiate_finish_oldstyle(ioc, array, errp) < 0) {
@@ -1218,7 +1218,7 @@
/* Send NBD_CMD_DISC as a courtesy to the server, but ignore all
* errors now that we have the information we wanted. */
if (nbd_drop(ioc, 124, NULL) == 0) {
- NBDRequest request = { .type = NBD_CMD_DISC };
+ NBDRequest request = { .type = NBD_CMD_DISC, .mode = result };
nbd_send_request(ioc, &request);
}
@@ -1348,6 +1348,8 @@
{
uint8_t buf[NBD_REQUEST_SIZE];
+ assert(request->mode <= NBD_MODE_STRUCTURED); /* TODO handle extended */
+ assert(request->len <= UINT32_MAX);
trace_nbd_send_request(request->from, request->len, request->cookie,
request->flags, request->type,
nbd_cmd_lookup(request->type));
diff --git a/nbd/common.c b/nbd/common.c
index 989fbe5..3247c1d 100644
--- a/nbd/common.c
+++ b/nbd/common.c
@@ -79,6 +79,8 @@
return "list meta context";
case NBD_OPT_SET_META_CONTEXT:
return "set meta context";
+ case NBD_OPT_EXTENDED_HEADERS:
+ return "extended headers";
default:
return "<unknown>";
}
@@ -112,6 +114,10 @@
return "server shutting down";
case NBD_REP_ERR_BLOCK_SIZE_REQD:
return "block size required";
+ case NBD_REP_ERR_TOO_BIG:
+ return "option payload too big";
+ case NBD_REP_ERR_EXT_HEADER_REQD:
+ return "extended headers required";
default:
return "<unknown>";
}
@@ -170,7 +176,9 @@
case NBD_REPLY_TYPE_OFFSET_HOLE:
return "hole";
case NBD_REPLY_TYPE_BLOCK_STATUS:
- return "block status";
+ return "block status (32-bit)";
+ case NBD_REPLY_TYPE_BLOCK_STATUS_EXT:
+ return "block status (64-bit)";
case NBD_REPLY_TYPE_ERROR:
return "generic error";
case NBD_REPLY_TYPE_ERROR_OFFSET:
@@ -261,6 +269,8 @@
return "simple headers";
case NBD_MODE_STRUCTURED:
return "structured replies";
+ case NBD_MODE_EXTENDED:
+ return "extended headers";
default:
return "<unknown>";
}
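The per-feature booleans are being replaced by NBDMode, which is ordered from
least to most capable so that feature checks become ordered comparisons such
as client->mode >= NBD_MODE_STRUCTURED. A small illustrative sketch; the
enumerator names follow the strings above, but treat the exact values and
ordering as an assumption:

    /* Sketch only: an ordered capability enum turns feature tests into comparisons. */
    typedef enum SketchNBDMode {
        SKETCH_MODE_OLDSTYLE,     /* oldstyle negotiation */
        SKETCH_MODE_EXPORT_NAME,  /* newstyle, but only NBD_OPT_EXPORT_NAME */
        SKETCH_MODE_SIMPLE,       /* newstyle, simple replies only */
        SKETCH_MODE_STRUCTURED,   /* structured replies negotiated */
        SKETCH_MODE_EXTENDED,     /* 64-bit extended headers negotiated */
    } SketchNBDMode;

    static int supports_df_flag(SketchNBDMode mode)
    {
        /* Anything at or above structured replies can honour NBD_CMD_FLAG_DF. */
        return mode >= SKETCH_MODE_STRUCTURED;
    }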
diff --git a/nbd/nbd-internal.h b/nbd/nbd-internal.h
index df42fef..133b1d9 100644
--- a/nbd/nbd-internal.h
+++ b/nbd/nbd-internal.h
@@ -1,7 +1,7 @@
/*
* NBD Internal Declarations
*
- * Copyright (C) 2016 Red Hat, Inc.
+ * Copyright Red Hat
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
@@ -44,7 +44,6 @@
#define NBD_OLDSTYLE_NEGOTIATE_SIZE (8 + 8 + 8 + 4 + 124)
#define NBD_INIT_MAGIC 0x4e42444d41474943LL /* ASCII "NBDMAGIC" */
-#define NBD_REQUEST_MAGIC 0x25609513
#define NBD_OPTS_MAGIC 0x49484156454F5054LL /* ASCII "IHAVEOPT" */
#define NBD_CLIENT_MAGIC 0x0000420281861253LL
#define NBD_REP_MAGIC 0x0003e889045565a9LL
diff --git a/nbd/server.c b/nbd/server.c
index b5f93a2..7a6f950 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -143,7 +143,7 @@
uint32_t check_align; /* If non-zero, check for aligned client requests */
- bool structured_reply;
+ NBDMode mode;
NBDExportMetaContexts export_meta;
uint32_t opt; /* Current option being negotiated */
@@ -502,7 +502,7 @@
}
myflags = client->exp->nbdflags;
- if (client->structured_reply) {
+ if (client->mode >= NBD_MODE_STRUCTURED) {
myflags |= NBD_FLAG_SEND_DF;
}
trace_nbd_negotiate_new_style_size_flags(client->exp->size, myflags);
@@ -687,7 +687,7 @@
/* Send NBD_INFO_EXPORT always */
myflags = exp->nbdflags;
- if (client->structured_reply) {
+ if (client->mode >= NBD_MODE_STRUCTURED) {
myflags |= NBD_FLAG_SEND_DF;
}
trace_nbd_negotiate_new_style_size_flags(exp->size, myflags);
@@ -985,7 +985,8 @@
size_t i;
size_t count = 0;
- if (client->opt == NBD_OPT_SET_META_CONTEXT && !client->structured_reply) {
+ if (client->opt == NBD_OPT_SET_META_CONTEXT &&
+ client->mode < NBD_MODE_STRUCTURED) {
return nbd_opt_invalid(client, errp,
"request option '%s' when structured reply "
"is not negotiated",
@@ -1122,10 +1123,12 @@
if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) {
return -EIO;
}
+ client->mode = NBD_MODE_EXPORT_NAME;
trace_nbd_negotiate_options_flags(flags);
if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) {
fixedNewstyle = true;
flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE;
+ client->mode = NBD_MODE_SIMPLE;
}
if (flags & NBD_FLAG_C_NO_ZEROES) {
no_zeroes = true;
@@ -1162,7 +1165,7 @@
client->optlen = length;
if (length > NBD_MAX_BUFFER_SIZE) {
- error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
+ error_setg(errp, "len (%" PRIu32 ") is larger than max len (%u)",
length, NBD_MAX_BUFFER_SIZE);
return -EINVAL;
}
@@ -1261,13 +1264,13 @@
case NBD_OPT_STRUCTURED_REPLY:
if (length) {
ret = nbd_reject_length(client, false, errp);
- } else if (client->structured_reply) {
+ } else if (client->mode >= NBD_MODE_STRUCTURED) {
ret = nbd_negotiate_send_rep_err(
client, NBD_REP_ERR_INVALID, errp,
"structured reply already negotiated");
} else {
ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
- client->structured_reply = true;
+ client->mode = NBD_MODE_STRUCTURED;
}
break;
@@ -1434,7 +1437,7 @@
request->type = lduw_be_p(buf + 6);
request->cookie = ldq_be_p(buf + 8);
request->from = ldq_be_p(buf + 16);
- request->len = ldl_be_p(buf + 24);
+ request->len = (uint32_t)ldl_be_p(buf + 24); /* widen 32 to 64 bits */
trace_nbd_receive_request(magic, request->flags, request->type,
request->from, request->len);
@@ -1884,7 +1887,7 @@
NBDRequest *request,
uint32_t error,
void *data,
- size_t len,
+ uint64_t len,
Error **errp)
{
NBDSimpleReply reply;
@@ -1895,7 +1898,10 @@
};
assert(!len || !nbd_err);
- assert(!client->structured_reply || request->type != NBD_CMD_READ);
+ assert(len <= NBD_MAX_BUFFER_SIZE);
+ assert(client->mode < NBD_MODE_STRUCTURED ||
+ (client->mode == NBD_MODE_STRUCTURED &&
+ request->type != NBD_CMD_READ));
trace_nbd_co_send_simple_reply(request->cookie, nbd_err,
nbd_err_lookup(nbd_err), len);
set_be_simple_reply(&reply, nbd_err, request->cookie);
@@ -1951,7 +1957,7 @@
NBDRequest *request,
uint64_t offset,
void *data,
- size_t size,
+ uint64_t size,
bool final,
Error **errp)
{
@@ -1963,7 +1969,7 @@
{.iov_base = data, .iov_len = size}
};
- assert(size);
+ assert(size && size <= NBD_MAX_BUFFER_SIZE);
trace_nbd_co_send_chunk_read(request->cookie, offset, data, size);
set_be_chunk(client, iov, 3, final ? NBD_REPLY_FLAG_DONE : 0,
NBD_REPLY_TYPE_OFFSET_DATA, request);
@@ -1971,7 +1977,7 @@
return nbd_co_send_iov(client, iov, 3, errp);
}
-/*ebb*/
+
static int coroutine_fn nbd_co_send_chunk_error(NBDClient *client,
NBDRequest *request,
uint32_t error,
@@ -2006,13 +2012,14 @@
NBDRequest *request,
uint64_t offset,
uint8_t *data,
- size_t size,
+ uint64_t size,
Error **errp)
{
int ret = 0;
NBDExport *exp = client->exp;
size_t progress = 0;
+ assert(size <= NBD_MAX_BUFFER_SIZE);
while (progress < size) {
int64_t pnum;
int status = blk_co_block_status_above(exp->common.blk, NULL,
@@ -2067,7 +2074,7 @@
}
typedef struct NBDExtentArray {
- NBDExtent *extents;
+ NBDExtent32 *extents;
unsigned int nb_alloc;
unsigned int count;
uint64_t total_length;
@@ -2080,7 +2087,7 @@
NBDExtentArray *ea = g_new0(NBDExtentArray, 1);
ea->nb_alloc = nb_alloc;
- ea->extents = g_new(NBDExtent, nb_alloc);
+ ea->extents = g_new(NBDExtent32, nb_alloc);
ea->can_add = true;
return ea;
@@ -2143,7 +2150,7 @@
}
ea->total_length += length;
- ea->extents[ea->count] = (NBDExtent) {.length = length, .flags = flags};
+ ea->extents[ea->count] = (NBDExtent32) {.length = length, .flags = flags};
ea->count++;
return 0;
@@ -2310,11 +2317,16 @@
* to the client (although the caller may still need to disconnect after
* reporting the error).
*/
-static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
+static int coroutine_fn nbd_co_receive_request(NBDRequestData *req,
+ NBDRequest *request,
Error **errp)
{
NBDClient *client = req->client;
- int valid_flags;
+ bool check_length = false;
+ bool check_rofs = false;
+ bool allocate_buffer = false;
+ unsigned payload_len = 0;
+ int valid_flags = NBD_CMD_FLAG_FUA;
int ret;
g_assert(qemu_in_coroutine());
@@ -2326,60 +2338,94 @@
trace_nbd_co_receive_request_decode_type(request->cookie, request->type,
nbd_cmd_lookup(request->type));
-
- if (request->type != NBD_CMD_WRITE) {
- /* No payload, we are ready to read the next request. */
- req->complete = true;
- }
-
- if (request->type == NBD_CMD_DISC) {
+ switch (request->type) {
+ case NBD_CMD_DISC:
/* Special case: we're going to disconnect without a reply,
* whether or not flags, from, or len are bogus */
+ req->complete = true;
return -EIO;
+
+ case NBD_CMD_READ:
+ if (client->mode >= NBD_MODE_STRUCTURED) {
+ valid_flags |= NBD_CMD_FLAG_DF;
+ }
+ check_length = true;
+ allocate_buffer = true;
+ break;
+
+ case NBD_CMD_WRITE:
+ payload_len = request->len;
+ check_length = true;
+ allocate_buffer = true;
+ check_rofs = true;
+ break;
+
+ case NBD_CMD_FLUSH:
+ break;
+
+ case NBD_CMD_TRIM:
+ check_rofs = true;
+ break;
+
+ case NBD_CMD_CACHE:
+ check_length = true;
+ break;
+
+ case NBD_CMD_WRITE_ZEROES:
+ valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
+ check_rofs = true;
+ break;
+
+ case NBD_CMD_BLOCK_STATUS:
+ valid_flags |= NBD_CMD_FLAG_REQ_ONE;
+ break;
+
+ default:
+ /* Unrecognized, will fail later */
+ ;
}
- if (request->type == NBD_CMD_READ || request->type == NBD_CMD_WRITE ||
- request->type == NBD_CMD_CACHE)
- {
- if (request->len > NBD_MAX_BUFFER_SIZE) {
- error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
- request->len, NBD_MAX_BUFFER_SIZE);
- return -EINVAL;
- }
-
- if (request->type != NBD_CMD_CACHE) {
- req->data = blk_try_blockalign(client->exp->common.blk,
- request->len);
- if (req->data == NULL) {
- error_setg(errp, "No memory");
- return -ENOMEM;
- }
+ /* Payload and buffer handling. */
+ if (!payload_len) {
+ req->complete = true;
+ }
+ if (check_length && request->len > NBD_MAX_BUFFER_SIZE) {
+ /* READ, WRITE, CACHE */
+ error_setg(errp, "len (%" PRIu64 ") is larger than max len (%u)",
+ request->len, NBD_MAX_BUFFER_SIZE);
+ return -EINVAL;
+ }
+ if (allocate_buffer) {
+ /* READ, WRITE */
+ req->data = blk_try_blockalign(client->exp->common.blk,
+ request->len);
+ if (req->data == NULL) {
+ error_setg(errp, "No memory");
+ return -ENOMEM;
}
}
-
- if (request->type == NBD_CMD_WRITE) {
- if (nbd_read(client->ioc, req->data, request->len, "CMD_WRITE data",
- errp) < 0)
- {
+ if (payload_len) {
+ /* WRITE */
+ assert(req->data);
+ ret = nbd_read(client->ioc, req->data, payload_len,
+ "CMD_WRITE data", errp);
+ if (ret < 0) {
return -EIO;
}
req->complete = true;
-
trace_nbd_co_receive_request_payload_received(request->cookie,
- request->len);
+ payload_len);
}
/* Sanity checks. */
- if (client->exp->nbdflags & NBD_FLAG_READ_ONLY &&
- (request->type == NBD_CMD_WRITE ||
- request->type == NBD_CMD_WRITE_ZEROES ||
- request->type == NBD_CMD_TRIM)) {
+ if (client->exp->nbdflags & NBD_FLAG_READ_ONLY && check_rofs) {
+ /* WRITE, TRIM, WRITE_ZEROES */
error_setg(errp, "Export is read-only");
return -EROFS;
}
if (request->from > client->exp->size ||
request->len > client->exp->size - request->from) {
- error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu32
+ error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu64
", Size: %" PRIu64, request->from, request->len,
client->exp->size);
return (request->type == NBD_CMD_WRITE ||
@@ -2396,14 +2442,6 @@
request->len,
client->check_align);
}
- valid_flags = NBD_CMD_FLAG_FUA;
- if (request->type == NBD_CMD_READ && client->structured_reply) {
- valid_flags |= NBD_CMD_FLAG_DF;
- } else if (request->type == NBD_CMD_WRITE_ZEROES) {
- valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
- } else if (request->type == NBD_CMD_BLOCK_STATUS) {
- valid_flags |= NBD_CMD_FLAG_REQ_ONE;
- }
if (request->flags & ~valid_flags) {
error_setg(errp, "unsupported flags for command %s (got 0x%x)",
nbd_cmd_lookup(request->type), request->flags);
@@ -2423,7 +2461,7 @@
const char *error_msg,
Error **errp)
{
- if (client->structured_reply && ret < 0) {
+ if (client->mode >= NBD_MODE_STRUCTURED && ret < 0) {
return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp);
} else {
return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
@@ -2441,6 +2479,7 @@
NBDExport *exp = client->exp;
assert(request->type == NBD_CMD_READ);
+ assert(request->len <= NBD_MAX_BUFFER_SIZE);
/* XXX: NBD Protocol only documents use of FUA with WRITE */
if (request->flags & NBD_CMD_FLAG_FUA) {
@@ -2451,8 +2490,8 @@
}
}
- if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
- request->len)
+ if (client->mode >= NBD_MODE_STRUCTURED &&
+ !(request->flags & NBD_CMD_FLAG_DF) && request->len)
{
return nbd_co_send_sparse_read(client, request, request->from,
data, request->len, errp);
@@ -2464,7 +2503,7 @@
"reading from file failed", errp);
}
- if (client->structured_reply) {
+ if (client->mode >= NBD_MODE_STRUCTURED) {
if (request->len) {
return nbd_co_send_chunk_read(client, request, request->from, data,
request->len, true, errp);
@@ -2491,6 +2530,7 @@
NBDExport *exp = client->exp;
assert(request->type == NBD_CMD_CACHE);
+ assert(request->len <= NBD_MAX_BUFFER_SIZE);
ret = blk_co_preadv(exp->common.blk, request->from, request->len,
NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
@@ -2524,6 +2564,7 @@
if (request->flags & NBD_CMD_FLAG_FUA) {
flags |= BDRV_REQ_FUA;
}
+ assert(request->len <= NBD_MAX_BUFFER_SIZE);
ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
flags);
return nbd_send_generic_reply(client, request, ret,
@@ -2567,6 +2608,7 @@
return nbd_send_generic_reply(client, request, -EINVAL,
"need non-zero length", errp);
}
+ assert(request->len <= UINT32_MAX);
if (client->export_meta.count) {
bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
int contexts_remaining = client->export_meta.count;
diff --git a/nbd/trace-events b/nbd/trace-events
index f19a4d0..f9dccfc 100644
--- a/nbd/trace-events
+++ b/nbd/trace-events
@@ -31,7 +31,7 @@
nbd_client_loop_ret(int ret, const char *error) "NBD loop returned %d: %s"
nbd_client_clear_queue(void) "Clearing NBD queue"
nbd_client_clear_socket(void) "Clearing NBD socket"
-nbd_send_request(uint64_t from, uint32_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu32 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
+nbd_send_request(uint64_t from, uint64_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu64 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
nbd_receive_simple_reply(int32_t error, const char *errname, uint64_t cookie) "Got simple reply: { .error = %" PRId32 " (%s), cookie = %" PRIu64" }"
nbd_receive_structured_reply_chunk(uint16_t flags, uint16_t type, const char *name, uint64_t cookie, uint32_t length) "Got structured reply chunk: { flags = 0x%" PRIx16 ", type = %d (%s), cookie = %" PRIu64 ", length = %" PRIu32 " }"
@@ -60,18 +60,18 @@
nbd_negotiate_begin(void) "Beginning negotiation"
nbd_negotiate_new_style_size_flags(uint64_t size, unsigned flags) "advertising size %" PRIu64 " and flags 0x%x"
nbd_negotiate_success(void) "Negotiation succeeded"
-nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint32_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu32 " }"
+nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint64_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu64 " }"
nbd_blk_aio_attached(const char *name, void *ctx) "Export %s: Attaching clients to AIO context %p"
nbd_blk_aio_detach(const char *name, void *ctx) "Export %s: Detaching clients from AIO context %p"
-nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, int len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
+nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, uint64_t len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %" PRIu64
nbd_co_send_chunk_done(uint64_t cookie) "Send structured reply done: cookie = %" PRIu64
-nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, size_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
-nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, size_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
+nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, uint64_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %" PRIu64
+nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, uint64_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %" PRIu64
nbd_co_send_extents(uint64_t cookie, unsigned int extents, uint32_t id, uint64_t length, int last) "Send block status reply: cookie = %" PRIu64 ", extents = %u, context = %d (extents cover %" PRIu64 " bytes, last chunk = %d)"
nbd_co_send_chunk_error(uint64_t cookie, int err, const char *errname, const char *msg) "Send structured error reply: cookie = %" PRIu64 ", error = %d (%s), msg = '%s'"
nbd_co_receive_request_decode_type(uint64_t cookie, uint16_t type, const char *name) "Decoding type: cookie = %" PRIu64 ", type = %" PRIu16 " (%s)"
-nbd_co_receive_request_payload_received(uint64_t cookie, uint32_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu32
-nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32
+nbd_co_receive_request_payload_received(uint64_t cookie, uint64_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu64
+nbd_co_receive_align_compliance(const char *op, uint64_t from, uint64_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx64 ", align=0x%" PRIx32
nbd_trip(void) "Reading request"
# client-connection.c
diff --git a/net/eth.c b/net/eth.c
index 649e66b..3f680cc 100644
--- a/net/eth.c
+++ b/net/eth.c
@@ -432,8 +432,6 @@
}
if (opthdr.type == IP6_OPT_HOME) {
- size_t input_size = iov_size(pkt, pkt_frags);
-
if (input_size < opt_offset + sizeof(opthdr)) {
return false;
}
diff --git a/page-vary.c b/page-vary-target.c
similarity index 100%
rename from page-vary.c
rename to page-vary-target.c
diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin
index 6a196cf..d3abd94 100644
--- a/pc-bios/bios.bin
+++ b/pc-bios/bios.bin
Binary files differ
diff --git a/pc-bios/optionrom/Makefile b/pc-bios/optionrom/Makefile
index b1fff0b..30d0702 100644
--- a/pc-bios/optionrom/Makefile
+++ b/pc-bios/optionrom/Makefile
@@ -36,7 +36,7 @@
$(call cc-option,-Wno-array-bounds)) 3> config-cc.mak
-include config-cc.mak
-override LDFLAGS = -nostdlib -Wl,-T,$(SRC_DIR)/flat.lds
+override LDFLAGS = -nostdlib -Wl,--build-id=none,-T,$(SRC_DIR)/flat.lds
pvh.img: pvh.o pvh_main.o
diff --git a/qemu-nbd.c b/qemu-nbd.c
index 30eeb6f..54faa87 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -295,7 +295,9 @@
static void *nbd_client_thread(void *arg)
{
struct NbdClientOpts *opts = arg;
- NBDExportInfo info = { .request_sizes = false, .name = g_strdup("") };
+ /* TODO: Revisit this if nbd.ko ever gains support for structured reply */
+ NBDExportInfo info = { .request_sizes = false, .name = g_strdup(""),
+ .mode = NBD_MODE_SIMPLE };
QIOChannelSocket *sioc;
int fd = -1;
int ret = EXIT_FAILURE;
@@ -937,7 +939,6 @@
g_autoptr(GError) err = NULL;
int stderr_fd[2];
pid_t pid;
- int ret;
if (!g_unix_open_pipe(stderr_fd, FD_CLOEXEC, &err)) {
error_report("Error setting up communication pipe: %s",
@@ -1170,7 +1171,6 @@
if (opts.device) {
#if HAVE_NBD_DEVICE
- int ret;
ret = pthread_create(&client_thread, NULL, nbd_client_thread, &opts);
if (ret != 0) {
error_report("Failed to create client thread: %s", strerror(ret));
@@ -1217,9 +1217,10 @@
qemu_opts_del(sn_opts);
if (opts.device) {
- void *ret;
- pthread_join(client_thread, &ret);
- exit(ret != NULL);
+ void *result;
+ pthread_join(client_thread, &result);
+ ret = (intptr_t)result;
+ exit(ret);
} else {
exit(EXIT_SUCCESS);
}
diff --git a/qemu-options.hx b/qemu-options.hx
index bcd7725..840b83d 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -727,16 +727,6 @@
ERST
-HXCOMM Deprecated by -audiodev
-DEF("audio-help", 0, QEMU_OPTION_audio_help,
- "-audio-help show -audiodev equivalent of the currently specified audio settings\n",
- QEMU_ARCH_ALL)
-SRST
-``-audio-help``
- Will show the -audiodev equivalent of the currently specified
- (deprecated) environment variables.
-ERST
-
DEF("audio", HAS_ARG, QEMU_OPTION_audio,
"-audio [driver=]driver,model=value[,prop[=value][,...]]\n"
" specifies the audio backend and device to use;\n"
@@ -4716,6 +4706,7 @@
" prepare for incoming migration, listen on\n" \
" specified protocol and socket address\n" \
"-incoming fd:fd\n" \
+ "-incoming file:filename[,offset=offset]\n" \
"-incoming exec:cmdline\n" \
" accept incoming migration on given file descriptor\n" \
" or from given external command\n" \
@@ -4732,7 +4723,11 @@
Prepare for incoming migration, listen on a given unix socket.
``-incoming fd:fd``
- Accept incoming migration from a given filedescriptor.
+ Accept incoming migration from a given file descriptor.
+
+``-incoming file:filename[,offset=offset]``
+ Accept incoming migration from a given file starting at offset.
+ offset allows the common size suffixes, or a 0x prefix, but not both.
``-incoming exec:cmdline``
Accept incoming migration as an output from specified external
diff --git a/qom/object.c b/qom/object.c
index e25f1e9..8557fe8 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -220,6 +220,19 @@
return 0;
}
+static size_t type_object_get_align(TypeImpl *ti)
+{
+ if (ti->instance_align) {
+ return ti->instance_align;
+ }
+
+ if (type_has_parent(ti)) {
+ return type_object_get_align(type_get_parent(ti));
+ }
+
+ return 0;
+}
+
size_t object_type_get_instance_size(const char *typename)
{
TypeImpl *type = type_get_by_name(typename);
@@ -293,6 +306,7 @@
ti->class_size = type_class_get_size(ti);
ti->instance_size = type_object_get_size(ti);
+ ti->instance_align = type_object_get_align(ti);
/* Any type with zero instance_size is implicitly abstract.
* This means interface types are all abstract.
*/
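With type_object_get_align() in place, a type that leaves instance_align at
zero now inherits its parent's alignment instead of silently getting none. A
hypothetical registration relying on this might look as follows; the device
name and state struct are made up for the example:

    /* Sketch only: the parent sets an alignment, the child inherits it. */
    typedef struct MyVectorDevState {
        DeviceState parent_obj;
        uint8_t data[64] __attribute__((aligned(64)));  /* e.g. for SIMD access */
    } MyVectorDevState;

    static const TypeInfo my_vector_dev_info = {
        .name           = "x-my-vector-dev",
        .parent         = TYPE_DEVICE,
        .instance_size  = sizeof(MyVectorDevState),
        .instance_align = __alignof__(MyVectorDevState),
    };

    /* A subclass can omit .instance_align and still get 64-byte alignment. */
    static const TypeInfo my_vector_dev_v2_info = {
        .name           = "x-my-vector-dev-v2",
        .parent         = "x-my-vector-dev",
        .instance_size  = sizeof(MyVectorDevState),
    };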
diff --git a/roms/config.seabios-128k b/roms/config.seabios-128k
index d18c802..0b144bb 100644
--- a/roms/config.seabios-128k
+++ b/roms/config.seabios-128k
@@ -1,21 +1,30 @@
-# for qemu machine types 1.7 + older
-# need to turn off features (xhci,uas) to make it fit into 128k
+# SeaBIOS Configuration for -M isapc
+
CONFIG_QEMU=y
CONFIG_ROM_SIZE=128
CONFIG_ATA_DMA=n
-CONFIG_BOOTSPLASH=n
CONFIG_XEN=n
-CONFIG_USB_OHCI=n
-CONFIG_USB_XHCI=n
-CONFIG_USB_UAS=n
+CONFIG_ATA_PIO32=n
+CONFIG_AHCI=n
CONFIG_SDCARD=n
-CONFIG_TCGBIOS=n
-CONFIG_MPT_SCSI=n
-CONFIG_ESP_SCSI=n
-CONFIG_MEGASAS=n
+CONFIG_VIRTIO_BLK=n
+CONFIG_VIRTIO_SCSI=n
CONFIG_PVSCSI=n
+CONFIG_ESP_SCSI=n
+CONFIG_LSI_SCSI=n
+CONFIG_MEGASAS=n
+CONFIG_MPT_SCSI=n
CONFIG_NVME=n
CONFIG_USE_SMM=n
CONFIG_VGAHOOKS=n
CONFIG_HOST_BIOS_GEOMETRY=n
+CONFIG_USB=n
+CONFIG_PMTIMER=n
+CONFIG_PCIBIOS=n
+CONFIG_DISABLE_A20=n
+CONFIG_WRITABLE_UPPERMEMORY=n
+CONFIG_TCGBIOS=n
+CONFIG_ACPI=n
CONFIG_ACPI_PARSE=n
+CONFIG_DEBUG_SERIAL=n
+CONFIG_DEBUG_SERIAL_MMIO=n
diff --git a/scripts/analyse-locks-simpletrace.py b/scripts/analyse-locks-simpletrace.py
index 63c11f4..d650dd7 100755
--- a/scripts/analyse-locks-simpletrace.py
+++ b/scripts/analyse-locks-simpletrace.py
@@ -75,7 +75,7 @@
(analyser.locks, analyser.locked, analyser.unlocks))
# Now dump the individual lock stats
- for key, val in sorted(analyser.mutex_records.iteritems(),
+ for key, val in sorted(analyser.mutex_records.items(),
key=lambda k_v: k_v[1]["locks"]):
print ("Lock: %#x locks: %d, locked: %d, unlocked: %d" %
(key, val["locks"], val["locked"], val["unlocked"]))
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index b82a1b0..0824245 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -111,6 +111,8 @@
RAM_SAVE_FLAG_CONTINUE = 0x20
RAM_SAVE_FLAG_XBZRLE = 0x40
RAM_SAVE_FLAG_HOOK = 0x80
+ RAM_SAVE_FLAG_COMPRESS_PAGE = 0x100
+ RAM_SAVE_FLAG_MULTIFD_FLUSH = 0x200
def __init__(self, file, version_id, ramargs, section_key):
if version_id != 4:
@@ -205,6 +207,8 @@
raise Exception("XBZRLE RAM compression is not supported yet")
elif flags & self.RAM_SAVE_FLAG_HOOK:
raise Exception("RAM hooks don't make sense with files")
+ if flags & self.RAM_SAVE_FLAG_MULTIFD_FLUSH:
+ continue
# End of RAM section
if flags & self.RAM_SAVE_FLAG_EOS:
diff --git a/scripts/archive-source.sh b/scripts/archive-source.sh
index 4899630..65af806 100755
--- a/scripts/archive-source.sh
+++ b/scripts/archive-source.sh
@@ -26,7 +26,7 @@
# independent of what the developer currently has initialized
# in their checkout, because the build environment is completely
# different to the host OS.
-subprojects="dtc keycodemapdb libvfio-user berkeley-softfloat-3 berkeley-testfloat-3"
+subprojects="keycodemapdb libvfio-user berkeley-softfloat-3 berkeley-testfloat-3"
sub_deinit=""
function cleanup() {
diff --git a/scripts/make-release b/scripts/make-release
index c5db87b..9c570b8 100755
--- a/scripts/make-release
+++ b/scripts/make-release
@@ -17,7 +17,7 @@
fi
# Only include wraps that are invoked with subproject()
-SUBPROJECTS="dtc libvfio-user keycodemapdb berkeley-softfloat-3 berkeley-testfloat-3"
+SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3 berkeley-testfloat-3"
src="$1"
version="$2"
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
index e4b46d5..2a74b02 100644
--- a/scripts/meson-buildoptions.sh
+++ b/scripts/meson-buildoptions.sh
@@ -34,7 +34,6 @@
printf "%s\n" ' (choices: auto/disabled/enabled/internal/system)'
printf "%s\n" ' --enable-fuzzing build fuzzing targets'
printf "%s\n" ' --enable-gcov Enable coverage tracking.'
- printf "%s\n" ' --enable-gprof QEMU profiling with gprof'
printf "%s\n" ' --enable-lto Use link time optimization'
printf "%s\n" ' --enable-malloc=CHOICE choose memory allocator to use [system] (choices:'
printf "%s\n" ' jemalloc/system/tcmalloc)'
@@ -309,8 +308,6 @@
--disable-glusterfs) printf "%s" -Dglusterfs=disabled ;;
--enable-gnutls) printf "%s" -Dgnutls=enabled ;;
--disable-gnutls) printf "%s" -Dgnutls=disabled ;;
- --enable-gprof) printf "%s" -Dgprof=true ;;
- --disable-gprof) printf "%s" -Dgprof=false ;;
--enable-gtk) printf "%s" -Dgtk=enabled ;;
--disable-gtk) printf "%s" -Dgtk=disabled ;;
--enable-gtk-clipboard) printf "%s" -Dgtk_clipboard=enabled ;;
diff --git a/scripts/simpletrace.py b/scripts/simpletrace.py
index 1f6d1ae..cef81b0 100755
--- a/scripts/simpletrace.py
+++ b/scripts/simpletrace.py
@@ -9,11 +9,20 @@
#
# For help see docs/devel/tracing.rst
+import sys
import struct
import inspect
+import warnings
from tracetool import read_events, Event
from tracetool.backend.simple import is_string
+__all__ = ['Analyzer', 'Analyzer2', 'process', 'run']
+
+# This is the binary format that the QEMU "simple" trace backend
+# emits. There is no specification documentation because the format is
+# not guaranteed to be stable. Trace files must be parsed with the
+# same trace-events-all file and the same simpletrace.py file that
+# QEMU was built with.
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe
@@ -23,48 +32,19 @@
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
+rec_header_fmt_len = struct.calcsize(rec_header_fmt)
+
+class SimpleException(Exception):
+ pass
def read_header(fobj, hfmt):
'''Read a trace record header'''
hlen = struct.calcsize(hfmt)
hdr = fobj.read(hlen)
if len(hdr) != hlen:
- return None
+ raise SimpleException('Error reading header. Wrong filetype provided?')
return struct.unpack(hfmt, hdr)
-def get_record(edict, idtoname, rechdr, fobj):
- """Deserialize a trace record from a file into a tuple
- (name, timestamp, pid, arg1, ..., arg6)."""
- if rechdr is None:
- return None
- if rechdr[0] != dropped_event_id:
- event_id = rechdr[0]
- name = idtoname[event_id]
- rec = (name, rechdr[1], rechdr[3])
- try:
- event = edict[name]
- except KeyError as e:
- import sys
- sys.stderr.write('%s event is logged but is not declared ' \
- 'in the trace events file, try using ' \
- 'trace-events-all instead.\n' % str(e))
- sys.exit(1)
-
- for type, name in event.args:
- if is_string(type):
- l = fobj.read(4)
- (len,) = struct.unpack('=L', l)
- s = fobj.read(len)
- rec = rec + (s,)
- else:
- (value,) = struct.unpack('=Q', fobj.read(8))
- rec = rec + (value,)
- else:
- rec = ("dropped", rechdr[1], rechdr[3])
- (value,) = struct.unpack('=Q', fobj.read(8))
- rec = rec + (value,)
- return rec
-
def get_mapping(fobj):
(event_id, ) = struct.unpack('=Q', fobj.read(8))
(len, ) = struct.unpack('=L', fobj.read(4))
@@ -72,41 +52,47 @@
return (event_id, name)
-def read_record(edict, idtoname, fobj):
- """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
- rechdr = read_header(fobj, rec_header_fmt)
- return get_record(edict, idtoname, rechdr, fobj)
+def read_record(fobj):
+ """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, args)."""
+ event_id, timestamp_ns, record_length, record_pid = read_header(fobj, rec_header_fmt)
+ args_payload = fobj.read(record_length - rec_header_fmt_len)
+ return (event_id, timestamp_ns, record_pid, args_payload)
def read_trace_header(fobj):
"""Read and verify trace file header"""
- header = read_header(fobj, log_header_fmt)
- if header is None:
- raise ValueError('Not a valid trace file!')
- if header[0] != header_event_id:
- raise ValueError('Not a valid trace file, header id %d != %d' %
- (header[0], header_event_id))
- if header[1] != header_magic:
- raise ValueError('Not a valid trace file, header magic %d != %d' %
- (header[1], header_magic))
+ _header_event_id, _header_magic, log_version = read_header(fobj, log_header_fmt)
+ if _header_event_id != header_event_id:
+ raise ValueError(f'Not a valid trace file, header id {_header_event_id} != {header_event_id}')
+ if _header_magic != header_magic:
+ raise ValueError(f'Not a valid trace file, header magic {_header_magic} != {header_magic}')
- log_version = header[2]
if log_version not in [0, 2, 3, 4]:
- raise ValueError('Unknown version of tracelog format!')
+ raise ValueError(f'Unknown version {log_version} of tracelog format!')
if log_version != 4:
- raise ValueError('Log format %d not supported with this QEMU release!'
- % log_version)
+ raise ValueError(f'Log format {log_version} not supported with this QEMU release!')
-def read_trace_records(edict, idtoname, fobj):
- """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).
-
- Note that `idtoname` is modified if the file contains mapping records.
+def read_trace_records(events, fobj, read_header):
+ """Deserialize trace records from a file, yielding record tuples (event, event_num, timestamp, pid, arg1, ..., arg6).
Args:
- edict (str -> Event): events dict, indexed by name
- idtoname (int -> str): event names dict, indexed by event ID
+ event_mapping (str -> Event): events dict, indexed by name
fobj (file): input file
+ read_header (bool): whether headers were read from fobj
"""
+ frameinfo = inspect.getframeinfo(inspect.currentframe())
+ dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
+ frameinfo.lineno + 1, frameinfo.filename)
+
+ event_mapping = {e.name: e for e in events}
+ event_mapping["dropped"] = dropped_event
+ event_id_to_name = {dropped_event_id: "dropped"}
+
+ # If there is no header assume event ID mapping matches events list
+ if not read_header:
+ for event_id, event in enumerate(events):
+ event_id_to_name[event_id] = event.name
+
while True:
t = fobj.read(8)
if len(t) == 0:
@@ -114,19 +100,45 @@
(rectype, ) = struct.unpack('=Q', t)
if rectype == record_type_mapping:
- event_id, name = get_mapping(fobj)
- idtoname[event_id] = name
+ event_id, event_name = get_mapping(fobj)
+ event_id_to_name[event_id] = event_name
else:
- rec = read_record(edict, idtoname, fobj)
+ event_id, timestamp_ns, pid, args_payload = read_record(fobj)
+ event_name = event_id_to_name[event_id]
- yield rec
+ try:
+ event = event_mapping[event_name]
+ except KeyError as e:
+ raise SimpleException(
+ f'{e} event is logged but is not declared in the trace events'
+                    ' file, try using trace-events-all instead.'
+ )
-class Analyzer(object):
- """A trace file analyzer which processes trace records.
+ offset = 0
+ args = []
+ for type, _ in event.args:
+ if is_string(type):
+ (length,) = struct.unpack_from('=L', args_payload, offset=offset)
+ offset += 4
+ s = args_payload[offset:offset+length]
+ offset += length
+ args.append(s)
+ else:
+ (value,) = struct.unpack_from('=Q', args_payload, offset=offset)
+ offset += 8
+ args.append(value)
+
+ yield (event_mapping[event_name], event_name, timestamp_ns, pid) + tuple(args)
+
+class Analyzer:
+ """[Deprecated. Refer to Analyzer2 instead.]
+
+ A trace file analyzer which processes trace records.
An analyzer can be passed to run() or process(). The begin() method is
invoked, then each trace record is processed, and finally the end() method
- is invoked.
+ is invoked. When Analyzer is used as a context-manager (using the `with`
+ statement), begin() and end() are called automatically.
If a method matching a trace event name exists, it is invoked to process
that trace record. Otherwise the catchall() method is invoked.
@@ -160,44 +172,14 @@
"""Called if no specific method for processing a trace event has been found."""
pass
- def end(self):
- """Called at the end of the trace."""
- pass
-
-def process(events, log, analyzer, read_header=True):
- """Invoke an analyzer on each event in a log."""
- if isinstance(events, str):
- events = read_events(open(events, 'r'), events)
- if isinstance(log, str):
- log = open(log, 'rb')
-
- if read_header:
- read_trace_header(log)
-
- frameinfo = inspect.getframeinfo(inspect.currentframe())
- dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
- frameinfo.lineno + 1, frameinfo.filename)
- edict = {"dropped": dropped_event}
- idtoname = {dropped_event_id: "dropped"}
-
- for event in events:
- edict[event.name] = event
-
- # If there is no header assume event ID mapping matches events list
- if not read_header:
- for event_id, event in enumerate(events):
- idtoname[event_id] = event.name
-
- def build_fn(analyzer, event):
- if isinstance(event, str):
- return analyzer.catchall
-
- fn = getattr(analyzer, event.name, None)
+ def _build_fn(self, event):
+ fn = getattr(self, event.name, None)
if fn is None:
- return analyzer.catchall
+ # Return early to avoid costly call to inspect.getfullargspec
+ return self.catchall
event_argcount = len(event.args)
- fn_argcount = len(inspect.getargspec(fn)[0]) - 1
+ fn_argcount = len(inspect.getfullargspec(fn)[0]) - 1
if fn_argcount == event_argcount + 1:
# Include timestamp as first argument
return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
@@ -208,56 +190,170 @@
# Just arguments, no timestamp or pid
return lambda _, rec: fn(*rec[3:3 + event_argcount])
- analyzer.begin()
- fn_cache = {}
- for rec in read_trace_records(edict, idtoname, log):
- event_num = rec[0]
- event = edict[event_num]
- if event_num not in fn_cache:
- fn_cache[event_num] = build_fn(analyzer, event)
- fn_cache[event_num](event, rec)
- analyzer.end()
+ def _process_event(self, rec_args, *, event, event_id, timestamp_ns, pid, **kwargs):
+ warnings.warn(
+ "Use of deprecated Analyzer class. Refer to Analyzer2 instead.",
+ DeprecationWarning,
+ )
+
+ if not hasattr(self, '_fn_cache'):
+ # NOTE: Cannot depend on downstream subclasses to have
+ # super().__init__() because of legacy.
+ self._fn_cache = {}
+
+ rec = (event_id, timestamp_ns, pid, *rec_args)
+ if event_id not in self._fn_cache:
+ self._fn_cache[event_id] = self._build_fn(event)
+ self._fn_cache[event_id](event, rec)
+
+ def end(self):
+ """Called at the end of the trace."""
+ pass
+
+ def __enter__(self):
+ self.begin()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is None:
+ self.end()
+ return False
+
+class Analyzer2(Analyzer):
+ """A trace file analyzer which processes trace records.
+
+ An analyzer can be passed to run() or process(). The begin() method is
+ invoked, then each trace record is processed, and finally the end() method
+ is invoked. When Analyzer is used as a context-manager (using the `with`
+ statement), begin() and end() are called automatically.
+
+ If a method matching a trace event name exists, it is invoked to process
+ that trace record. Otherwise the catchall() method is invoked.
+
+ The methods are called with a set of keyword-arguments. These can be ignored
+ using `**kwargs` or defined like any keyword-argument.
+
+ The following keyword-arguments are available, but make sure to have an
+ **kwargs to allow for unmatched arguments in the future:
+ event: Event object of current trace
+ event_id: The id of the event in the current trace file
+ timestamp_ns: The timestamp in nanoseconds of the trace
+ pid: The process id recorded for the given trace
+
+ Example:
+ The following method handles the runstate_set(int new_state) trace event::
+
+ def runstate_set(self, new_state, **kwargs):
+ ...
+
+ The method can also explicitly take a timestamp keyword-argument with the
+ trace event arguments::
+
+ def runstate_set(self, new_state, *, timestamp_ns, **kwargs):
+ ...
+
+ Timestamps have the uint64_t type and are in nanoseconds.
+
+ The pid can be included in addition to the timestamp and is useful when
+    dealing with traces from multiple processes::
+
+ def runstate_set(self, new_state, *, timestamp_ns, pid, **kwargs):
+ ...
+ """
+
+ def catchall(self, *rec_args, event, timestamp_ns, pid, event_id, **kwargs):
+ """Called if no specific method for processing a trace event has been found."""
+ pass
+
+ def _process_event(self, rec_args, *, event, **kwargs):
+ fn = getattr(self, event.name, self.catchall)
+ fn(*rec_args, event=event, **kwargs)
+
+def process(events, log, analyzer, read_header=True):
+ """Invoke an analyzer on each event in a log.
+ Args:
+ events (file-object or list or str): events list or file-like object or file path as str to read event data from
+ log (file-object or str): file-like object or file path as str to read log data from
+ analyzer (Analyzer): Instance of Analyzer to interpret the event data
+ read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
+ """
+
+ if isinstance(events, str):
+ with open(events, 'r') as f:
+ events_list = read_events(f, events)
+ elif isinstance(events, list):
+ # Treat as a list of events already produced by tracetool.read_events
+ events_list = events
+ else:
+ # Treat as an already opened file-object
+ events_list = read_events(events, events.name)
+
+ if isinstance(log, str):
+ with open(log, 'rb') as log_fobj:
+ _process(events_list, log_fobj, analyzer, read_header)
+ else:
+ # Treat `log` as an already opened file-object. We will not close it,
+ # as we do not own it.
+ _process(events_list, log, analyzer, read_header)
+
+def _process(events, log_fobj, analyzer, read_header=True):
+ """Internal function for processing
+
+ Args:
+ events (list): list of events already produced by tracetool.read_events
+ log_fobj (file): file-object to read log data from
+ analyzer (Analyzer): the Analyzer to interpret the event data
+ read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
+ """
+
+ if read_header:
+ read_trace_header(log_fobj)
+
+ with analyzer:
+ for event, event_id, timestamp_ns, record_pid, *rec_args in read_trace_records(events, log_fobj, read_header):
+ analyzer._process_event(
+ rec_args,
+ event=event,
+ event_id=event_id,
+ timestamp_ns=timestamp_ns,
+ pid=record_pid,
+ )
def run(analyzer):
"""Execute an analyzer on a trace file given on the command-line.
This function is useful as a driver for simple analysis scripts. More
advanced scripts will want to call process() instead."""
- import sys
- read_header = True
- if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
- read_header = False
- del sys.argv[1]
- elif len(sys.argv) != 3:
- sys.stderr.write('usage: %s [--no-header] <trace-events> ' \
- '<trace-file>\n' % sys.argv[0])
- sys.exit(1)
+ try:
+ # NOTE: See built-in `argparse` module for a more robust cli interface
+ *no_header, trace_event_path, trace_file_path = sys.argv[1:]
+ assert no_header == [] or no_header == ['--no-header'], 'Invalid no-header argument'
+ except (AssertionError, ValueError):
+ raise SimpleException(f'usage: {sys.argv[0]} [--no-header] <trace-events> <trace-file>\n')
- events = read_events(open(sys.argv[1], 'r'), sys.argv[1])
- process(events, sys.argv[2], analyzer, read_header=read_header)
+ with open(trace_event_path, 'r') as events_fobj, open(trace_file_path, 'rb') as log_fobj:
+ process(events_fobj, log_fobj, analyzer, read_header=not no_header)
if __name__ == '__main__':
- class Formatter(Analyzer):
+ class Formatter2(Analyzer2):
def __init__(self):
- self.last_timestamp = None
+ self.last_timestamp_ns = None
- def catchall(self, event, rec):
- timestamp = rec[1]
- if self.last_timestamp is None:
- self.last_timestamp = timestamp
- delta_ns = timestamp - self.last_timestamp
- self.last_timestamp = timestamp
+ def catchall(self, *rec_args, event, timestamp_ns, pid, event_id):
+ if self.last_timestamp_ns is None:
+ self.last_timestamp_ns = timestamp_ns
+ delta_ns = timestamp_ns - self.last_timestamp_ns
+ self.last_timestamp_ns = timestamp_ns
- fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
- 'pid=%d' % rec[2]]
- i = 3
- for type, name in event.args:
- if is_string(type):
- fields.append('%s=%s' % (name, rec[i]))
- else:
- fields.append('%s=0x%x' % (name, rec[i]))
- i += 1
- print(' '.join(fields))
+ fields = [
+ f'{name}={r}' if is_string(type) else f'{name}=0x{r:x}'
+ for r, (type, name) in zip(rec_args, event.args)
+ ]
+ print(f'{event.name} {delta_ns / 1000:0.3f} {pid=} ' + ' '.join(fields))
- run(Formatter())
+ try:
+ run(Formatter2())
+ except SimpleException as e:
+ sys.stderr.write(str(e) + "\n")
+ sys.exit(1)
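For reference, here is a minimal sketch of how an analysis script could use the
reworked simpletrace API above. EventCounter and count_events.py are hypothetical
names used only for illustration and are not part of the patch; only Analyzer2,
run() and process() come from scripts/simpletrace.py, and the sketch assumes that
module is importable (e.g. the script sits next to it or scripts/ is on PYTHONPATH):

    #!/usr/bin/env python3
    # count_events.py -- hypothetical example, not part of this series.
    # Tallies trace records per event name using the keyword-argument
    # interface of Analyzer2 from scripts/simpletrace.py.
    from collections import Counter

    import simpletrace


    class EventCounter(simpletrace.Analyzer2):
        def begin(self):
            self.counts = Counter()

        def catchall(self, *rec_args, event, timestamp_ns, pid, event_id, **kwargs):
            # Invoked for every record that has no method named after its event.
            self.counts[event.name] += 1

        def end(self):
            for name, count in self.counts.most_common():
                print(f'{count:8d} {name}')


    if __name__ == '__main__':
        # Usage: ./count_events.py [--no-header] <trace-events> <trace-file>
        simpletrace.run(EventCounter())

run() opens the trace-events and trace files itself and hands them to process(),
which enters the analyzer as a context-manager, so begin() and end() are called
automatically around the record loop.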
diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c
index 564fe17..29c5670 100644
--- a/semihosting/arm-compat-semi.c
+++ b/semihosting/arm-compat-semi.c
@@ -251,7 +251,7 @@
static void common_semi_rw_cb(CPUState *cs, uint64_t ret, int err)
{
/* Recover the original length from the third argument. */
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
target_ulong args = common_semi_arg(cs, 1);
target_ulong arg2;
GET_ARG(2);
@@ -322,7 +322,7 @@
common_semi_readc_cb(CPUState *cs, uint64_t ret, int err)
{
if (!err) {
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
uint8_t ch;
if (get_user_u8(ch, common_semi_stack_bottom(cs) - 1)) {
@@ -361,7 +361,7 @@
*/
void do_common_semihosting(CPUState *cs)
{
- CPUArchState *env = cs->env_ptr;
+ CPUArchState *env = cpu_env(cs);
target_ulong args;
target_ulong arg0, arg1, arg2, arg3;
target_ulong ul_ret;
diff --git a/semihosting/syscalls.c b/semihosting/syscalls.c
index d27574a..1ab4809 100644
--- a/semihosting/syscalls.c
+++ b/semihosting/syscalls.c
@@ -24,7 +24,7 @@
*/
static int validate_strlen(CPUState *cs, target_ulong str, target_ulong tlen)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char c;
if (tlen == 0) {
@@ -54,7 +54,7 @@
target_ulong tstr, target_ulong tlen)
{
int ret = validate_strlen(cs, tstr, tlen);
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char *str = NULL;
if (ret > 0) {
@@ -74,7 +74,7 @@
static int copy_stat_to_user(CPUState *cs, target_ulong addr,
const struct stat *s)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
struct gdb_stat *p;
if (s->st_dev != (uint32_t)s->st_dev ||
@@ -258,7 +258,7 @@
target_ulong fname, target_ulong fname_len,
int gdb_flags, int mode)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char *p;
int ret, host_flags = O_BINARY;
@@ -316,7 +316,7 @@
static void host_read(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong buf, target_ulong len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
void *ptr = lock_user(VERIFY_WRITE, buf, len, 0);
ssize_t ret;
@@ -337,7 +337,7 @@
static void host_write(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong buf, target_ulong len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
void *ptr = lock_user(VERIFY_READ, buf, len, 1);
ssize_t ret;
@@ -411,7 +411,7 @@
target_ulong fname, target_ulong fname_len,
target_ulong addr)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
struct stat buf;
char *name;
int ret, err;
@@ -440,7 +440,7 @@
static void host_remove(CPUState *cs, gdb_syscall_complete_cb complete,
target_ulong fname, target_ulong fname_len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char *p;
int ret;
@@ -459,7 +459,7 @@
target_ulong oname, target_ulong oname_len,
target_ulong nname, target_ulong nname_len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char *ostr, *nstr;
int ret;
@@ -484,7 +484,7 @@
static void host_system(CPUState *cs, gdb_syscall_complete_cb complete,
target_ulong cmd, target_ulong cmd_len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char *p;
int ret;
@@ -502,7 +502,7 @@
static void host_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete,
target_ulong tv_addr, target_ulong tz_addr)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
struct gdb_timeval *p;
int64_t rt;
@@ -547,7 +547,7 @@
static void staticfile_read(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong buf, target_ulong len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
target_ulong rest = gf->staticfile.len - gf->staticfile.off;
void *ptr;
@@ -605,7 +605,7 @@
static void console_read(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong buf, target_ulong len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char *ptr;
int ret;
@@ -622,7 +622,7 @@
static void console_write(CPUState *cs, gdb_syscall_complete_cb complete,
GuestFD *gf, target_ulong buf, target_ulong len)
{
- CPUArchState *env G_GNUC_UNUSED = cs->env_ptr;
+ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs);
char *ptr = lock_user(VERIFY_READ, buf, len, 1);
int ret;
diff --git a/softmmu/device_tree.c b/softmmu/device_tree.c
index 30aa3ae..eb5166c 100644
--- a/softmmu/device_tree.c
+++ b/softmmu/device_tree.c
@@ -418,9 +418,9 @@
}
p = str = g_malloc0(total_len);
for (i = 0; i < len; i++) {
- int len = strlen(array[i]) + 1;
- pstrcpy(p, len, array[i]);
- p += len;
+ int offset = strlen(array[i]) + 1;
+ pstrcpy(p, offset, array[i]);
+ p += offset;
}
ret = qemu_fdt_setprop(fdt, node_path, prop, str, total_len);
diff --git a/softmmu/memory.c b/softmmu/memory.c
index c0383a1..234bd7b 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -3245,7 +3245,6 @@
}
if (mr->alias) {
- MemoryRegionList *ml;
bool found = false;
/* check if the alias is already in the queue */
diff --git a/softmmu/meson.build b/softmmu/meson.build
index c18b7ad..3a64dd8 100644
--- a/softmmu/meson.build
+++ b/softmmu/meson.build
@@ -6,10 +6,6 @@
'watchpoint.c',
)])
-specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: [files(
- 'icount.c',
-)])
-
system_ss.add(files(
'balloon.c',
'bootdevice.c',
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 4f6ca65..309653c 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -913,16 +913,16 @@
while (page < end) {
unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
unsigned long num = MIN(end - page,
- DIRTY_MEMORY_BLOCK_SIZE - offset);
+ DIRTY_MEMORY_BLOCK_SIZE - ofs);
- assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
+ assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
- offset >>= BITS_PER_LEVEL;
+ ofs >>= BITS_PER_LEVEL;
bitmap_copy_and_clear_atomic(snap->dirty + dest,
- blocks->blocks[idx] + offset,
+ blocks->blocks[idx] + ofs,
num);
page += num;
dest += num >> BITS_PER_LEVEL;
diff --git a/softmmu/qemu-seccomp.c b/softmmu/qemu-seccomp.c
index d66a2a1..4d7439e 100644
--- a/softmmu/qemu-seccomp.c
+++ b/softmmu/qemu-seccomp.c
@@ -283,9 +283,9 @@
if (action == SCMP_ACT_TRAP) {
static int kill_process = -1;
if (kill_process == -1) {
- uint32_t action = SECCOMP_RET_KILL_PROCESS;
+ uint32_t testaction = SECCOMP_RET_KILL_PROCESS;
- if (qemu_seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &action) == 0) {
+ if (qemu_seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &testaction) == 0) {
kill_process = 1;
} else {
kill_process = 0;
diff --git a/softmmu/vl.c b/softmmu/vl.c
index db04f98..98e071e 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -1962,9 +1962,7 @@
* setting machine properties, so they can be referred to.
*/
configure_blockdev(&bdo_queue, machine_class, snapshot);
- if (!audio_init_audiodevs()) {
- exit(1);
- }
+ audio_init_audiodevs();
}
@@ -2926,10 +2924,6 @@
}
break;
#endif
- case QEMU_OPTION_audio_help:
- audio_legacy_help();
- exit (0);
- break;
case QEMU_OPTION_audiodev:
audio_parse_option(optarg);
break;
@@ -3214,7 +3208,6 @@
}
break;
case QEMU_OPTION_watchdog_action: {
- QemuOpts *opts;
opts = qemu_opts_create(qemu_find_opts("action"), NULL, 0, &error_abort);
qemu_opt_set(opts, "watchdog", optarg, &error_abort);
break;
@@ -3525,16 +3518,16 @@
break;
case QEMU_OPTION_compat:
{
- CompatPolicy *opts;
+ CompatPolicy *opts_policy;
Visitor *v;
v = qobject_input_visitor_new_str(optarg, NULL,
&error_fatal);
- visit_type_CompatPolicy(v, NULL, &opts, &error_fatal);
- QAPI_CLONE_MEMBERS(CompatPolicy, &compat_policy, opts);
+ visit_type_CompatPolicy(v, NULL, &opts_policy, &error_fatal);
+ QAPI_CLONE_MEMBERS(CompatPolicy, &compat_policy, opts_policy);
- qapi_free_CompatPolicy(opts);
+ qapi_free_CompatPolicy(opts_policy);
visit_free(v);
break;
}
diff --git a/softmmu/watchpoint.c b/softmmu/watchpoint.c
index 5350163..45d1f12 100644
--- a/softmmu/watchpoint.c
+++ b/softmmu/watchpoint.c
@@ -177,7 +177,7 @@
* Force recompile to succeed, because icount may
* be read only at the end of the block.
*/
- if (!cpu->can_do_io) {
+ if (!cpu->neg.can_do_io) {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
| curr_cflags(cpu);
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 270ae78..51b7d8d 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -209,8 +209,6 @@
AlphaCPU *cpu = ALPHA_CPU(obj);
CPUAlphaState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
-
env->lock_addr = -1;
#if defined(CONFIG_USER_ONLY)
env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN;
@@ -286,6 +284,7 @@
.name = TYPE_ALPHA_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(AlphaCPU),
+ .instance_align = __alignof(AlphaCPU),
.instance_init = alpha_cpu_initfn,
.abstract = true,
.class_size = sizeof(AlphaCPUClass),
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index 1330666..e2a467e 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -263,7 +263,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUAlphaState env;
/* This alarm doesn't exist in real hardware; we wish it did. */
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index 9be912c..3233308 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -131,13 +131,13 @@
int i;
for (i = 0; i < 31; i++) {
- cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUAlphaState, ir[i]),
greg_names[i]);
}
for (i = 0; i < 31; i++) {
- cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_fir[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUAlphaState, fir[i]),
freg_names[i]);
}
@@ -146,7 +146,7 @@
memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
for (i = 0; i < 8; i++) {
int r = (i == 7 ? 25 : i + 8);
- cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
+ cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUAlphaState,
shadow[i]),
shadow_names[i]);
@@ -155,7 +155,7 @@
for (i = 0; i < ARRAY_SIZE(vars); ++i) {
const GlobalVar *v = &vars[i];
- *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
+ *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name);
}
}
@@ -244,12 +244,12 @@
static void ld_flag_byte(TCGv val, unsigned shift)
{
- tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
+ tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift));
}
static void st_flag_byte(TCGv val, unsigned shift)
{
- tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
+ tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
@@ -258,7 +258,7 @@
tmp1 = tcg_constant_i32(exception);
tmp2 = tcg_constant_i32(error_code);
- gen_helper_excp(cpu_env, tmp1, tmp2);
+ gen_helper_excp(tcg_env, tmp1, tmp2);
}
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
@@ -582,7 +582,7 @@
tcg_gen_movi_i32(tmp, float_round_down);
break;
case QUAL_RM_D:
- tcg_gen_ld8u_i32(tmp, cpu_env,
+ tcg_gen_ld8u_i32(tmp, tcg_env,
offsetof(CPUAlphaState, fpcr_dyn_round));
break;
}
@@ -591,7 +591,7 @@
/* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
sets the one field. */
- tcg_gen_st8_i32(tmp, cpu_env,
+ tcg_gen_st8_i32(tmp, tcg_env,
offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
gen_helper_setroundmode(tmp);
@@ -611,7 +611,7 @@
tmp = tcg_temp_new_i32();
if (fn11) {
/* Underflow is enabled, use the FPCR setting. */
- tcg_gen_ld8u_i32(tmp, cpu_env,
+ tcg_gen_ld8u_i32(tmp, tcg_env,
offsetof(CPUAlphaState, fpcr_flush_to_zero));
} else {
/* Underflow is disabled, force flush-to-zero. */
@@ -619,7 +619,7 @@
}
#if defined(CONFIG_SOFTFLOAT_INLINE)
- tcg_gen_st8_i32(tmp, cpu_env,
+ tcg_gen_st8_i32(tmp, tcg_env,
offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
gen_helper_setflushzero(tmp);
@@ -636,16 +636,16 @@
val = cpu_fir[reg];
if ((fn11 & QUAL_S) == 0) {
if (is_cmp) {
- gen_helper_ieee_input_cmp(cpu_env, val);
+ gen_helper_ieee_input_cmp(tcg_env, val);
} else {
- gen_helper_ieee_input(cpu_env, val);
+ gen_helper_ieee_input(tcg_env, val);
}
} else {
#ifndef CONFIG_USER_ONLY
/* In system mode, raise exceptions for denormals like real
hardware. In user mode, proceed as if the OS completion
handler is handling the denormal as per spec. */
- gen_helper_ieee_input_s(cpu_env, val);
+ gen_helper_ieee_input_s(tcg_env, val);
#endif
}
}
@@ -678,9 +678,9 @@
or if we were to do something clever with imprecise exceptions. */
reg = tcg_constant_i32(rc + 32);
if (fn11 & QUAL_S) {
- gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
+ gen_helper_fp_exc_raise_s(tcg_env, ign, reg);
} else {
- gen_helper_fp_exc_raise(cpu_env, ign, reg);
+ gen_helper_fp_exc_raise(tcg_env, ign, reg);
}
}
@@ -705,7 +705,7 @@
gen_qual_flushzero(ctx, fn11);
vb = gen_ieee_input(ctx, rb, fn11, 0);
- helper(dest_fpr(ctx, rc), cpu_env, vb);
+ helper(dest_fpr(ctx, rc), tcg_env, vb);
gen_fp_exc_raise(rc, fn11);
}
@@ -732,10 +732,10 @@
/* Almost all integer conversions use cropped rounding;
special case that. */
if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
- gen_helper_cvttq_c(vc, cpu_env, vb);
+ gen_helper_cvttq_c(vc, tcg_env, vb);
} else {
gen_qual_roundmode(ctx, fn11);
- gen_helper_cvttq(vc, cpu_env, vb);
+ gen_helper_cvttq(vc, tcg_env, vb);
}
gen_fp_exc_raise(rc, fn11);
}
@@ -754,10 +754,10 @@
is inexact. Thus we only need to worry about exceptions when
inexact handling is requested. */
if (fn11 & QUAL_I) {
- helper(vc, cpu_env, vb);
+ helper(vc, tcg_env, vb);
gen_fp_exc_raise(rc, fn11);
} else {
- helper(vc, cpu_env, vb);
+ helper(vc, tcg_env, vb);
}
}
@@ -797,7 +797,7 @@
va = gen_ieee_input(ctx, ra, fn11, 0);
vb = gen_ieee_input(ctx, rb, fn11, 0);
vc = dest_fpr(ctx, rc);
- helper(vc, cpu_env, va, vb);
+ helper(vc, tcg_env, va, vb);
gen_fp_exc_raise(rc, fn11);
}
@@ -826,7 +826,7 @@
va = gen_ieee_input(ctx, ra, fn11, 1);
vb = gen_ieee_input(ctx, rb, fn11, 1);
vc = dest_fpr(ctx, rc);
- helper(vc, cpu_env, va, vb);
+ helper(vc, tcg_env, va, vb);
gen_fp_exc_raise(rc, fn11);
}
@@ -1059,12 +1059,12 @@
break;
case 0x9E:
/* RDUNIQUE */
- tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
offsetof(CPUAlphaState, unique));
break;
case 0x9F:
/* WRUNIQUE */
- tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
offsetof(CPUAlphaState, unique));
break;
default:
@@ -1088,17 +1088,17 @@
break;
case 0x2D:
/* WRVPTPTR */
- tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
offsetof(CPUAlphaState, vptptr));
break;
case 0x31:
/* WRVAL */
- tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
offsetof(CPUAlphaState, sysval));
break;
case 0x32:
/* RDVAL */
- tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
offsetof(CPUAlphaState, sysval));
break;
@@ -1126,23 +1126,23 @@
case 0x38:
/* WRUSP */
- tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
offsetof(CPUAlphaState, usp));
break;
case 0x3A:
/* RDUSP */
- tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
offsetof(CPUAlphaState, usp));
break;
case 0x3C:
/* WHAMI */
- tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
+ tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
case 0x3E:
/* WTINT */
- tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
-offsetof(AlphaCPU, env) +
offsetof(CPUState, halted));
tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
@@ -1174,7 +1174,7 @@
}
tcg_gen_movi_i64(tmp, exc_addr);
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr));
entry += (palcode & 0x80
? 0x2000 + (palcode - 0x80) * 64
@@ -1254,9 +1254,9 @@
if (data == 0) {
tcg_gen_movi_i64(va, 0);
} else if (data & PR_LONG) {
- tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
+ tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
} else {
- tcg_gen_ld_i64(va, cpu_env, data);
+ tcg_gen_ld_i64(va, tcg_env, data);
}
break;
}
@@ -1272,17 +1272,17 @@
switch (regno) {
case 255:
/* TBIA */
- gen_helper_tbia(cpu_env);
+ gen_helper_tbia(tcg_env);
break;
case 254:
/* TBIS */
- gen_helper_tbis(cpu_env, vb);
+ gen_helper_tbis(tcg_env, vb);
break;
case 253:
/* WAIT */
- tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
-offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
return gen_excp(ctx, EXCP_HALTED, 0);
@@ -1296,16 +1296,16 @@
if (translator_io_start(&ctx->base)) {
ret = DISAS_PC_STALE;
}
- gen_helper_set_alarm(cpu_env, vb);
+ gen_helper_set_alarm(tcg_env, vb);
break;
case 7:
/* PALBR */
- tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
+ tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
/* Changing the PAL base register implies un-chaining all of the TBs
that ended with a CALL_PAL. Since the base register usually only
changes during boot, flushing everything works well. */
- gen_helper_tb_flush(cpu_env);
+ gen_helper_tb_flush(tcg_env);
return DISAS_PC_STALE;
case 32 ... 39:
@@ -1327,9 +1327,9 @@
data = cpu_pr_data(regno);
if (data != 0) {
if (data & PR_LONG) {
- tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
+ tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
} else {
- tcg_gen_st_i64(vb, cpu_env, data);
+ tcg_gen_st_i64(vb, tcg_env, data);
}
}
break;
@@ -1594,7 +1594,7 @@
tcg_gen_ext32s_i64(vc, vb);
tcg_gen_add_i64(tmp, tmp, vc);
tcg_gen_ext32s_i64(vc, tmp);
- gen_helper_check_overflow(cpu_env, vc, tmp);
+ gen_helper_check_overflow(tcg_env, vc, tmp);
break;
case 0x49:
/* SUBL/V */
@@ -1603,7 +1603,7 @@
tcg_gen_ext32s_i64(vc, vb);
tcg_gen_sub_i64(tmp, tmp, vc);
tcg_gen_ext32s_i64(vc, tmp);
- gen_helper_check_overflow(cpu_env, vc, tmp);
+ gen_helper_check_overflow(tcg_env, vc, tmp);
break;
case 0x4D:
/* CMPLT */
@@ -1620,7 +1620,7 @@
tcg_gen_and_i64(tmp, tmp, tmp2);
tcg_gen_shri_i64(tmp, tmp, 63);
tcg_gen_movi_i64(tmp2, 0);
- gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ gen_helper_check_overflow(tcg_env, tmp, tmp2);
break;
case 0x69:
/* SUBQ/V */
@@ -1633,7 +1633,7 @@
tcg_gen_and_i64(tmp, tmp, tmp2);
tcg_gen_shri_i64(tmp, tmp, 63);
tcg_gen_movi_i64(tmp2, 0);
- gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ gen_helper_check_overflow(tcg_env, tmp, tmp2);
break;
case 0x6D:
/* CMPLE */
@@ -1924,7 +1924,7 @@
tcg_gen_ext32s_i64(vc, vb);
tcg_gen_mul_i64(tmp, tmp, vc);
tcg_gen_ext32s_i64(vc, tmp);
- gen_helper_check_overflow(cpu_env, vc, tmp);
+ gen_helper_check_overflow(tcg_env, vc, tmp);
break;
case 0x60:
/* MULQ/V */
@@ -1932,7 +1932,7 @@
tmp2 = tcg_temp_new();
tcg_gen_muls2_i64(vc, tmp, va, vb);
tcg_gen_sari_i64(tmp2, vc, 63);
- gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ gen_helper_check_overflow(tcg_env, tmp, tmp2);
break;
default:
goto invalid_opc;
@@ -1957,7 +1957,7 @@
REQUIRE_REG_31(ra);
REQUIRE_FEN;
vb = load_fpr(ctx, rb);
- gen_helper_sqrtf(vc, cpu_env, vb);
+ gen_helper_sqrtf(vc, tcg_env, vb);
break;
case 0x0B:
/* SQRTS */
@@ -1986,7 +1986,7 @@
REQUIRE_REG_31(ra);
REQUIRE_FEN;
vb = load_fpr(ctx, rb);
- gen_helper_sqrtg(vc, cpu_env, vb);
+ gen_helper_sqrtg(vc, tcg_env, vb);
break;
case 0x02B:
/* SQRTT */
@@ -2009,22 +2009,22 @@
case 0x00:
/* ADDF */
REQUIRE_FEN;
- gen_helper_addf(vc, cpu_env, va, vb);
+ gen_helper_addf(vc, tcg_env, va, vb);
break;
case 0x01:
/* SUBF */
REQUIRE_FEN;
- gen_helper_subf(vc, cpu_env, va, vb);
+ gen_helper_subf(vc, tcg_env, va, vb);
break;
case 0x02:
/* MULF */
REQUIRE_FEN;
- gen_helper_mulf(vc, cpu_env, va, vb);
+ gen_helper_mulf(vc, tcg_env, va, vb);
break;
case 0x03:
/* DIVF */
REQUIRE_FEN;
- gen_helper_divf(vc, cpu_env, va, vb);
+ gen_helper_divf(vc, tcg_env, va, vb);
break;
case 0x1E:
/* CVTDG -- TODO */
@@ -2033,43 +2033,43 @@
case 0x20:
/* ADDG */
REQUIRE_FEN;
- gen_helper_addg(vc, cpu_env, va, vb);
+ gen_helper_addg(vc, tcg_env, va, vb);
break;
case 0x21:
/* SUBG */
REQUIRE_FEN;
- gen_helper_subg(vc, cpu_env, va, vb);
+ gen_helper_subg(vc, tcg_env, va, vb);
break;
case 0x22:
/* MULG */
REQUIRE_FEN;
- gen_helper_mulg(vc, cpu_env, va, vb);
+ gen_helper_mulg(vc, tcg_env, va, vb);
break;
case 0x23:
/* DIVG */
REQUIRE_FEN;
- gen_helper_divg(vc, cpu_env, va, vb);
+ gen_helper_divg(vc, tcg_env, va, vb);
break;
case 0x25:
/* CMPGEQ */
REQUIRE_FEN;
- gen_helper_cmpgeq(vc, cpu_env, va, vb);
+ gen_helper_cmpgeq(vc, tcg_env, va, vb);
break;
case 0x26:
/* CMPGLT */
REQUIRE_FEN;
- gen_helper_cmpglt(vc, cpu_env, va, vb);
+ gen_helper_cmpglt(vc, tcg_env, va, vb);
break;
case 0x27:
/* CMPGLE */
REQUIRE_FEN;
- gen_helper_cmpgle(vc, cpu_env, va, vb);
+ gen_helper_cmpgle(vc, tcg_env, va, vb);
break;
case 0x2C:
/* CVTGF */
REQUIRE_REG_31(ra);
REQUIRE_FEN;
- gen_helper_cvtgf(vc, cpu_env, vb);
+ gen_helper_cvtgf(vc, tcg_env, vb);
break;
case 0x2D:
/* CVTGD -- TODO */
@@ -2079,19 +2079,19 @@
/* CVTGQ */
REQUIRE_REG_31(ra);
REQUIRE_FEN;
- gen_helper_cvtgq(vc, cpu_env, vb);
+ gen_helper_cvtgq(vc, tcg_env, vb);
break;
case 0x3C:
/* CVTQF */
REQUIRE_REG_31(ra);
REQUIRE_FEN;
- gen_helper_cvtqf(vc, cpu_env, vb);
+ gen_helper_cvtqf(vc, tcg_env, vb);
break;
case 0x3E:
/* CVTQG */
REQUIRE_REG_31(ra);
REQUIRE_FEN;
- gen_helper_cvtqg(vc, cpu_env, vb);
+ gen_helper_cvtqg(vc, tcg_env, vb);
break;
default:
goto invalid_opc;
@@ -2242,7 +2242,7 @@
/* MT_FPCR */
REQUIRE_FEN;
va = load_fpr(ctx, ra);
- gen_helper_store_fpcr(cpu_env, va);
+ gen_helper_store_fpcr(tcg_env, va);
if (ctx->tb_rm == QUAL_RM_D) {
/* Re-do the copy of the rounding mode to fp_status
the next time we use dynamic rounding. */
@@ -2253,7 +2253,7 @@
/* MF_FPCR */
REQUIRE_FEN;
va = dest_fpr(ctx, ra);
- gen_helper_load_fpcr(va, cpu_env);
+ gen_helper_load_fpcr(va, tcg_env);
break;
case 0x02A:
/* FCMOVEQ */
@@ -2292,7 +2292,7 @@
REQUIRE_FEN;
vc = dest_fpr(ctx, rc);
vb = load_fpr(ctx, rb);
- gen_helper_cvtql(vc, cpu_env, vb);
+ gen_helper_cvtql(vc, tcg_env, vb);
gen_fp_exc_raise(rc, fn11);
break;
default:
@@ -2332,7 +2332,7 @@
if (translator_io_start(&ctx->base)) {
ret = DISAS_PC_STALE;
}
- gen_helper_load_pcc(va, cpu_env);
+ gen_helper_load_pcc(va, tcg_env);
break;
case 0xE000:
/* RC */
@@ -2628,7 +2628,7 @@
address from EXC_ADDR. This turns out to be useful for our
emulation PALcode, so continue to accept it. */
vb = dest_sink(ctx);
- tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
} else {
vb = load_gpr(ctx, rb);
}
@@ -2871,7 +2871,7 @@
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUAlphaState *env = cpu->env_ptr;
+ CPUAlphaState *env = cpu_env(cpu);
int64_t bound;
ctx->tbflags = ctx->base.tb->flags;
@@ -2917,7 +2917,7 @@
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUAlphaState *env = cpu->env_ptr;
+ CPUAlphaState *env = cpu_env(cpu);
uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
ctx->base.pc_next += 4;
diff --git a/target/arm/common-semi-target.h b/target/arm/common-semi-target.h
index 629d75c..19438ed 100644
--- a/target/arm/common-semi-target.h
+++ b/target/arm/common-semi-target.h
@@ -38,7 +38,7 @@
static inline bool common_semi_sys_exit_extended(CPUState *cs, int nr)
{
- return (nr == TARGET_SYS_EXIT_EXTENDED || is_a64(cs->env_ptr));
+ return nr == TARGET_SYS_EXIT_EXTENDED || is_a64(cpu_env(cs));
}
static inline bool is_64bit_semihosting(CPUArchState *env)
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index b3b35f7..f9b462a 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -31,18 +31,6 @@
# define TARGET_PAGE_BITS_VARY
# define TARGET_PAGE_BITS_MIN 10
-/*
- * Cache the attrs and shareability fields from the page table entry.
- *
- * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
- * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
- * For shareability and guarded, as in the SH and GP fields respectively
- * of the VMSAv8-64 PTEs.
- */
-# define TARGET_PAGE_ENTRY_EXTRA \
- uint8_t pte_attrs; \
- uint8_t shareability; \
- bool guarded;
#endif
#endif
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index b9e09a7..831295d 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -80,7 +80,7 @@
{
/* The program counter is always up to date with CF_PCREL. */
if (!(tb_cflags(tb) & CF_PCREL)) {
- CPUARMState *env = cs->env_ptr;
+ CPUARMState *env = cpu_env(cs);
/*
* It's OK to look at env for the current mode here, because it's
* never possible for an AArch64 TB to chain to an AArch32 TB.
@@ -97,7 +97,7 @@
const TranslationBlock *tb,
const uint64_t *data)
{
- CPUARMState *env = cs->env_ptr;
+ CPUARMState *env = cpu_env(cs);
if (is_a64(env)) {
if (tb_cflags(tb) & CF_PCREL) {
@@ -560,7 +560,7 @@
unsigned int cur_el, bool secure,
uint64_t hcr_el2)
{
- CPUARMState *env = cs->env_ptr;
+ CPUARMState *env = cpu_env(cs);
bool pstate_unmasked;
bool unmasked = false;
@@ -690,7 +690,7 @@
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
- CPUARMState *env = cs->env_ptr;
+ CPUARMState *env = cpu_env(cs);
uint32_t cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
@@ -1215,7 +1215,6 @@
{
ARMCPU *cpu = ARM_CPU(obj);
- cpu_set_cpustate_pointers(cpu);
cpu->cp_regs = g_hash_table_new_full(g_direct_hash, g_direct_equal,
NULL, g_free);
@@ -2423,10 +2422,7 @@
{
TypeInfo type_info = {
.parent = TYPE_ARM_CPU,
- .instance_size = sizeof(ARMCPU),
- .instance_align = __alignof__(ARMCPU),
.instance_init = arm_cpu_instance_init,
- .class_size = sizeof(ARMCPUClass),
.class_init = info->class_init ?: cpu_register_class_init,
.class_data = (void *)info,
};
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index bd55c5d..a9edfb8 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -856,7 +856,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUARMState env;
/* Coprocessor information */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index f3d87e0..811f3b3 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -822,9 +822,7 @@
{
TypeInfo type_info = {
.parent = TYPE_AARCH64_CPU,
- .instance_size = sizeof(ARMCPU),
.instance_init = aarch64_cpu_instance_init,
- .class_size = sizeof(ARMCPUClass),
.class_init = info->class_init ?: cpu_register_class_init,
.class_data = (void *)info,
};
@@ -837,10 +835,8 @@
static const TypeInfo aarch64_cpu_type_info = {
.name = TYPE_AARCH64_CPU,
.parent = TYPE_ARM_CPU,
- .instance_size = sizeof(ARMCPU),
.instance_finalize = aarch64_cpu_finalizefn,
.abstract = true,
- .class_size = sizeof(AArch64CPUClass),
.class_init = aarch64_cpu_class_init,
};
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 8362078..74fbb6e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -10297,7 +10297,7 @@
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure)
{
- CPUARMState *env = cs->env_ptr;
+ CPUARMState *env = cpu_env(cs);
bool rw;
bool scr;
bool hcr;
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 546c0e8..757e13b 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -1934,16 +1934,16 @@
uint32_t rt = (syndrome >> 5) & 0x1f;
uint32_t reg = syndrome & SYSREG_MASK;
uint64_t val;
- int ret = 0;
+ int sysreg_ret = 0;
if (isread) {
- ret = hvf_sysreg_read(cpu, reg, rt);
+ sysreg_ret = hvf_sysreg_read(cpu, reg, rt);
} else {
val = hvf_get_reg(cpu, rt);
- ret = hvf_sysreg_write(cpu, reg, val);
+ sysreg_ret = hvf_sysreg_write(cpu, reg, val);
}
- advance_pc = !ret;
+ advance_pc = !sysreg_ret;
break;
}
case EC_WFX_TRAP:
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index bfbab26..95db9ec 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -579,7 +579,7 @@
}
ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
ptw->out_rw = full->prot & PAGE_WRITE;
- pte_attrs = full->pte_attrs;
+ pte_attrs = full->extra.arm.pte_attrs;
ptw->out_space = full->attrs.space;
#else
g_assert_not_reached();
@@ -2036,7 +2036,7 @@
/* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
- result->f.guarded = extract64(attrs, 50, 1); /* GP */
+ result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
}
}
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 2dd7eb3..70ac876 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -137,7 +137,7 @@
assert(!(flags & TLB_INVALID_MASK));
/* If the virtual page MemAttr != Tagged, access unchecked. */
- if (full->pte_attrs != 0xf0) {
+ if (full->extra.arm.pte_attrs != 0xf0) {
return NULL;
}
diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c
index c666a96..8b99736 100644
--- a/target/arm/tcg/mve_helper.c
+++ b/target/arm/tcg/mve_helper.c
@@ -925,8 +925,8 @@
bool qc = false; \
for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
bool sat = false; \
- TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
- mergemask(&d[H##ESIZE(e)], r, mask); \
+ TYPE r_ = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
+ mergemask(&d[H##ESIZE(e)], r_, mask); \
qc |= sat & mask & 1; \
} \
if (qc) { \
@@ -1250,11 +1250,11 @@
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
({ \
uint32_t su32 = 0; \
- typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
+ typeof(N) qrshl_ret = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
if (su32) { \
*satp = true; \
} \
- r; \
+ qrshl_ret; \
})
#define DO_SQSHL_OP(N, M, satp) \
@@ -1292,12 +1292,12 @@
for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
bool sat = false; \
if ((e & 1) == XCHG) { \
- TYPE r = FN(n[H##ESIZE(e)], \
+ TYPE vqdmladh_ret = FN(n[H##ESIZE(e)], \
m[H##ESIZE(e - XCHG)], \
n[H##ESIZE(e + (1 - 2 * XCHG))], \
m[H##ESIZE(e + (1 - XCHG))], \
ROUND, &sat); \
- mergemask(&d[H##ESIZE(e)], r, mask); \
+ mergemask(&d[H##ESIZE(e)], vqdmladh_ret, mask); \
qc |= sat & mask & 1; \
} \
} \
@@ -2454,7 +2454,7 @@
return extval;
}
} else if (shift < 48) {
- int64_t extval = sextract64(src << shift, 0, 48);
+ extval = sextract64(src << shift, 0, 48);
if (!sat || src == (extval >> shift)) {
return extval;
}
@@ -2486,7 +2486,7 @@
return extval;
}
} else if (shift < 48) {
- uint64_t extval = extract64(src << shift, 0, 48);
+ extval = extract64(src << shift, 0, 48);
if (!sat || src == (extval >> shift)) {
return extval;
}
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index 7c103fc..f006d15 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -5373,7 +5373,7 @@
info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
#else
info->attrs = full->attrs;
- info->tagged = full->pte_attrs == 0xf0;
+ info->tagged = full->extra.arm.pte_attrs == 0xf0;
#endif
/* Ensure that info->host[] is relative to addr, not addr + mem_off. */
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index b22b2a4..59bff8b 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -334,8 +334,8 @@
address &= TARGET_PAGE_MASK;
}
- res.f.pte_attrs = res.cacheattrs.attrs;
- res.f.shareability = res.cacheattrs.shareability;
+ res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
+ res.f.extra.arm.shareability = res.cacheattrs.shareability;
tlb_set_page_full(cs, mmu_idx, address, &res.f);
return true;
diff --git a/target/arm/tcg/translate-a32.h b/target/arm/tcg/translate-a32.h
index 48a1537..19de6e0 100644
--- a/target/arm/tcg/translate-a32.h
+++ b/target/arm/tcg/translate-a32.h
@@ -55,7 +55,7 @@
static inline TCGv_i32 load_cpu_offset(int offset)
{
TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp, cpu_env, offset);
+ tcg_gen_ld_i32(tmp, tcg_env, offset);
return tmp;
}
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 97f25b4..10e8dcf 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -91,16 +91,16 @@
{
int i;
- cpu_pc = tcg_global_mem_new_i64(cpu_env,
+ cpu_pc = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUARMState, pc),
"pc");
for (i = 0; i < 32; i++) {
- cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_X[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUARMState, xregs[i]),
regnames[i]);
}
- cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
+ cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
@@ -147,7 +147,7 @@
static void set_btype_raw(int val)
{
- tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(val), tcg_env,
offsetof(CPUARMState, btype));
}
@@ -269,7 +269,7 @@
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
MMUAccessType acc, int log2_size)
{
- gen_helper_probe_access(cpu_env, ptr,
+ gen_helper_probe_access(tcg_env, ptr,
tcg_constant_i32(acc),
tcg_constant_i32(get_mem_index(s)),
tcg_constant_i32(1 << log2_size));
@@ -298,7 +298,7 @@
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
ret = tcg_temp_new_i64();
- gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
+ gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);
return ret;
}
@@ -330,7 +330,7 @@
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
ret = tcg_temp_new_i64();
- gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
+ gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);
return ret;
}
@@ -366,7 +366,7 @@
type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD,
mmu_idx = get_mem_index(s);
- gen_helper_unaligned_access(cpu_env, addr, tcg_constant_i32(type),
+ gen_helper_unaligned_access(tcg_env, addr, tcg_constant_i32(type),
tcg_constant_i32(mmu_idx));
gen_set_label(over_label);
@@ -442,13 +442,13 @@
static void gen_rebuild_hflags(DisasContext *s)
{
- gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
+ gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}
static void gen_exception_internal(int excp)
{
assert(excp_is_internal(excp));
- gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
+ gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}
static void gen_exception_internal_insn(DisasContext *s, int excp)
@@ -461,7 +461,7 @@
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
gen_a64_update_pc(s, 0);
- gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
+ gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syndrome));
s->base.is_jmp = DISAS_NORETURN;
}
@@ -608,7 +608,7 @@
{
TCGv_i64 v = tcg_temp_new_i64();
- tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
+ tcg_gen_ld_i64(v, tcg_env, fp_reg_offset(s, reg, MO_64));
return v;
}
@@ -616,7 +616,7 @@
{
TCGv_i32 v = tcg_temp_new_i32();
- tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
+ tcg_gen_ld_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32));
return v;
}
@@ -624,7 +624,7 @@
{
TCGv_i32 v = tcg_temp_new_i32();
- tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
+ tcg_gen_ld16u_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16));
return v;
}
@@ -644,7 +644,7 @@
{
unsigned ofs = fp_reg_offset(s, reg, MO_64);
- tcg_gen_st_i64(v, cpu_env, ofs);
+ tcg_gen_st_i64(v, tcg_env, ofs);
clear_vec_high(s, false, reg);
}
@@ -730,7 +730,7 @@
{
TCGv_ptr qc_ptr = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
+ tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
vec_full_reg_offset(s, rm), qc_ptr,
@@ -1025,7 +1025,7 @@
/* This writes the bottom N bits of a 128 bit wide vector to memory */
TCGv_i64 tmplo = tcg_temp_new_i64();
- tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+ tcg_gen_ld_i64(tmplo, tcg_env, fp_reg_offset(s, srcidx, MO_64));
if ((mop & MO_SIZE) < MO_128) {
tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
@@ -1033,7 +1033,7 @@
TCGv_i64 tmphi = tcg_temp_new_i64();
TCGv_i128 t16 = tcg_temp_new_i128();
- tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
+ tcg_gen_ld_i64(tmphi, tcg_env, fp_reg_hi_offset(s, srcidx));
tcg_gen_concat_i64_i128(t16, tmplo, tmphi);
tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
@@ -1060,10 +1060,10 @@
tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
}
- tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
+ tcg_gen_st_i64(tmplo, tcg_env, fp_reg_offset(s, destidx, MO_64));
if (tmphi) {
- tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
+ tcg_gen_st_i64(tmphi, tcg_env, fp_reg_hi_offset(s, destidx));
}
clear_vec_high(s, tmphi != NULL, destidx);
}
@@ -1087,26 +1087,26 @@
int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch ((unsigned)memop) {
case MO_8:
- tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld8u_i64(tcg_dest, tcg_env, vect_off);
break;
case MO_16:
- tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld16u_i64(tcg_dest, tcg_env, vect_off);
break;
case MO_32:
- tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld32u_i64(tcg_dest, tcg_env, vect_off);
break;
case MO_8|MO_SIGN:
- tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld8s_i64(tcg_dest, tcg_env, vect_off);
break;
case MO_16|MO_SIGN:
- tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld16s_i64(tcg_dest, tcg_env, vect_off);
break;
case MO_32|MO_SIGN:
- tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld32s_i64(tcg_dest, tcg_env, vect_off);
break;
case MO_64:
case MO_64|MO_SIGN:
- tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld_i64(tcg_dest, tcg_env, vect_off);
break;
default:
g_assert_not_reached();
@@ -1119,20 +1119,20 @@
int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch (memop) {
case MO_8:
- tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld8u_i32(tcg_dest, tcg_env, vect_off);
break;
case MO_16:
- tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld16u_i32(tcg_dest, tcg_env, vect_off);
break;
case MO_8|MO_SIGN:
- tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld8s_i32(tcg_dest, tcg_env, vect_off);
break;
case MO_16|MO_SIGN:
- tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld16s_i32(tcg_dest, tcg_env, vect_off);
break;
case MO_32:
case MO_32|MO_SIGN:
- tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
+ tcg_gen_ld_i32(tcg_dest, tcg_env, vect_off);
break;
default:
g_assert_not_reached();
@@ -1146,16 +1146,16 @@
int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
case MO_8:
- tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
+ tcg_gen_st8_i64(tcg_src, tcg_env, vect_off);
break;
case MO_16:
- tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
+ tcg_gen_st16_i64(tcg_src, tcg_env, vect_off);
break;
case MO_32:
- tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
+ tcg_gen_st32_i64(tcg_src, tcg_env, vect_off);
break;
case MO_64:
- tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
+ tcg_gen_st_i64(tcg_src, tcg_env, vect_off);
break;
default:
g_assert_not_reached();
@@ -1168,13 +1168,13 @@
int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
case MO_8:
- tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
+ tcg_gen_st8_i32(tcg_src, tcg_env, vect_off);
break;
case MO_16:
- tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
+ tcg_gen_st16_i32(tcg_src, tcg_env, vect_off);
break;
case MO_32:
- tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
+ tcg_gen_st_i32(tcg_src, tcg_env, vect_off);
break;
default:
g_assert_not_reached();
@@ -1542,9 +1542,9 @@
truedst = tcg_temp_new_i64();
if (use_key_a) {
- gen_helper_autia_combined(truedst, cpu_env, dst, modifier);
+ gen_helper_autia_combined(truedst, tcg_env, dst, modifier);
} else {
- gen_helper_autib_combined(truedst, cpu_env, dst, modifier);
+ gen_helper_autib_combined(truedst, tcg_env, dst, modifier);
}
return truedst;
}
@@ -1643,12 +1643,12 @@
return true;
}
dst = tcg_temp_new_i64();
- tcg_gen_ld_i64(dst, cpu_env,
+ tcg_gen_ld_i64(dst, tcg_env,
offsetof(CPUARMState, elr_el[s->current_el]));
translator_io_start(&s->base);
- gen_helper_exception_return(cpu_env, dst);
+ gen_helper_exception_return(tcg_env, dst);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
return true;
@@ -1670,14 +1670,14 @@
return true;
}
dst = tcg_temp_new_i64();
- tcg_gen_ld_i64(dst, cpu_env,
+ tcg_gen_ld_i64(dst, tcg_env,
offsetof(CPUARMState, elr_el[s->current_el]));
dst = auth_branch_target(s, dst, cpu_X[31], !a->m);
translator_io_start(&s->base);
- gen_helper_exception_return(cpu_env, dst);
+ gen_helper_exception_return(tcg_env, dst);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
return true;
@@ -1725,7 +1725,7 @@
static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
{
if (s->pauth_active) {
- gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
+ gen_helper_xpaci(cpu_X[30], tcg_env, cpu_X[30]);
}
return true;
}
@@ -1733,7 +1733,7 @@
static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
{
if (s->pauth_active) {
- gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ gen_helper_pacia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
}
return true;
}
@@ -1741,7 +1741,7 @@
static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
{
if (s->pauth_active) {
- gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ gen_helper_pacib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
}
return true;
}
@@ -1749,7 +1749,7 @@
static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
{
if (s->pauth_active) {
- gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ gen_helper_autia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
}
return true;
}
@@ -1757,7 +1757,7 @@
static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
{
if (s->pauth_active) {
- gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ gen_helper_autib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
}
return true;
}
@@ -1776,7 +1776,7 @@
* Test for EL2 present, and defer test for SEL2 to runtime.
*/
if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
- gen_helper_vesb(cpu_env);
+ gen_helper_vesb(tcg_env);
}
}
return true;
@@ -1785,7 +1785,7 @@
static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
{
if (s->pauth_active) {
- gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+ gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
}
return true;
}
@@ -1793,7 +1793,7 @@
static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
{
if (s->pauth_active) {
- gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
}
return true;
}
@@ -1801,7 +1801,7 @@
static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
{
if (s->pauth_active) {
- gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+ gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
}
return true;
}
@@ -1809,7 +1809,7 @@
static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
{
if (s->pauth_active) {
- gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
}
return true;
}
@@ -1817,7 +1817,7 @@
static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
{
if (s->pauth_active) {
- gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+ gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
}
return true;
}
@@ -1825,7 +1825,7 @@
static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
{
if (s->pauth_active) {
- gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
}
return true;
}
@@ -1833,7 +1833,7 @@
static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
{
if (s->pauth_active) {
- gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
+ gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
}
return true;
}
@@ -1841,7 +1841,7 @@
static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
{
if (s->pauth_active) {
- gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
}
return true;
}
@@ -1996,7 +1996,7 @@
if (s->current_el == 0) {
return false;
}
- gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(a->imm & PSTATE_SP));
+ gen_helper_msr_i_spsel(tcg_env, tcg_constant_i32(a->imm & PSTATE_SP));
s->base.is_jmp = DISAS_TOO_MANY;
return true;
}
@@ -2055,14 +2055,14 @@
static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a)
{
- gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(a->imm));
+ gen_helper_msr_i_daifset(tcg_env, tcg_constant_i32(a->imm));
s->base.is_jmp = DISAS_TOO_MANY;
return true;
}
static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
{
- gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(a->imm));
+ gen_helper_msr_i_daifclear(tcg_env, tcg_constant_i32(a->imm));
/* Exit the cpu loop to re-evaluate pending IRQs. */
s->base.is_jmp = DISAS_UPDATE_EXIT;
return true;
@@ -2079,7 +2079,7 @@
if ((old ^ new) & a->mask) {
/* At least one bit changes. */
- gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
+ gen_helper_set_svcr(tcg_env, tcg_constant_i32(new),
tcg_constant_i32(a->mask));
s->base.is_jmp = DISAS_TOO_MANY;
}
@@ -2177,11 +2177,11 @@
switch (s->current_el) {
case 0:
if (dc_isar_feature(aa64_tidcp1, s)) {
- gen_helper_tidcp_el0(cpu_env, tcg_constant_i32(syndrome));
+ gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
}
break;
case 1:
- gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
+ gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
break;
}
}
@@ -2210,7 +2210,7 @@
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
gen_a64_update_pc(s, 0);
tcg_ri = tcg_temp_new_ptr();
- gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
+ gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
tcg_constant_i32(key),
tcg_constant_i32(syndrome),
tcg_constant_i32(isread));
@@ -2253,12 +2253,12 @@
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
tcg_rt = tcg_temp_new_i64();
- gen_helper_mte_check_zva(tcg_rt, cpu_env,
+ gen_helper_mte_check_zva(tcg_rt, tcg_env,
tcg_constant_i32(desc), cpu_reg(s, rt));
} else {
tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
}
- gen_helper_dc_zva(cpu_env, tcg_rt);
+ gen_helper_dc_zva(tcg_env, tcg_rt);
return;
case ARM_CP_DC_GVA:
{
@@ -2276,7 +2276,7 @@
/* Extract the tag from the register to match STZGM. */
tag = tcg_temp_new_i64();
tcg_gen_shri_i64(tag, tcg_rt, 56);
- gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
+ gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
}
}
return;
@@ -2287,13 +2287,13 @@
/* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
tcg_rt = cpu_reg(s, rt);
clean_addr = clean_data_tbi(s, tcg_rt);
- gen_helper_dc_zva(cpu_env, clean_addr);
+ gen_helper_dc_zva(tcg_env, clean_addr);
if (s->ata[0]) {
/* Extract the tag from the register to match STZGM. */
tag = tcg_temp_new_i64();
tcg_gen_shri_i64(tag, tcg_rt, 56);
- gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
+ gen_helper_stzgm_tags(tcg_env, clean_addr, tag);
}
}
return;
@@ -2322,9 +2322,9 @@
if (!tcg_ri) {
tcg_ri = gen_lookup_cp_reg(key);
}
- gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_ri);
+ gen_helper_get_cp_reg64(tcg_rt, tcg_env, tcg_ri);
} else {
- tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ tcg_gen_ld_i64(tcg_rt, tcg_env, ri->fieldoffset);
}
} else {
if (ri->type & ARM_CP_CONST) {
@@ -2334,9 +2334,9 @@
if (!tcg_ri) {
tcg_ri = gen_lookup_cp_reg(key);
}
- gen_helper_set_cp_reg64(cpu_env, tcg_ri, tcg_rt);
+ gen_helper_set_cp_reg64(tcg_env, tcg_ri, tcg_rt);
} else {
- tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ tcg_gen_st_i64(tcg_rt, tcg_env, ri->fieldoffset);
}
}
@@ -2393,7 +2393,7 @@
* as an undefined insn by runtime configuration.
*/
gen_a64_update_pc(s, 0);
- gen_helper_pre_hvc(cpu_env);
+ gen_helper_pre_hvc(tcg_env);
/* Architecture requires ss advance before we do the actual work */
gen_ss_advance(s);
gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
@@ -2407,7 +2407,7 @@
return true;
}
gen_a64_update_pc(s, 0);
- gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
+ gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
/* Architecture requires ss advance before we do the actual work */
gen_ss_advance(s);
gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3);
@@ -3072,9 +3072,9 @@
/* Perform the tag store, if tag access enabled. */
if (s->ata[0]) {
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
- gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
+ gen_helper_stg_parallel(tcg_env, dirty_addr, dirty_addr);
} else {
- gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
+ gen_helper_stg(tcg_env, dirty_addr, dirty_addr);
}
}
@@ -3370,10 +3370,10 @@
if (s->pauth_active) {
if (!a->m) {
- gen_helper_autda_combined(dirty_addr, cpu_env, dirty_addr,
+ gen_helper_autda_combined(dirty_addr, tcg_env, dirty_addr,
tcg_constant_i64(0));
} else {
- gen_helper_autdb_combined(dirty_addr, cpu_env, dirty_addr,
+ gen_helper_autdb_combined(dirty_addr, tcg_env, dirty_addr,
tcg_constant_i64(0));
}
}
@@ -3769,7 +3769,7 @@
tcg_rt = cpu_reg(s, a->rt);
if (s->ata[0]) {
- gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
+ gen_helper_stzgm_tags(tcg_env, addr, tcg_rt);
}
/*
* The non-tags portion of STZGM is mostly like DC_ZVA,
@@ -3777,7 +3777,7 @@
*/
clean_addr = clean_data_tbi(s, addr);
tcg_gen_andi_i64(clean_addr, clean_addr, -size);
- gen_helper_dc_zva(cpu_env, clean_addr);
+ gen_helper_dc_zva(tcg_env, clean_addr);
return true;
}
@@ -3801,7 +3801,7 @@
tcg_rt = cpu_reg(s, a->rt);
if (s->ata[0]) {
- gen_helper_stgm(cpu_env, addr, tcg_rt);
+ gen_helper_stgm(tcg_env, addr, tcg_rt);
} else {
MMUAccessType acc = MMU_DATA_STORE;
int size = 4 << s->gm_blocksize;
@@ -3833,7 +3833,7 @@
tcg_rt = cpu_reg(s, a->rt);
if (s->ata[0]) {
- gen_helper_ldgm(tcg_rt, cpu_env, addr);
+ gen_helper_ldgm(tcg_rt, tcg_env, addr);
} else {
MMUAccessType acc = MMU_DATA_LOAD;
int size = 4 << s->gm_blocksize;
@@ -3868,7 +3868,7 @@
tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
tcg_rt = cpu_reg(s, a->rt);
if (s->ata[0]) {
- gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
+ gen_helper_ldg(tcg_rt, tcg_env, addr, tcg_rt);
} else {
/*
* Tag access disabled: we must check for aborts on the load
@@ -3911,21 +3911,21 @@
* at least for system mode; user-only won't enforce alignment.
*/
if (is_pair) {
- gen_helper_st2g_stub(cpu_env, addr);
+ gen_helper_st2g_stub(tcg_env, addr);
} else {
- gen_helper_stg_stub(cpu_env, addr);
+ gen_helper_stg_stub(tcg_env, addr);
}
} else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
if (is_pair) {
- gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
+ gen_helper_st2g_parallel(tcg_env, addr, tcg_rt);
} else {
- gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
+ gen_helper_stg_parallel(tcg_env, addr, tcg_rt);
}
} else {
if (is_pair) {
- gen_helper_st2g(cpu_env, addr, tcg_rt);
+ gen_helper_st2g(tcg_env, addr, tcg_rt);
} else {
- gen_helper_stg(cpu_env, addr, tcg_rt);
+ gen_helper_stg(tcg_env, addr, tcg_rt);
}
}
@@ -4008,7 +4008,7 @@
* the syndrome anyway, we let it extract them from there rather
* than passing in an extra three integer arguments.
*/
- fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc));
+ fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc));
return true;
}
@@ -4067,7 +4067,7 @@
* the syndrome anyway, we let it extract them from there rather
* than passing in an extra three integer arguments.
*/
- fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc),
+ fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc),
tcg_constant_i32(rdesc));
return true;
}
@@ -4142,7 +4142,7 @@
tcg_rd = cpu_reg_sp(s, a->rd);
if (s->ata[0]) {
- gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
+ gen_helper_addsubg(tcg_rd, tcg_env, tcg_rn,
tcg_constant_i32(imm),
tcg_constant_i32(a->uimm4));
} else {
@@ -5241,7 +5241,7 @@
case MAP(1, 0x01, 0x00): /* PACIA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5249,7 +5249,7 @@
case MAP(1, 0x01, 0x01): /* PACIB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5257,7 +5257,7 @@
case MAP(1, 0x01, 0x02): /* PACDA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5265,7 +5265,7 @@
case MAP(1, 0x01, 0x03): /* PACDB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5273,7 +5273,7 @@
case MAP(1, 0x01, 0x04): /* AUTIA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5281,7 +5281,7 @@
case MAP(1, 0x01, 0x05): /* AUTIB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5289,7 +5289,7 @@
case MAP(1, 0x01, 0x06): /* AUTDA */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5297,7 +5297,7 @@
case MAP(1, 0x01, 0x07): /* AUTDB */
if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
} else if (!dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
@@ -5307,7 +5307,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x09): /* PACIZB */
@@ -5315,7 +5315,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x0a): /* PACDZA */
@@ -5323,7 +5323,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x0b): /* PACDZB */
@@ -5331,7 +5331,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x0c): /* AUTIZA */
@@ -5339,7 +5339,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x0d): /* AUTIZB */
@@ -5347,7 +5347,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x0e): /* AUTDZA */
@@ -5355,7 +5355,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x0f): /* AUTDZB */
@@ -5363,7 +5363,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
+ gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
}
break;
case MAP(1, 0x01, 0x10): /* XPACI */
@@ -5371,7 +5371,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
+ gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd);
}
break;
case MAP(1, 0x01, 0x11): /* XPACD */
@@ -5379,7 +5379,7 @@
goto do_unallocated;
} else if (s->pauth_active) {
tcg_rd = cpu_reg(s, rd);
- gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
+ gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd);
}
break;
default:
@@ -5529,7 +5529,7 @@
goto do_unallocated;
}
if (s->ata[0]) {
- gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
+ gen_helper_irg(cpu_reg_sp(s, rd), tcg_env,
cpu_reg_sp(s, rn), cpu_reg(s, rm));
} else {
gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
@@ -5563,7 +5563,7 @@
if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
goto do_unallocated;
}
- gen_helper_pacga(cpu_reg(s, rd), cpu_env,
+ gen_helper_pacga(cpu_reg(s, rd), tcg_env,
cpu_reg(s, rn), cpu_reg_sp(s, rm));
break;
case 16:
@@ -5969,7 +5969,7 @@
gen_helper_vfp_negs(tcg_res, tcg_op);
goto done;
case 0x3: /* FSQRT */
- gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
goto done;
case 0x6: /* BFCVT */
gen_fpst = gen_helper_bfcvt;
@@ -6044,7 +6044,7 @@
gen_helper_vfp_negd(tcg_res, tcg_op);
goto done;
case 0x3: /* FSQRT */
- gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
+ gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env);
goto done;
case 0x8: /* FRINTN */
case 0x9: /* FRINTP */
@@ -6101,7 +6101,7 @@
if (dtype == 1) {
/* Single to double */
TCGv_i64 tcg_rd = tcg_temp_new_i64();
- gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
+ gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, tcg_env);
write_fp_dreg(s, rd, tcg_rd);
} else {
/* Single to half */
@@ -6121,7 +6121,7 @@
TCGv_i32 tcg_rd = tcg_temp_new_i32();
if (dtype == 0) {
/* Double to single */
- gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
+ gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env);
} else {
TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
TCGv_i32 ahp = get_ahp_flag();
@@ -6881,7 +6881,7 @@
break;
case 2:
/* 64 bit to top half. */
- tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
+ tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd));
clear_vec_high(s, true, rd);
break;
case 3:
@@ -6899,19 +6899,19 @@
switch (type) {
case 0:
/* 32 bit */
- tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
+ tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32));
break;
case 1:
/* 64 bit */
- tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
+ tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64));
break;
case 2:
/* 64 bits from top half */
- tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
+ tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn));
break;
case 3:
/* 16 bit */
- tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
+ tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16));
break;
default:
g_assert_not_reached();
@@ -7195,7 +7195,7 @@
}
tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rm), cpu_env,
+ vec_full_reg_offset(s, rm), tcg_env,
is_q ? 16 : 8, vec_full_reg_size(s),
(len << 6) | (is_tbx << 5) | rn,
gen_helper_simd_tblx);
@@ -8249,7 +8249,7 @@
read_vec_element(s, tcg_rn, rn, i, ldop);
handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
false, is_u_shift, size+1, shift);
- narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
+ narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd);
tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
if (i == 0) {
tcg_gen_mov_i64(tcg_final, tcg_rd);
@@ -8321,7 +8321,7 @@
TCGv_i64 tcg_op = tcg_temp_new_i64();
read_vec_element(s, tcg_op, rn, pass, MO_64);
- genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
write_vec_element(s, tcg_op, rd, pass, MO_64);
}
clear_vec_high(s, is_q, rd);
@@ -8350,7 +8350,7 @@
TCGv_i32 tcg_op = tcg_temp_new_i32();
read_vec_element_i32(s, tcg_op, rn, pass, memop);
- genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
if (scalar) {
switch (size) {
case 0:
@@ -8733,7 +8733,7 @@
read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
- gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
+ gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env, tcg_res, tcg_res);
switch (opcode) {
case 0xd: /* SQDMULL, SQDMULL2 */
@@ -8743,7 +8743,7 @@
/* fall through */
case 0x9: /* SQDMLAL, SQDMLAL2 */
read_vec_element(s, tcg_op1, rd, 0, MO_64);
- gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
+ gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env,
tcg_res, tcg_op1);
break;
default:
@@ -8757,7 +8757,7 @@
TCGv_i64 tcg_res = tcg_temp_new_i64();
gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
- gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
+ gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env, tcg_res, tcg_res);
switch (opcode) {
case 0xd: /* SQDMULL, SQDMULL2 */
@@ -8769,7 +8769,7 @@
{
TCGv_i64 tcg_op3 = tcg_temp_new_i64();
read_vec_element(s, tcg_op3, rd, 0, MO_32);
- gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
+ gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env,
tcg_res, tcg_op3);
break;
}
@@ -8795,16 +8795,16 @@
switch (opcode) {
case 0x1: /* SQADD */
if (u) {
- gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
} else {
- gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
}
break;
case 0x5: /* SQSUB */
if (u) {
- gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qsub_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
} else {
- gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qsub_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
}
break;
case 0x6: /* CMGT, CMHI */
@@ -8832,9 +8832,9 @@
break;
case 0x9: /* SQSHL, UQSHL */
if (u) {
- gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
} else {
- gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
}
break;
case 0xa: /* SRSHL, URSHL */
@@ -8846,9 +8846,9 @@
break;
case 0xb: /* SQRSHL, UQRSHL */
if (u) {
- gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qrshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
} else {
- gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ gen_helper_neon_qrshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
}
break;
case 0x10: /* ADD, SUB */
@@ -9192,7 +9192,7 @@
g_assert_not_reached();
}
- genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
+ genenvfn(tcg_rd32, tcg_env, tcg_rn, tcg_rm);
tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
}
@@ -9345,16 +9345,16 @@
switch (opcode) {
case 0x0: /* SQRDMLAH */
if (size == 1) {
- gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
+ gen_helper_neon_qrdmlah_s16(ele3, tcg_env, ele1, ele2, ele3);
} else {
- gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
+ gen_helper_neon_qrdmlah_s32(ele3, tcg_env, ele1, ele2, ele3);
}
break;
case 0x1: /* SQRDMLSH */
if (size == 1) {
- gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
+ gen_helper_neon_qrdmlsh_s16(ele3, tcg_env, ele1, ele2, ele3);
} else {
- gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
+ gen_helper_neon_qrdmlsh_s32(ele3, tcg_env, ele1, ele2, ele3);
}
break;
default:
@@ -9394,9 +9394,9 @@
break;
case 0x7: /* SQABS, SQNEG */
if (u) {
- gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
+ gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn);
} else {
- gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
+ gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn);
}
break;
case 0xa: /* CMLT */
@@ -9425,7 +9425,7 @@
gen_helper_vfp_negd(tcg_rd, tcg_rn);
break;
case 0x7f: /* FSQRT */
- gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
+ gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env);
break;
case 0x1a: /* FCVTNS */
case 0x1b: /* FCVTMS */
@@ -9731,7 +9731,7 @@
case 0x16: /* FCVTN, FCVTN2 */
/* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
if (size == 2) {
- gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
+ gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env);
} else {
TCGv_i32 tcg_lo = tcg_temp_new_i32();
TCGv_i32 tcg_hi = tcg_temp_new_i32();
@@ -9755,7 +9755,7 @@
* with von Neumann rounding (round to odd)
*/
assert(size == 2);
- gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
+ gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, tcg_env);
break;
default:
g_assert_not_reached();
@@ -9764,7 +9764,7 @@
if (genfn) {
genfn(tcg_res[pass], tcg_op);
} else if (genenvfn) {
- genenvfn(tcg_res[pass], cpu_env, tcg_op);
+ genenvfn(tcg_res[pass], tcg_env, tcg_op);
}
}
@@ -9790,9 +9790,9 @@
read_vec_element(s, tcg_rd, rd, pass, MO_64);
if (is_u) { /* USQADD */
- gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_uqadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
} else { /* SUQADD */
- gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_sqadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
}
write_vec_element(s, tcg_rd, rd, pass, MO_64);
}
@@ -9820,13 +9820,13 @@
if (is_u) { /* USQADD */
switch (size) {
case 0:
- gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_uqadd_s8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
break;
case 1:
- gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_uqadd_s16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
break;
case 2:
- gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_uqadd_s32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
break;
default:
g_assert_not_reached();
@@ -9834,13 +9834,13 @@
} else { /* SUQADD */
switch (size) {
case 0:
- gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_sqadd_u8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
break;
case 1:
- gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_sqadd_u16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
break;
case 2:
- gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ gen_helper_neon_sqadd_u32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
break;
default:
g_assert_not_reached();
@@ -10018,7 +10018,7 @@
{ gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
};
genfn = fns[size][u];
- genfn(tcg_rd, cpu_env, tcg_rn);
+ genfn(tcg_rd, tcg_env, tcg_rn);
break;
}
case 0x1a: /* FCVTNS */
@@ -10403,7 +10403,7 @@
case 11: /* SQDMLSL, SQDMLSL2 */
case 13: /* SQDMULL, SQDMULL2 */
tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
- gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
tcg_passres, tcg_passres);
break;
default:
@@ -10415,7 +10415,7 @@
if (accop < 0) {
tcg_gen_neg_i64(tcg_passres, tcg_passres);
}
- gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
tcg_res[pass], tcg_passres);
} else if (accop > 0) {
tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
@@ -10495,7 +10495,7 @@
case 13: /* SQDMULL, SQDMULL2 */
assert(size == 1);
gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
- gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
tcg_passres, tcg_passres);
break;
default:
@@ -10508,7 +10508,7 @@
if (accop < 0) {
gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
}
- gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
tcg_res[pass],
tcg_passres);
} else {
@@ -10978,7 +10978,7 @@
int data = (is_2 << 1) | is_s;
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm), cpu_env,
+ vec_full_reg_offset(s, rm), tcg_env,
is_q ? 16 : 8, vec_full_reg_size(s),
data, gen_helper_gvec_fmlal_a64);
}
@@ -11233,7 +11233,7 @@
}
if (genenvfn) {
- genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
+ genenvfn(tcg_res, tcg_env, tcg_op1, tcg_op2);
} else {
genfn(tcg_res, tcg_op1, tcg_op2);
}
@@ -11702,7 +11702,7 @@
tcg_res[pass] = tcg_temp_new_i64();
read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
- gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
+ gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env);
}
for (pass = 0; pass < 2; pass++) {
write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
@@ -12257,9 +12257,9 @@
break;
case 0x7: /* SQABS, SQNEG */
if (u) {
- gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
+ gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op);
} else {
- gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
+ gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op);
}
break;
case 0x2f: /* FABS */
@@ -12269,7 +12269,7 @@
gen_helper_vfp_negs(tcg_res, tcg_op);
break;
case 0x7f: /* FSQRT */
- gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
break;
case 0x1a: /* FCVTNS */
case 0x1b: /* FCVTMS */
@@ -12333,7 +12333,7 @@
{ gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
};
genfn = fns[size][u];
- genfn(tcg_res, cpu_env, tcg_op);
+ genfn(tcg_res, tcg_env, tcg_op);
break;
}
case 0x4: /* CLS, CLZ */
@@ -12770,7 +12770,7 @@
return;
}
size = MO_16;
- /* is_fp, but we pass cpu_env not fp_status. */
+ /* is_fp, but we pass tcg_env not fp_status. */
break;
default:
unallocated_encoding(s);
@@ -12913,7 +12913,7 @@
int data = (index << 2) | (is_2 << 1) | is_s;
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm), cpu_env,
+ vec_full_reg_offset(s, rm), tcg_env,
is_q ? 16 : 8, vec_full_reg_size(s),
data, gen_helper_gvec_fmlal_idx_a64);
}
@@ -13132,19 +13132,19 @@
break;
case 0x0c: /* SQDMULH */
if (size == 1) {
- gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
+ gen_helper_neon_qdmulh_s16(tcg_res, tcg_env,
tcg_op, tcg_idx);
} else {
- gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
+ gen_helper_neon_qdmulh_s32(tcg_res, tcg_env,
tcg_op, tcg_idx);
}
break;
case 0x0d: /* SQRDMULH */
if (size == 1) {
- gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
+ gen_helper_neon_qrdmulh_s16(tcg_res, tcg_env,
tcg_op, tcg_idx);
} else {
- gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
+ gen_helper_neon_qrdmulh_s32(tcg_res, tcg_env,
tcg_op, tcg_idx);
}
break;
@@ -13152,10 +13152,10 @@
read_vec_element_i32(s, tcg_res, rd, pass,
is_scalar ? size : MO_32);
if (size == 1) {
- gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
+ gen_helper_neon_qrdmlah_s16(tcg_res, tcg_env,
tcg_op, tcg_idx, tcg_res);
} else {
- gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
+ gen_helper_neon_qrdmlah_s32(tcg_res, tcg_env,
tcg_op, tcg_idx, tcg_res);
}
break;
@@ -13163,10 +13163,10 @@
read_vec_element_i32(s, tcg_res, rd, pass,
is_scalar ? size : MO_32);
if (size == 1) {
- gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
+ gen_helper_neon_qrdmlsh_s16(tcg_res, tcg_env,
tcg_op, tcg_idx, tcg_res);
} else {
- gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
+ gen_helper_neon_qrdmlsh_s32(tcg_res, tcg_env,
tcg_op, tcg_idx, tcg_res);
}
break;
@@ -13224,7 +13224,7 @@
if (satop) {
/* saturating, doubling */
- gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
tcg_passres, tcg_passres);
}
@@ -13246,7 +13246,7 @@
tcg_gen_neg_i64(tcg_passres, tcg_passres);
/* fall through */
case 0x3: /* SQDMLAL, SQDMLAL2 */
- gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
tcg_res[pass],
tcg_passres);
break;
@@ -13296,7 +13296,7 @@
gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
}
if (satop) {
- gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
tcg_passres, tcg_passres);
}
@@ -13320,7 +13320,7 @@
gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
/* fall through */
case 0x3: /* SQDMLAL, SQDMLAL2 */
- gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
tcg_res[pass],
tcg_passres);
break;
@@ -13904,7 +13904,7 @@
false, &host, &full, 0);
assert(!(flags & TLB_INVALID_MASK));
- return full->guarded;
+ return full->extra.arm.guarded;
#endif
}
@@ -13982,7 +13982,7 @@
CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUARMState *env = cpu->env_ptr;
+ CPUARMState *env = cpu_env(cpu);
ARMCPU *arm_cpu = env_archcpu(env);
CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
int bound, core_mmu_idx;
@@ -14089,7 +14089,7 @@
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *s = container_of(dcbase, DisasContext, base);
- CPUARMState *env = cpu->env_ptr;
+ CPUARMState *env = cpu_env(cpu);
uint64_t pc = s->base.pc_next;
uint32_t insn;
@@ -14120,7 +14120,7 @@
* start of the TB.
*/
assert(s->base.num_insns == 1);
- gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
s->base.is_jmp = DISAS_NORETURN;
s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
@@ -14244,11 +14244,11 @@
break;
case DISAS_WFE:
gen_a64_update_pc(dc, 4);
- gen_helper_wfe(cpu_env);
+ gen_helper_wfe(tcg_env);
break;
case DISAS_YIELD:
gen_a64_update_pc(dc, 4);
- gen_helper_yield(cpu_env);
+ gen_helper_yield(tcg_env);
break;
case DISAS_WFI:
/*
@@ -14256,7 +14256,7 @@
* the CPU if trying to debug across a WFI.
*/
gen_a64_update_pc(dc, 4);
- gen_helper_wfi(cpu_env, tcg_constant_i32(4));
+ gen_helper_wfi(tcg_env, tcg_constant_i32(4));
/*
* The helper doesn't necessarily throw an exception, but we
* must go back to the main loop to check for interrupts anyway.
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index b55dc43..96ba39b 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -115,7 +115,7 @@
static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
{
TCGv_ptr ret = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
+ tcg_gen_addi_ptr(ret, tcg_env, vec_full_reg_offset(s, regno));
return ret;
}
@@ -179,7 +179,7 @@
static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
{
TCGv_ptr ret = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno));
+ tcg_gen_addi_ptr(ret, tcg_env, pred_full_reg_offset(s, regno));
return ret;
}
diff --git a/target/arm/tcg/translate-m-nocp.c b/target/arm/tcg/translate-m-nocp.c
index 33f6478..f564d06 100644
--- a/target/arm/tcg/translate-m-nocp.c
+++ b/target/arm/tcg/translate-m-nocp.c
@@ -85,9 +85,9 @@
fptr = load_reg(s, a->rn);
if (a->l) {
- gen_helper_v7m_vlldm(cpu_env, fptr);
+ gen_helper_v7m_vlldm(tcg_env, fptr);
} else {
- gen_helper_v7m_vlstm(cpu_env, fptr);
+ gen_helper_v7m_vlstm(tcg_env, fptr);
}
clear_eci_state(s);
@@ -322,7 +322,7 @@
switch (regno) {
case ARM_VFP_FPSCR:
tmp = loadfn(s, opaque, true);
- gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ gen_helper_vfp_set_fpscr(tcg_env, tmp);
gen_lookup_tb(s);
break;
case ARM_VFP_FPSCR_NZCVQC:
@@ -391,7 +391,7 @@
R_V7M_CONTROL_SFPA_SHIFT, 1);
store_cpu_field(control, v7m.control[M_REG_S]);
tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
- gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ gen_helper_vfp_set_fpscr(tcg_env, tmp);
s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
break;
}
@@ -451,12 +451,12 @@
switch (regno) {
case ARM_VFP_FPSCR:
tmp = tcg_temp_new_i32();
- gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ gen_helper_vfp_get_fpscr(tmp, tcg_env);
storefn(s, opaque, tmp, true);
break;
case ARM_VFP_FPSCR_NZCVQC:
tmp = tcg_temp_new_i32();
- gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ gen_helper_vfp_get_fpscr(tmp, tcg_env);
tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
storefn(s, opaque, tmp, true);
break;
@@ -475,7 +475,7 @@
/* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
tmp = tcg_temp_new_i32();
sfpa = tcg_temp_new_i32();
- gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ gen_helper_vfp_get_fpscr(tmp, tcg_env);
tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
control = load_cpu_field(v7m.control[M_REG_S]);
tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
@@ -493,7 +493,7 @@
tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
store_cpu_field(control, v7m.control[M_REG_S]);
fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
- gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ gen_helper_vfp_set_fpscr(tcg_env, fpscr);
lookup_tb = true;
break;
}
@@ -506,7 +506,7 @@
gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
/* fpInactive case: reads as FPDSCR_NS */
- TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
+ tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
storefn(s, opaque, tmp, true);
lab_end = gen_new_label();
tcg_gen_br(lab_end);
@@ -528,7 +528,7 @@
tmp = tcg_temp_new_i32();
sfpa = tcg_temp_new_i32();
fpscr = tcg_temp_new_i32();
- gen_helper_vfp_get_fpscr(fpscr, cpu_env);
+ gen_helper_vfp_get_fpscr(fpscr, tcg_env);
tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
control = load_cpu_field(v7m.control[M_REG_S]);
tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
@@ -540,7 +540,7 @@
fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0),
fpdscr, fpscr);
- gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ gen_helper_vfp_set_fpscr(tcg_env, fpscr);
break;
}
case ARM_VFP_VPR:
@@ -643,7 +643,7 @@
}
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
- gen_helper_v8m_stackcheck(cpu_env, addr);
+ gen_helper_v8m_stackcheck(tcg_env, addr);
}
if (do_access) {
@@ -682,7 +682,7 @@
}
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
- gen_helper_v8m_stackcheck(cpu_env, addr);
+ gen_helper_v8m_stackcheck(tcg_env, addr);
}
if (do_access) {
diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c
index 17d8e68..b1a8d6a 100644
--- a/target/arm/tcg/translate-mve.c
+++ b/target/arm/tcg/translate-mve.c
@@ -56,7 +56,7 @@
static TCGv_ptr mve_qreg_ptr(unsigned reg)
{
TCGv_ptr ret = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ret, cpu_env, mve_qreg_offset(reg));
+ tcg_gen_addi_ptr(ret, tcg_env, mve_qreg_offset(reg));
return ret;
}
@@ -173,7 +173,7 @@
}
qreg = mve_qreg_ptr(a->qd);
- fn(cpu_env, qreg, addr);
+ fn(tcg_env, qreg, addr);
/*
* Writeback always happens after the last beat of the insn,
@@ -234,7 +234,7 @@
qd = mve_qreg_ptr(a->qd);
qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qm, addr);
+ fn(tcg_env, qd, qm, addr);
mve_update_eci(s);
return true;
}
@@ -330,7 +330,7 @@
qd = mve_qreg_ptr(a->qd);
qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qm, tcg_constant_i32(offset));
+ fn(tcg_env, qd, qm, tcg_constant_i32(offset));
mve_update_eci(s);
return true;
}
@@ -397,7 +397,7 @@
* We pass the index of Qd, not a pointer, because the helper must
* access multiple Q registers starting at Qd and working up.
*/
- fn(cpu_env, tcg_constant_i32(a->qd), rn);
+ fn(tcg_env, tcg_constant_i32(a->qd), rn);
if (a->w) {
tcg_gen_addi_i32(rn, rn, addrinc);
@@ -491,7 +491,7 @@
} else {
qd = mve_qreg_ptr(a->qd);
tcg_gen_dup_i32(a->size, rt, rt);
- gen_helper_mve_vdup(cpu_env, qd, rt);
+ gen_helper_mve_vdup(tcg_env, qd, rt);
}
mve_update_eci(s);
return true;
@@ -517,7 +517,7 @@
} else {
qd = mve_qreg_ptr(a->qd);
qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qm);
+ fn(tcg_env, qd, qm);
}
mve_update_eci(s);
return true;
@@ -612,7 +612,7 @@
qd = mve_qreg_ptr(a->qd);
qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode)));
+ fn(tcg_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode)));
mve_update_eci(s);
return true;
}
@@ -800,7 +800,7 @@
qd = mve_qreg_ptr(a->qd);
qn = mve_qreg_ptr(a->qn);
qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qn, qm);
+ fn(tcg_env, qd, qn, qm);
}
mve_update_eci(s);
return true;
@@ -1052,7 +1052,7 @@
qd = mve_qreg_ptr(a->qd);
qn = mve_qreg_ptr(a->qn);
rm = load_reg(s, a->rm);
- fn(cpu_env, qd, qn, rm);
+ fn(tcg_env, qd, qn, rm);
mve_update_eci(s);
return true;
}
@@ -1183,7 +1183,7 @@
rda_i = tcg_constant_i64(0);
}
- fn(rda_o, cpu_env, qn, qm, rda_i);
+ fn(rda_o, tcg_env, qn, qm, rda_i);
rdalo = tcg_temp_new_i32();
rdahi = tcg_temp_new_i32();
@@ -1281,7 +1281,7 @@
rda_o = tcg_temp_new_i32();
}
- fn(rda_o, cpu_env, qn, qm, rda_i);
+ fn(rda_o, tcg_env, qn, qm, rda_i);
store_reg(s, a->rda, rda_o);
mve_update_eci(s);
@@ -1377,7 +1377,7 @@
return true;
}
- gen_helper_mve_vpnot(cpu_env);
+ gen_helper_mve_vpnot(tcg_env);
/* This insn updates predication bits */
s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
mve_update_eci(s);
@@ -1419,7 +1419,7 @@
}
qm = mve_qreg_ptr(a->qm);
- fns[a->size][a->u](rda_o, cpu_env, qm, rda_i);
+ fns[a->size][a->u](rda_o, tcg_env, qm, rda_i);
store_reg(s, a->rda, rda_o);
mve_update_eci(s);
@@ -1471,9 +1471,9 @@
qm = mve_qreg_ptr(a->qm);
if (a->u) {
- gen_helper_mve_vaddlv_u(rda_o, cpu_env, qm, rda_i);
+ gen_helper_mve_vaddlv_u(rda_o, tcg_env, qm, rda_i);
} else {
- gen_helper_mve_vaddlv_s(rda_o, cpu_env, qm, rda_i);
+ gen_helper_mve_vaddlv_s(rda_o, tcg_env, qm, rda_i);
}
rdalo = tcg_temp_new_i32();
@@ -1508,7 +1508,7 @@
imm, 16, 16);
} else {
qd = mve_qreg_ptr(a->qd);
- fn(cpu_env, qd, tcg_constant_i64(imm));
+ fn(tcg_env, qd, tcg_constant_i64(imm));
}
mve_update_eci(s);
return true;
@@ -1580,7 +1580,7 @@
} else {
qd = mve_qreg_ptr(a->qd);
qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qm, tcg_constant_i32(shift));
+ fn(tcg_env, qd, qm, tcg_constant_i32(shift));
}
mve_update_eci(s);
return true;
@@ -1685,7 +1685,7 @@
qda = mve_qreg_ptr(a->qda);
rm = load_reg(s, a->rm);
- fn(cpu_env, qda, qda, rm);
+ fn(tcg_env, qda, qda, rm);
mve_update_eci(s);
return true;
}
@@ -1827,7 +1827,7 @@
qd = mve_qreg_ptr(a->qd);
rdm = load_reg(s, a->rdm);
- gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
+ gen_helper_mve_vshlc(rdm, tcg_env, qd, rdm, tcg_constant_i32(a->imm));
store_reg(s, a->rdm, rdm);
mve_update_eci(s);
return true;
@@ -1856,7 +1856,7 @@
qd = mve_qreg_ptr(a->qd);
rn = load_reg(s, a->rn);
- fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm));
+ fn(rn, tcg_env, qd, rn, tcg_constant_i32(a->imm));
store_reg(s, a->rn, rn);
mve_update_eci(s);
return true;
@@ -1891,7 +1891,7 @@
qd = mve_qreg_ptr(a->qd);
rn = load_reg(s, a->rn);
rm = load_reg(s, a->rm);
- fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm));
+ fn(rn, tcg_env, qd, rn, rm, tcg_constant_i32(a->imm));
store_reg(s, a->rn, rn);
mve_update_eci(s);
return true;
@@ -1957,7 +1957,7 @@
qn = mve_qreg_ptr(a->qn);
qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qn, qm);
+ fn(tcg_env, qn, qm);
if (a->mask) {
/* VPT */
gen_vpst(s, a->mask);
@@ -1988,7 +1988,7 @@
} else {
rm = load_reg(s, a->rm);
}
- fn(cpu_env, qn, rm);
+ fn(tcg_env, qn, rm);
if (a->mask) {
/* VPT */
gen_vpst(s, a->mask);
@@ -2089,7 +2089,7 @@
qm = mve_qreg_ptr(a->qm);
rda = load_reg(s, a->rda);
- fn(rda, cpu_env, qm, rda);
+ fn(rda, tcg_env, qm, rda);
store_reg(s, a->rda, rda);
mve_update_eci(s);
return true;
@@ -2153,7 +2153,7 @@
qm = mve_qreg_ptr(a->qm);
qn = mve_qreg_ptr(a->qn);
rda = load_reg(s, a->rda);
- fn(rda, cpu_env, qn, qm, rda);
+ fn(rda, tcg_env, qn, qm, rda);
store_reg(s, a->rda, rda);
mve_update_eci(s);
return true;
diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c
index 8de4ceb..144f18b 100644
--- a/target/arm/tcg/translate-neon.c
+++ b/target/arm/tcg/translate-neon.c
@@ -32,7 +32,7 @@
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
TCGv_ptr ret = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
+ tcg_gen_addi_ptr(ret, tcg_env, vfp_reg_offset(dp, reg));
return ret;
}
@@ -42,13 +42,13 @@
switch (mop) {
case MO_UB:
- tcg_gen_ld8u_i32(var, cpu_env, offset);
+ tcg_gen_ld8u_i32(var, tcg_env, offset);
break;
case MO_UW:
- tcg_gen_ld16u_i32(var, cpu_env, offset);
+ tcg_gen_ld16u_i32(var, tcg_env, offset);
break;
case MO_UL:
- tcg_gen_ld_i32(var, cpu_env, offset);
+ tcg_gen_ld_i32(var, tcg_env, offset);
break;
default:
g_assert_not_reached();
@@ -61,16 +61,16 @@
switch (mop) {
case MO_UB:
- tcg_gen_ld8u_i64(var, cpu_env, offset);
+ tcg_gen_ld8u_i64(var, tcg_env, offset);
break;
case MO_UW:
- tcg_gen_ld16u_i64(var, cpu_env, offset);
+ tcg_gen_ld16u_i64(var, tcg_env, offset);
break;
case MO_UL:
- tcg_gen_ld32u_i64(var, cpu_env, offset);
+ tcg_gen_ld32u_i64(var, tcg_env, offset);
break;
case MO_UQ:
- tcg_gen_ld_i64(var, cpu_env, offset);
+ tcg_gen_ld_i64(var, tcg_env, offset);
break;
default:
g_assert_not_reached();
@@ -83,13 +83,13 @@
switch (size) {
case MO_8:
- tcg_gen_st8_i32(var, cpu_env, offset);
+ tcg_gen_st8_i32(var, tcg_env, offset);
break;
case MO_16:
- tcg_gen_st16_i32(var, cpu_env, offset);
+ tcg_gen_st16_i32(var, tcg_env, offset);
break;
case MO_32:
- tcg_gen_st_i32(var, cpu_env, offset);
+ tcg_gen_st_i32(var, tcg_env, offset);
break;
default:
g_assert_not_reached();
@@ -102,16 +102,16 @@
switch (size) {
case MO_8:
- tcg_gen_st8_i64(var, cpu_env, offset);
+ tcg_gen_st8_i64(var, tcg_env, offset);
break;
case MO_16:
- tcg_gen_st16_i64(var, cpu_env, offset);
+ tcg_gen_st16_i64(var, tcg_env, offset);
break;
case MO_32:
- tcg_gen_st32_i64(var, cpu_env, offset);
+ tcg_gen_st32_i64(var, tcg_env, offset);
break;
case MO_64:
- tcg_gen_st_i64(var, cpu_env, offset);
+ tcg_gen_st_i64(var, tcg_env, offset);
break;
default:
g_assert_not_reached();
@@ -296,7 +296,7 @@
tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
vfp_reg_offset(a->q, a->vn),
vfp_reg_offset(a->q, a->vm),
- cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
+ tcg_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
gen_helper_gvec_fmlal_a32);
return true;
}
@@ -390,7 +390,7 @@
tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
vfp_reg_offset(a->q, a->vn),
vfp_reg_offset(a->q, a->rm),
- cpu_env, opr_sz, opr_sz,
+ tcg_env, opr_sz, opr_sz,
(a->index << 2) | a->s, /* is_2 == 0 */
gen_helper_gvec_fmlal_idx_a32);
return true;
@@ -920,7 +920,7 @@
#define DO_3SAME_64_ENV(INSN, FUNC) \
static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) \
{ \
- FUNC(d, cpu_env, n, m); \
+ FUNC(d, tcg_env, n, m); \
} \
DO_3SAME_64(INSN, gen_##INSN##_elt)
@@ -953,7 +953,7 @@
}
/*
- * Some helper functions need to be passed the cpu_env. In order
+ * Some helper functions need to be passed the tcg_env. In order
* to use those with the gvec APIs like tcg_gen_gvec_3() we need
* to create wrapper functions whose prototype is a NeonGenTwoOpFn()
* and which call a NeonGenTwoOpEnvFn().
@@ -961,7 +961,7 @@
#define WRAP_ENV_FN(WRAPNAME, FUNC) \
static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) \
{ \
- FUNC(d, cpu_env, n, m); \
+ FUNC(d, tcg_env, n, m); \
}
#define DO_3SAME_32_ENV(INSN, FUNC) \
@@ -1305,7 +1305,7 @@
{
/*
* 2-reg-and-shift operations, size == 3 case, where the
- * function needs to be passed cpu_env.
+ * function needs to be passed tcg_env.
*/
TCGv_i64 constimm;
int pass;
@@ -1338,7 +1338,7 @@
TCGv_i64 tmp = tcg_temp_new_i64();
read_neon_element64(tmp, a->vm, pass, MO_64);
- fn(tmp, cpu_env, tmp, constimm);
+ fn(tmp, tcg_env, tmp, constimm);
write_neon_element64(tmp, a->vd, pass, MO_64);
}
return true;
@@ -1349,7 +1349,7 @@
{
/*
* 2-reg-and-shift operations, size < 3 case, where the
- * helper needs to be passed cpu_env.
+ * helper needs to be passed tcg_env.
*/
TCGv_i32 constimm, tmp;
int pass;
@@ -1381,7 +1381,7 @@
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
read_neon_element32(tmp, a->vm, pass, MO_32);
- fn(tmp, cpu_env, tmp, constimm);
+ fn(tmp, tcg_env, tmp, constimm);
write_neon_element32(tmp, a->vd, pass, MO_32);
}
return true;
@@ -1447,11 +1447,11 @@
read_neon_element64(rm2, a->vm, 1, MO_64);
shiftfn(rm1, rm1, constimm);
- narrowfn(rd, cpu_env, rm1);
+ narrowfn(rd, tcg_env, rm1);
write_neon_element32(rd, a->vd, 0, MO_32);
shiftfn(rm2, rm2, constimm);
- narrowfn(rd, cpu_env, rm2);
+ narrowfn(rd, tcg_env, rm2);
write_neon_element32(rd, a->vd, 1, MO_32);
return true;
@@ -1514,7 +1514,7 @@
tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
- narrowfn(rm1, cpu_env, rtmp);
+ narrowfn(rm1, tcg_env, rtmp);
write_neon_element32(rm1, a->vd, 0, MO_32);
shiftfn(rm3, rm3, constimm);
@@ -1522,7 +1522,7 @@
tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
- narrowfn(rm3, cpu_env, rtmp);
+ narrowfn(rm3, tcg_env, rtmp);
write_neon_element32(rm3, a->vd, 1, MO_32);
return true;
}
@@ -2159,13 +2159,13 @@
static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
gen_helper_neon_mull_s16(rd, rn, rm);
- gen_helper_neon_addl_saturate_s32(rd, cpu_env, rd, rd);
+ gen_helper_neon_addl_saturate_s32(rd, tcg_env, rd, rd);
}
static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
gen_mull_s32(rd, rn, rm);
- gen_helper_neon_addl_saturate_s64(rd, cpu_env, rd, rd);
+ gen_helper_neon_addl_saturate_s64(rd, tcg_env, rd, rd);
}
static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
@@ -2182,12 +2182,12 @@
static void gen_VQDMLAL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
- gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
+ gen_helper_neon_addl_saturate_s32(rd, tcg_env, rn, rm);
}
static void gen_VQDMLAL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
- gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
+ gen_helper_neon_addl_saturate_s64(rd, tcg_env, rn, rm);
}
static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
@@ -2211,13 +2211,13 @@
static void gen_VQDMLSL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
gen_helper_neon_negl_u32(rm, rm);
- gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
+ gen_helper_neon_addl_saturate_s32(rd, tcg_env, rn, rm);
}
static void gen_VQDMLSL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
tcg_gen_neg_i64(rm, rm);
- gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
+ gen_helper_neon_addl_saturate_s64(rd, tcg_env, rn, rm);
}
static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
@@ -2550,7 +2550,7 @@
for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
read_neon_element32(rn, a->vn, pass, MO_32);
read_neon_element32(rd, a->vd, pass, MO_32);
- opfn(rd, cpu_env, rn, scalar, rd);
+ opfn(rd, tcg_env, rn, scalar, rd);
write_neon_element32(rd, a->vd, pass, MO_32);
}
return true;
@@ -2837,7 +2837,7 @@
val = tcg_temp_new_i64();
read_neon_element64(val, a->vm, 0, MO_64);
- gen_helper_neon_tbl(val, cpu_env, desc, val, def);
+ gen_helper_neon_tbl(val, tcg_env, desc, val, def);
write_neon_element64(val, a->vd, 0, MO_64);
return true;
}
@@ -3171,9 +3171,9 @@
rd1 = tcg_temp_new_i32();
read_neon_element64(rm, a->vm, 0, MO_64);
- narrowfn(rd0, cpu_env, rm);
+ narrowfn(rd0, tcg_env, rm);
read_neon_element64(rm, a->vm, 1, MO_64);
- narrowfn(rd1, cpu_env, rm);
+ narrowfn(rd1, tcg_env, rm);
write_neon_element32(rd0, a->vd, 0, MO_32);
write_neon_element32(rd1, a->vd, 1, MO_32);
return true;
@@ -3625,7 +3625,7 @@
#define WRAP_1OP_ENV_FN(WRAPNAME, FUNC) \
static void WRAPNAME(TCGv_i32 d, TCGv_i32 m) \
{ \
- FUNC(d, cpu_env, m); \
+ FUNC(d, tcg_env, m); \
}
WRAP_1OP_ENV_FN(gen_VQABS_s8, gen_helper_neon_qabs_s8)
diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c
index 6038b0a..8f0dfc8 100644
--- a/target/arm/tcg/translate-sme.c
+++ b/target/arm/tcg/translate-sme.c
@@ -90,7 +90,7 @@
/* Add the byte offset to env to produce the final pointer. */
addr = tcg_temp_new_ptr();
tcg_gen_ext_i32_ptr(addr, tmp);
- tcg_gen_add_ptr(addr, addr, cpu_env);
+ tcg_gen_add_ptr(addr, addr, tcg_env);
return addr;
}
@@ -106,7 +106,7 @@
offset = tile * sizeof(ARMVectorReg) + offsetof(CPUARMState, zarray);
- tcg_gen_addi_ptr(addr, cpu_env, offset);
+ tcg_gen_addi_ptr(addr, tcg_env, offset);
return addr;
}
@@ -116,7 +116,7 @@
return false;
}
if (sme_za_enabled_check(s)) {
- gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
+ gen_helper_sme_zero(tcg_env, tcg_constant_i32(a->imm),
tcg_constant_i32(streaming_vec_reg_size(s)));
}
return true;
@@ -237,7 +237,7 @@
svl = streaming_vec_reg_size(s);
desc = simd_desc(svl, svl, desc);
- fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr,
+ fns[a->esz][be][a->v][mte][a->st](tcg_env, t_za, t_pg, addr,
tcg_constant_i32(desc));
return true;
}
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index 2ba5efa..7b39962 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -497,8 +497,8 @@
TCGv_ptr gptr = tcg_temp_new_ptr();
TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_addi_ptr(dptr, cpu_env, dofs);
- tcg_gen_addi_ptr(gptr, cpu_env, gofs);
+ tcg_gen_addi_ptr(dptr, tcg_env, dofs);
+ tcg_gen_addi_ptr(gptr, tcg_env, gofs);
gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words));
@@ -956,8 +956,8 @@
t_zn = tcg_temp_new_ptr();
t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
fn(temp, t_zn, t_pg, desc);
write_fp_dreg(s, a->rd, temp);
@@ -1209,7 +1209,7 @@
desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
t_zd = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
+ tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd));
if (esz == 3) {
gen_helper_sve_index_d(t_zd, start, incr, desc);
} else {
@@ -1379,12 +1379,12 @@
TCGv_i64 pm = tcg_temp_new_i64();
TCGv_i64 pg = tcg_temp_new_i64();
- tcg_gen_ld_i64(pn, cpu_env, nofs);
- tcg_gen_ld_i64(pm, cpu_env, mofs);
- tcg_gen_ld_i64(pg, cpu_env, gofs);
+ tcg_gen_ld_i64(pn, tcg_env, nofs);
+ tcg_gen_ld_i64(pm, tcg_env, mofs);
+ tcg_gen_ld_i64(pg, tcg_env, gofs);
gvec_op->fni8(pd, pn, pm, pg);
- tcg_gen_st_i64(pd, cpu_env, dofs);
+ tcg_gen_st_i64(pd, tcg_env, dofs);
do_predtest1(pd, pg);
} else {
@@ -1654,8 +1654,8 @@
TCGv_i64 pn = tcg_temp_new_i64();
TCGv_i64 pg = tcg_temp_new_i64();
- tcg_gen_ld_i64(pn, cpu_env, nofs);
- tcg_gen_ld_i64(pg, cpu_env, gofs);
+ tcg_gen_ld_i64(pn, tcg_env, nofs);
+ tcg_gen_ld_i64(pg, tcg_env, gofs);
do_predtest1(pn, pg);
} else {
do_predtest(s, nofs, gofs, words);
@@ -1736,7 +1736,7 @@
t = tcg_temp_new_i64();
if (fullsz <= 64) {
tcg_gen_movi_i64(t, lastword);
- tcg_gen_st_i64(t, cpu_env, ofs);
+ tcg_gen_st_i64(t, tcg_env, ofs);
goto done;
}
@@ -1755,17 +1755,17 @@
tcg_gen_movi_i64(t, word);
for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
- tcg_gen_st_i64(t, cpu_env, ofs + i);
+ tcg_gen_st_i64(t, tcg_env, ofs + i);
}
if (lastword != word) {
tcg_gen_movi_i64(t, lastword);
- tcg_gen_st_i64(t, cpu_env, ofs + i);
+ tcg_gen_st_i64(t, tcg_env, ofs + i);
i += 8;
}
if (i < fullsz) {
tcg_gen_movi_i64(t, 0);
for (; i < fullsz; i += 8) {
- tcg_gen_st_i64(t, cpu_env, ofs + i);
+ tcg_gen_st_i64(t, tcg_env, ofs + i);
}
}
@@ -1822,8 +1822,8 @@
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
- tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->rn));
t = tcg_temp_new_i32();
gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc));
@@ -1919,8 +1919,8 @@
dptr = tcg_temp_new_ptr();
nptr = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
- tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
+ tcg_gen_addi_ptr(dptr, tcg_env, vec_full_reg_offset(s, rd));
+ tcg_gen_addi_ptr(nptr, tcg_env, vec_full_reg_offset(s, rn));
desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
switch (esz) {
@@ -2163,9 +2163,9 @@
TCGv_ptr t_zn = tcg_temp_new_ptr();
TCGv_ptr t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
- tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+ tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd));
+ tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, rn));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
fns[esz](t_zd, t_zn, t_pg, val, desc);
}
@@ -2310,8 +2310,8 @@
TCGv_ptr t_zd = tcg_temp_new_ptr();
TCGv_ptr t_zn = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
fns[a->esz](t_zd, t_zn, val, desc);
}
@@ -2323,7 +2323,7 @@
}
if (sve_access_check(s)) {
TCGv_i64 t = tcg_temp_new_i64();
- tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
+ tcg_gen_ld_i64(t, tcg_env, vec_reg_offset(s, a->rm, 0, MO_64));
do_insr_i64(s, a, t);
}
return true;
@@ -2409,9 +2409,9 @@
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
- tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
- tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
+ tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_m, tcg_env, pred_full_reg_offset(s, a->rm));
fn(t_d, t_n, t_m, tcg_constant_i32(desc));
return true;
@@ -2429,8 +2429,8 @@
TCGv_ptr t_n = tcg_temp_new_ptr();
uint32_t desc = 0;
- tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn));
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
@@ -2525,7 +2525,7 @@
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
- tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
+ tcg_gen_addi_ptr(t_p, tcg_env, pred_full_reg_offset(s, pg));
gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc));
}
@@ -2602,7 +2602,7 @@
}
#endif
tcg_gen_ext_i32_ptr(p, last);
- tcg_gen_add_ptr(p, p, cpu_env);
+ tcg_gen_add_ptr(p, p, tcg_env);
return load_esz(p, vec_full_reg_offset(s, rm), esz);
}
@@ -2674,7 +2674,7 @@
}
/* The conceit here is that while last < 0 indicates not found, after
- * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
+ * adjusting for tcg_env->vfp.zregs[rm], it is still a valid address
* from which we can load garbage. We then discard the garbage with
* a conditional move.
*/
@@ -2690,7 +2690,7 @@
if (sve_access_check(s)) {
int esz = a->esz;
int ofs = vec_reg_offset(s, a->rd, 0, esz);
- TCGv_i64 reg = load_esz(cpu_env, ofs, esz);
+ TCGv_i64 reg = load_esz(tcg_env, ofs, esz);
do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
write_fp_dreg(s, a->rd, reg);
@@ -2794,7 +2794,7 @@
}
if (sve_access_check(s)) {
int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
- TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
+ TCGv_i64 t = load_esz(tcg_env, ofs, a->esz);
do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
}
return true;
@@ -2847,10 +2847,10 @@
zm = tcg_temp_new_ptr();
pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
- tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
- tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(zm, tcg_env, vec_full_reg_offset(s, a->rm));
+ tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg));
gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0)));
@@ -2920,9 +2920,9 @@
zn = tcg_temp_new_ptr();
pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
- tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg));
gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm)));
@@ -2971,10 +2971,10 @@
TCGv_ptr g = tcg_temp_new_ptr();
TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
- tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
- tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
- tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(m, tcg_env, pred_full_reg_offset(s, a->rm));
+ tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg));
if (a->s) {
TCGv_i32 t = tcg_temp_new_i32();
@@ -3001,9 +3001,9 @@
TCGv_ptr g = tcg_temp_new_ptr();
TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
- tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
- tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
- tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg));
if (a->s) {
TCGv_i32 t = tcg_temp_new_i32();
@@ -3044,10 +3044,10 @@
if (psz <= 8) {
uint64_t psz_mask;
- tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
+ tcg_gen_ld_i64(val, tcg_env, pred_full_reg_offset(s, pn));
if (pn != pg) {
TCGv_i64 g = tcg_temp_new_i64();
- tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
+ tcg_gen_ld_i64(g, tcg_env, pred_full_reg_offset(s, pg));
tcg_gen_and_i64(val, val, g);
}
@@ -3066,8 +3066,8 @@
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
- tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+ tcg_gen_addi_ptr(t_pn, tcg_env, pred_full_reg_offset(s, pn));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc));
}
@@ -3291,7 +3291,7 @@
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
ptr = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd));
if (a->lt) {
gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
@@ -3354,7 +3354,7 @@
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
ptr = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd));
gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
do_pred_flags(t2);
@@ -3684,8 +3684,8 @@
t_zn = tcg_temp_new_ptr();
t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
fn(temp, t_zn, t_pg, status, t_desc);
@@ -3802,11 +3802,11 @@
return true;
}
- t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
+ t_val = load_esz(tcg_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
t_rm = tcg_temp_new_ptr();
t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_addi_ptr(t_rm, tcg_env, vec_full_reg_offset(s, a->rm));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg));
t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
@@ -3878,9 +3878,9 @@
t_zd = tcg_temp_new_ptr();
t_zn = tcg_temp_new_ptr();
t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
- tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+ tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd));
+ tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, zn));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
@@ -4228,7 +4228,7 @@
/*
* Predicate register loads can be any multiple of 2.
- * Note that we still store the entire 64-bit unit into cpu_env.
+ * Note that we still store the entire 64-bit unit into tcg_env.
*/
if (len_remain >= 8) {
t0 = tcg_temp_new_i64();
@@ -4370,7 +4370,7 @@
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4383,7 +4383,7 @@
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4396,7 +4396,7 @@
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4409,7 +4409,7 @@
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4465,8 +4465,8 @@
desc = simd_desc(vsz, vsz, zt | desc);
t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
- fn(cpu_env, t_pg, addr, tcg_constant_i32(desc));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
+ fn(tcg_env, t_pg, addr, tcg_constant_i32(desc));
}
/* Indexed by [mte][be][dtype][nreg] */
@@ -4860,18 +4860,18 @@
#if HOST_BIG_ENDIAN
poff += 6;
#endif
- tcg_gen_ld16u_i64(tmp, cpu_env, poff);
+ tcg_gen_ld16u_i64(tmp, tcg_env, poff);
poff = offsetof(CPUARMState, vfp.preg_tmp);
- tcg_gen_st_i64(tmp, cpu_env, poff);
+ tcg_gen_st_i64(tmp, tcg_env, poff);
}
t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+ tcg_gen_addi_ptr(t_pg, tcg_env, poff);
gen_helper_gvec_mem *fn
= ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
- fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
+ fn(tcg_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
/* Replicate that first quadword. */
if (vsz > 16) {
@@ -4939,18 +4939,18 @@
#if HOST_BIG_ENDIAN
poff += 4;
#endif
- tcg_gen_ld32u_i64(tmp, cpu_env, poff);
+ tcg_gen_ld32u_i64(tmp, tcg_env, poff);
poff = offsetof(CPUARMState, vfp.preg_tmp);
- tcg_gen_st_i64(tmp, cpu_env, poff);
+ tcg_gen_st_i64(tmp, tcg_env, poff);
}
t_pg = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+ tcg_gen_addi_ptr(t_pg, tcg_env, poff);
gen_helper_gvec_mem *fn
= ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
- fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
+ fn(tcg_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
/*
* Replicate that first octaword.
@@ -5027,7 +5027,7 @@
*/
uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
temp = tcg_temp_new_i64();
- tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_ld_i64(temp, tcg_env, pred_full_reg_offset(s, a->pg));
tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
} else {
@@ -5238,10 +5238,10 @@
}
desc = simd_desc(vsz, vsz, desc | scale);
- tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
- tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
- tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
- fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
+ tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
+ tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm));
+ tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt));
+ fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
}
/* Indexed by [mte][be][ff][xs][u][msz]. */
@@ -7197,7 +7197,7 @@
{
return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s,
a->rd, a->rn, a->rm, a->ra,
- (sel << 1) | sub, cpu_env);
+ (sel << 1) | sub, tcg_env);
}
TRANS_FEAT(FMLALB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, false)
@@ -7209,7 +7209,7 @@
{
return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
a->rd, a->rn, a->rm, a->ra,
- (a->index << 2) | (sel << 1) | sub, cpu_env);
+ (a->index << 2) | (sel << 1) | sub, tcg_env);
}
TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false)
@@ -7289,7 +7289,7 @@
/* Load the predicate word. */
tcg_gen_trunc_i64_ptr(ptr, didx);
- tcg_gen_add_ptr(ptr, ptr, cpu_env);
+ tcg_gen_add_ptr(ptr, ptr, tcg_env);
tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
/* Extract the predicate bit and replicate to MO_64. */
diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c
index d3e89fd..b9af03b 100644
--- a/target/arm/tcg/translate-vfp.c
+++ b/target/arm/tcg/translate-vfp.c
@@ -30,22 +30,22 @@
static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
- tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
+ tcg_gen_ld_i64(var, tcg_env, vfp_reg_offset(true, reg));
}
static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
- tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
+ tcg_gen_st_i64(var, tcg_env, vfp_reg_offset(true, reg));
}
static inline void vfp_load_reg32(TCGv_i32 var, int reg)
{
- tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
+ tcg_gen_ld_i32(var, tcg_env, vfp_reg_offset(false, reg));
}
static inline void vfp_store_reg32(TCGv_i32 var, int reg)
{
- tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
+ tcg_gen_st_i32(var, tcg_env, vfp_reg_offset(false, reg));
}
/*
@@ -116,7 +116,7 @@
if (translator_io_start(&s->base)) {
s->base.is_jmp = DISAS_UPDATE_EXIT;
}
- gen_helper_v7m_preserve_fp_state(cpu_env);
+ gen_helper_v7m_preserve_fp_state(tcg_env);
/*
* If the preserve_fp_state helper doesn't throw an exception
* then it will clear LSPACT; we don't need to repeat this for
@@ -172,7 +172,7 @@
uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
- gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ gen_helper_vfp_set_fpscr(tcg_env, fpscr);
if (dc_isar_feature(aa32_mve, s)) {
store_cpu_field(tcg_constant_i32(0), v7m.vpr);
}
@@ -815,7 +815,7 @@
if (s->current_el == 1) {
gen_set_condexec(s);
gen_update_pc(s, 0);
- gen_helper_check_hcr_el2_trap(cpu_env,
+ gen_helper_check_hcr_el2_trap(tcg_env,
tcg_constant_i32(a->rt),
tcg_constant_i32(a->reg));
}
@@ -831,7 +831,7 @@
tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
} else {
tmp = tcg_temp_new_i32();
- gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ gen_helper_vfp_get_fpscr(tmp, tcg_env);
}
break;
default:
@@ -855,7 +855,7 @@
break;
case ARM_VFP_FPSCR:
tmp = load_reg(s, a->rt);
- gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ gen_helper_vfp_set_fpscr(tcg_env, tmp);
gen_lookup_tb(s);
break;
case ARM_VFP_FPEXC:
@@ -1169,7 +1169,7 @@
* value is above, it is UNKNOWN whether the limit check
* triggers; we choose to trigger.
*/
- gen_helper_v8m_stackcheck(cpu_env, addr);
+ gen_helper_v8m_stackcheck(tcg_env, addr);
}
offset = 4;
@@ -1252,7 +1252,7 @@
* value is above, it is UNKNOWN whether the limit check
* triggers; we choose to trigger.
*/
- gen_helper_v8m_stackcheck(cpu_env, addr);
+ gen_helper_v8m_stackcheck(tcg_env, addr);
}
offset = 8;
@@ -2419,17 +2419,17 @@
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
- gen_helper_vfp_sqrth(vd, vm, cpu_env);
+ gen_helper_vfp_sqrth(vd, vm, tcg_env);
}
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
- gen_helper_vfp_sqrts(vd, vm, cpu_env);
+ gen_helper_vfp_sqrts(vd, vm, tcg_env);
}
static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
- gen_helper_vfp_sqrtd(vd, vm, cpu_env);
+ gen_helper_vfp_sqrtd(vd, vm, tcg_env);
}
DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
@@ -2464,9 +2464,9 @@
}
if (a->e) {
- gen_helper_vfp_cmpeh(vd, vm, cpu_env);
+ gen_helper_vfp_cmpeh(vd, vm, tcg_env);
} else {
- gen_helper_vfp_cmph(vd, vm, cpu_env);
+ gen_helper_vfp_cmph(vd, vm, tcg_env);
}
return true;
}
@@ -2499,9 +2499,9 @@
}
if (a->e) {
- gen_helper_vfp_cmpes(vd, vm, cpu_env);
+ gen_helper_vfp_cmpes(vd, vm, tcg_env);
} else {
- gen_helper_vfp_cmps(vd, vm, cpu_env);
+ gen_helper_vfp_cmps(vd, vm, tcg_env);
}
return true;
}
@@ -2539,9 +2539,9 @@
}
if (a->e) {
- gen_helper_vfp_cmped(vd, vm, cpu_env);
+ gen_helper_vfp_cmped(vd, vm, tcg_env);
} else {
- gen_helper_vfp_cmpd(vd, vm, cpu_env);
+ gen_helper_vfp_cmpd(vd, vm, tcg_env);
}
return true;
}
@@ -2564,7 +2564,7 @@
ahp_mode = get_ahp_flag();
tmp = tcg_temp_new_i32();
/* The T bit tells us if we want the low or high 16 bits of Vm */
- tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+ tcg_gen_ld16u_i32(tmp, tcg_env, vfp_f16_offset(a->vm, a->t));
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
vfp_store_reg32(tmp, a->vd);
return true;
@@ -2598,7 +2598,7 @@
ahp_mode = get_ahp_flag();
tmp = tcg_temp_new_i32();
/* The T bit tells us if we want the low or high 16 bits of Vm */
- tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+ tcg_gen_ld16u_i32(tmp, tcg_env, vfp_f16_offset(a->vm, a->t));
vd = tcg_temp_new_i64();
gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
vfp_store_reg64(vd, a->vd);
@@ -2623,7 +2623,7 @@
vfp_load_reg32(tmp, a->vm);
gen_helper_bfcvt(tmp, tmp, fpst);
- tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_gen_st16_i32(tmp, tcg_env, vfp_f16_offset(a->vd, a->t));
return true;
}
@@ -2647,7 +2647,7 @@
vfp_load_reg32(tmp, a->vm);
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
- tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_gen_st16_i32(tmp, tcg_env, vfp_f16_offset(a->vd, a->t));
return true;
}
@@ -2682,7 +2682,7 @@
vfp_load_reg64(vm, a->vm);
gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
- tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_gen_st16_i32(tmp, tcg_env, vfp_f16_offset(a->vd, a->t));
return true;
}
@@ -2932,7 +2932,7 @@
vm = tcg_temp_new_i32();
vd = tcg_temp_new_i64();
vfp_load_reg32(vm, a->vm);
- gen_helper_vfp_fcvtds(vd, vm, cpu_env);
+ gen_helper_vfp_fcvtds(vd, vm, tcg_env);
vfp_store_reg64(vd, a->vd);
return true;
}
@@ -2958,7 +2958,7 @@
vd = tcg_temp_new_i32();
vm = tcg_temp_new_i64();
vfp_load_reg64(vm, a->vm);
- gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
+ gen_helper_vfp_fcvtsd(vd, vm, tcg_env);
vfp_store_reg32(vd, a->vd);
return true;
}
@@ -3076,7 +3076,7 @@
vm = tcg_temp_new_i64();
vd = tcg_temp_new_i32();
vfp_load_reg64(vm, a->vm);
- gen_helper_vjcvt(vd, vm, cpu_env);
+ gen_helper_vjcvt(vd, vm, tcg_env);
vfp_store_reg32(vd, a->vd);
return true;
}
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index d83a0e7..48927fb 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -63,18 +63,18 @@
int i;
for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_R[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUARMState, regs[i]),
regnames[i]);
}
- cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
- cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
- cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
- cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
+ cpu_CF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, CF), "CF");
+ cpu_NF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, NF), "NF");
+ cpu_VF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, VF), "VF");
+ cpu_ZF = tcg_global_mem_new_i32(tcg_env, offsetof(CPUARMState, ZF), "ZF");
- cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
+ cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
- cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
+ cpu_exclusive_val = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUARMState, exclusive_val), "exclusive_val");
a64_translate_init();
@@ -179,10 +179,10 @@
{
switch (size) {
case 1:
- tcg_gen_st8_i32(var, cpu_env, offset);
+ tcg_gen_st8_i32(var, tcg_env, offset);
break;
case 4:
- tcg_gen_st_i32(var, cpu_env, offset);
+ tcg_gen_st_i32(var, tcg_env, offset);
break;
default:
g_assert_not_reached();
@@ -329,7 +329,7 @@
{
#ifndef CONFIG_USER_ONLY
if (s->v8m_stackcheck) {
- gen_helper_v8m_stackcheck(cpu_env, var);
+ gen_helper_v8m_stackcheck(tcg_env, var);
}
#endif
store_reg(s, 13, var);
@@ -346,7 +346,7 @@
void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
- gen_helper_cpsr_write(cpu_env, var, tcg_constant_i32(mask));
+ gen_helper_cpsr_write(tcg_env, var, tcg_constant_i32(mask));
}
static void gen_rebuild_hflags(DisasContext *s, bool new_el)
@@ -355,16 +355,16 @@
if (new_el) {
if (m_profile) {
- gen_helper_rebuild_hflags_m32_newel(cpu_env);
+ gen_helper_rebuild_hflags_m32_newel(tcg_env);
} else {
- gen_helper_rebuild_hflags_a32_newel(cpu_env);
+ gen_helper_rebuild_hflags_a32_newel(tcg_env);
}
} else {
TCGv_i32 tcg_el = tcg_constant_i32(s->current_el);
if (m_profile) {
- gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
+ gen_helper_rebuild_hflags_m32(tcg_env, tcg_el);
} else {
- gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
+ gen_helper_rebuild_hflags_a32(tcg_env, tcg_el);
}
}
}
@@ -372,7 +372,7 @@
static void gen_exception_internal(int excp)
{
assert(excp_is_internal(excp));
- gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
+ gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}
static void gen_singlestep_exception(DisasContext *s)
@@ -617,10 +617,10 @@
{
if (flags) {
switch (shiftop) {
- case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
- case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
- case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
- case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
+ case 0: gen_helper_shl_cc(var, tcg_env, var, shift); break;
+ case 1: gen_helper_shr_cc(var, tcg_env, var, shift); break;
+ case 2: gen_helper_sar_cc(var, tcg_env, var, shift); break;
+ case 3: gen_helper_ror_cc(var, tcg_env, var, shift); break;
}
} else {
switch (shiftop) {
@@ -849,7 +849,7 @@
* is correct in the non-UNPREDICTABLE cases, and we can choose
* "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
*/
- gen_helper_v7m_bxns(cpu_env, var);
+ gen_helper_v7m_bxns(tcg_env, var);
s->base.is_jmp = DISAS_EXIT;
}
@@ -862,7 +862,7 @@
* The blxns helper may throw an exception.
*/
gen_update_pc(s, curr_insn_len(s));
- gen_helper_v7m_blxns(cpu_env, var);
+ gen_helper_v7m_blxns(tcg_env, var);
s->base.is_jmp = DISAS_EXIT;
}
@@ -1024,7 +1024,7 @@
* the insn really executes).
*/
gen_update_pc(s, 0);
- gen_helper_pre_hvc(cpu_env);
+ gen_helper_pre_hvc(tcg_env);
/* Otherwise we will treat this as a real exception which
* happens after execution of the insn. (The distinction matters
* for the PC value reported to the exception handler and also
@@ -1041,7 +1041,7 @@
* the insn executes.
*/
gen_update_pc(s, 0);
- gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa32_smc()));
+ gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa32_smc()));
gen_update_pc(s, curr_insn_len(s));
s->base.is_jmp = DISAS_SMC;
}
@@ -1056,7 +1056,7 @@
static void gen_exception_el_v(int excp, uint32_t syndrome, TCGv_i32 tcg_el)
{
- gen_helper_exception_with_syndrome_el(cpu_env, tcg_constant_i32(excp),
+ gen_helper_exception_with_syndrome_el(tcg_env, tcg_constant_i32(excp),
tcg_constant_i32(syndrome), tcg_el);
}
@@ -1067,7 +1067,7 @@
static void gen_exception(int excp, uint32_t syndrome)
{
- gen_helper_exception_with_syndrome(cpu_env, tcg_constant_i32(excp),
+ gen_helper_exception_with_syndrome(tcg_env, tcg_constant_i32(excp),
tcg_constant_i32(syndrome));
}
@@ -1108,7 +1108,7 @@
{
gen_set_condexec(s);
gen_update_pc(s, 0);
- gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syn));
+ gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syn));
s->base.is_jmp = DISAS_NORETURN;
}
@@ -1192,20 +1192,20 @@
switch (memop) {
case MO_SB:
- tcg_gen_ld8s_i32(dest, cpu_env, off);
+ tcg_gen_ld8s_i32(dest, tcg_env, off);
break;
case MO_UB:
- tcg_gen_ld8u_i32(dest, cpu_env, off);
+ tcg_gen_ld8u_i32(dest, tcg_env, off);
break;
case MO_SW:
- tcg_gen_ld16s_i32(dest, cpu_env, off);
+ tcg_gen_ld16s_i32(dest, tcg_env, off);
break;
case MO_UW:
- tcg_gen_ld16u_i32(dest, cpu_env, off);
+ tcg_gen_ld16u_i32(dest, tcg_env, off);
break;
case MO_UL:
case MO_SL:
- tcg_gen_ld_i32(dest, cpu_env, off);
+ tcg_gen_ld_i32(dest, tcg_env, off);
break;
default:
g_assert_not_reached();
@@ -1218,13 +1218,13 @@
switch (memop) {
case MO_SL:
- tcg_gen_ld32s_i64(dest, cpu_env, off);
+ tcg_gen_ld32s_i64(dest, tcg_env, off);
break;
case MO_UL:
- tcg_gen_ld32u_i64(dest, cpu_env, off);
+ tcg_gen_ld32u_i64(dest, tcg_env, off);
break;
case MO_UQ:
- tcg_gen_ld_i64(dest, cpu_env, off);
+ tcg_gen_ld_i64(dest, tcg_env, off);
break;
default:
g_assert_not_reached();
@@ -1237,13 +1237,13 @@
switch (memop) {
case MO_8:
- tcg_gen_st8_i32(src, cpu_env, off);
+ tcg_gen_st8_i32(src, tcg_env, off);
break;
case MO_16:
- tcg_gen_st16_i32(src, cpu_env, off);
+ tcg_gen_st16_i32(src, tcg_env, off);
break;
case MO_32:
- tcg_gen_st_i32(src, cpu_env, off);
+ tcg_gen_st_i32(src, tcg_env, off);
break;
default:
g_assert_not_reached();
@@ -1256,10 +1256,10 @@
switch (memop) {
case MO_32:
- tcg_gen_st32_i64(src, cpu_env, off);
+ tcg_gen_st32_i64(src, tcg_env, off);
break;
case MO_64:
- tcg_gen_st_i64(src, cpu_env, off);
+ tcg_gen_st_i64(src, tcg_env, off);
break;
default:
g_assert_not_reached();
@@ -1270,24 +1270,24 @@
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
- tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+ tcg_gen_ld_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
- tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+ tcg_gen_st_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
TCGv_i32 var = tcg_temp_new_i32();
- tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+ tcg_gen_ld_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
return var;
}
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+ tcg_gen_st_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
@@ -1329,7 +1329,7 @@
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
iwmmxt_load_reg(cpu_V1, rn); \
- gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
+ gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0, cpu_V1); \
}
#define IWMMXT_OP_ENV_SIZE(name) \
@@ -1340,7 +1340,7 @@
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
- gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
+ gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0); \
}
IWMMXT_OP(maddsq)
@@ -2113,13 +2113,13 @@
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_srlw(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_srll(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_srlq(cpu_M0, tcg_env, cpu_M0, tmp);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2139,13 +2139,13 @@
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_sraw(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_sral(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_sraq(cpu_M0, tcg_env, cpu_M0, tmp);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2165,13 +2165,13 @@
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_sllw(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_slll(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_sllq(cpu_M0, tcg_env, cpu_M0, tmp);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2191,19 +2191,19 @@
if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
return 1;
}
- gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorw(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 2:
if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
return 1;
}
- gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorl(cpu_M0, tcg_env, cpu_M0, tmp);
break;
case 3:
if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
return 1;
}
- gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorq(cpu_M0, tcg_env, cpu_M0, tmp);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
@@ -2335,7 +2335,7 @@
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
- gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
+ gen_helper_iwmmxt_shufh(cpu_M0, tcg_env, cpu_M0, tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
@@ -2857,7 +2857,7 @@
gen_set_condexec(s);
gen_update_pc(s, 0);
tcg_reg = load_reg(s, rn);
- gen_helper_msr_banked(cpu_env, tcg_reg,
+ gen_helper_msr_banked(tcg_env, tcg_reg,
tcg_constant_i32(tgtmode),
tcg_constant_i32(regno));
s->base.is_jmp = DISAS_UPDATE_EXIT;
@@ -2876,7 +2876,7 @@
gen_set_condexec(s);
gen_update_pc(s, 0);
tcg_reg = tcg_temp_new_i32();
- gen_helper_mrs_banked(tcg_reg, cpu_env,
+ gen_helper_mrs_banked(tcg_reg, tcg_env,
tcg_constant_i32(tgtmode),
tcg_constant_i32(regno));
store_reg(s, rn, tcg_reg);
@@ -2901,7 +2901,7 @@
* be called after storing the new PC.
*/
translator_io_start(&s->base);
- gen_helper_cpsr_write_eret(cpu_env, cpsr);
+ gen_helper_cpsr_write_eret(tcg_env, cpsr);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
}
@@ -2918,7 +2918,7 @@
{
TCGv_ptr qc_ptr = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
+ tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
opr_sz, max_sz, 0, fn);
}
@@ -4605,11 +4605,11 @@
case 0:
if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
&& dc_isar_feature(aa64_tidcp1, s)) {
- gen_helper_tidcp_el0(cpu_env, tcg_constant_i32(syndrome));
+ gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
}
break;
case 1:
- gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
+ gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
break;
}
}
@@ -4654,7 +4654,7 @@
gen_set_condexec(s);
gen_update_pc(s, 0);
tcg_ri = tcg_temp_new_ptr();
- gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
+ gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
tcg_constant_i32(key),
tcg_constant_i32(syndrome),
tcg_constant_i32(isread));
@@ -4702,10 +4702,10 @@
tcg_ri = gen_lookup_cp_reg(key);
}
tmp64 = tcg_temp_new_i64();
- gen_helper_get_cp_reg64(tmp64, cpu_env, tcg_ri);
+ gen_helper_get_cp_reg64(tmp64, tcg_env, tcg_ri);
} else {
tmp64 = tcg_temp_new_i64();
- tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
+ tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset);
}
tmp = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(tmp, tmp64);
@@ -4722,7 +4722,7 @@
tcg_ri = gen_lookup_cp_reg(key);
}
tmp = tcg_temp_new_i32();
- gen_helper_get_cp_reg(tmp, cpu_env, tcg_ri);
+ gen_helper_get_cp_reg(tmp, tcg_env, tcg_ri);
} else {
tmp = load_cpu_offset(ri->fieldoffset);
}
@@ -4752,9 +4752,9 @@
if (!tcg_ri) {
tcg_ri = gen_lookup_cp_reg(key);
}
- gen_helper_set_cp_reg64(cpu_env, tcg_ri, tmp64);
+ gen_helper_set_cp_reg64(tcg_env, tcg_ri, tmp64);
} else {
- tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
+ tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset);
}
} else {
TCGv_i32 tmp = load_reg(s, rt);
@@ -4762,7 +4762,7 @@
if (!tcg_ri) {
tcg_ri = gen_lookup_cp_reg(key);
}
- gen_helper_set_cp_reg(cpu_env, tcg_ri, tmp);
+ gen_helper_set_cp_reg(tcg_env, tcg_ri, tmp);
} else {
store_cpu_offset(tmp, ri->fieldoffset, 4);
}
@@ -5028,7 +5028,7 @@
/* get_r13_banked() will raise an exception if called from System mode */
gen_set_condexec(s);
gen_update_pc(s, 0);
- gen_helper_get_r13_banked(addr, cpu_env, tcg_constant_i32(mode));
+ gen_helper_get_r13_banked(addr, tcg_env, tcg_constant_i32(mode));
switch (amode) {
case 0: /* DA */
offset = -4;
@@ -5069,7 +5069,7 @@
g_assert_not_reached();
}
tcg_gen_addi_i32(addr, addr, offset);
- gen_helper_set_r13_banked(cpu_env, tcg_constant_i32(mode), addr);
+ gen_helper_set_r13_banked(tcg_env, tcg_constant_i32(mode), addr);
}
s->base.is_jmp = DISAS_UPDATE_EXIT;
}
@@ -5618,7 +5618,7 @@
static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
{
- gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
+ gen_helper_mve_sqshll(r, tcg_env, n, tcg_constant_i32(shift));
}
static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
@@ -5628,7 +5628,7 @@
static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
{
- gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
+ gen_helper_mve_uqshll(r, tcg_env, n, tcg_constant_i32(shift));
}
static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
@@ -5674,7 +5674,7 @@
tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
/* The helper takes care of the sign-extension of the low 8 bits of Rm */
- fn(rda, cpu_env, rda, cpu_R[a->rm]);
+ fn(rda, tcg_env, rda, cpu_R[a->rm]);
tcg_gen_extrl_i64_i32(rdalo, rda);
tcg_gen_extrh_i64_i32(rdahi, rda);
@@ -5748,7 +5748,7 @@
static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
{
- gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+ gen_helper_mve_sqshl(r, tcg_env, n, tcg_constant_i32(shift));
}
static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
@@ -5758,7 +5758,7 @@
static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
{
- gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+ gen_helper_mve_uqshl(r, tcg_env, n, tcg_constant_i32(shift));
}
static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
@@ -5782,7 +5782,7 @@
}
/* The helper takes care of the sign-extension of the low 8 bits of Rm */
- fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
+ fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]);
return true;
}
@@ -5928,12 +5928,12 @@
t0 = load_reg(s, a->rm);
t1 = load_reg(s, a->rn);
if (doub) {
- gen_helper_add_saturate(t1, cpu_env, t1, t1);
+ gen_helper_add_saturate(t1, tcg_env, t1, t1);
}
if (add) {
- gen_helper_add_saturate(t0, cpu_env, t0, t1);
+ gen_helper_add_saturate(t0, tcg_env, t0, t1);
} else {
- gen_helper_sub_saturate(t0, cpu_env, t0, t1);
+ gen_helper_sub_saturate(t0, tcg_env, t0, t1);
}
store_reg(s, a->rd, t0);
return true;
@@ -5977,7 +5977,7 @@
break;
case 1:
t1 = load_reg(s, a->ra);
- gen_helper_add_setq(t0, cpu_env, t0, t1);
+ gen_helper_add_setq(t0, tcg_env, t0, t1);
store_reg(s, a->rd, t0);
break;
case 2:
@@ -6041,7 +6041,7 @@
tcg_gen_muls2_i32(t0, t1, t0, t1);
if (add) {
t0 = load_reg(s, a->ra);
- gen_helper_add_setq(t1, cpu_env, t1, t0);
+ gen_helper_add_setq(t1, tcg_env, t1, t0);
}
store_reg(s, a->rd, t1);
return true;
@@ -6120,7 +6120,7 @@
* Test for EL2 present, and defer test for SEL2 to runtime.
*/
if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
- gen_helper_vesb(cpu_env);
+ gen_helper_vesb(tcg_env);
}
}
return true;
@@ -6228,7 +6228,7 @@
tmp = load_cpu_field(spsr);
} else {
tmp = tcg_temp_new_i32();
- gen_helper_cpsr_read(tmp, cpu_env);
+ gen_helper_cpsr_read(tmp, tcg_env);
}
store_reg(s, a->rd, tmp);
return true;
@@ -6257,7 +6257,7 @@
return false;
}
tmp = tcg_temp_new_i32();
- gen_helper_v7m_mrs(tmp, cpu_env, tcg_constant_i32(a->sysm));
+ gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm));
store_reg(s, a->rd, tmp);
return true;
}
@@ -6271,7 +6271,7 @@
}
addr = tcg_constant_i32((a->mask << 10) | a->sysm);
reg = load_reg(s, a->rn);
- gen_helper_v7m_msr(cpu_env, addr, reg);
+ gen_helper_v7m_msr(tcg_env, addr, reg);
/* If we wrote to CONTROL, the EL might have changed */
gen_rebuild_hflags(s, true);
gen_lookup_tb(s);
@@ -6302,7 +6302,7 @@
if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
arm_dc_feature(s, ARM_FEATURE_EL2) &&
s->current_el < 2 && s->ns) {
- gen_helper_check_bxj_trap(cpu_env, tcg_constant_i32(a->rm));
+ gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm));
}
/* Trivial implementation equivalent to bx. */
gen_bx(s, load_reg(s, a->rm));
@@ -6480,7 +6480,7 @@
addr = load_reg(s, a->rn);
tmp = tcg_temp_new_i32();
- gen_helper_v7m_tt(tmp, cpu_env, addr, tcg_constant_i32((a->A << 1) | a->T));
+ gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T));
store_reg(s, a->rd, tmp);
return true;
}
@@ -6510,7 +6510,7 @@
TCGv_i32 addr = load_reg(s, a->rn);
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
- gen_helper_v8m_stackcheck(cpu_env, addr);
+ gen_helper_v8m_stackcheck(tcg_env, addr);
}
if (a->p) {
@@ -6665,9 +6665,9 @@
if (!a->u) {
TCGv_i32 newsp = tcg_temp_new_i32();
tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
- gen_helper_v8m_stackcheck(cpu_env, newsp);
+ gen_helper_v8m_stackcheck(tcg_env, newsp);
} else {
- gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
+ gen_helper_v8m_stackcheck(tcg_env, cpu_R[13]);
}
}
@@ -7319,7 +7319,7 @@
t1 = load_reg(s, a->rm);
ge = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
+ tcg_gen_addi_ptr(ge, tcg_env, offsetof(CPUARMState, GE));
gen(t0, t0, t1, ge);
store_reg(s, a->rd, t0);
@@ -7433,7 +7433,7 @@
tcg_gen_shli_i32(tmp, tmp, shift);
}
- gen(tmp, cpu_env, tmp, tcg_constant_i32(a->satimm));
+ gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm));
store_reg(s, a->rd, tmp);
return true;
@@ -7540,7 +7540,7 @@
t1 = load_reg(s, a->rn);
t2 = load_reg(s, a->rm);
t3 = tcg_temp_new_i32();
- tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
+ tcg_gen_ld_i32(t3, tcg_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(t1, t3, t1, t2);
store_reg(s, a->rd, t1);
return true;
@@ -7618,11 +7618,11 @@
if (a->ra != 15) {
t2 = load_reg(s, a->ra);
- gen_helper_add_setq(t1, cpu_env, t1, t2);
+ gen_helper_add_setq(t1, tcg_env, t1, t2);
}
} else if (a->ra == 15) {
/* Single saturation-checking addition */
- gen_helper_add_setq(t1, cpu_env, t1, t2);
+ gen_helper_add_setq(t1, tcg_env, t1, t2);
} else {
/*
* We need to add the products and Ra together and then
@@ -7804,9 +7804,9 @@
t1 = load_reg(s, a->rn);
t2 = load_reg(s, a->rm);
if (u) {
- gen_helper_udiv(t1, cpu_env, t1, t2);
+ gen_helper_udiv(t1, tcg_env, t1, t2);
} else {
- gen_helper_sdiv(t1, cpu_env, t1, t2);
+ gen_helper_sdiv(t1, tcg_env, t1, t2);
}
store_reg(s, a->rd, t1);
return true;
@@ -7855,7 +7855,7 @@
* either the original SP (if incrementing) or our
* final SP (if decrementing), so that's what we check.
*/
- gen_helper_v8m_stackcheck(cpu_env, addr);
+ gen_helper_v8m_stackcheck(tcg_env, addr);
}
return addr;
@@ -7916,7 +7916,7 @@
if (user && i != 15) {
tmp = tcg_temp_new_i32();
- gen_helper_get_user_reg(tmp, cpu_env, tcg_constant_i32(i));
+ gen_helper_get_user_reg(tmp, tcg_env, tcg_constant_i32(i));
} else {
tmp = load_reg(s, i);
}
@@ -7999,7 +7999,7 @@
tmp = tcg_temp_new_i32();
gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
if (user) {
- gen_helper_set_user_reg(cpu_env, tcg_constant_i32(i), tmp);
+ gen_helper_set_user_reg(tcg_env, tcg_constant_i32(i), tmp);
} else if (i == a->rn) {
loaded_var = tmp;
loaded_base = true;
@@ -8026,7 +8026,7 @@
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
translator_io_start(&s->base);
- gen_helper_cpsr_write_eret(cpu_env, tmp);
+ gen_helper_cpsr_write_eret(tcg_env, tmp);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
}
@@ -8100,7 +8100,7 @@
* Clear APSR (by calling the MSR helper with the same argument
* as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
*/
- gen_helper_v7m_msr(cpu_env, tcg_constant_i32(0xc00), zero);
+ gen_helper_v7m_msr(tcg_env, tcg_constant_i32(0xc00), zero);
}
clear_eci_state(s);
return true;
@@ -8487,7 +8487,7 @@
tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
masklen, tcg_constant_i32(1 << (4 - a->size)),
rn_shifted, tcg_constant_i32(16));
- gen_helper_mve_vctp(cpu_env, masklen);
+ gen_helper_mve_vctp(tcg_env, masklen);
/* This insn updates predication bits */
s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
mve_update_eci(s);
@@ -8665,12 +8665,12 @@
/* FAULTMASK */
if (a->F) {
addr = tcg_constant_i32(19);
- gen_helper_v7m_msr(cpu_env, addr, tmp);
+ gen_helper_v7m_msr(tcg_env, addr, tmp);
}
/* PRIMASK */
if (a->I) {
addr = tcg_constant_i32(16);
- gen_helper_v7m_msr(cpu_env, addr, tmp);
+ gen_helper_v7m_msr(tcg_env, addr, tmp);
}
gen_rebuild_hflags(s, false);
gen_lookup_tb(s);
@@ -8740,7 +8740,7 @@
return false;
}
if (a->E != (s->be_data == MO_BE)) {
- gen_helper_setend(cpu_env);
+ gen_helper_setend(tcg_env);
s->base.is_jmp = DISAS_UPDATE_EXIT;
}
return true;
@@ -9089,7 +9089,7 @@
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUARMState *env = cs->env_ptr;
+ CPUARMState *env = cpu_env(cs);
ARMCPU *cpu = env_archcpu(env);
CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
uint32_t condexec, core_mmu_idx;
@@ -9317,7 +9317,7 @@
static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUARMState *env = cpu->env_ptr;
+ CPUARMState *env = cpu_env(cpu);
uint32_t pc = dc->base.pc_next;
unsigned int insn;
@@ -9335,7 +9335,7 @@
* be possible after an indirect branch, at the start of the TB.
*/
assert(dc->base.num_insns == 1);
- gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
dc->base.is_jmp = DISAS_NORETURN;
dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
@@ -9407,7 +9407,7 @@
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUARMState *env = cpu->env_ptr;
+ CPUARMState *env = cpu_env(cpu);
uint32_t pc = dc->base.pc_next;
uint32_t insn;
bool is_16bit;
@@ -9615,7 +9615,7 @@
/* nothing more to generate */
break;
case DISAS_WFI:
- gen_helper_wfi(cpu_env, tcg_constant_i32(curr_insn_len(dc)));
+ gen_helper_wfi(tcg_env, tcg_constant_i32(curr_insn_len(dc)));
/*
* The helper doesn't necessarily throw an exception, but we
* must go back to the main loop to check for interrupts anyway.
@@ -9623,10 +9623,10 @@
tcg_gen_exit_tb(NULL, 0);
break;
case DISAS_WFE:
- gen_helper_wfe(cpu_env);
+ gen_helper_wfe(tcg_env);
break;
case DISAS_YIELD:
- gen_helper_yield(cpu_env);
+ gen_helper_yield(tcg_env);
break;
case DISAS_SWI:
gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 63922f8..b404661 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -329,7 +329,7 @@
{
TCGv_i32 ret = tcg_temp_new_i32();
- tcg_gen_ld_i32(ret, cpu_env,
+ tcg_gen_ld_i32(ret, tcg_env,
offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
tcg_gen_extract_i32(ret, ret, 26, 1);
@@ -343,9 +343,9 @@
tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
- tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
tcg_gen_ori_i32(p, p, bits);
- tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}
/* Clear bits within PSTATE. */
@@ -355,9 +355,9 @@
tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
- tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
tcg_gen_andi_i32(p, p, ~bits);
- tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}
/* If the singlestep state is Active-not-pending, advance to Active-pending. */
@@ -374,7 +374,7 @@
{
/* Fill in the same_el field of the syndrome in the helper. */
uint32_t syn = syn_swstep(false, isv, ex);
- gen_helper_exception_swstep(cpu_env, tcg_constant_i32(syn));
+ gen_helper_exception_swstep(tcg_env, tcg_constant_i32(syn));
}
/*
@@ -557,7 +557,7 @@
default:
g_assert_not_reached();
}
- tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+ tcg_gen_addi_ptr(statusptr, tcg_env, offset);
return statusptr;
}
@@ -679,7 +679,7 @@
static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key)
{
TCGv_ptr ret = tcg_temp_new_ptr();
- gen_helper_lookup_cp_reg(ret, cpu_env, tcg_constant_i32(key));
+ gen_helper_lookup_cp_reg(ret, tcg_env, tcg_constant_i32(key));
return ret;
}
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index 8f741f2..14d8b9d 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -147,8 +147,6 @@
{
AVRCPU *cpu = AVR_CPU(obj);
- cpu_set_cpustate_pointers(cpu);
-
/* Set the number of interrupts supported by the CPU. */
qdev_init_gpio_in(DEVICE(cpu), avr_cpu_set_int,
sizeof(cpu->env.intsrc) * 8);
@@ -390,6 +388,7 @@
.name = TYPE_AVR_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(AVRCPU),
+ .instance_align = __alignof(AVRCPU),
.instance_init = avr_cpu_initfn,
.class_size = sizeof(AVRCPUClass),
.class_init = avr_cpu_class_init,
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index 7225174..4ce22d8 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -148,7 +148,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUAVRState env;
};
diff --git a/target/avr/translate.c b/target/avr/translate.c
index ef2edd7..cdffa04 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -127,25 +127,25 @@
int i;
#define AVR_REG_OFFS(x) offsetof(CPUAVRState, x)
- cpu_pc = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(pc_w), "pc");
- cpu_Cf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregC), "Cf");
- cpu_Zf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregZ), "Zf");
- cpu_Nf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregN), "Nf");
- cpu_Vf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregV), "Vf");
- cpu_Sf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregS), "Sf");
- cpu_Hf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregH), "Hf");
- cpu_Tf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregT), "Tf");
- cpu_If = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregI), "If");
- cpu_rampD = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampD), "rampD");
- cpu_rampX = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampX), "rampX");
- cpu_rampY = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampY), "rampY");
- cpu_rampZ = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampZ), "rampZ");
- cpu_eind = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(eind), "eind");
- cpu_sp = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sp), "sp");
- cpu_skip = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(skip), "skip");
+ cpu_pc = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(pc_w), "pc");
+ cpu_Cf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregC), "Cf");
+ cpu_Zf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregZ), "Zf");
+ cpu_Nf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregN), "Nf");
+ cpu_Vf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregV), "Vf");
+ cpu_Sf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregS), "Sf");
+ cpu_Hf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregH), "Hf");
+ cpu_Tf = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregT), "Tf");
+ cpu_If = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sregI), "If");
+ cpu_rampD = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampD), "rampD");
+ cpu_rampX = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampX), "rampX");
+ cpu_rampY = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampY), "rampY");
+ cpu_rampZ = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(rampZ), "rampZ");
+ cpu_eind = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(eind), "eind");
+ cpu_sp = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(sp), "sp");
+ cpu_skip = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(skip), "skip");
for (i = 0; i < NUMBER_OF_CPU_REGISTERS; i++) {
- cpu_r[i] = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(r[i]),
+ cpu_r[i] = tcg_global_mem_new_i32(tcg_env, AVR_REG_OFFS(r[i]),
reg_names[i]);
}
#undef AVR_REG_OFFS
@@ -184,7 +184,7 @@
static bool avr_have_feature(DisasContext *ctx, int feature)
{
if (!avr_feature(ctx->env, feature)) {
- gen_helper_unsupported(cpu_env);
+ gen_helper_unsupported(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
return false;
}
@@ -1295,7 +1295,7 @@
TCGv data = tcg_temp_new_i32();
TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, cpu_env, port);
+ gen_helper_inb(data, tcg_env, port);
tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_EQ;
ctx->skip_var0 = data;
@@ -1313,7 +1313,7 @@
TCGv data = tcg_temp_new_i32();
TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, cpu_env, port);
+ gen_helper_inb(data, tcg_env, port);
tcg_gen_andi_tl(data, data, 1 << a->bit);
ctx->skip_cond = TCG_COND_NE;
ctx->skip_var0 = data;
@@ -1494,7 +1494,7 @@
static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
{
if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
- gen_helper_fullwr(cpu_env, data, addr);
+ gen_helper_fullwr(tcg_env, data, addr);
} else {
tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB);
}
@@ -1503,7 +1503,7 @@
static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
{
if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
- gen_helper_fullrd(data, cpu_env, addr);
+ gen_helper_fullrd(data, tcg_env, addr);
} else {
tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
}
@@ -2130,7 +2130,7 @@
TCGv Rd = cpu_r[a->rd];
TCGv port = tcg_constant_i32(a->imm);
- gen_helper_inb(Rd, cpu_env, port);
+ gen_helper_inb(Rd, tcg_env, port);
return true;
}
@@ -2143,7 +2143,7 @@
TCGv Rd = cpu_r[a->rd];
TCGv port = tcg_constant_i32(a->imm);
- gen_helper_outb(cpu_env, port, Rd);
+ gen_helper_outb(tcg_env, port, Rd);
return true;
}
@@ -2411,9 +2411,9 @@
TCGv data = tcg_temp_new_i32();
TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, cpu_env, port);
+ gen_helper_inb(data, tcg_env, port);
tcg_gen_ori_tl(data, data, 1 << a->bit);
- gen_helper_outb(cpu_env, port, data);
+ gen_helper_outb(tcg_env, port, data);
return true;
}
@@ -2426,9 +2426,9 @@
TCGv data = tcg_temp_new_i32();
TCGv port = tcg_constant_i32(a->reg);
- gen_helper_inb(data, cpu_env, port);
+ gen_helper_inb(data, tcg_env, port);
tcg_gen_andi_tl(data, data, ~(1 << a->bit));
- gen_helper_outb(cpu_env, port, data);
+ gen_helper_outb(tcg_env, port, data);
return true;
}
@@ -2551,7 +2551,7 @@
#ifdef BREAKPOINT_ON_BREAK
tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
- gen_helper_debug(cpu_env);
+ gen_helper_debug(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#else
/* NOP */
@@ -2577,7 +2577,7 @@
*/
static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
{
- gen_helper_sleep(cpu_env);
+ gen_helper_sleep(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
return true;
}
@@ -2589,7 +2589,7 @@
*/
static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
{
- gen_helper_wdr(cpu_env);
+ gen_helper_wdr(tcg_env);
return true;
}
@@ -2608,7 +2608,7 @@
uint32_t opcode = next_word(ctx);
if (!decode_insn(ctx, opcode)) {
- gen_helper_unsupported(cpu_env);
+ gen_helper_unsupported(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
}
@@ -2657,7 +2657,7 @@
static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUAVRState *env = cs->env_ptr;
+ CPUAVRState *env = cpu_env(cs);
uint32_t tb_flags = ctx->base.tb->flags;
ctx->cs = cs;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index a6a93c2..be4a44c 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -201,8 +201,6 @@
CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
CPUCRISState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
-
env->pregs[PR_VR] = ccc->vr;
#ifndef CONFIG_USER_ONLY
@@ -345,6 +343,7 @@
.name = TYPE_CRIS_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(CRISCPU),
+ .instance_align = __alignof(CRISCPU),
.instance_init = cris_cpu_initfn,
.abstract = true,
.class_size = sizeof(CRISCPUClass),
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index 8e37c6e..676b8e9 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -178,7 +178,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUCRISState env;
};
diff --git a/target/cris/translate.c b/target/cris/translate.c
index 42103b5..b3974ba 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -171,9 +171,9 @@
};
#define t_gen_mov_TN_env(tn, member) \
- tcg_gen_ld_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+ tcg_gen_ld_tl(tn, tcg_env, offsetof(CPUCRISState, member))
#define t_gen_mov_env_TN(member, tn) \
- tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+ tcg_gen_st_tl(tn, tcg_env, offsetof(CPUCRISState, member))
#define t_gen_movi_env_TN(member, c) \
t_gen_mov_env_TN(member, tcg_constant_tl(c))
@@ -197,10 +197,10 @@
tcg_gen_andi_tl(cpu_PR[r], tn, 3);
} else {
if (r == PR_PID) {
- gen_helper_tlb_flush_pid(cpu_env, tn);
+ gen_helper_tlb_flush_pid(tcg_env, tn);
}
if (dc->tb_flags & S_FLAG && r == PR_SPC) {
- gen_helper_spc_write(cpu_env, tn);
+ gen_helper_spc_write(tcg_env, tn);
} else if (r == PR_CCS) {
dc->cpustate_changed = 1;
}
@@ -265,7 +265,7 @@
static inline void t_gen_raise_exception(uint32_t index)
{
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
}
static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
@@ -504,17 +504,17 @@
switch (dc->cc_op) {
case CC_OP_MCP:
- gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], cpu_env,
+ gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], tcg_env,
cpu_PR[PR_CCS], cc_src,
cc_dest, cc_result);
break;
case CC_OP_MULS:
- gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], cpu_env,
+ gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], tcg_env,
cpu_PR[PR_CCS], cc_result,
cpu_PR[PR_MOF]);
break;
case CC_OP_MULU:
- gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], cpu_env,
+ gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], tcg_env,
cpu_PR[PR_CCS], cc_result,
cpu_PR[PR_MOF]);
break;
@@ -528,14 +528,14 @@
switch (dc->cc_size) {
case 4:
gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
- cpu_env, cpu_PR[PR_CCS], cc_result);
+ tcg_env, cpu_PR[PR_CCS], cc_result);
break;
case 2:
gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
- cpu_env, cpu_PR[PR_CCS], cc_result);
+ tcg_env, cpu_PR[PR_CCS], cc_result);
break;
default:
- gen_helper_evaluate_flags(cpu_env);
+ gen_helper_evaluate_flags(tcg_env);
break;
}
break;
@@ -545,21 +545,21 @@
case CC_OP_SUB:
case CC_OP_CMP:
if (dc->cc_size == 4) {
- gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], cpu_env,
+ gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], tcg_env,
cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
} else {
- gen_helper_evaluate_flags(cpu_env);
+ gen_helper_evaluate_flags(tcg_env);
}
break;
default:
switch (dc->cc_size) {
case 4:
- gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], cpu_env,
+ gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], tcg_env,
cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
break;
default:
- gen_helper_evaluate_flags(cpu_env);
+ gen_helper_evaluate_flags(tcg_env);
break;
}
break;
@@ -1330,7 +1330,7 @@
cris_cc_mask(dc, CC_MASK_NZ);
c = tcg_constant_tl(dc->op1);
cris_evaluate_flags(dc);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+ gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2],
c, cpu_PR[PR_CCS]);
cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
@@ -1744,7 +1744,7 @@
dc->op1, dc->op2);
cris_cc_mask(dc, CC_MASK_NZ);
cris_evaluate_flags(dc);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+ gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2],
cpu_R[dc->op1], cpu_PR[PR_CCS]);
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
cpu_R[dc->op2], cpu_R[dc->op2], 4);
@@ -1946,7 +1946,7 @@
c1 = tcg_constant_tl(dc->op1);
c2 = tcg_constant_tl(dc->op2);
cris_cc_mask(dc, 0);
- gen_helper_movl_sreg_reg(cpu_env, c2, c1);
+ gen_helper_movl_sreg_reg(tcg_env, c2, c1);
return 2;
}
static int dec_move_sr(CPUCRISState *env, DisasContext *dc)
@@ -1956,7 +1956,7 @@
c1 = tcg_constant_tl(dc->op1);
c2 = tcg_constant_tl(dc->op2);
cris_cc_mask(dc, 0);
- gen_helper_movl_reg_sreg(cpu_env, c1, c2);
+ gen_helper_movl_reg_sreg(tcg_env, c1, c2);
return 2;
}
@@ -2693,7 +2693,7 @@
cris_cc_mask(dc, 0);
if (dc->op2 == 15) {
- tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
-offsetof(CRISCPU, env) + offsetof(CPUState, halted));
tcg_gen_movi_tl(env_pc, dc->pc + 2);
t_gen_raise_exception(EXCP_HLT);
@@ -2706,7 +2706,7 @@
/* rfe. */
LOG_DIS("rfe\n");
cris_evaluate_flags(dc);
- gen_helper_rfe(cpu_env);
+ gen_helper_rfe(tcg_env);
dc->base.is_jmp = DISAS_UPDATE;
dc->cpustate_changed = true;
break;
@@ -2714,7 +2714,7 @@
/* rfn. */
LOG_DIS("rfn\n");
cris_evaluate_flags(dc);
- gen_helper_rfn(cpu_env);
+ gen_helper_rfn(tcg_env);
dc->base.is_jmp = DISAS_UPDATE;
dc->cpustate_changed = true;
break;
@@ -2948,7 +2948,7 @@
static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUCRISState *env = cs->env_ptr;
+ CPUCRISState *env = cpu_env(cs);
uint32_t tb_flags = dc->base.tb->flags;
uint32_t pc_start;
@@ -3006,7 +3006,7 @@
static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUCRISState *env = cs->env_ptr;
+ CPUCRISState *env = cpu_env(cs);
unsigned int insn_len;
/* Pretty disas. */
@@ -3238,41 +3238,41 @@
{
int i;
- cc_x = tcg_global_mem_new(cpu_env,
+ cc_x = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_x), "cc_x");
- cc_src = tcg_global_mem_new(cpu_env,
+ cc_src = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_src), "cc_src");
- cc_dest = tcg_global_mem_new(cpu_env,
+ cc_dest = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_dest),
"cc_dest");
- cc_result = tcg_global_mem_new(cpu_env,
+ cc_result = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_result),
"cc_result");
- cc_op = tcg_global_mem_new(cpu_env,
+ cc_op = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_op), "cc_op");
- cc_size = tcg_global_mem_new(cpu_env,
+ cc_size = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_size),
"cc_size");
- cc_mask = tcg_global_mem_new(cpu_env,
+ cc_mask = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_mask),
"cc_mask");
- env_pc = tcg_global_mem_new(cpu_env,
+ env_pc = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, pc),
"pc");
- env_btarget = tcg_global_mem_new(cpu_env,
+ env_btarget = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, btarget),
"btarget");
- env_btaken = tcg_global_mem_new(cpu_env,
+ env_btaken = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, btaken),
"btaken");
for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new(cpu_env,
+ cpu_R[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, regs[i]),
regnames_v32[i]);
}
for (i = 0; i < 16; i++) {
- cpu_PR[i] = tcg_global_mem_new(cpu_env,
+ cpu_PR[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, pregs[i]),
pregnames_v32[i]);
}
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
index b7b0517..6df599f 100644
--- a/target/cris/translate_v10.c.inc
+++ b/target/cris/translate_v10.c.inc
@@ -282,7 +282,7 @@
} else {
/* BTST */
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+ gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst],
c, cpu_PR[PR_CCS]);
}
break;
@@ -696,7 +696,7 @@
LOG_DIS("btst $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
- gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+ gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst],
cpu_R[dc->src], cpu_PR[PR_CCS]);
break;
case CRISV10_REG_DSTEP:
@@ -1235,41 +1235,41 @@
{
int i;
- cc_x = tcg_global_mem_new(cpu_env,
+ cc_x = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_x), "cc_x");
- cc_src = tcg_global_mem_new(cpu_env,
+ cc_src = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_src), "cc_src");
- cc_dest = tcg_global_mem_new(cpu_env,
+ cc_dest = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_dest),
"cc_dest");
- cc_result = tcg_global_mem_new(cpu_env,
+ cc_result = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_result),
"cc_result");
- cc_op = tcg_global_mem_new(cpu_env,
+ cc_op = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_op), "cc_op");
- cc_size = tcg_global_mem_new(cpu_env,
+ cc_size = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_size),
"cc_size");
- cc_mask = tcg_global_mem_new(cpu_env,
+ cc_mask = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, cc_mask),
"cc_mask");
- env_pc = tcg_global_mem_new(cpu_env,
+ env_pc = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, pc),
"pc");
- env_btarget = tcg_global_mem_new(cpu_env,
+ env_btarget = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, btarget),
"btarget");
- env_btaken = tcg_global_mem_new(cpu_env,
+ env_btaken = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, btaken),
"btaken");
for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new(cpu_env,
+ cpu_R[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, regs[i]),
regnames_v10[i]);
}
for (i = 0; i < 16; i++) {
- cpu_PR[i] = tcg_global_mem_new(cpu_env,
+ cpu_PR[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUCRISState, pregs[i]),
pregnames_v10[i]);
}
diff --git a/target/hexagon/README b/target/hexagon/README
index e757bcb..69b2ffe 100644
--- a/target/hexagon/README
+++ b/target/hexagon/README
@@ -86,7 +86,7 @@
const int RdN = insn->regno[0];
TCGv RsV = hex_gpr[insn->regno[1]];
TCGv RtV = hex_gpr[insn->regno[2]];
- gen_helper_A2_add(RdV, cpu_env, RsV, RtV);
+ gen_helper_A2_add(RdV, tcg_env, RsV, RtV);
gen_log_reg_write(ctx, RdN, RdV);
}
@@ -143,7 +143,7 @@
const intptr_t VdV_off =
ctx_future_vreg_off(ctx, VdN, 1, true);
TCGv_ptr VdV = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(VdV, cpu_env, VdV_off);
+ tcg_gen_addi_ptr(VdV, tcg_env, VdV_off);
const int VuN = insn->regno[1];
const intptr_t VuV_off =
vreg_src_off(ctx, VuN);
@@ -152,9 +152,9 @@
const intptr_t VvV_off =
vreg_src_off(ctx, VvN);
TCGv_ptr VvV = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(VuV, cpu_env, VuV_off);
- tcg_gen_addi_ptr(VvV, cpu_env, VvV_off);
- gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV);
+ tcg_gen_addi_ptr(VuV, tcg_env, VuV_off);
+ tcg_gen_addi_ptr(VvV, tcg_env, VvV_off);
+ gen_helper_V6_vaddw(tcg_env, VdV, VuV, VvV);
}
Notice that we also generate a variable named <operand>_off for each operand of
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index f155936..1adc11b 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -353,9 +353,6 @@
static void hexagon_cpu_init(Object *obj)
{
- HexagonCPU *cpu = HEXAGON_CPU(obj);
-
- cpu_set_cpustate_pointers(cpu);
qdev_property_add_static(DEVICE(obj), &hexagon_lldb_compat_property);
qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property);
qdev_property_add_static(DEVICE(obj), &hexagon_short_circuit_property);
@@ -408,6 +405,7 @@
.name = TYPE_HEXAGON_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(HexagonCPU),
+ .instance_align = __alignof(HexagonCPU),
.instance_init = hexagon_cpu_init,
.abstract = true,
.class_size = sizeof(HexagonCPUClass),
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index daef5c3..10cd1ef 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -141,7 +141,7 @@
/*< private >*/
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
+
CPUHexagonState env;
bool lldb_compat;
diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h
index d78d99d..d992059 100644
--- a/target/hexagon/gen_tcg.h
+++ b/target/hexagon/gen_tcg.h
@@ -591,8 +591,8 @@
*/
#define fGEN_TCG_A5_ACS(SHORTCODE) \
do { \
- gen_helper_vacsh_pred(PeV, cpu_env, RxxV, RssV, RttV); \
- gen_helper_vacsh_val(RxxV, cpu_env, RxxV, RssV, RttV, \
+ gen_helper_vacsh_pred(PeV, tcg_env, RxxV, RssV, RttV); \
+ gen_helper_vacsh_val(RxxV, tcg_env, RxxV, RssV, RttV, \
tcg_constant_tl(ctx->need_commit)); \
} while (0)
@@ -614,7 +614,7 @@
#define fGEN_TCG_F2_sfrecipa(SHORTCODE) \
do { \
TCGv_i64 tmp = tcg_temp_new_i64(); \
- gen_helper_sfrecipa(tmp, cpu_env, RsV, RtV); \
+ gen_helper_sfrecipa(tmp, tcg_env, RsV, RtV); \
tcg_gen_extrh_i64_i32(RdV, tmp); \
tcg_gen_extrl_i64_i32(PeV, tmp); \
} while (0)
@@ -629,7 +629,7 @@
#define fGEN_TCG_F2_sfinvsqrta(SHORTCODE) \
do { \
TCGv_i64 tmp = tcg_temp_new_i64(); \
- gen_helper_sfinvsqrta(tmp, cpu_env, RsV); \
+ gen_helper_sfinvsqrta(tmp, tcg_env, RsV); \
tcg_gen_extrh_i64_i32(RdV, tmp); \
tcg_gen_extrl_i64_i32(PeV, tmp); \
} while (0)
@@ -1205,122 +1205,122 @@
/* Floating point */
#define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \
- gen_helper_conv_sf2df(RddV, cpu_env, RsV)
+ gen_helper_conv_sf2df(RddV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_df2sf(SHORTCODE) \
- gen_helper_conv_df2sf(RdV, cpu_env, RssV)
+ gen_helper_conv_df2sf(RdV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_uw2sf(SHORTCODE) \
- gen_helper_conv_uw2sf(RdV, cpu_env, RsV)
+ gen_helper_conv_uw2sf(RdV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_uw2df(SHORTCODE) \
- gen_helper_conv_uw2df(RddV, cpu_env, RsV)
+ gen_helper_conv_uw2df(RddV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_w2sf(SHORTCODE) \
- gen_helper_conv_w2sf(RdV, cpu_env, RsV)
+ gen_helper_conv_w2sf(RdV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_w2df(SHORTCODE) \
- gen_helper_conv_w2df(RddV, cpu_env, RsV)
+ gen_helper_conv_w2df(RddV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_ud2sf(SHORTCODE) \
- gen_helper_conv_ud2sf(RdV, cpu_env, RssV)
+ gen_helper_conv_ud2sf(RdV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_ud2df(SHORTCODE) \
- gen_helper_conv_ud2df(RddV, cpu_env, RssV)
+ gen_helper_conv_ud2df(RddV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_d2sf(SHORTCODE) \
- gen_helper_conv_d2sf(RdV, cpu_env, RssV)
+ gen_helper_conv_d2sf(RdV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_d2df(SHORTCODE) \
- gen_helper_conv_d2df(RddV, cpu_env, RssV)
+ gen_helper_conv_d2df(RddV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_sf2uw(SHORTCODE) \
- gen_helper_conv_sf2uw(RdV, cpu_env, RsV)
+ gen_helper_conv_sf2uw(RdV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_sf2w(SHORTCODE) \
- gen_helper_conv_sf2w(RdV, cpu_env, RsV)
+ gen_helper_conv_sf2w(RdV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_sf2ud(SHORTCODE) \
- gen_helper_conv_sf2ud(RddV, cpu_env, RsV)
+ gen_helper_conv_sf2ud(RddV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_sf2d(SHORTCODE) \
- gen_helper_conv_sf2d(RddV, cpu_env, RsV)
+ gen_helper_conv_sf2d(RddV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_df2uw(SHORTCODE) \
- gen_helper_conv_df2uw(RdV, cpu_env, RssV)
+ gen_helper_conv_df2uw(RdV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_df2w(SHORTCODE) \
- gen_helper_conv_df2w(RdV, cpu_env, RssV)
+ gen_helper_conv_df2w(RdV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_df2ud(SHORTCODE) \
- gen_helper_conv_df2ud(RddV, cpu_env, RssV)
+ gen_helper_conv_df2ud(RddV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_df2d(SHORTCODE) \
- gen_helper_conv_df2d(RddV, cpu_env, RssV)
+ gen_helper_conv_df2d(RddV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_sf2uw_chop(SHORTCODE) \
- gen_helper_conv_sf2uw_chop(RdV, cpu_env, RsV)
+ gen_helper_conv_sf2uw_chop(RdV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_sf2w_chop(SHORTCODE) \
- gen_helper_conv_sf2w_chop(RdV, cpu_env, RsV)
+ gen_helper_conv_sf2w_chop(RdV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_sf2ud_chop(SHORTCODE) \
- gen_helper_conv_sf2ud_chop(RddV, cpu_env, RsV)
+ gen_helper_conv_sf2ud_chop(RddV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_sf2d_chop(SHORTCODE) \
- gen_helper_conv_sf2d_chop(RddV, cpu_env, RsV)
+ gen_helper_conv_sf2d_chop(RddV, tcg_env, RsV)
#define fGEN_TCG_F2_conv_df2uw_chop(SHORTCODE) \
- gen_helper_conv_df2uw_chop(RdV, cpu_env, RssV)
+ gen_helper_conv_df2uw_chop(RdV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_df2w_chop(SHORTCODE) \
- gen_helper_conv_df2w_chop(RdV, cpu_env, RssV)
+ gen_helper_conv_df2w_chop(RdV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_df2ud_chop(SHORTCODE) \
- gen_helper_conv_df2ud_chop(RddV, cpu_env, RssV)
+ gen_helper_conv_df2ud_chop(RddV, tcg_env, RssV)
#define fGEN_TCG_F2_conv_df2d_chop(SHORTCODE) \
- gen_helper_conv_df2d_chop(RddV, cpu_env, RssV)
+ gen_helper_conv_df2d_chop(RddV, tcg_env, RssV)
#define fGEN_TCG_F2_sfadd(SHORTCODE) \
- gen_helper_sfadd(RdV, cpu_env, RsV, RtV)
+ gen_helper_sfadd(RdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfsub(SHORTCODE) \
- gen_helper_sfsub(RdV, cpu_env, RsV, RtV)
+ gen_helper_sfsub(RdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpeq(SHORTCODE) \
- gen_helper_sfcmpeq(PdV, cpu_env, RsV, RtV)
+ gen_helper_sfcmpeq(PdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpgt(SHORTCODE) \
- gen_helper_sfcmpgt(PdV, cpu_env, RsV, RtV)
+ gen_helper_sfcmpgt(PdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpge(SHORTCODE) \
- gen_helper_sfcmpge(PdV, cpu_env, RsV, RtV)
+ gen_helper_sfcmpge(PdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpuo(SHORTCODE) \
- gen_helper_sfcmpuo(PdV, cpu_env, RsV, RtV)
+ gen_helper_sfcmpuo(PdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfmax(SHORTCODE) \
- gen_helper_sfmax(RdV, cpu_env, RsV, RtV)
+ gen_helper_sfmax(RdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfmin(SHORTCODE) \
- gen_helper_sfmin(RdV, cpu_env, RsV, RtV)
+ gen_helper_sfmin(RdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sfclass(SHORTCODE) \
do { \
TCGv imm = tcg_constant_tl(uiV); \
- gen_helper_sfclass(PdV, cpu_env, RsV, imm); \
+ gen_helper_sfclass(PdV, tcg_env, RsV, imm); \
} while (0)
#define fGEN_TCG_F2_sffixupn(SHORTCODE) \
- gen_helper_sffixupn(RdV, cpu_env, RsV, RtV)
+ gen_helper_sffixupn(RdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sffixupd(SHORTCODE) \
- gen_helper_sffixupd(RdV, cpu_env, RsV, RtV)
+ gen_helper_sffixupd(RdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sffixupr(SHORTCODE) \
- gen_helper_sffixupr(RdV, cpu_env, RsV)
+ gen_helper_sffixupr(RdV, tcg_env, RsV)
#define fGEN_TCG_F2_dfadd(SHORTCODE) \
- gen_helper_dfadd(RddV, cpu_env, RssV, RttV)
+ gen_helper_dfadd(RddV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfsub(SHORTCODE) \
- gen_helper_dfsub(RddV, cpu_env, RssV, RttV)
+ gen_helper_dfsub(RddV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfmax(SHORTCODE) \
- gen_helper_dfmax(RddV, cpu_env, RssV, RttV)
+ gen_helper_dfmax(RddV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfmin(SHORTCODE) \
- gen_helper_dfmin(RddV, cpu_env, RssV, RttV)
+ gen_helper_dfmin(RddV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpeq(SHORTCODE) \
- gen_helper_dfcmpeq(PdV, cpu_env, RssV, RttV)
+ gen_helper_dfcmpeq(PdV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpgt(SHORTCODE) \
- gen_helper_dfcmpgt(PdV, cpu_env, RssV, RttV)
+ gen_helper_dfcmpgt(PdV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpge(SHORTCODE) \
- gen_helper_dfcmpge(PdV, cpu_env, RssV, RttV)
+ gen_helper_dfcmpge(PdV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpuo(SHORTCODE) \
- gen_helper_dfcmpuo(PdV, cpu_env, RssV, RttV)
+ gen_helper_dfcmpuo(PdV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfclass(SHORTCODE) \
do { \
TCGv imm = tcg_constant_tl(uiV); \
- gen_helper_dfclass(PdV, cpu_env, RssV, imm); \
+ gen_helper_dfclass(PdV, tcg_env, RssV, imm); \
} while (0)
#define fGEN_TCG_F2_sfmpy(SHORTCODE) \
- gen_helper_sfmpy(RdV, cpu_env, RsV, RtV)
+ gen_helper_sfmpy(RdV, tcg_env, RsV, RtV)
#define fGEN_TCG_F2_sffma(SHORTCODE) \
- gen_helper_sffma(RxV, cpu_env, RxV, RsV, RtV)
+ gen_helper_sffma(RxV, tcg_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffma_sc(SHORTCODE) \
- gen_helper_sffma_sc(RxV, cpu_env, RxV, RsV, RtV, PuV)
+ gen_helper_sffma_sc(RxV, tcg_env, RxV, RsV, RtV, PuV)
#define fGEN_TCG_F2_sffms(SHORTCODE) \
- gen_helper_sffms(RxV, cpu_env, RxV, RsV, RtV)
+ gen_helper_sffms(RxV, tcg_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffma_lib(SHORTCODE) \
- gen_helper_sffma_lib(RxV, cpu_env, RxV, RsV, RtV)
+ gen_helper_sffma_lib(RxV, tcg_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffms_lib(SHORTCODE) \
- gen_helper_sffms_lib(RxV, cpu_env, RxV, RsV, RtV)
+ gen_helper_sffms_lib(RxV, tcg_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_dfmpyfix(SHORTCODE) \
- gen_helper_dfmpyfix(RddV, cpu_env, RssV, RttV)
+ gen_helper_dfmpyfix(RddV, tcg_env, RssV, RttV)
#define fGEN_TCG_F2_dfmpyhh(SHORTCODE) \
- gen_helper_dfmpyhh(RxxV, cpu_env, RxxV, RssV, RttV)
+ gen_helper_dfmpyhh(RxxV, tcg_env, RxxV, RssV, RttV)
/* Nothing to do for these in qemu, need to suppress compiler warnings */
#define fGEN_TCG_Y4_l2fetch(SHORTCODE) \
@@ -1367,6 +1367,6 @@
uiV = uiV; \
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->pkt->pc); \
TCGv excp = tcg_constant_tl(HEX_EXCP_TRAP0); \
- gen_helper_raise_exception(cpu_env, excp); \
+ gen_helper_raise_exception(tcg_env, excp); \
} while (0)
#endif
diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py
index fe29d83..f5246ce 100755
--- a/target/hexagon/gen_tcg_funcs.py
+++ b/target/hexagon/gen_tcg_funcs.py
@@ -120,7 +120,7 @@
if not hex_common.skip_qemu_helper(tag):
f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n")
f.write(
- f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, "
+ f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, "
f"{regtype}{regid}V_off);\n"
)
elif regid in {"uu", "vv", "xx"}:
@@ -130,7 +130,7 @@
if not hex_common.skip_qemu_helper(tag):
f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n")
f.write(
- f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, "
+ f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, "
f"{regtype}{regid}V_off);\n"
)
elif regid in {"s", "u", "v", "w"}:
@@ -155,7 +155,7 @@
if not hex_common.skip_qemu_helper(tag):
f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n")
f.write(
- f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, "
+ f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, "
f"{regtype}{regid}V_off);\n"
)
else:
@@ -168,7 +168,7 @@
if not hex_common.skip_qemu_helper(tag):
f.write(f" TCGv_ptr {regtype}{regid}V = " "tcg_temp_new_ptr();\n")
f.write(
- f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, "
+ f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, "
f"{regtype}{regid}V_off);\n"
)
elif regid in {"s", "t", "u", "v"}:
@@ -303,7 +303,7 @@
elif regid in {"s", "u", "v", "w"}:
if not hex_common.skip_qemu_helper(tag):
f.write(
- f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, "
+ f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, "
f"{regtype}{regid}V_off);\n"
)
elif regid in {"x", "y"}:
@@ -316,7 +316,7 @@
if regid in {"s", "t", "u", "v"}:
if not hex_common.skip_qemu_helper(tag):
f.write(
- f" tcg_gen_addi_ptr({regtype}{regid}V, cpu_env, "
+ f" tcg_gen_addi_ptr({regtype}{regid}V, tcg_env, "
f"{regtype}{regid}V_off);\n"
)
elif regid in {"x"}:
@@ -490,7 +490,7 @@
## if hex_common.skip_qemu_helper(tag) is True
## <GEN> is fGEN_TCG_A2_add({ RdV=RsV+RtV;});
## if hex_common.skip_qemu_helper(tag) is False
-## <GEN> is gen_helper_A2_add(RdV, cpu_env, RsV, RtV);
+## <GEN> is gen_helper_A2_add(RdV, tcg_env, RsV, RtV);
##
def gen_tcg_func(f, tag, regs, imms):
f.write(f"static void generate_{tag}(DisasContext *ctx)\n")
@@ -572,7 +572,7 @@
i += 1
if i > 0:
f.write(", ")
- f.write("cpu_env")
+ f.write("tcg_env")
i = 1
## For conditional instructions, we pass in the destination register
if "A_CONDEXEC" in hex_common.attribdict[tag]:
diff --git a/target/hexagon/gen_tcg_hvx.h b/target/hexagon/gen_tcg_hvx.h
index 44bae53..0da64d4 100644
--- a/target/hexagon/gen_tcg_hvx.h
+++ b/target/hexagon/gen_tcg_hvx.h
@@ -43,7 +43,7 @@
#define fGEN_TCG_V6_vhist(SHORTCODE) \
if (!ctx->pre_commit) { \
assert_vhist_tmp(ctx); \
- gen_helper_vhist(cpu_env); \
+ gen_helper_vhist(tcg_env); \
}
#define fGEN_TCG_V6_vhistq(SHORTCODE) \
do { \
@@ -53,13 +53,13 @@
sizeof(MMVector), sizeof(MMVector)); \
} else { \
assert_vhist_tmp(ctx); \
- gen_helper_vhistq(cpu_env); \
+ gen_helper_vhistq(tcg_env); \
} \
} while (0)
#define fGEN_TCG_V6_vwhist256(SHORTCODE) \
if (!ctx->pre_commit) { \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist256(cpu_env); \
+ gen_helper_vwhist256(tcg_env); \
}
#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \
do { \
@@ -69,13 +69,13 @@
sizeof(MMVector), sizeof(MMVector)); \
} else { \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist256q(cpu_env); \
+ gen_helper_vwhist256q(tcg_env); \
} \
} while (0)
#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \
if (!ctx->pre_commit) { \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist256_sat(cpu_env); \
+ gen_helper_vwhist256_sat(tcg_env); \
}
#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \
do { \
@@ -85,13 +85,13 @@
sizeof(MMVector), sizeof(MMVector)); \
} else { \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist256q_sat(cpu_env); \
+ gen_helper_vwhist256q_sat(tcg_env); \
} \
} while (0)
#define fGEN_TCG_V6_vwhist128(SHORTCODE) \
if (!ctx->pre_commit) { \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist128(cpu_env); \
+ gen_helper_vwhist128(tcg_env); \
}
#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \
do { \
@@ -101,14 +101,14 @@
sizeof(MMVector), sizeof(MMVector)); \
} else { \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist128q(cpu_env); \
+ gen_helper_vwhist128q(tcg_env); \
} \
} while (0)
#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \
if (!ctx->pre_commit) { \
TCGv tcgv_uiV = tcg_constant_tl(uiV); \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist128m(cpu_env, tcgv_uiV); \
+ gen_helper_vwhist128m(tcg_env, tcgv_uiV); \
}
#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \
do { \
@@ -119,7 +119,7 @@
} else { \
TCGv tcgv_uiV = tcg_constant_tl(uiV); \
assert_vhist_tmp(ctx); \
- gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \
+ gen_helper_vwhist128qm(tcg_env, tcgv_uiV); \
} \
} while (0)
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
index 217bc7b..dbae6c5 100644
--- a/target/hexagon/genptr.c
+++ b/target/hexagon/genptr.c
@@ -414,50 +414,50 @@
tcg_gen_mov_tl(hex_store_val32[slot], src);
}
-void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
+void gen_store1(TCGv_env tcg_env, TCGv vaddr, TCGv src, uint32_t slot)
{
gen_store32(vaddr, src, 1, slot);
}
-void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
+void gen_store1i(TCGv_env tcg_env, TCGv vaddr, int32_t src, uint32_t slot)
{
TCGv tmp = tcg_constant_tl(src);
- gen_store1(cpu_env, vaddr, tmp, slot);
+ gen_store1(tcg_env, vaddr, tmp, slot);
}
-void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
+void gen_store2(TCGv_env tcg_env, TCGv vaddr, TCGv src, uint32_t slot)
{
gen_store32(vaddr, src, 2, slot);
}
-void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
+void gen_store2i(TCGv_env tcg_env, TCGv vaddr, int32_t src, uint32_t slot)
{
TCGv tmp = tcg_constant_tl(src);
- gen_store2(cpu_env, vaddr, tmp, slot);
+ gen_store2(tcg_env, vaddr, tmp, slot);
}
-void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
+void gen_store4(TCGv_env tcg_env, TCGv vaddr, TCGv src, uint32_t slot)
{
gen_store32(vaddr, src, 4, slot);
}
-void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
+void gen_store4i(TCGv_env tcg_env, TCGv vaddr, int32_t src, uint32_t slot)
{
TCGv tmp = tcg_constant_tl(src);
- gen_store4(cpu_env, vaddr, tmp, slot);
+ gen_store4(tcg_env, vaddr, tmp, slot);
}
-void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, uint32_t slot)
+void gen_store8(TCGv_env tcg_env, TCGv vaddr, TCGv_i64 src, uint32_t slot)
{
tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
tcg_gen_movi_tl(hex_store_width[slot], 8);
tcg_gen_mov_i64(hex_store_val64[slot], src);
}
-void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, uint32_t slot)
+void gen_store8i(TCGv_env tcg_env, TCGv vaddr, int64_t src, uint32_t slot)
{
TCGv_i64 tmp = tcg_constant_i64(src);
- gen_store8(cpu_env, vaddr, tmp, slot);
+ gen_store8(tcg_env, vaddr, tmp, slot);
}
TCGv gen_8bitsof(TCGv result, TCGv value)
@@ -783,7 +783,7 @@
TCGv_i64 frame;
tcg_gen_addi_tl(r30, r29, -8);
frame = gen_frame_scramble();
- gen_store8(cpu_env, r30, frame, ctx->insn->slot);
+ gen_store8(tcg_env, r30, frame, ctx->insn->slot);
gen_log_reg_write(ctx, HEX_REG_FP, r30);
gen_framecheck(r30, framesize);
tcg_gen_subi_tl(r29, r30, framesize);
@@ -1239,7 +1239,7 @@
for (int i = 0; i < sizeof(MMVector) / 8; i++) {
tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
tcg_gen_addi_tl(src, src, 8);
- tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
+ tcg_gen_st_i64(tmp, tcg_env, dstoff + i * 8);
}
}
@@ -1251,7 +1251,7 @@
if (is_gather_store_insn(ctx)) {
TCGv sl = tcg_constant_tl(slot);
- gen_helper_gather_store(cpu_env, EA, sl);
+ gen_helper_gather_store(tcg_env, EA, sl);
return;
}
@@ -1301,7 +1301,7 @@
TCGv_i64 ones = tcg_constant_i64(~0);
for (int i = 0; i < sizeof(MMVector) / 8; i++) {
- tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
+ tcg_gen_ld_i64(tmp, tcg_env, srcoff + i * 8);
tcg_gen_movi_i64(mask, 0);
for (int j = 0; j < 8; j += size) {
@@ -1310,7 +1310,7 @@
tcg_gen_deposit_i64(mask, mask, bits, j, size);
}
- tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
+ tcg_gen_st8_i64(mask, tcg_env, dstoff + i);
}
}
@@ -1318,7 +1318,7 @@
{
TCGv size = tcg_constant_tl(s);
TCGv mem_idx = tcg_constant_tl(mi);
- gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx);
+ gen_helper_probe_noshuf_load(tcg_env, va, size, mem_idx);
}
/*
diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c
index ec43343..4af0209 100644
--- a/target/hexagon/idef-parser/parser-helpers.c
+++ b/target/hexagon/idef-parser/parser-helpers.c
@@ -1773,7 +1773,7 @@
/* Lookup the effective address EA */
find_variable(c, locp, ea, ea);
src_m = rvalue_materialize(c, locp, &src_m);
- OUT(c, locp, "gen_store", &mem_width, "(cpu_env, ", ea, ", ", &src_m);
+ OUT(c, locp, "gen_store", &mem_width, "(tcg_env, ", ea, ", ", &src_m);
OUT(c, locp, ", insn->slot);\n");
}
diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index 5451b06..b356d85 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -147,7 +147,7 @@
__builtin_choose_expr(TYPE_TCGV(X), \
gen_store1, (void)0))
#define MEM_STORE1(VA, DATA, SLOT) \
- MEM_STORE1_FUNC(DATA)(cpu_env, VA, DATA, SLOT)
+ MEM_STORE1_FUNC(DATA)(tcg_env, VA, DATA, SLOT)
#define MEM_STORE2_FUNC(X) \
__builtin_choose_expr(TYPE_INT(X), \
@@ -155,7 +155,7 @@
__builtin_choose_expr(TYPE_TCGV(X), \
gen_store2, (void)0))
#define MEM_STORE2(VA, DATA, SLOT) \
- MEM_STORE2_FUNC(DATA)(cpu_env, VA, DATA, SLOT)
+ MEM_STORE2_FUNC(DATA)(tcg_env, VA, DATA, SLOT)
#define MEM_STORE4_FUNC(X) \
__builtin_choose_expr(TYPE_INT(X), \
@@ -163,7 +163,7 @@
__builtin_choose_expr(TYPE_TCGV(X), \
gen_store4, (void)0))
#define MEM_STORE4(VA, DATA, SLOT) \
- MEM_STORE4_FUNC(DATA)(cpu_env, VA, DATA, SLOT)
+ MEM_STORE4_FUNC(DATA)(tcg_env, VA, DATA, SLOT)
#define MEM_STORE8_FUNC(X) \
__builtin_choose_expr(TYPE_INT(X), \
@@ -171,7 +171,7 @@
__builtin_choose_expr(TYPE_TCGV_I64(X), \
gen_store8, (void)0))
#define MEM_STORE8(VA, DATA, SLOT) \
- MEM_STORE8_FUNC(DATA)(cpu_env, VA, DATA, SLOT)
+ MEM_STORE8_FUNC(DATA)(tcg_env, VA, DATA, SLOT)
#else
#define MEM_LOAD1s(VA) ((int8_t)mem_load1(env, pkt_has_store_s1, slot, VA))
#define MEM_LOAD1u(VA) ((uint8_t)mem_load1(env, pkt_has_store_s1, slot, VA))
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index c00254e..663b7bb 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -115,7 +115,7 @@
static void gen_exception_raw(int excp)
{
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
}
static void gen_exec_counters(DisasContext *ctx)
@@ -528,7 +528,7 @@
if (HEX_DEBUG) {
/* Handy place to set a breakpoint before the packet executes */
- gen_helper_debug_start_packet(cpu_env);
+ gen_helper_debug_start_packet(tcg_env);
}
/* Initialize the runtime state for packet semantics */
@@ -701,7 +701,7 @@
if (HEX_DEBUG) {
TCGv slot = tcg_constant_tl(slot_num);
TCGv check = tcg_constant_tl(ctx->store_width[slot_num]);
- gen_helper_debug_check_store_width(cpu_env, slot, check);
+ gen_helper_debug_check_store_width(tcg_env, slot, check);
}
}
@@ -783,7 +783,7 @@
* avoid branching based on the width at runtime.
*/
TCGv slot = tcg_constant_tl(slot_num);
- gen_helper_commit_store(cpu_env, slot);
+ gen_helper_commit_store(tcg_env, slot);
}
}
}
@@ -882,7 +882,7 @@
}
if (pkt_has_hvx_store(ctx->pkt)) {
- gen_helper_commit_hvx_stores(cpu_env);
+ gen_helper_commit_hvx_stores(tcg_env);
}
}
@@ -942,7 +942,7 @@
} else if (has_hvx_store) {
if (!has_store_s0 && !has_store_s1) {
TCGv mem_idx = tcg_constant_tl(ctx->mem_idx);
- gen_helper_probe_hvx_stores(cpu_env, mem_idx);
+ gen_helper_probe_hvx_stores(tcg_env, mem_idx);
} else {
int mask = 0;
@@ -971,7 +971,7 @@
}
mask = FIELD_DP32(mask, PROBE_PKT_SCALAR_HVX_STORES, MMU_IDX,
ctx->mem_idx);
- gen_helper_probe_pkt_scalar_hvx_stores(cpu_env,
+ gen_helper_probe_pkt_scalar_hvx_stores(tcg_env,
tcg_constant_tl(mask));
}
} else if (has_store_s0 && has_store_s1) {
@@ -987,7 +987,7 @@
FIELD_DP32(args, PROBE_PKT_SCALAR_STORE_S0, IS_PREDICATED, 1);
}
TCGv args_tcgv = tcg_constant_tl(args);
- gen_helper_probe_pkt_scalar_store_s0(cpu_env, args_tcgv);
+ gen_helper_probe_pkt_scalar_store_s0(tcg_env, args_tcgv);
}
process_store_log(ctx);
@@ -1005,7 +1005,7 @@
tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa);
/* Handy place to set a breakpoint at the end of execution */
- gen_helper_debug_commit_end(cpu_env, tcg_constant_tl(ctx->pkt->pc),
+ gen_helper_debug_commit_end(tcg_env, tcg_constant_tl(ctx->pkt->pc),
ctx->pred_written, has_st0, has_st1);
}
@@ -1053,7 +1053,7 @@
CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- HexagonCPU *hex_cpu = env_archcpu(cs->env_ptr);
+ HexagonCPU *hex_cpu = env_archcpu(cpu_env(cs));
uint32_t hex_flags = dcbase->tb->flags;
ctx->mem_idx = MMU_USER_IDX;
@@ -1094,7 +1094,7 @@
static void hexagon_tr_translate_packet(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUHexagonState *env = cpu->env_ptr;
+ CPUHexagonState *env = cpu_env(cpu);
decode_and_translate_packet(env, ctx);
@@ -1179,68 +1179,68 @@
opcode_init();
for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
- hex_gpr[i] = tcg_global_mem_new(cpu_env,
+ hex_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, gpr[i]),
hexagon_regnames[i]);
if (HEX_DEBUG) {
snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s",
hexagon_regnames[i]);
- hex_reg_written[i] = tcg_global_mem_new(cpu_env,
+ hex_reg_written[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, reg_written[i]),
reg_written_names[i]);
}
}
- hex_new_value_usr = tcg_global_mem_new(cpu_env,
+ hex_new_value_usr = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, new_value_usr), "new_value_usr");
for (i = 0; i < NUM_PREGS; i++) {
- hex_pred[i] = tcg_global_mem_new(cpu_env,
+ hex_pred[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, pred[i]),
hexagon_prednames[i]);
}
- hex_slot_cancelled = tcg_global_mem_new(cpu_env,
+ hex_slot_cancelled = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, slot_cancelled), "slot_cancelled");
- hex_llsc_addr = tcg_global_mem_new(cpu_env,
+ hex_llsc_addr = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, llsc_addr), "llsc_addr");
- hex_llsc_val = tcg_global_mem_new(cpu_env,
+ hex_llsc_val = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, llsc_val), "llsc_val");
- hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env,
+ hex_llsc_val_i64 = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64");
for (i = 0; i < STORES_MAX; i++) {
snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i);
- hex_store_addr[i] = tcg_global_mem_new(cpu_env,
+ hex_store_addr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, mem_log_stores[i].va),
store_addr_names[i]);
snprintf(store_width_names[i], NAME_LEN, "store_width_%d", i);
- hex_store_width[i] = tcg_global_mem_new(cpu_env,
+ hex_store_width[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, mem_log_stores[i].width),
store_width_names[i]);
snprintf(store_val32_names[i], NAME_LEN, "store_val32_%d", i);
- hex_store_val32[i] = tcg_global_mem_new(cpu_env,
+ hex_store_val32[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, mem_log_stores[i].data32),
store_val32_names[i]);
snprintf(store_val64_names[i], NAME_LEN, "store_val64_%d", i);
- hex_store_val64[i] = tcg_global_mem_new_i64(cpu_env,
+ hex_store_val64[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUHexagonState, mem_log_stores[i].data64),
store_val64_names[i]);
}
for (int i = 0; i < VSTORES_MAX; i++) {
snprintf(vstore_addr_names[i], NAME_LEN, "vstore_addr_%d", i);
- hex_vstore_addr[i] = tcg_global_mem_new(cpu_env,
+ hex_vstore_addr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, vstore[i].va),
vstore_addr_names[i]);
snprintf(vstore_size_names[i], NAME_LEN, "vstore_size_%d", i);
- hex_vstore_size[i] = tcg_global_mem_new(cpu_env,
+ hex_vstore_size[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, vstore[i].size),
vstore_size_names[i]);
snprintf(vstore_pending_names[i], NAME_LEN, "vstore_pending_%d", i);
- hex_vstore_pending[i] = tcg_global_mem_new(cpu_env,
+ hex_vstore_pending[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHexagonState, vstore_pending[i]),
vstore_pending_names[i]);
}
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 11022f9..1644297 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -149,7 +149,6 @@
HPPACPU *cpu = HPPA_CPU(obj);
CPUHPPAState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
cs->exception_index = -1;
cpu_hppa_loaded_fr0(env);
cpu_hppa_put_psw(env, PSW_W);
@@ -212,6 +211,7 @@
.name = TYPE_HPPA_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(HPPACPU),
+ .instance_align = __alignof(HPPACPU),
.instance_init = hppa_cpu_initfn,
.abstract = false,
.class_size = sizeof(HPPACPUClass),
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 730f352..798d0c2 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -237,7 +237,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUHPPAState env;
QEMUTimer *alarm_timer;
};
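(Not part of the patch — a rough sketch of the object layout that the removal of CPUNegativeOffsetState and cpu_set_cpustate_pointers(), the new .instance_align, and the cs->env_ptr to cpu_env(cs) conversion above appear to rely on. FooCPU, CPUFooState and foo_cpu_env are placeholders, not the actual QEMU definitions.)
    /* Sketch: the arch env is assumed to sit directly after the common
     * CPUState inside the per-target CPU object, so the env can be reached
     * with a pointer bump instead of a cached env_ptr field. */
    struct FooCPU {
        CPUState parent_obj;    /* common state, incl. negative-offset data */
        CPUFooState env;        /* assumed to follow parent_obj directly */
    };
    static inline CPUFooState *foo_cpu_env(CPUState *cs)
    {
        return (CPUFooState *)(cs + 1);
    }
    /* .instance_align = __alignof(FooCPU) in the TypeInfo keeps this layout
     * properly aligned when QOM allocates the instance. */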
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 520fd31..350485f 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -335,7 +335,7 @@
synchronous across all processors. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
- CPUHPPAState *env = cpu->env_ptr;
+ CPUHPPAState *env = cpu_env(cpu);
target_ulong addr = (target_ulong) data.target_ptr;
hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 650bbcf..9f3ba9f 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -396,28 +396,28 @@
cpu_gr[0] = NULL;
for (i = 1; i < 32; i++) {
- cpu_gr[i] = tcg_global_mem_new(cpu_env,
+ cpu_gr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUHPPAState, gr[i]),
gr_names[i]);
}
for (i = 0; i < 4; i++) {
- cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUHPPAState, sr[i]),
sr_names[i]);
}
- cpu_srH = tcg_global_mem_new_i64(cpu_env,
+ cpu_srH = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUHPPAState, sr[4]),
sr_names[4]);
for (i = 0; i < ARRAY_SIZE(vars); ++i) {
const GlobalVar *v = &vars[i];
- *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
+ *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
}
- cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
+ cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUHPPAState, iasq_f),
"iasq_f");
- cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
+ cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUHPPAState, iasq_b),
"iasq_b");
}
@@ -563,7 +563,7 @@
static TCGv_i32 load_frw_i32(unsigned rt)
{
TCGv_i32 ret = tcg_temp_new_i32();
- tcg_gen_ld_i32(ret, cpu_env,
+ tcg_gen_ld_i32(ret, tcg_env,
offsetof(CPUHPPAState, fr[rt & 31])
+ (rt & 32 ? LO_OFS : HI_OFS));
return ret;
@@ -586,7 +586,7 @@
if (rt == 0) {
tcg_gen_movi_i64(ret, 0);
} else {
- tcg_gen_ld32u_i64(ret, cpu_env,
+ tcg_gen_ld32u_i64(ret, tcg_env,
offsetof(CPUHPPAState, fr[rt & 31])
+ (rt & 32 ? LO_OFS : HI_OFS));
}
@@ -595,7 +595,7 @@
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
- tcg_gen_st_i32(val, cpu_env,
+ tcg_gen_st_i32(val, tcg_env,
offsetof(CPUHPPAState, fr[rt & 31])
+ (rt & 32 ? LO_OFS : HI_OFS));
}
@@ -606,7 +606,7 @@
static TCGv_i64 load_frd(unsigned rt)
{
TCGv_i64 ret = tcg_temp_new_i64();
- tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
+ tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
return ret;
}
@@ -623,7 +623,7 @@
static void save_frd(unsigned rt, TCGv_i64 val)
{
- tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
+ tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
@@ -636,7 +636,7 @@
} else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
tcg_gen_mov_i64(dest, cpu_srH);
} else {
- tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
+ tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
}
#endif
}
@@ -752,7 +752,7 @@
static void gen_excp_1(int exception)
{
- gen_helper_excp(cpu_env, tcg_constant_i32(exception));
+ gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}
static void gen_excp(DisasContext *ctx, int exception)
@@ -768,7 +768,7 @@
{
nullify_over(ctx);
tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
- cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
+ tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
gen_excp(ctx, exc);
return nullify_end(ctx);
}
@@ -1138,7 +1138,7 @@
sv = do_add_sv(ctx, dest, in1, in2);
if (is_tsv) {
/* ??? Need to include overflow from shift. */
- gen_helper_tsv(cpu_env, sv);
+ gen_helper_tsv(tcg_env, sv);
}
}
@@ -1147,7 +1147,7 @@
if (is_tc) {
tmp = tcg_temp_new();
tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
- gen_helper_tcond(cpu_env, tmp);
+ gen_helper_tcond(tcg_env, tmp);
}
/* Write back the result. */
@@ -1224,7 +1224,7 @@
if (is_tsv || cond_need_sv(c)) {
sv = do_sub_sv(ctx, dest, in1, in2);
if (is_tsv) {
- gen_helper_tsv(cpu_env, sv);
+ gen_helper_tsv(tcg_env, sv);
}
}
@@ -1239,7 +1239,7 @@
if (is_tc) {
tmp = tcg_temp_new();
tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
- gen_helper_tcond(cpu_env, tmp);
+ gen_helper_tcond(tcg_env, tmp);
}
/* Write back the result. */
@@ -1358,7 +1358,7 @@
if (is_tc) {
TCGv_reg tmp = tcg_temp_new();
tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
- gen_helper_tcond(cpu_env, tmp);
+ gen_helper_tcond(tcg_env, tmp);
}
save_gpr(ctx, rt, dest);
@@ -1398,7 +1398,7 @@
tcg_gen_andi_reg(tmp, tmp, 030);
tcg_gen_trunc_reg_ptr(ptr, tmp);
- tcg_gen_add_ptr(ptr, ptr, cpu_env);
+ tcg_gen_add_ptr(ptr, ptr, tcg_env);
tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
return spc;
@@ -1559,7 +1559,7 @@
save_frw_i32(rt, tmp);
if (rt == 0) {
- gen_helper_loaded_fr0(cpu_env);
+ gen_helper_loaded_fr0(tcg_env);
}
return nullify_end(ctx);
@@ -1584,7 +1584,7 @@
save_frd(rt, tmp);
if (rt == 0) {
- gen_helper_loaded_fr0(cpu_env);
+ gen_helper_loaded_fr0(tcg_env);
}
return nullify_end(ctx);
@@ -1653,7 +1653,7 @@
nullify_over(ctx);
tmp = load_frw0_i32(ra);
- func(tmp, cpu_env, tmp);
+ func(tmp, tcg_env, tmp);
save_frw_i32(rt, tmp);
return nullify_end(ctx);
@@ -1669,7 +1669,7 @@
src = load_frd(ra);
dst = tcg_temp_new_i32();
- func(dst, cpu_env, src);
+ func(dst, tcg_env, src);
save_frw_i32(rt, dst);
return nullify_end(ctx);
@@ -1683,7 +1683,7 @@
nullify_over(ctx);
tmp = load_frd0(ra);
- func(tmp, cpu_env, tmp);
+ func(tmp, tcg_env, tmp);
save_frd(rt, tmp);
return nullify_end(ctx);
@@ -1699,7 +1699,7 @@
src = load_frw0_i32(ra);
dst = tcg_temp_new_i64();
- func(dst, cpu_env, src);
+ func(dst, tcg_env, src);
save_frd(rt, dst);
return nullify_end(ctx);
@@ -1715,7 +1715,7 @@
a = load_frw0_i32(ra);
b = load_frw0_i32(rb);
- func(a, cpu_env, a, b);
+ func(a, tcg_env, a, b);
save_frw_i32(rt, a);
return nullify_end(ctx);
@@ -1731,7 +1731,7 @@
a = load_frd0(ra);
b = load_frd0(rb);
- func(a, cpu_env, a, b);
+ func(a, tcg_env, a, b);
save_frd(rt, a);
return nullify_end(ctx);
@@ -1996,7 +1996,7 @@
break;
case 0xe0: /* SET_THREAD_POINTER */
- tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
+ tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
@@ -2105,7 +2105,7 @@
}
tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
save_gpr(ctx, rt, tmp);
done:
@@ -2129,7 +2129,7 @@
tcg_gen_shli_i64(t64, t64, 32);
if (rs >= 4) {
- tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
+ tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
ctx->tb_flags &= ~TB_FLAG_SR_SAME;
} else {
tcg_gen_mov_i64(cpu_sr[rs], t64);
@@ -2163,13 +2163,13 @@
switch (ctl) {
case CR_IT:
- gen_helper_write_interval_timer(cpu_env, reg);
+ gen_helper_write_interval_timer(tcg_env, reg);
break;
case CR_EIRR:
- gen_helper_write_eirr(cpu_env, reg);
+ gen_helper_write_eirr(tcg_env, reg);
break;
case CR_EIEM:
- gen_helper_write_eiem(cpu_env, reg);
+ gen_helper_write_eiem(tcg_env, reg);
ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
break;
@@ -2178,10 +2178,10 @@
/* FIXME: Respect PSW_Q bit */
/* The write advances the queue and stores to the back element. */
tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, cpu_env,
+ tcg_gen_ld_reg(tmp, tcg_env,
offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
- tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
- tcg_gen_st_reg(reg, cpu_env,
+ tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_reg(reg, tcg_env,
offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
break;
@@ -2189,14 +2189,14 @@
case CR_PID2:
case CR_PID3:
case CR_PID4:
- tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
- gen_helper_change_prot_id(cpu_env);
+ gen_helper_change_prot_id(tcg_env);
#endif
break;
default:
- tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
break;
}
return nullify_end(ctx);
@@ -2244,9 +2244,9 @@
nullify_over(ctx);
tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
tcg_gen_andi_reg(tmp, tmp, ~a->i);
- gen_helper_swap_system_mask(tmp, cpu_env, tmp);
+ gen_helper_swap_system_mask(tmp, tcg_env, tmp);
save_gpr(ctx, a->t, tmp);
/* Exit the TB to recognize new interrupts, e.g. PSW_M. */
@@ -2264,9 +2264,9 @@
nullify_over(ctx);
tmp = get_temp(ctx);
- tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
tcg_gen_ori_reg(tmp, tmp, a->i);
- gen_helper_swap_system_mask(tmp, cpu_env, tmp);
+ gen_helper_swap_system_mask(tmp, tcg_env, tmp);
save_gpr(ctx, a->t, tmp);
/* Exit the TB to recognize new interrupts, e.g. PSW_I. */
@@ -2284,7 +2284,7 @@
reg = load_gpr(ctx, a->r);
tmp = get_temp(ctx);
- gen_helper_swap_system_mask(tmp, cpu_env, reg);
+ gen_helper_swap_system_mask(tmp, tcg_env, reg);
/* Exit the TB to recognize new interrupts. */
ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
@@ -2299,9 +2299,9 @@
nullify_over(ctx);
if (rfi_r) {
- gen_helper_rfi_r(cpu_env);
+ gen_helper_rfi_r(tcg_env);
} else {
- gen_helper_rfi(cpu_env);
+ gen_helper_rfi(tcg_env);
}
/* Exit the TB to recognize new interrupts. */
tcg_gen_exit_tb(NULL, 0);
@@ -2326,7 +2326,7 @@
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
nullify_over(ctx);
- gen_helper_halt(cpu_env);
+ gen_helper_halt(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
return nullify_end(ctx);
#endif
@@ -2337,7 +2337,7 @@
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
nullify_over(ctx);
- gen_helper_reset(cpu_env);
+ gen_helper_reset(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
return nullify_end(ctx);
#endif
@@ -2348,7 +2348,7 @@
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
nullify_over(ctx);
- gen_helper_getshadowregs(cpu_env);
+ gen_helper_getshadowregs(tcg_env);
return nullify_end(ctx);
#endif
}
@@ -2388,7 +2388,7 @@
}
want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
- gen_helper_probe(dest, cpu_env, addr, level, want);
+ gen_helper_probe(dest, tcg_env, addr, level, want);
save_gpr(ctx, a->t, dest);
return nullify_end(ctx);
@@ -2406,9 +2406,9 @@
form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
reg = load_gpr(ctx, a->r);
if (a->addr) {
- gen_helper_itlba(cpu_env, addr, reg);
+ gen_helper_itlba(tcg_env, addr, reg);
} else {
- gen_helper_itlbp(cpu_env, addr, reg);
+ gen_helper_itlbp(tcg_env, addr, reg);
}
/* Exit TB for TLB change if mmu is enabled. */
@@ -2433,9 +2433,9 @@
save_gpr(ctx, a->b, ofs);
}
if (a->local) {
- gen_helper_ptlbe(cpu_env);
+ gen_helper_ptlbe(tcg_env);
} else {
- gen_helper_ptlb(cpu_env, addr);
+ gen_helper_ptlb(tcg_env, addr);
}
/* Exit TB for TLB change if mmu is enabled. */
@@ -2473,10 +2473,10 @@
stl = tcg_temp_new_tl();
addr = tcg_temp_new_tl();
- tcg_gen_ld32u_i64(stl, cpu_env,
+ tcg_gen_ld32u_i64(stl, tcg_env,
a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
: offsetof(CPUHPPAState, cr[CR_IIASQ]));
- tcg_gen_ld32u_i64(atl, cpu_env,
+ tcg_gen_ld32u_i64(atl, tcg_env,
a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
: offsetof(CPUHPPAState, cr[CR_IIAOQ]));
tcg_gen_shli_i64(stl, stl, 32);
@@ -2484,9 +2484,9 @@
reg = load_gpr(ctx, a->r);
if (a->addr) {
- gen_helper_itlba(cpu_env, addr, reg);
+ gen_helper_itlba(tcg_env, addr, reg);
} else {
- gen_helper_itlbp(cpu_env, addr, reg);
+ gen_helper_itlbp(tcg_env, addr, reg);
}
/* Exit TB for TLB change if mmu is enabled. */
@@ -2509,7 +2509,7 @@
form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
paddr = tcg_temp_new();
- gen_helper_lpa(paddr, cpu_env, vaddr);
+ gen_helper_lpa(paddr, tcg_env, vaddr);
/* Note that physical address result overrides base modification. */
if (a->m) {
@@ -2640,7 +2640,7 @@
nullify_set(ctx, 0);
/* Tell the qemu main loop to halt until this cpu has work. */
- tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
offsetof(CPUState, halted) - offsetof(HPPACPU, env));
gen_excp_1(EXCP_HALTED);
ctx->base.is_jmp = DISAS_NORETURN;
@@ -2907,15 +2907,15 @@
val = load_gpr(ctx, a->r);
if (a->a) {
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- gen_helper_stby_e_parallel(cpu_env, addr, val);
+ gen_helper_stby_e_parallel(tcg_env, addr, val);
} else {
- gen_helper_stby_e(cpu_env, addr, val);
+ gen_helper_stby_e(tcg_env, addr, val);
}
} else {
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- gen_helper_stby_b_parallel(cpu_env, addr, val);
+ gen_helper_stby_b_parallel(tcg_env, addr, val);
} else {
- gen_helper_stby_b(cpu_env, addr, val);
+ gen_helper_stby_b(tcg_env, addr, val);
}
}
if (a->m) {
@@ -3450,7 +3450,7 @@
#ifndef CONFIG_USER_ONLY
if (ctx->tb_flags & PSW_C) {
- CPUHPPAState *env = ctx->cs->env_ptr;
+ CPUHPPAState *env = cpu_env(ctx->cs);
int type = hppa_artype_for_page(env, ctx->base.pc_next);
/* If we could not find a TLB entry, then we need to generate an
ITLB miss exception so the kernel will provide it.
@@ -3806,7 +3806,7 @@
ty = tcg_constant_i32(a->y);
tc = tcg_constant_i32(a->c);
- gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
+ gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
return nullify_end(ctx);
}
@@ -3823,7 +3823,7 @@
ty = tcg_constant_i32(a->y);
tc = tcg_constant_i32(a->c);
- gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
+ gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
return nullify_end(ctx);
}
@@ -3835,7 +3835,7 @@
nullify_over(ctx);
t = get_temp(ctx);
- tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+ tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
if (a->y == 1) {
int mask;
@@ -4012,9 +4012,9 @@
z = load_frw0_i32(a->ra3);
if (a->neg) {
- gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
+ gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
} else {
- gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
+ gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
}
save_frw_i32(a->t, x);
@@ -4031,9 +4031,9 @@
z = load_frd0(a->ra3);
if (a->neg) {
- gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
+ gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
} else {
- gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
+ gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
}
save_frd(a->t, x);
@@ -4042,18 +4042,17 @@
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
- nullify_over(ctx);
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
if (a->i == 0x100) {
/* emulate PDC BTLB, called by SeaBIOS-hppa */
- gen_helper_diag_btlb(cpu_env);
- } else
-#endif
- {
- qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
+ nullify_over(ctx);
+ gen_helper_diag_btlb(tcg_env);
+ return nullify_end(ctx);
}
- return nullify_end(ctx);
+#endif
+ qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
+ return true;
}
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
@@ -4120,7 +4119,7 @@
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUHPPAState *env = cs->env_ptr;
+ CPUHPPAState *env = cpu_env(cs);
DisasJumpType ret;
int i, n;
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 7836aa6..9fad31b 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5976,9 +5976,10 @@
/* Versioned models: */
for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
- X86CPUModel *m = g_new0(X86CPUModel, 1);
g_autofree char *name =
x86_cpu_versioned_model_name(def, vdef->version);
+
+ m = g_new0(X86CPUModel, 1);
m->cpudef = def;
m->version = vdef->version;
m->note = vdef->note;
@@ -7589,7 +7590,6 @@
CPUX86State *env = &cpu->env;
env->nr_dies = 1;
- cpu_set_cpustate_pointers(cpu);
object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
x86_cpu_get_feature_words,
@@ -8021,6 +8021,7 @@
.name = TYPE_X86_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(X86CPU),
+ .instance_align = __alignof(X86CPU),
.instance_init = x86_cpu_initfn,
.instance_post_init = x86_cpu_post_initfn,
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index d3f377d..e187546 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1901,7 +1901,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUX86State env;
VMChangeStateEntry *vmsentry;
diff --git a/target/i386/hvf/hvf-cpu.c b/target/i386/hvf/hvf-cpu.c
index 333db59..bb0da39 100644
--- a/target/i386/hvf/hvf-cpu.c
+++ b/target/i386/hvf/hvf-cpu.c
@@ -77,7 +77,7 @@
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
- acc->cpu_realizefn = host_cpu_realizefn;
+ acc->cpu_target_realize = host_cpu_realizefn;
acc->cpu_instance_init = hvf_cpu_instance_init;
}
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
index 7237378..56c72f3 100644
--- a/target/i386/kvm/kvm-cpu.c
+++ b/target/i386/kvm/kvm-cpu.c
@@ -35,7 +35,7 @@
* x86_cpu_realize():
* -> x86_cpu_expand_features()
* -> cpu_exec_realizefn():
- * -> accel_cpu_realizefn()
+ * -> accel_cpu_common_realize()
* kvm_cpu_realizefn() -> host_cpu_realizefn()
* -> check/update ucode_rev, phys_bits, mwait
*/
@@ -190,7 +190,7 @@
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
- acc->cpu_realizefn = kvm_cpu_realizefn;
+ acc->cpu_target_realize = kvm_cpu_realizefn;
acc->cpu_instance_init = kvm_cpu_instance_init;
}
static const TypeInfo kvm_cpu_accel_type_info = {
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index af101fc..f6c7f7e 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -2699,8 +2699,6 @@
if (enable_cpu_pm) {
int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
- int ret;
-
/* Work around for kernel header with a typo. TODO: fix header and drop. */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
@@ -3610,7 +3608,7 @@
if (kvm_enabled() && cpu->enable_pmu &&
(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
uint64_t depth;
- int i, ret;
+ int ret;
/*
* Only migrate Arch LBR states when the host Arch LBR depth
@@ -3643,8 +3641,6 @@
}
if (env->mcg_cap) {
- int i;
-
kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
if (has_msr_mcg_ext_ctl) {
@@ -4041,7 +4037,6 @@
if (kvm_enabled() && cpu->enable_pmu &&
(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
uint64_t depth;
- int i, ret;
ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 066a173..fb76986 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -78,7 +78,7 @@
static void
nvmm_set_registers(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
struct nvmm_machine *mach = get_nvmm_mach();
AccelCPUState *qcpu = cpu->accel;
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
@@ -215,7 +215,7 @@
static void
nvmm_get_registers(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
struct nvmm_machine *mach = get_nvmm_mach();
AccelCPUState *qcpu = cpu->accel;
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
@@ -340,7 +340,7 @@
static bool
nvmm_can_take_int(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
AccelCPUState *qcpu = cpu->accel;
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
struct nvmm_machine *mach = get_nvmm_mach();
@@ -387,7 +387,7 @@
static void
nvmm_vcpu_pre_run(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
struct nvmm_machine *mach = get_nvmm_mach();
AccelCPUState *qcpu = cpu->accel;
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
@@ -473,8 +473,8 @@
nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
{
AccelCPUState *qcpu = cpu->accel;
- CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
uint64_t tpr;
env->eflags = exit->exitstate.rflags;
@@ -645,7 +645,7 @@
nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
struct nvmm_vcpu_exit *exit)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
int ret = 0;
qemu_mutex_lock_iothread();
@@ -678,11 +678,11 @@
static int
nvmm_vcpu_loop(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
struct nvmm_machine *mach = get_nvmm_mach();
AccelCPUState *qcpu = cpu->accel;
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
struct nvmm_vcpu_exit *exit = vcpu->exit;
int ret;
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 0db19cd..7d76f15 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -1595,7 +1595,7 @@
*/
static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
bool first = true;
X86DecodedInsn decode;
X86DecodeFunc decode_func = decode_root;
@@ -1822,7 +1822,7 @@
}
if (decode.e.special == X86_SPECIAL_MMX &&
!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
- gen_helper_enter_mmx(cpu_env);
+ gen_helper_enter_mmx(tcg_env);
}
if (decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea) {
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 45a3e55..88793ba 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -175,15 +175,15 @@
switch(ot) {
case MO_8:
gen_op_ld_v(s, MO_8, temp, s->A0);
- tcg_gen_st8_tl(temp, cpu_env, dest_ofs);
+ tcg_gen_st8_tl(temp, tcg_env, dest_ofs);
break;
case MO_16:
gen_op_ld_v(s, MO_16, temp, s->A0);
- tcg_gen_st16_tl(temp, cpu_env, dest_ofs);
+ tcg_gen_st16_tl(temp, tcg_env, dest_ofs);
break;
case MO_32:
gen_op_ld_v(s, MO_32, temp, s->A0);
- tcg_gen_st32_tl(temp, cpu_env, dest_ofs);
+ tcg_gen_st32_tl(temp, tcg_env, dest_ofs);
break;
case MO_64:
gen_ldq_env_A0(s, dest_ofs);
@@ -226,14 +226,14 @@
case X86_OP_SKIP:
return;
case X86_OP_SEG:
- tcg_gen_ld32u_tl(v, cpu_env,
+ tcg_gen_ld32u_tl(v, tcg_env,
offsetof(CPUX86State,segs[op->n].selector));
break;
case X86_OP_CR:
- tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, cr[op->n]));
+ tcg_gen_ld_tl(v, tcg_env, offsetof(CPUX86State, cr[op->n]));
break;
case X86_OP_DR:
- tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, dr[op->n]));
+ tcg_gen_ld_tl(v, tcg_env, offsetof(CPUX86State, dr[op->n]));
break;
case X86_OP_INT:
if (op->has_ea) {
@@ -273,7 +273,7 @@
op->v_ptr = tcg_temp_new_ptr();
/* The temporary points to the MMXReg or ZMMReg. */
- tcg_gen_addi_ptr(op->v_ptr, cpu_env, vector_reg_offset(op));
+ tcg_gen_addi_ptr(op->v_ptr, tcg_env, vector_reg_offset(op));
return op->v_ptr;
}
@@ -400,12 +400,12 @@
return;
}
- gen_helper_enter_mmx(cpu_env);
+ gen_helper_enter_mmx(tcg_env);
if (fn == FN_3DNOW_MOVE) {
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset);
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset);
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset);
} else {
- fn(cpu_env, OP_PTR0, OP_PTR1);
+ fn(tcg_env, OP_PTR0, OP_PTR1);
}
}
@@ -426,7 +426,7 @@
gen_illegal_opcode(s);
return;
}
- fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
} else {
SSEFunc_0_epp ps, pd, fn;
ps = s->vex_l ? ps_ymm : ps_xmm;
@@ -436,7 +436,7 @@
gen_illegal_opcode(s);
return;
}
- fn(cpu_env, OP_PTR0, OP_PTR2);
+ fn(tcg_env, OP_PTR0, OP_PTR2);
}
}
#define UNARY_FP_SSE(uname, lname) \
@@ -472,7 +472,7 @@
fn = s->prefix & PREFIX_DATA ? pd : ps;
}
if (fn) {
- fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
} else {
gen_illegal_opcode(s);
}
@@ -503,7 +503,7 @@
SSEFunc_0_eppppii ymm = s->vex_w ? gen_helper_fma4pd_ymm : gen_helper_fma4ps_ymm; \
SSEFunc_0_eppppii fn = s->vex_l ? ymm : xmm; \
\
- fn(cpu_env, OP_PTR0, ptr0, ptr1, ptr2, \
+ fn(tcg_env, OP_PTR0, ptr0, ptr1, ptr2, \
tcg_constant_i32(even), \
tcg_constant_i32((even) ^ (odd))); \
}
@@ -514,7 +514,7 @@
{ \
SSEFunc_0_eppppi fn = s->vex_w ? gen_helper_fma4sd : gen_helper_fma4ss; \
\
- fn(cpu_env, OP_PTR0, ptr0, ptr1, ptr2, \
+ fn(tcg_env, OP_PTR0, ptr0, ptr1, ptr2, \
tcg_constant_i32(flags)); \
} \
@@ -571,13 +571,13 @@
if (!ss) {
goto illegal_op;
}
- ss(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ ss(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
} else {
SSEFunc_0_epp fn = s->vex_l ? ps_ymm : ps_xmm;
if (!fn) {
goto illegal_op;
}
- fn(cpu_env, OP_PTR0, OP_PTR2);
+ fn(tcg_env, OP_PTR0, OP_PTR2);
}
return;
@@ -607,7 +607,7 @@
ps = s->vex_l ? ps_ymm : ps_xmm;
pd = s->vex_l ? pd_ymm : pd_xmm;
fn = s->prefix & PREFIX_DATA ? pd : ps;
- fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}
#define HORIZONTAL_FP_SSE(uname, lname) \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
@@ -627,8 +627,8 @@
TCGv_ptr ptr3 = tcg_temp_new_ptr();
/* The format of the fourth input is Lx */
- tcg_gen_addi_ptr(ptr3, cpu_env, ZMM_OFFSET(op3));
- fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, ptr3);
+ tcg_gen_addi_ptr(ptr3, tcg_env, ZMM_OFFSET(op3));
+ fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, ptr3);
}
#define TERNARY_SSE(uname, uvname, lname) \
static void gen_##uvname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
@@ -650,9 +650,9 @@
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
if (!s->vex_l) {
- xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
+ xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
} else {
- ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
+ ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
}
}
@@ -763,11 +763,11 @@
return;
}
if (!(s->prefix & PREFIX_DATA)) {
- mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ mmx(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
} else if (!s->vex_l) {
- xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
} else {
- ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}
}
@@ -850,9 +850,9 @@
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{ \
if (!s->vex_l) { \
- gen_helper_##lname##_xmm(cpu_env, OP_PTR1, OP_PTR2); \
+ gen_helper_##lname##_xmm(tcg_env, OP_PTR1, OP_PTR2); \
} else { \
- gen_helper_##lname##_ymm(cpu_env, OP_PTR1, OP_PTR2); \
+ gen_helper_##lname##_ymm(tcg_env, OP_PTR1, OP_PTR2); \
} \
set_cc_op(s, CC_OP_EFLAGS); \
}
@@ -864,9 +864,9 @@
SSEFunc_0_epp xmm, SSEFunc_0_epp ymm)
{
if (!s->vex_l) {
- xmm(cpu_env, OP_PTR0, OP_PTR2);
+ xmm(tcg_env, OP_PTR0, OP_PTR2);
} else {
- ymm(cpu_env, OP_PTR0, OP_PTR2);
+ ymm(tcg_env, OP_PTR0, OP_PTR2);
}
}
@@ -937,9 +937,9 @@
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
if (!s->vex_l) {
- xmm(cpu_env, OP_PTR0, OP_PTR1, imm);
+ xmm(tcg_env, OP_PTR0, OP_PTR1, imm);
} else {
- ymm(cpu_env, OP_PTR0, OP_PTR1, imm);
+ ymm(tcg_env, OP_PTR0, OP_PTR1, imm);
}
}
@@ -961,7 +961,7 @@
SSEFunc_0_eppp d = s->vex_l ? d_ymm : d_xmm;
SSEFunc_0_eppp q = s->vex_l ? q_ymm : q_xmm;
SSEFunc_0_eppp fn = s->vex_w ? q : d;
- fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}
/* VEX.W affects whether to operate on 32- or 64-bit elements. */
@@ -989,8 +989,8 @@
TCGv_ptr index = tcg_temp_new_ptr();
/* Pass third input as (index, base, scale) */
- tcg_gen_addi_ptr(index, cpu_env, ZMM_OFFSET(decode->mem.index));
- fn(cpu_env, OP_PTR0, OP_PTR1, index, s->A0, scale);
+ tcg_gen_addi_ptr(index, tcg_env, ZMM_OFFSET(decode->mem.index));
+ fn(tcg_env, OP_PTR0, OP_PTR1, index, s->A0, scale);
/*
* There are two output operands, so zero OP1's high 128 bits
@@ -1175,37 +1175,37 @@
static void gen_CVTPI2Px(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_enter_mmx(cpu_env);
+ gen_helper_enter_mmx(tcg_env);
if (s->prefix & PREFIX_DATA) {
- gen_helper_cvtpi2pd(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtpi2pd(tcg_env, OP_PTR0, OP_PTR2);
} else {
- gen_helper_cvtpi2ps(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtpi2ps(tcg_env, OP_PTR0, OP_PTR2);
}
}
static void gen_CVTPx2PI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_enter_mmx(cpu_env);
+ gen_helper_enter_mmx(tcg_env);
if (s->prefix & PREFIX_DATA) {
- gen_helper_cvtpd2pi(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtpd2pi(tcg_env, OP_PTR0, OP_PTR2);
} else {
- gen_helper_cvtps2pi(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtps2pi(tcg_env, OP_PTR0, OP_PTR2);
}
}
static void gen_CVTTPx2PI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_enter_mmx(cpu_env);
+ gen_helper_enter_mmx(tcg_env);
if (s->prefix & PREFIX_DATA) {
- gen_helper_cvttpd2pi(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvttpd2pi(tcg_env, OP_PTR0, OP_PTR2);
} else {
- gen_helper_cvttps2pi(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvttps2pi(tcg_env, OP_PTR0, OP_PTR2);
}
}
static void gen_EMMS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_emms(cpu_env);
+ gen_helper_emms(tcg_env);
}
static void gen_EXTRQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -1213,12 +1213,12 @@
TCGv_i32 length = tcg_constant_i32(decode->immediate & 63);
TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63);
- gen_helper_extrq_i(cpu_env, OP_PTR0, index, length);
+ gen_helper_extrq_i(tcg_env, OP_PTR0, index, length);
}
static void gen_EXTRQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_extrq_r(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_extrq_r(tcg_env, OP_PTR0, OP_PTR2);
}
static void gen_INSERTQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -1226,12 +1226,12 @@
TCGv_i32 length = tcg_constant_i32(decode->immediate & 63);
TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63);
- gen_helper_insertq_i(cpu_env, OP_PTR0, OP_PTR1, index, length);
+ gen_helper_insertq_i(tcg_env, OP_PTR0, OP_PTR1, index, length);
}
static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_insertq_r(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_insertq_r(tcg_env, OP_PTR0, OP_PTR2);
}
static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -1241,7 +1241,7 @@
return;
}
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1);
- gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
+ gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
}
static void gen_MASKMOV(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -1251,9 +1251,9 @@
gen_add_A0_ds_seg(s);
if (s->prefix & PREFIX_DATA) {
- gen_helper_maskmov_xmm(cpu_env, OP_PTR1, OP_PTR2, s->A0);
+ gen_helper_maskmov_xmm(tcg_env, OP_PTR1, OP_PTR2, s->A0);
} else {
- gen_helper_maskmov_mmx(cpu_env, OP_PTR1, OP_PTR2, s->A0);
+ gen_helper_maskmov_mmx(tcg_env, OP_PTR1, OP_PTR2, s->A0);
}
}
@@ -1276,11 +1276,11 @@
switch (ot) {
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_ld32u_tl(s->T0, cpu_env, decode->op[2].offset);
+ tcg_gen_ld32u_tl(s->T0, tcg_env, decode->op[2].offset);
break;
case MO_64:
#endif
- tcg_gen_ld_tl(s->T0, cpu_env, decode->op[2].offset);
+ tcg_gen_ld_tl(s->T0, tcg_env, decode->op[2].offset);
break;
default:
abort();
@@ -1298,11 +1298,11 @@
switch (ot) {
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_st32_tl(s->T1, cpu_env, lo_ofs);
+ tcg_gen_st32_tl(s->T1, tcg_env, lo_ofs);
break;
case MO_64:
#endif
- tcg_gen_st_tl(s->T1, cpu_env, lo_ofs);
+ tcg_gen_st_tl(s->T1, tcg_env, lo_ofs);
break;
default:
g_assert_not_reached();
@@ -1320,7 +1320,7 @@
ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm;
pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm;
fn = s->prefix & PREFIX_DATA ? pd : ps;
- fn(s->tmp2_i32, cpu_env, OP_PTR2);
+ fn(s->tmp2_i32, tcg_env, OP_PTR2);
tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
}
@@ -1329,7 +1329,7 @@
int vec_len = vector_len(s, decode);
int lo_ofs = vector_elem_offset(&decode->op[0], MO_64, 0);
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset);
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
if (decode->op[0].has_ea) {
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
} else {
@@ -1342,13 +1342,13 @@
* it disqualifies using oprsz < maxsz to emulate VEX128.
*/
tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, lo_ofs);
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, lo_ofs);
}
}
static void gen_MOVq_dq(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_enter_mmx(cpu_env);
+ gen_helper_enter_mmx(tcg_env);
/* Otherwise the same as any other movq. */
return gen_MOVQ(s, env, decode);
}
@@ -1380,11 +1380,11 @@
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
if (!(s->prefix & PREFIX_DATA)) {
- gen_helper_palignr_mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
+ gen_helper_palignr_mmx(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
} else if (!s->vex_l) {
- gen_helper_palignr_xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
+ gen_helper_palignr_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
} else {
- gen_helper_palignr_ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
+ gen_helper_palignr_ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
}
}
@@ -1401,14 +1401,14 @@
static void gen_PCMPESTRI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
- gen_helper_pcmpestri_xmm(cpu_env, OP_PTR1, OP_PTR2, imm);
+ gen_helper_pcmpestri_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
set_cc_op(s, CC_OP_EFLAGS);
}
static void gen_PCMPESTRM(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
- gen_helper_pcmpestrm_xmm(cpu_env, OP_PTR1, OP_PTR2, imm);
+ gen_helper_pcmpestrm_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
set_cc_op(s, CC_OP_EFLAGS);
if ((s->prefix & PREFIX_VEX) && !s->vex_l) {
tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)),
@@ -1419,14 +1419,14 @@
static void gen_PCMPISTRI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
- gen_helper_pcmpistri_xmm(cpu_env, OP_PTR1, OP_PTR2, imm);
+ gen_helper_pcmpistri_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
set_cc_op(s, CC_OP_EFLAGS);
}
static void gen_PCMPISTRM(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
- gen_helper_pcmpistrm_xmm(cpu_env, OP_PTR1, OP_PTR2, imm);
+ gen_helper_pcmpistrm_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
set_cc_op(s, CC_OP_EFLAGS);
if ((s->prefix & PREFIX_VEX) && !s->vex_l) {
tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)),
@@ -1460,18 +1460,18 @@
switch (ot) {
case MO_8:
- tcg_gen_ld8u_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val));
+ tcg_gen_ld8u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
break;
case MO_16:
- tcg_gen_ld16u_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val));
+ tcg_gen_ld16u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
break;
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_ld32u_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val));
+ tcg_gen_ld32u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
break;
case MO_64:
#endif
- tcg_gen_ld_tl(s->T0, cpu_env, vector_elem_offset(&decode->op[1], ot, val));
+ tcg_gen_ld_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
break;
default:
abort();
@@ -1507,18 +1507,18 @@
switch (ot) {
case MO_8:
- tcg_gen_st8_tl(s->T1, cpu_env, vector_elem_offset(&decode->op[0], ot, val));
+ tcg_gen_st8_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
break;
case MO_16:
- tcg_gen_st16_tl(s->T1, cpu_env, vector_elem_offset(&decode->op[0], ot, val));
+ tcg_gen_st16_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
break;
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_st32_tl(s->T1, cpu_env, vector_elem_offset(&decode->op[0], ot, val));
+ tcg_gen_st32_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
break;
case MO_64:
#endif
- tcg_gen_st_tl(s->T1, cpu_env, vector_elem_offset(&decode->op[0], ot, val));
+ tcg_gen_st_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
break;
default:
abort();
@@ -1599,7 +1599,7 @@
tcg_gen_gvec_2(offsetof(CPUX86State, xmm_t0) + xmm_offset(ot), decode->op[2].offset,
vec_len, vec_len, &g);
- tcg_gen_ld8u_tl(s->T0, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
+ tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
while (vec_len > 8) {
vec_len -= 8;
if (TCG_TARGET_HAS_extract2_tl) {
@@ -1609,9 +1609,9 @@
* loading the whole word, the shift left is avoided.
*/
#ifdef TARGET_X86_64
- tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_Q((vec_len - 1) / 8)));
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_Q((vec_len - 1) / 8)));
#else
- tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_L((vec_len - 1) / 4)));
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_L((vec_len - 1) / 4)));
#endif
tcg_gen_extract2_tl(s->T0, t, s->T0, TARGET_LONG_BITS - 8);
@@ -1621,7 +1621,7 @@
* those bits are known to be zero after ld8u, this becomes a shift+or
* if deposit is not available.
*/
- tcg_gen_ld8u_tl(t, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
+ tcg_gen_ld8u_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
tcg_gen_deposit_tl(s->T0, t, s->T0, 8, TARGET_LONG_BITS - 8);
}
}
@@ -1744,8 +1744,8 @@
tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_t0) + xmm_offset(ot),
vec_len, vec_len, 0);
- tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_t0));
- tcg_gen_st_i32(imm_v, cpu_env, offsetof(CPUX86State, xmm_t0.ZMM_L(0)));
+ tcg_gen_addi_ptr(ptr, tcg_env, offsetof(CPUX86State, xmm_t0));
+ tcg_gen_st_i32(imm_v, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_L(0)));
return ptr;
}
@@ -1755,9 +1755,9 @@
TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len);
if (s->vex_l) {
- gen_helper_psrldq_ymm(cpu_env, OP_PTR0, OP_PTR1, imm_vec);
+ gen_helper_psrldq_ymm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
} else {
- gen_helper_psrldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec);
+ gen_helper_psrldq_xmm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
}
}
@@ -1767,9 +1767,9 @@
TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len);
if (s->vex_l) {
- gen_helper_pslldq_ymm(cpu_env, OP_PTR0, OP_PTR1, imm_vec);
+ gen_helper_pslldq_ymm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
} else {
- gen_helper_pslldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec);
+ gen_helper_pslldq_xmm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
}
}
@@ -1827,7 +1827,7 @@
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
assert(!s->vex_l);
- gen_helper_aeskeygenassist_xmm(cpu_env, OP_PTR0, OP_PTR1, imm);
+ gen_helper_aeskeygenassist_xmm(tcg_env, OP_PTR0, OP_PTR1, imm);
}
static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -1836,14 +1836,14 @@
gen_illegal_opcode(s);
return;
}
- gen_helper_update_mxcsr(cpu_env);
- tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
+ gen_helper_update_mxcsr(tcg_env);
+ tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
}
static void gen_VAESIMC(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
assert(!s->vex_l);
- gen_helper_aesimc_xmm(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_aesimc_xmm(tcg_env, OP_PTR0, OP_PTR2);
}
/*
@@ -1903,32 +1903,32 @@
s->prefix & PREFIX_REPNZ ? 3 /* sd */ :
!!(s->prefix & PREFIX_DATA) /* pd */ + (s->vex_l << 2);
- gen_helper_cmp_funcs[index][b](cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ gen_helper_cmp_funcs[index][b](tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}
static void gen_VCOMI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
SSEFunc_0_epp fn;
fn = s->prefix & PREFIX_DATA ? gen_helper_comisd : gen_helper_comiss;
- fn(cpu_env, OP_PTR1, OP_PTR2);
+ fn(tcg_env, OP_PTR1, OP_PTR2);
set_cc_op(s, CC_OP_EFLAGS);
}
static void gen_VCVTPD2PS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
if (s->vex_l) {
- gen_helper_cvtpd2ps_ymm(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtpd2ps_ymm(tcg_env, OP_PTR0, OP_PTR2);
} else {
- gen_helper_cvtpd2ps_xmm(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtpd2ps_xmm(tcg_env, OP_PTR0, OP_PTR2);
}
}
static void gen_VCVTPS2PD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
if (s->vex_l) {
- gen_helper_cvtps2pd_ymm(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtps2pd_ymm(tcg_env, OP_PTR0, OP_PTR2);
} else {
- gen_helper_cvtps2pd_xmm(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_cvtps2pd_xmm(tcg_env, OP_PTR0, OP_PTR2);
}
}
@@ -1948,12 +1948,12 @@
static void gen_VCVTSD2SS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_cvtsd2ss(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ gen_helper_cvtsd2ss(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}
static void gen_VCVTSS2SD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- gen_helper_cvtss2sd(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
+ gen_helper_cvtss2sd(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}
static void gen_VCVTSI2Sx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -1967,9 +1967,9 @@
MemOp ot = decode->op[2].ot;
if (ot == MO_64) {
if (s->prefix & PREFIX_REPNZ) {
- gen_helper_cvtsq2sd(cpu_env, OP_PTR0, s->T1);
+ gen_helper_cvtsq2sd(tcg_env, OP_PTR0, s->T1);
} else {
- gen_helper_cvtsq2ss(cpu_env, OP_PTR0, s->T1);
+ gen_helper_cvtsq2ss(tcg_env, OP_PTR0, s->T1);
}
return;
}
@@ -1980,9 +1980,9 @@
#endif
if (s->prefix & PREFIX_REPNZ) {
- gen_helper_cvtsi2sd(cpu_env, OP_PTR0, in);
+ gen_helper_cvtsi2sd(tcg_env, OP_PTR0, in);
} else {
- gen_helper_cvtsi2ss(cpu_env, OP_PTR0, in);
+ gen_helper_cvtsi2ss(tcg_env, OP_PTR0, in);
}
}
@@ -1996,9 +1996,9 @@
MemOp ot = decode->op[0].ot;
if (ot == MO_64) {
if (s->prefix & PREFIX_REPNZ) {
- sd2sq(s->T0, cpu_env, OP_PTR2);
+ sd2sq(s->T0, tcg_env, OP_PTR2);
} else {
- ss2sq(s->T0, cpu_env, OP_PTR2);
+ ss2sq(s->T0, tcg_env, OP_PTR2);
}
return;
}
@@ -2008,9 +2008,9 @@
out = s->T0;
#endif
if (s->prefix & PREFIX_REPNZ) {
- sd2si(out, cpu_env, OP_PTR2);
+ sd2si(out, tcg_env, OP_PTR2);
} else {
- ss2si(out, cpu_env, OP_PTR2);
+ ss2si(out, tcg_env, OP_PTR2);
}
#ifdef TARGET_X86_64
tcg_gen_extu_i32_tl(s->T0, out);
@@ -2072,7 +2072,7 @@
}
if (new_mask != (val & 15)) {
- tcg_gen_st_i32(s->tmp2_i32, cpu_env,
+ tcg_gen_st_i32(s->tmp2_i32, tcg_env,
vector_elem_offset(&decode->op[0], MO_32, dest_word));
}
@@ -2081,7 +2081,7 @@
int i;
for (i = 0; i < 4; i++) {
if ((val >> i) & 1) {
- tcg_gen_st_i32(zero, cpu_env,
+ tcg_gen_st_i32(zero, tcg_env,
vector_elem_offset(&decode->op[0], MO_32, i));
}
}
@@ -2091,7 +2091,7 @@
static void gen_VINSERTPS_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
int val = decode->immediate;
- tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
+ tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
vector_elem_offset(&decode->op[2], MO_32, (val >> 6) & 3));
gen_vinsertps(s, env, decode);
}
@@ -2117,9 +2117,9 @@
SSEFunc_0_eppt xmm, SSEFunc_0_eppt ymm)
{
if (!s->vex_l) {
- xmm(cpu_env, OP_PTR2, OP_PTR1, s->A0);
+ xmm(tcg_env, OP_PTR2, OP_PTR1, s->A0);
} else {
- ymm(cpu_env, OP_PTR2, OP_PTR1, s->A0);
+ ymm(tcg_env, OP_PTR2, OP_PTR1, s->A0);
}
}
@@ -2137,8 +2137,8 @@
{
gen_ldq_env_A0(s, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
}
@@ -2150,32 +2150,32 @@
static void gen_VMOVHPx(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
if (decode->op[0].offset != decode->op[2].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
}
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
}
static void gen_VMOVHLPS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
}
}
static void gen_VMOVLHPS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
if (decode->op[0].offset != decode->op[1].offset) {
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
}
@@ -2188,9 +2188,9 @@
{
int vec_len = vector_len(s, decode);
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(0)));
tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
}
static void gen_VMOVLPx_ld(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -2266,21 +2266,21 @@
static void gen_VPHMINPOSUW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
assert(!s->vex_l);
- gen_helper_phminposuw_xmm(cpu_env, OP_PTR0, OP_PTR2);
+ gen_helper_phminposuw_xmm(tcg_env, OP_PTR0, OP_PTR2);
}
static void gen_VROUNDSD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
assert(!s->vex_l);
- gen_helper_roundsd_xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
+ gen_helper_roundsd_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
}
static void gen_VROUNDSS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
assert(!s->vex_l);
- gen_helper_roundss_xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
+ gen_helper_roundss_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
}
static void gen_VSHUF(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
@@ -2297,7 +2297,7 @@
{
SSEFunc_0_epp fn;
fn = s->prefix & PREFIX_DATA ? gen_helper_ucomisd : gen_helper_ucomiss;
- fn(cpu_env, OP_PTR1, OP_PTR2);
+ fn(tcg_env, OP_PTR1, OP_PTR2);
set_cc_op(s, CC_OP_EFLAGS);
}
@@ -2305,7 +2305,7 @@
{
TCGv_ptr ptr = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_regs));
+ tcg_gen_addi_ptr(ptr, tcg_env, offsetof(CPUX86State, xmm_regs));
gen_helper_memset(ptr, ptr, tcg_constant_i32(0),
tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg)));
}
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index e8d19c6..2b92aee 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -226,14 +226,29 @@
}
}
+static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
+ uintptr_t retaddr)
+{
+ target_ulong ptr = env->gdt.base + (env->tr.selector & ~7);
+ uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+
+ if (value) {
+ e2 |= DESC_TSS_BUSY_MASK;
+ } else {
+ e2 &= ~DESC_TSS_BUSY_MASK;
+ }
+
+ cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+}
+
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
-/* XXX: restore CPU state in registers (PowerPC case) */
-static void switch_tss_ra(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip, uintptr_t retaddr)
+/* return 0 if switching to a 16-bit selector */
+static int switch_tss_ra(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, uintptr_t retaddr)
{
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
target_ulong tss_base;
@@ -341,13 +356,7 @@
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
- target_ulong ptr;
- uint32_t e2;
-
- ptr = env->gdt.base + (env->tr.selector & ~7);
- e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
- e2 &= ~DESC_TSS_BUSY_MASK;
- cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+ tss_set_busy(env, env->tr.selector, 0, retaddr);
}
old_eflags = cpu_compute_eflags(env);
if (source == SWITCH_TSS_IRET) {
@@ -399,13 +408,7 @@
/* set busy bit */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
- target_ulong ptr;
- uint32_t e2;
-
- ptr = env->gdt.base + (tss_selector & ~7);
- e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
- e2 |= DESC_TSS_BUSY_MASK;
- cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+ tss_set_busy(env, tss_selector, 1, retaddr);
}
/* set the new CPU state */
@@ -499,13 +502,14 @@
cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
}
#endif
+ return type >> 3;
}
-static void switch_tss(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip)
+static int switch_tss(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip)
{
- switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+ return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
@@ -647,14 +651,11 @@
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
}
- switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
+ shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
if (has_error_code) {
- int type;
uint32_t mask;
/* push the error code */
- type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
- shift = type >> 3;
if (env->segs[R_SS].flags & DESC_B_MASK) {
mask = 0xffffffff;
} else {
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index 226689a..5b86f43 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -597,7 +597,7 @@
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
- CPUX86State *env = cs->env_ptr;
+ CPUX86State *env = cpu_env(cs);
TranslateResult out;
TranslateFault err;
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index 2d27731..32ff0db 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -387,8 +387,6 @@
env->hflags2 |= HF2_GIF_MASK;
if (ctl_has_irq(env)) {
- CPUState *cs = env_cpu(env);
-
cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
}
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index b942c30..2c6a12c 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -51,7 +51,7 @@
{
/* The instruction pointer is always up to date with CF_PCREL. */
if (!(tb_cflags(tb) & CF_PCREL)) {
- CPUX86State *env = cs->env_ptr;
+ CPUX86State *env = cpu_env(cs);
env->eip = tb->pc - tb->cs_base;
}
}
@@ -163,7 +163,7 @@
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
#ifndef CONFIG_USER_ONLY
- acc->cpu_realizefn = tcg_cpu_realizefn;
+ acc->cpu_target_realize = tcg_cpu_realizefn;
#endif /* CONFIG_USER_ONLY */
acc->cpu_class_init = tcg_cpu_class_init;
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index c98e42f..4f12873 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -695,7 +695,7 @@
static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
- tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
+ tcg_gen_ld32s_tl(s->T0, tcg_env, offsetof(CPUX86State, df));
tcg_gen_shli_tl(s->T0, s->T0, ot);
};
@@ -761,13 +761,13 @@
{
switch (ot) {
case MO_8:
- gen_helper_inb(v, cpu_env, n);
+ gen_helper_inb(v, tcg_env, n);
break;
case MO_16:
- gen_helper_inw(v, cpu_env, n);
+ gen_helper_inw(v, tcg_env, n);
break;
case MO_32:
- gen_helper_inl(v, cpu_env, n);
+ gen_helper_inl(v, tcg_env, n);
break;
default:
g_assert_not_reached();
@@ -778,13 +778,13 @@
{
switch (ot) {
case MO_8:
- gen_helper_outb(cpu_env, v, n);
+ gen_helper_outb(tcg_env, v, n);
break;
case MO_16:
- gen_helper_outw(cpu_env, v, n);
+ gen_helper_outw(tcg_env, v, n);
break;
case MO_32:
- gen_helper_outl(cpu_env, v, n);
+ gen_helper_outl(tcg_env, v, n);
break;
default:
g_assert_not_reached();
@@ -807,7 +807,7 @@
return false;
#else
if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
- gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
+ gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
}
if (GUEST(s)) {
gen_update_cc_op(s);
@@ -816,7 +816,7 @@
svm_flags |= SVM_IOIO_REP_MASK;
}
svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
- gen_helper_svm_check_io(cpu_env, port,
+ gen_helper_svm_check_io(tcg_env, port,
tcg_constant_i32(svm_flags),
cur_insn_len_i32(s));
}
@@ -1298,7 +1298,7 @@
#else
TCGv_i32 t_size = tcg_constant_i32(1 << ot);
TCGv t_next = eip_next_tl(s);
- gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
+ gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
}
}
@@ -1388,28 +1388,28 @@
{
switch (op) {
case 0:
- gen_helper_fadd_ST0_FT0(cpu_env);
+ gen_helper_fadd_ST0_FT0(tcg_env);
break;
case 1:
- gen_helper_fmul_ST0_FT0(cpu_env);
+ gen_helper_fmul_ST0_FT0(tcg_env);
break;
case 2:
- gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fcom_ST0_FT0(tcg_env);
break;
case 3:
- gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fcom_ST0_FT0(tcg_env);
break;
case 4:
- gen_helper_fsub_ST0_FT0(cpu_env);
+ gen_helper_fsub_ST0_FT0(tcg_env);
break;
case 5:
- gen_helper_fsubr_ST0_FT0(cpu_env);
+ gen_helper_fsubr_ST0_FT0(tcg_env);
break;
case 6:
- gen_helper_fdiv_ST0_FT0(cpu_env);
+ gen_helper_fdiv_ST0_FT0(tcg_env);
break;
case 7:
- gen_helper_fdivr_ST0_FT0(cpu_env);
+ gen_helper_fdivr_ST0_FT0(tcg_env);
break;
}
}
@@ -1420,22 +1420,22 @@
TCGv_i32 tmp = tcg_constant_i32(opreg);
switch (op) {
case 0:
- gen_helper_fadd_STN_ST0(cpu_env, tmp);
+ gen_helper_fadd_STN_ST0(tcg_env, tmp);
break;
case 1:
- gen_helper_fmul_STN_ST0(cpu_env, tmp);
+ gen_helper_fmul_STN_ST0(tcg_env, tmp);
break;
case 4:
- gen_helper_fsubr_STN_ST0(cpu_env, tmp);
+ gen_helper_fsubr_STN_ST0(tcg_env, tmp);
break;
case 5:
- gen_helper_fsub_STN_ST0(cpu_env, tmp);
+ gen_helper_fsub_STN_ST0(tcg_env, tmp);
break;
case 6:
- gen_helper_fdivr_STN_ST0(cpu_env, tmp);
+ gen_helper_fdivr_STN_ST0(tcg_env, tmp);
break;
case 7:
- gen_helper_fdiv_STN_ST0(cpu_env, tmp);
+ gen_helper_fdiv_STN_ST0(tcg_env, tmp);
break;
}
}
@@ -1444,7 +1444,7 @@
{
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
s->base.is_jmp = DISAS_NORETURN;
}
@@ -1923,17 +1923,17 @@
if (is_right) {
switch (ot) {
case MO_8:
- gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
break;
case MO_16:
- gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
break;
case MO_32:
- gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
break;
#endif
default:
@@ -1942,17 +1942,17 @@
} else {
switch (ot) {
case MO_8:
- gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
break;
case MO_16:
- gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
break;
case MO_32:
- gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
+ gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
break;
#endif
default:
@@ -2354,7 +2354,7 @@
}
tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
- gen_helper_bndck(cpu_env, s->tmp2_i32);
+ gen_helper_bndck(tcg_env, s->tmp2_i32);
}
/* used for LEA and MOV AX, mem */
@@ -2512,14 +2512,14 @@
static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
{
- tcg_gen_ld32u_tl(s->T0, cpu_env,
+ tcg_gen_ld32u_tl(s->T0, tcg_env,
offsetof(CPUX86State,segs[seg_reg].selector));
}
static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
{
tcg_gen_ext16u_tl(s->T0, s->T0);
- tcg_gen_st32_tl(s->T0, cpu_env,
+ tcg_gen_st32_tl(s->T0, tcg_env,
offsetof(CPUX86State,segs[seg_reg].selector));
tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
}
@@ -2530,7 +2530,7 @@
{
if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
+ gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
stop as a special handling must be done to disable hardware
@@ -2554,7 +2554,7 @@
if (likely(!GUEST(s))) {
return;
}
- gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
+ gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
}
static inline void gen_stack_update(DisasContext *s, int addend)
@@ -2724,7 +2724,7 @@
{
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno),
+ gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
cur_insn_len_i32(s));
s->base.is_jmp = DISAS_NORETURN;
}
@@ -2733,9 +2733,9 @@
{
if ((s->flags & mask) == 0) {
TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
tcg_gen_ori_i32(t, t, mask);
- tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
s->flags |= mask;
}
}
@@ -2744,9 +2744,9 @@
{
if (s->flags & mask) {
TCGv_i32 t = tcg_temp_new_i32();
- tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
tcg_gen_andi_i32(t, t, ~mask);
- tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
s->flags &= ~mask;
}
}
@@ -2755,18 +2755,18 @@
{
TCGv t = tcg_temp_new();
- tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
tcg_gen_ori_tl(t, t, mask);
- tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+ tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}
static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
TCGv t = tcg_temp_new();
- tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
tcg_gen_andi_tl(t, t, ~mask);
- tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
+ tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}
/* Clear BND registers during legacy branches. */
@@ -2778,7 +2778,7 @@
if ((s->prefix & PREFIX_REPNZ) == 0
&& (s->flags & HF_MPX_EN_MASK) != 0
&& (s->flags & HF_MPX_IU_MASK) != 0) {
- gen_helper_bnd_jmp(cpu_env);
+ gen_helper_bnd_jmp(tcg_env);
}
}
@@ -2802,10 +2802,10 @@
gen_reset_eflags(s, RF_MASK);
}
if (recheck_tf) {
- gen_helper_rechecking_single_step(cpu_env);
+ gen_helper_rechecking_single_step(tcg_env);
tcg_gen_exit_tb(NULL, 0);
} else if (s->flags & HF_TF_MASK) {
- gen_helper_single_step(cpu_env);
+ gen_helper_single_step(tcg_env);
} else if (jr) {
tcg_gen_lookup_and_goto_ptr();
} else {
@@ -2907,12 +2907,12 @@
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
}
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
@@ -2921,20 +2921,20 @@
int mem_index = s->mem_index;
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
MO_LEUQ | (align ? MO_ALIGN_16 : 0));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0)));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1)));
}
static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
int mem_index = s->mem_index;
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0)));
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
MO_LEUQ | (align ? MO_ALIGN_16 : 0));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1)));
tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}
@@ -2943,33 +2943,33 @@
int mem_index = s->mem_index;
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
MO_LEUQ | (align ? MO_ALIGN_32 : 0));
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0)));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1)));
tcg_gen_addi_tl(s->tmp0, s->A0, 16);
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2)));
tcg_gen_addi_tl(s->tmp0, s->A0, 24);
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
+ tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3)));
}
static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
int mem_index = s->mem_index;
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0)));
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
MO_LEUQ | (align ? MO_ALIGN_32 : 0));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1)));
tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->tmp0, s->A0, 16);
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2)));
tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->tmp0, s->A0, 24);
- tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
+ tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3)));
tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}
@@ -3079,7 +3079,7 @@
be stopped. Return the next pc value */
static bool disas_insn(DisasContext *s, CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
int b, prefixes;
int shift;
MemOp ot, aflag, dflag;
@@ -3242,7 +3242,7 @@
case 0x30 ... 0x35:
case 0x38 ... 0x3d:
{
- int op, f, val;
+ int f;
op = (b >> 3) & 7;
f = (b >> 1) & 3;
@@ -3302,8 +3302,6 @@
case 0x81:
case 0x83:
{
- int val;
-
ot = mo_b_d(b, dflag);
modrm = x86_ldub_code(env, s);
@@ -3533,18 +3531,18 @@
case 6: /* div */
switch(ot) {
case MO_8:
- gen_helper_divb_AL(cpu_env, s->T0);
+ gen_helper_divb_AL(tcg_env, s->T0);
break;
case MO_16:
- gen_helper_divw_AX(cpu_env, s->T0);
+ gen_helper_divw_AX(tcg_env, s->T0);
break;
default:
case MO_32:
- gen_helper_divl_EAX(cpu_env, s->T0);
+ gen_helper_divl_EAX(tcg_env, s->T0);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_divq_EAX(cpu_env, s->T0);
+ gen_helper_divq_EAX(tcg_env, s->T0);
break;
#endif
}
@@ -3552,18 +3550,18 @@
case 7: /* idiv */
switch(ot) {
case MO_8:
- gen_helper_idivb_AL(cpu_env, s->T0);
+ gen_helper_idivb_AL(tcg_env, s->T0);
break;
case MO_16:
- gen_helper_idivw_AX(cpu_env, s->T0);
+ gen_helper_idivw_AX(tcg_env, s->T0);
break;
default:
case MO_32:
- gen_helper_idivl_EAX(cpu_env, s->T0);
+ gen_helper_idivl_EAX(tcg_env, s->T0);
break;
#ifdef TARGET_X86_64
case MO_64:
- gen_helper_idivq_EAX(cpu_env, s->T0);
+ gen_helper_idivq_EAX(tcg_env, s->T0);
break;
#endif
}
@@ -3638,13 +3636,13 @@
do_lcall:
if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
+ gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
tcg_constant_i32(dflag - 1),
eip_next_tl(s));
} else {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
- gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32,
+ gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
tcg_constant_i32(dflag - 1),
eip_next_i32(s));
}
@@ -3668,7 +3666,7 @@
do_ljmp:
if (PE(s) && !VM86(s)) {
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
+ gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
eip_next_tl(s));
} else {
gen_op_movl_seg_T0_vm(s, R_CS);
@@ -3935,7 +3933,7 @@
if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
goto illegal_op;
}
- gen_helper_rdpid(s->T0, cpu_env);
+ gen_helper_rdpid(s->T0, tcg_env);
rm = (modrm & 7) | REX_B(s);
gen_op_mov_reg_v(s, dflag, rm, s->T0);
break;
@@ -3954,7 +3952,7 @@
}
do_rdrand:
translator_io_start(&s->base);
- gen_helper_rdrand(s->T0, cpu_env);
+ gen_helper_rdrand(s->T0, tcg_env);
rm = (modrm & 7) | REX_B(s);
gen_op_mov_reg_v(s, dflag, rm, s->T0);
set_cc_op(s, CC_OP_EFLAGS);
@@ -4412,30 +4410,30 @@
case 0:
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUL);
- gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
+ gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUL);
- gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
+ gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
s->mem_index, MO_LEUQ);
- gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
+ gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LESW);
- gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
+ gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
break;
}
gen_helper_fp_arith_ST0_FT0(op1);
if (op1 == 3) {
/* fcomp needs pop */
- gen_helper_fpop(cpu_env);
+ gen_helper_fpop(tcg_env);
}
}
break;
@@ -4451,23 +4449,23 @@
case 0:
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUL);
- gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
+ gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
break;
case 1:
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUL);
- gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
+ gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
s->mem_index, MO_LEUQ);
- gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
+ gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
break;
case 3:
default:
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LESW);
- gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
+ gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
break;
}
break;
@@ -4475,116 +4473,116 @@
/* XXX: the corresponding CPUID bit must be tested ! */
switch (op >> 4) {
case 1:
- gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
+ gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUL);
break;
case 2:
- gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
+ gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
s->mem_index, MO_LEUQ);
break;
case 3:
default:
- gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
+ gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUW);
break;
}
- gen_helper_fpop(cpu_env);
+ gen_helper_fpop(tcg_env);
break;
default:
switch (op >> 4) {
case 0:
- gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
+ gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUL);
break;
case 1:
- gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
+ gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUL);
break;
case 2:
- gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
+ gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
s->mem_index, MO_LEUQ);
break;
case 3:
default:
- gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
+ gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUW);
break;
}
if ((op & 7) == 3) {
- gen_helper_fpop(cpu_env);
+ gen_helper_fpop(tcg_env);
}
break;
}
break;
case 0x0c: /* fldenv mem */
- gen_helper_fldenv(cpu_env, s->A0,
+ gen_helper_fldenv(tcg_env, s->A0,
tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x0d: /* fldcw mem */
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUW);
- gen_helper_fldcw(cpu_env, s->tmp2_i32);
+ gen_helper_fldcw(tcg_env, s->tmp2_i32);
update_fip = update_fdp = false;
break;
case 0x0e: /* fnstenv mem */
- gen_helper_fstenv(cpu_env, s->A0,
+ gen_helper_fstenv(tcg_env, s->A0,
tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x0f: /* fnstcw mem */
- gen_helper_fnstcw(s->tmp2_i32, cpu_env);
+ gen_helper_fnstcw(s->tmp2_i32, tcg_env);
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUW);
update_fip = update_fdp = false;
break;
case 0x1d: /* fldt mem */
- gen_helper_fldt_ST0(cpu_env, s->A0);
+ gen_helper_fldt_ST0(tcg_env, s->A0);
break;
case 0x1f: /* fstpt mem */
- gen_helper_fstt_ST0(cpu_env, s->A0);
- gen_helper_fpop(cpu_env);
+ gen_helper_fstt_ST0(tcg_env, s->A0);
+ gen_helper_fpop(tcg_env);
break;
case 0x2c: /* frstor mem */
- gen_helper_frstor(cpu_env, s->A0,
+ gen_helper_frstor(tcg_env, s->A0,
tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x2e: /* fnsave mem */
- gen_helper_fsave(cpu_env, s->A0,
+ gen_helper_fsave(tcg_env, s->A0,
tcg_constant_i32(dflag - 1));
update_fip = update_fdp = false;
break;
case 0x2f: /* fnstsw mem */
- gen_helper_fnstsw(s->tmp2_i32, cpu_env);
+ gen_helper_fnstsw(s->tmp2_i32, tcg_env);
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
s->mem_index, MO_LEUW);
update_fip = update_fdp = false;
break;
case 0x3c: /* fbld */
- gen_helper_fbld_ST0(cpu_env, s->A0);
+ gen_helper_fbld_ST0(tcg_env, s->A0);
break;
case 0x3e: /* fbstp */
- gen_helper_fbst_ST0(cpu_env, s->A0);
- gen_helper_fpop(cpu_env);
+ gen_helper_fbst_ST0(tcg_env, s->A0);
+ gen_helper_fpop(tcg_env);
break;
case 0x3d: /* fildll */
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
s->mem_index, MO_LEUQ);
- gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
+ gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
break;
case 0x3f: /* fistpll */
- gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
+ gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
s->mem_index, MO_LEUQ);
- gen_helper_fpop(cpu_env);
+ gen_helper_fpop(tcg_env);
break;
default:
goto unknown_op;
@@ -4593,12 +4591,12 @@
if (update_fdp) {
int last_seg = s->override >= 0 ? s->override : a.def_seg;
- tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
+ tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
offsetof(CPUX86State,
segs[last_seg].selector));
- tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
+ tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
offsetof(CPUX86State, fpds));
- tcg_gen_st_tl(last_addr, cpu_env,
+ tcg_gen_st_tl(last_addr, tcg_env,
offsetof(CPUX86State, fpdp));
}
} else {
@@ -4607,14 +4605,14 @@
switch (op) {
case 0x08: /* fld sti */
- gen_helper_fpush(cpu_env);
- gen_helper_fmov_ST0_STN(cpu_env,
+ gen_helper_fpush(tcg_env);
+ gen_helper_fmov_ST0_STN(tcg_env,
tcg_constant_i32((opreg + 1) & 7));
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
- gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
+ gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
break;
case 0x0a: /* grp d9/2 */
switch (rm) {
@@ -4624,7 +4622,7 @@
* needs to be treated as I/O because of ferr_irq
*/
translator_io_start(&s->base);
- gen_helper_fwait(cpu_env);
+ gen_helper_fwait(tcg_env);
update_fip = false;
break;
default:
@@ -4634,17 +4632,17 @@
case 0x0c: /* grp d9/4 */
switch (rm) {
case 0: /* fchs */
- gen_helper_fchs_ST0(cpu_env);
+ gen_helper_fchs_ST0(tcg_env);
break;
case 1: /* fabs */
- gen_helper_fabs_ST0(cpu_env);
+ gen_helper_fabs_ST0(tcg_env);
break;
case 4: /* ftst */
- gen_helper_fldz_FT0(cpu_env);
- gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fldz_FT0(tcg_env);
+ gen_helper_fcom_ST0_FT0(tcg_env);
break;
case 5: /* fxam */
- gen_helper_fxam_ST0(cpu_env);
+ gen_helper_fxam_ST0(tcg_env);
break;
default:
goto unknown_op;
@@ -4654,32 +4652,32 @@
{
switch (rm) {
case 0:
- gen_helper_fpush(cpu_env);
- gen_helper_fld1_ST0(cpu_env);
+ gen_helper_fpush(tcg_env);
+ gen_helper_fld1_ST0(tcg_env);
break;
case 1:
- gen_helper_fpush(cpu_env);
- gen_helper_fldl2t_ST0(cpu_env);
+ gen_helper_fpush(tcg_env);
+ gen_helper_fldl2t_ST0(tcg_env);
break;
case 2:
- gen_helper_fpush(cpu_env);
- gen_helper_fldl2e_ST0(cpu_env);
+ gen_helper_fpush(tcg_env);
+ gen_helper_fldl2e_ST0(tcg_env);
break;
case 3:
- gen_helper_fpush(cpu_env);
- gen_helper_fldpi_ST0(cpu_env);
+ gen_helper_fpush(tcg_env);
+ gen_helper_fldpi_ST0(tcg_env);
break;
case 4:
- gen_helper_fpush(cpu_env);
- gen_helper_fldlg2_ST0(cpu_env);
+ gen_helper_fpush(tcg_env);
+ gen_helper_fldlg2_ST0(tcg_env);
break;
case 5:
- gen_helper_fpush(cpu_env);
- gen_helper_fldln2_ST0(cpu_env);
+ gen_helper_fpush(tcg_env);
+ gen_helper_fldln2_ST0(tcg_env);
break;
case 6:
- gen_helper_fpush(cpu_env);
- gen_helper_fldz_ST0(cpu_env);
+ gen_helper_fpush(tcg_env);
+ gen_helper_fldz_ST0(tcg_env);
break;
default:
goto unknown_op;
@@ -4689,58 +4687,58 @@
case 0x0e: /* grp d9/6 */
switch (rm) {
case 0: /* f2xm1 */
- gen_helper_f2xm1(cpu_env);
+ gen_helper_f2xm1(tcg_env);
break;
case 1: /* fyl2x */
- gen_helper_fyl2x(cpu_env);
+ gen_helper_fyl2x(tcg_env);
break;
case 2: /* fptan */
- gen_helper_fptan(cpu_env);
+ gen_helper_fptan(tcg_env);
break;
case 3: /* fpatan */
- gen_helper_fpatan(cpu_env);
+ gen_helper_fpatan(tcg_env);
break;
case 4: /* fxtract */
- gen_helper_fxtract(cpu_env);
+ gen_helper_fxtract(tcg_env);
break;
case 5: /* fprem1 */
- gen_helper_fprem1(cpu_env);
+ gen_helper_fprem1(tcg_env);
break;
case 6: /* fdecstp */
- gen_helper_fdecstp(cpu_env);
+ gen_helper_fdecstp(tcg_env);
break;
default:
case 7: /* fincstp */
- gen_helper_fincstp(cpu_env);
+ gen_helper_fincstp(tcg_env);
break;
}
break;
case 0x0f: /* grp d9/7 */
switch (rm) {
case 0: /* fprem */
- gen_helper_fprem(cpu_env);
+ gen_helper_fprem(tcg_env);
break;
case 1: /* fyl2xp1 */
- gen_helper_fyl2xp1(cpu_env);
+ gen_helper_fyl2xp1(tcg_env);
break;
case 2: /* fsqrt */
- gen_helper_fsqrt(cpu_env);
+ gen_helper_fsqrt(tcg_env);
break;
case 3: /* fsincos */
- gen_helper_fsincos(cpu_env);
+ gen_helper_fsincos(tcg_env);
break;
case 5: /* fscale */
- gen_helper_fscale(cpu_env);
+ gen_helper_fscale(tcg_env);
break;
case 4: /* frndint */
- gen_helper_frndint(cpu_env);
+ gen_helper_frndint(tcg_env);
break;
case 6: /* fsin */
- gen_helper_fsin(cpu_env);
+ gen_helper_fsin(tcg_env);
break;
default:
case 7: /* fcos */
- gen_helper_fcos(cpu_env);
+ gen_helper_fcos(tcg_env);
break;
}
break;
@@ -4754,10 +4752,10 @@
if (op >= 0x20) {
gen_helper_fp_arith_STN_ST0(op1, opreg);
if (op >= 0x30) {
- gen_helper_fpop(cpu_env);
+ gen_helper_fpop(tcg_env);
}
} else {
- gen_helper_fmov_FT0_STN(cpu_env,
+ gen_helper_fmov_FT0_STN(tcg_env,
tcg_constant_i32(opreg));
gen_helper_fp_arith_ST0_FT0(op1);
}
@@ -4765,23 +4763,23 @@
break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fcom_ST0_FT0(tcg_env);
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fcom_ST0_FT0(cpu_env);
- gen_helper_fpop(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fcom_ST0_FT0(tcg_env);
+ gen_helper_fpop(tcg_env);
break;
case 0x15: /* da/5 */
switch (rm) {
case 1: /* fucompp */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
- gen_helper_fucom_ST0_FT0(cpu_env);
- gen_helper_fpop(cpu_env);
- gen_helper_fpop(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
+ gen_helper_fucom_ST0_FT0(tcg_env);
+ gen_helper_fpop(tcg_env);
+ gen_helper_fpop(tcg_env);
break;
default:
goto unknown_op;
@@ -4794,11 +4792,11 @@
case 1: /* fdisi (287 only, just do nop here) */
break;
case 2: /* fclex */
- gen_helper_fclex(cpu_env);
+ gen_helper_fclex(tcg_env);
update_fip = false;
break;
case 3: /* fninit */
- gen_helper_fninit(cpu_env);
+ gen_helper_fninit(tcg_env);
update_fip = false;
break;
case 4: /* fsetpm (287 only, just do nop here) */
@@ -4812,8 +4810,8 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fucomi_ST0_FT0(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fucomi_ST0_FT0(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x1e: /* fcomi */
@@ -4821,52 +4819,52 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fcomi_ST0_FT0(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fcomi_ST0_FT0(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x28: /* ffree sti */
- gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
+ gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
break;
case 0x2a: /* fst sti */
- gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
+ gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
- gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fpop(cpu_env);
+ gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fpop(tcg_env);
break;
case 0x2c: /* fucom st(i) */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fucom_ST0_FT0(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fucom_ST0_FT0(tcg_env);
break;
case 0x2d: /* fucomp st(i) */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fucom_ST0_FT0(cpu_env);
- gen_helper_fpop(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fucom_ST0_FT0(tcg_env);
+ gen_helper_fpop(tcg_env);
break;
case 0x33: /* de/3 */
switch (rm) {
case 1: /* fcompp */
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
- gen_helper_fcom_ST0_FT0(cpu_env);
- gen_helper_fpop(cpu_env);
- gen_helper_fpop(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
+ gen_helper_fcom_ST0_FT0(tcg_env);
+ gen_helper_fpop(tcg_env);
+ gen_helper_fpop(tcg_env);
break;
default:
goto unknown_op;
}
break;
case 0x38: /* ffreep sti, undocumented op */
- gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fpop(cpu_env);
+ gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fpop(tcg_env);
break;
case 0x3c: /* df/4 */
switch (rm) {
case 0:
- gen_helper_fnstsw(s->tmp2_i32, cpu_env);
+ gen_helper_fnstsw(s->tmp2_i32, tcg_env);
tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
break;
@@ -4879,9 +4877,9 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fucomi_ST0_FT0(cpu_env);
- gen_helper_fpop(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fucomi_ST0_FT0(tcg_env);
+ gen_helper_fpop(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3e: /* fcomip */
@@ -4889,9 +4887,9 @@
goto illegal_op;
}
gen_update_cc_op(s);
- gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
- gen_helper_fcomi_ST0_FT0(cpu_env);
- gen_helper_fpop(cpu_env);
+ gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
+ gen_helper_fcomi_ST0_FT0(tcg_env);
+ gen_helper_fpop(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x10 ... 0x13: /* fcmovxx */
@@ -4912,7 +4910,7 @@
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
gen_jcc1_noeob(s, op1, l1);
- gen_helper_fmov_ST0_STN(cpu_env,
+ gen_helper_fmov_ST0_STN(tcg_env,
tcg_constant_i32(opreg));
gen_set_label(l1);
}
@@ -4923,12 +4921,12 @@
}
if (update_fip) {
- tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
+ tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
offsetof(CPUX86State, segs[R_CS].selector));
- tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
+ tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
offsetof(CPUX86State, fpcs));
tcg_gen_st_tl(eip_cur_tl(s),
- cpu_env, offsetof(CPUX86State, fpip));
+ tcg_env, offsetof(CPUX86State, fpip));
}
}
break;
@@ -5101,7 +5099,7 @@
if (PE(s) && !VM86(s)) {
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
+ gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
tcg_constant_i32(val));
} else {
gen_stack_A0(s);
@@ -5129,9 +5127,9 @@
if (!check_vm86_iopl(s)) {
break;
}
- gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
+ gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
} else {
- gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
+ gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
eip_next_i32(s));
}
set_cc_op(s, CC_OP_EFLAGS);
@@ -5228,7 +5226,7 @@
gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
if (check_vm86_iopl(s)) {
gen_update_cc_op(s);
- gen_helper_read_eflags(s->T0, cpu_env);
+ gen_helper_read_eflags(s->T0, tcg_env);
gen_push_v(s, s->T0);
}
break;
@@ -5247,7 +5245,7 @@
}
ot = gen_pop_T0(s);
- gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask));
+ gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
gen_pop_update(s, ot);
set_cc_op(s, CC_OP_EFLAGS);
/* abort translation because TF/AC flag may change */
@@ -5285,11 +5283,11 @@
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(s->tmp2_i32, 1);
- tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+ tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(s->tmp2_i32, -1);
- tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+ tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
break;
/************************/
@@ -5487,28 +5485,28 @@
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
- gen_helper_daa(cpu_env);
+ gen_helper_daa(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x2f: /* das */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
- gen_helper_das(cpu_env);
+ gen_helper_das(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x37: /* aaa */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
- gen_helper_aaa(cpu_env);
+ gen_helper_aaa(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3f: /* aas */
if (CODE64(s))
goto illegal_op;
gen_update_cc_op(s);
- gen_helper_aas(cpu_env);
+ gen_helper_aas(tcg_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0xd4: /* aam */
@@ -5518,7 +5516,7 @@
if (val == 0) {
gen_exception(s, EXCP00_DIVZ);
} else {
- gen_helper_aam(cpu_env, tcg_constant_i32(val));
+ gen_helper_aam(tcg_env, tcg_constant_i32(val));
set_cc_op(s, CC_OP_LOGICB);
}
break;
@@ -5526,7 +5524,7 @@
if (CODE64(s))
goto illegal_op;
val = x86_ldub_code(env, s);
- gen_helper_aad(cpu_env, tcg_constant_i32(val));
+ gen_helper_aad(tcg_env, tcg_constant_i32(val));
set_cc_op(s, CC_OP_LOGICB);
break;
/************************/
@@ -5543,7 +5541,7 @@
if (prefixes & PREFIX_REPZ) {
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_pause(cpu_env, cur_insn_len_i32(s));
+ gen_helper_pause(tcg_env, cur_insn_len_i32(s));
s->base.is_jmp = DISAS_NORETURN;
}
break;
@@ -5554,7 +5552,7 @@
} else {
/* needs to be treated as I/O because of ferr_irq */
translator_io_start(&s->base);
- gen_helper_fwait(cpu_env);
+ gen_helper_fwait(tcg_env);
}
break;
case 0xcc: /* int3 */
@@ -5571,7 +5569,7 @@
goto illegal_op;
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_into(cpu_env, cur_insn_len_i32(s));
+ gen_helper_into(tcg_env, cur_insn_len_i32(s));
break;
#ifdef WANT_ICEBP
case 0xf1: /* icebp (undocumented, exits to external debugger) */
@@ -5605,9 +5603,9 @@
gen_lea_modrm(env, s, modrm);
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
if (ot == MO_16) {
- gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
+ gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
} else {
- gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
+ gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
}
break;
case 0x1c8 ... 0x1cf: /* bswap reg */
@@ -5669,9 +5667,9 @@
gen_update_cc_op(s);
gen_update_eip_cur(s);
if (b & 2) {
- gen_helper_rdmsr(cpu_env);
+ gen_helper_rdmsr(tcg_env);
} else {
- gen_helper_wrmsr(cpu_env);
+ gen_helper_wrmsr(tcg_env);
s->base.is_jmp = DISAS_EOB_NEXT;
}
}
@@ -5680,12 +5678,12 @@
gen_update_cc_op(s);
gen_update_eip_cur(s);
translator_io_start(&s->base);
- gen_helper_rdtsc(cpu_env);
+ gen_helper_rdtsc(tcg_env);
break;
case 0x133: /* rdpmc */
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_rdpmc(cpu_env);
+ gen_helper_rdpmc(tcg_env);
s->base.is_jmp = DISAS_NORETURN;
break;
case 0x134: /* sysenter */
@@ -5696,7 +5694,7 @@
if (!PE(s)) {
gen_exception_gpf(s);
} else {
- gen_helper_sysenter(cpu_env);
+ gen_helper_sysenter(tcg_env);
s->base.is_jmp = DISAS_EOB_ONLY;
}
break;
@@ -5708,7 +5706,7 @@
if (!PE(s) || CPL(s) != 0) {
gen_exception_gpf(s);
} else {
- gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
+ gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
s->base.is_jmp = DISAS_EOB_ONLY;
}
break;
@@ -5719,7 +5717,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
+ gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
/* TF handling for the syscall insn is different. The TF bit is checked
after the syscall insn completes. This allows #DB to not be
generated after one has entered CPL0 if TF is set in FMASK. */
@@ -5733,7 +5731,7 @@
if (!PE(s) || CPL(s) != 0) {
gen_exception_gpf(s);
} else {
- gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
+ gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
/* condition codes are modified only in long mode */
if (LMA(s)) {
set_cc_op(s, CC_OP_EFLAGS);
@@ -5748,13 +5746,13 @@
case 0x1a2: /* cpuid */
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_cpuid(cpu_env);
+ gen_helper_cpuid(tcg_env);
break;
case 0xf4: /* hlt */
if (check_cpl0(s)) {
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_hlt(cpu_env, cur_insn_len_i32(s));
+ gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
s->base.is_jmp = DISAS_NORETURN;
}
break;
@@ -5770,7 +5768,7 @@
break;
}
gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
- tcg_gen_ld32u_tl(s->T0, cpu_env,
+ tcg_gen_ld32u_tl(s->T0, tcg_env,
offsetof(CPUX86State, ldt.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
@@ -5782,7 +5780,7 @@
gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_lldt(cpu_env, s->tmp2_i32);
+ gen_helper_lldt(tcg_env, s->tmp2_i32);
}
break;
case 1: /* str */
@@ -5792,7 +5790,7 @@
break;
}
gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
- tcg_gen_ld32u_tl(s->T0, cpu_env,
+ tcg_gen_ld32u_tl(s->T0, tcg_env,
offsetof(CPUX86State, tr.selector));
ot = mod == 3 ? dflag : MO_16;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
@@ -5804,7 +5802,7 @@
gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_ltr(cpu_env, s->tmp2_i32);
+ gen_helper_ltr(tcg_env, s->tmp2_i32);
}
break;
case 4: /* verr */
@@ -5814,9 +5812,9 @@
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
gen_update_cc_op(s);
if (op == 4) {
- gen_helper_verr(cpu_env, s->T0);
+ gen_helper_verr(tcg_env, s->T0);
} else {
- gen_helper_verw(cpu_env, s->T0);
+ gen_helper_verw(tcg_env, s->T0);
}
set_cc_op(s, CC_OP_EFLAGS);
break;
@@ -5835,10 +5833,10 @@
gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(s->T0,
- cpu_env, offsetof(CPUX86State, gdt.limit));
+ tcg_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_v(s, MO_16, s->T0, s->A0);
gen_add_A0_im(s, 2);
- tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
}
@@ -5854,7 +5852,7 @@
tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
gen_extu(s->aflag, s->A0);
gen_add_A0_ds_seg(s);
- gen_helper_monitor(cpu_env, s->A0);
+ gen_helper_monitor(tcg_env, s->A0);
break;
case 0xc9: /* mwait */
@@ -5863,7 +5861,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_mwait(cpu_env, cur_insn_len_i32(s));
+ gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
s->base.is_jmp = DISAS_NORETURN;
break;
@@ -5891,10 +5889,10 @@
}
gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
+ tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
gen_op_st_v(s, MO_16, s->T0, s->A0);
gen_add_A0_im(s, 2);
- tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
+ tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
if (dflag == MO_16) {
tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
}
@@ -5908,7 +5906,7 @@
goto illegal_op;
}
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
+ gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
break;
@@ -5924,7 +5922,7 @@
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
+ gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
/* End TB because translation flags may change. */
s->base.is_jmp = DISAS_EOB_NEXT;
break;
@@ -5938,7 +5936,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
+ gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
cur_insn_len_i32(s));
tcg_gen_exit_tb(NULL, 0);
s->base.is_jmp = DISAS_NORETURN;
@@ -5950,7 +5948,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmmcall(cpu_env);
+ gen_helper_vmmcall(tcg_env);
break;
case 0xda: /* VMLOAD */
@@ -5962,7 +5960,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
+ gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
break;
case 0xdb: /* VMSAVE */
@@ -5974,7 +5972,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
+ gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
break;
case 0xdc: /* STGI */
@@ -5986,7 +5984,7 @@
break;
}
gen_update_cc_op(s);
- gen_helper_stgi(cpu_env);
+ gen_helper_stgi(tcg_env);
s->base.is_jmp = DISAS_EOB_NEXT;
break;
@@ -5999,7 +5997,7 @@
}
gen_update_cc_op(s);
gen_update_eip_cur(s);
- gen_helper_clgi(cpu_env);
+ gen_helper_clgi(tcg_env);
break;
case 0xde: /* SKINIT */
@@ -6024,7 +6022,7 @@
} else {
tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
}
- gen_helper_flush_page(cpu_env, s->A0);
+ gen_helper_flush_page(tcg_env, s->A0);
s->base.is_jmp = DISAS_EOB_NEXT;
break;
@@ -6040,8 +6038,8 @@
if (dflag == MO_16) {
tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
}
- tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
- tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
+ tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
break;
CASE_MODRM_MEM_OP(3): /* lidt */
@@ -6056,8 +6054,8 @@
if (dflag == MO_16) {
tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
}
- tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
- tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
+ tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
+ tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
break;
CASE_MODRM_OP(4): /* smsw */
@@ -6065,7 +6063,7 @@
break;
}
gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
- tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
+ tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
/*
* In 32-bit mode, the higher 16 bits of the destination
* register are undefined. In practice CR0[31:0] is stored
@@ -6080,7 +6078,7 @@
goto illegal_op;
}
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
+ gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
break;
case 0xef: /* wrpkru */
@@ -6090,7 +6088,7 @@
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
- gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
+ gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
break;
CASE_MODRM_OP(6): /* lmsw */
@@ -6103,11 +6101,11 @@
* Only the 4 lower bits of CR0 are modified.
* PE cannot be set to zero if already set to one.
*/
- tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
+ tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
tcg_gen_andi_tl(s->T0, s->T0, 0xf);
tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
tcg_gen_or_tl(s->T0, s->T0, s->T1);
- gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
+ gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
s->base.is_jmp = DISAS_EOB_NEXT;
break;
@@ -6117,7 +6115,7 @@
}
gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
gen_lea_modrm(env, s, modrm);
- gen_helper_flush_page(cpu_env, s->A0);
+ gen_helper_flush_page(tcg_env, s->A0);
s->base.is_jmp = DISAS_EOB_NEXT;
break;
@@ -6126,9 +6124,9 @@
if (CODE64(s)) {
if (check_cpl0(s)) {
tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
- tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
+ tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
offsetof(CPUX86State, kernelgsbase));
- tcg_gen_st_tl(s->T0, cpu_env,
+ tcg_gen_st_tl(s->T0, tcg_env,
offsetof(CPUX86State, kernelgsbase));
}
break;
@@ -6143,8 +6141,8 @@
gen_update_cc_op(s);
gen_update_eip_cur(s);
translator_io_start(&s->base);
- gen_helper_rdtsc(cpu_env);
- gen_helper_rdpid(s->T0, cpu_env);
+ gen_helper_rdtsc(tcg_env);
+ gen_helper_rdpid(s->T0, tcg_env);
gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
break;
@@ -6240,9 +6238,9 @@
t0 = tcg_temp_new();
gen_update_cc_op(s);
if (b == 0x102) {
- gen_helper_lar(t0, cpu_env, s->T0);
+ gen_helper_lar(t0, tcg_env, s->T0);
} else {
- gen_helper_lsl(t0, cpu_env, s->T0);
+ gen_helper_lsl(t0, tcg_env, s->T0);
}
tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
@@ -6347,11 +6345,11 @@
tcg_gen_movi_tl(s->T0, 0);
}
if (CODE64(s)) {
- gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
- tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
+ gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
+ tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
} else {
- gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
+ gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
}
@@ -6452,10 +6450,10 @@
tcg_gen_movi_tl(s->T0, 0);
}
if (CODE64(s)) {
- gen_helper_bndstx64(cpu_env, s->A0, s->T0,
+ gen_helper_bndstx64(tcg_env, s->A0, s->T0,
cpu_bndl[reg], cpu_bndu[reg]);
} else {
- gen_helper_bndstx32(cpu_env, s->A0, s->T0,
+ gen_helper_bndstx32(tcg_env, s->A0, s->T0,
cpu_bndl[reg], cpu_bndu[reg]);
}
}
@@ -6502,11 +6500,11 @@
if (b & 2) {
gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
gen_op_mov_v_reg(s, ot, s->T0, rm);
- gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
+ gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
s->base.is_jmp = DISAS_EOB_NEXT;
} else {
gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
- gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
+ gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
gen_op_mov_reg_v(s, ot, rm, s->T0);
}
break;
@@ -6533,12 +6531,12 @@
gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
gen_op_mov_v_reg(s, ot, s->T0, rm);
tcg_gen_movi_i32(s->tmp2_i32, reg);
- gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
+ gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
s->base.is_jmp = DISAS_EOB_NEXT;
} else {
gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
tcg_gen_movi_i32(s->tmp2_i32, reg);
- gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
+ gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
gen_op_mov_reg_v(s, ot, rm, s->T0);
}
}
@@ -6546,7 +6544,7 @@
case 0x106: /* clts */
if (check_cpl0(s)) {
gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
- gen_helper_clts(cpu_env);
+ gen_helper_clts(tcg_env);
/* abort block because static cpu state changed */
s->base.is_jmp = DISAS_EOB_NEXT;
}
@@ -6577,7 +6575,7 @@
break;
}
gen_lea_modrm(env, s, modrm);
- gen_helper_fxsave(cpu_env, s->A0);
+ gen_helper_fxsave(tcg_env, s->A0);
break;
CASE_MODRM_MEM_OP(1): /* fxrstor */
@@ -6590,7 +6588,7 @@
break;
}
gen_lea_modrm(env, s, modrm);
- gen_helper_fxrstor(cpu_env, s->A0);
+ gen_helper_fxrstor(tcg_env, s->A0);
break;
CASE_MODRM_MEM_OP(2): /* ldmxcsr */
@@ -6603,7 +6601,7 @@
}
gen_lea_modrm(env, s, modrm);
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
- gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
+ gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
break;
CASE_MODRM_MEM_OP(3): /* stmxcsr */
@@ -6614,9 +6612,9 @@
gen_exception(s, EXCP07_PREX);
break;
}
- gen_helper_update_mxcsr(cpu_env);
+ gen_helper_update_mxcsr(tcg_env);
gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
+ tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
gen_op_st_v(s, MO_32, s->T0, s->A0);
break;
@@ -6629,7 +6627,7 @@
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
- gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
+ gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
break;
CASE_MODRM_MEM_OP(5): /* xrstor */
@@ -6641,7 +6639,7 @@
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
- gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
+ gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
/* XRSTOR is how MPX is enabled, which changes how
we translate. Thus we need to end the TB. */
s->base.is_jmp = DISAS_EOB_NEXT;
@@ -6667,7 +6665,7 @@
gen_lea_modrm(env, s, modrm);
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
cpu_regs[R_EDX]);
- gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
+ gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
}
break;
@@ -6702,7 +6700,7 @@
/* Preserve hflags bits by testing CR4 at runtime. */
tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
- gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);
+ gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
treg = cpu_regs[(modrm & 7) | REX_B(s)];
@@ -6778,7 +6776,7 @@
#else
gen_update_cc_op(s);
gen_update_eip_next(s);
- gen_helper_rsm(cpu_env);
+ gen_helper_rsm(tcg_env);
#endif /* CONFIG_USER_ONLY */
s->base.is_jmp = DISAS_EOB_ONLY;
break;
@@ -6882,36 +6880,36 @@
};
int i;
- cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
+ cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUX86State, cc_op), "cc_op");
- cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
+ cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
"cc_dst");
- cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
+ cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
"cc_src");
- cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
+ cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
"cc_src2");
- cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);
+ cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
for (i = 0; i < CPU_NB_REGS; ++i) {
- cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ cpu_regs[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUX86State, regs[i]),
reg_names[i]);
}
for (i = 0; i < 6; ++i) {
cpu_seg_base[i]
- = tcg_global_mem_new(cpu_env,
+ = tcg_global_mem_new(tcg_env,
offsetof(CPUX86State, segs[i].base),
seg_base_names[i]);
}
for (i = 0; i < 4; ++i) {
cpu_bndl[i]
- = tcg_global_mem_new_i64(cpu_env,
+ = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUX86State, bnd_regs[i].lb),
bnd_regl_names[i]);
cpu_bndu[i]
- = tcg_global_mem_new_i64(cpu_env,
+ = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUX86State, bnd_regs[i].ub),
bnd_regu_names[i]);
}
@@ -6920,7 +6918,7 @@
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
uint32_t flags = dc->base.tb->flags;
uint32_t cflags = tb_cflags(dc->base.tb);
int cpl = (flags >> HF_CPL_SHIFT) & 3;
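The i386 translator hunks above are mechanical: every gen_helper_*() call and tcg_global_mem_new*() allocation now passes the TCG-owned tcg_env global instead of the per-target cpu_env alias, and i386_tr_init_disas_context() fetches CPUX86State through the cpu_env() accessor rather than dereferencing cpu->env_ptr. Below is a minimal, self-contained C sketch of that accessor idiom; the struct and function names are illustrative stand-ins, not QEMU's real definitions.

#include <stdio.h>

/* Illustrative stand-ins for CPUState, CPUX86State and X86CPU. */
typedef struct CommonCPUState { int halted; } CommonCPUState;
typedef struct ArchEnv        { unsigned long eip; } ArchEnv;
typedef struct ArchCPU        { CommonCPUState parent; ArchEnv env; } ArchCPU;

/* Accessor pattern: reach the per-target env from the common CPUState
 * pointer instead of reading a cached env pointer out of the CPU object. */
static inline ArchEnv *arch_cpu_env(CommonCPUState *cs)
{
    /* Valid because the common state is the first member of ArchCPU. */
    return &((ArchCPU *)cs)->env;
}

int main(void)
{
    ArchCPU cpu = { .env.eip = 0x100000 };
    CommonCPUState *cs = &cpu.parent;

    printf("eip = %#lx\n", arch_cpu_env(cs)->eip);
    return 0;
}

With an accessor of this shape, the per-CPU env_ptr cache that the old code read becomes redundant, which is presumably why the hunks above and in the WHPX file below replace cpu->env_ptr with cpu_env(cpu) or a direct &x86_cpu->env.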
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 3de0dc1..df3aba2 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -300,7 +300,7 @@
/* X64 Extended Control Registers */
static void whpx_set_xcrs(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
HRESULT hr;
struct whpx_state *whpx = &whpx_global;
WHV_REGISTER_VALUE xcr0;
@@ -321,7 +321,7 @@
static int whpx_set_tsc(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
WHV_REGISTER_VALUE tsc_val;
HRESULT hr;
@@ -382,8 +382,8 @@
{
struct whpx_state *whpx = &whpx_global;
AccelCPUState *vcpu = cpu->accel;
- CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
struct whpx_register_set vcxt;
HRESULT hr;
int idx;
@@ -556,7 +556,7 @@
static int whpx_get_tsc(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
WHV_REGISTER_VALUE tsc_val;
HRESULT hr;
@@ -576,7 +576,7 @@
/* X64 Extended Control Registers */
static void whpx_get_xcrs(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
HRESULT hr;
struct whpx_state *whpx = &whpx_global;
WHV_REGISTER_VALUE xcr0;
@@ -601,8 +601,8 @@
{
struct whpx_state *whpx = &whpx_global;
AccelCPUState *vcpu = cpu->accel;
- CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
struct whpx_register_set vcxt;
uint64_t tpr, apic_base;
HRESULT hr;
@@ -1400,7 +1400,7 @@
{
if (cpu->vcpu_dirty) {
/* The CPU registers have been modified by other parts of QEMU. */
- CPUArchState *env = (CPUArchState *)(cpu->env_ptr);
+ CPUArchState *env = cpu_env(cpu);
return env->eip;
} else if (exit_context_valid) {
/*
@@ -1439,7 +1439,7 @@
static int whpx_handle_halt(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
+ CPUX86State *env = cpu_env(cpu);
int ret = 0;
qemu_mutex_lock_iothread();
@@ -1460,8 +1460,8 @@
HRESULT hr;
struct whpx_state *whpx = &whpx_global;
AccelCPUState *vcpu = cpu->accel;
- CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
int irq;
uint8_t tpr;
WHV_X64_PENDING_INTERRUPTION_REGISTER new_int;
@@ -1582,8 +1582,8 @@
static void whpx_vcpu_post_run(CPUState *cpu)
{
AccelCPUState *vcpu = cpu->accel;
- CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
env->eflags = vcpu->exit_ctx.VpContext.Rflags;
@@ -1606,8 +1606,8 @@
static void whpx_vcpu_process_async_events(CPUState *cpu)
{
- CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
AccelCPUState *vcpu = cpu->accel;
if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
@@ -2147,8 +2147,8 @@
struct whpx_state *whpx = &whpx_global;
AccelCPUState *vcpu = NULL;
Error *local_error = NULL;
- CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
UINT64 freq = 0;
int ret;
@@ -2245,7 +2245,7 @@
cpu->vcpu_dirty = true;
cpu->accel = vcpu;
max_vcpu_index = max(max_vcpu_index, cpu->cpu_index);
- qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);
+ qemu_add_vm_change_state_handler(whpx_cpu_update_state, env);
return 0;
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index fc7f70f..2bea7ca 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -618,17 +618,15 @@
static void loongarch_cpu_init(Object *obj)
{
- LoongArchCPU *cpu = LOONGARCH_CPU(obj);
-
- cpu_set_cpustate_pointers(cpu);
-
#ifndef CONFIG_USER_ONLY
+ LoongArchCPU *cpu = LOONGARCH_CPU(obj);
CPULoongArchState *env = &cpu->env;
+
qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
&loongarch_constant_timer_cb, cpu);
memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL,
- env, "iocsr", UINT64_MAX);
+ env, "iocsr", UINT64_MAX);
address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR");
memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops,
NULL, "iocsr_misc", 0x428);
@@ -808,6 +806,7 @@
.name = TYPE_LOONGARCH_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(LoongArchCPU),
+ .instance_align = __alignof(LoongArchCPU),
.instance_init = loongarch_cpu_init,
.abstract = true,
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index f125a8e..40e70a8 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -375,7 +375,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPULoongArchState env;
QEMUTimer timer;
uint32_t phy_id;
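The LoongArch hunks drop the cpu_set_cpustate_pointers() call and the per-target CPUNegativeOffsetState field, and add .instance_align = __alignof(LoongArchCPU) so the QOM instance is allocated with the CPU struct's natural alignment. With env sitting directly after the common CPUState, a parent field can be addressed at a fixed negative offset from env — the same arithmetic the m68k translator below keeps for its HALTED and EXCEPTION globals. A small, self-contained sketch of that offset computation, with illustrative struct names:

#include <stddef.h>
#include <stdio.h>

/* Illustrative layout mirroring "CPUState parent_obj; CPULoongArchState env;"
 * in the header above: the common state is followed directly by env. */
typedef struct CommonCPUState { int halted; int exception_index; } CommonCPUState;
typedef struct ArchEnv        { unsigned long pc; } ArchEnv;
typedef struct ArchCPU        { CommonCPUState parent; ArchEnv env; } ArchCPU;

int main(void)
{
    /* Negative offset from env back to a parent field, the same shape as
     * "-offsetof(M68kCPU, env) + offsetof(CPUState, halted)" in the diff.
     * This relies on env being embedded in the containing CPU object. */
    ptrdiff_t off = -(ptrdiff_t)offsetof(ArchCPU, env)
                  + (ptrdiff_t)offsetof(CommonCPUState, halted);

    ArchCPU cpu = { .parent.halted = 1 };
    int *halted = (int *)((char *)&cpu.env + off);

    printf("offset=%td halted=%d alignof=%zu\n", off, *halted, _Alignof(ArchCPU));
    return 0;
}

As far as these hunks show, the point of the refactor is that the per-target structs no longer reserve the negative-offset area themselves; only the alignment of the whole CPU object has to be declared to QOM.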
diff --git a/target/loongarch/insn_trans/trans_atomic.c.inc b/target/loongarch/insn_trans/trans_atomic.c.inc
index 4008519..80c2e28 100644
--- a/target/loongarch/insn_trans/trans_atomic.c.inc
+++ b/target/loongarch/insn_trans/trans_atomic.c.inc
@@ -10,8 +10,8 @@
TCGv t0 = make_address_i(ctx, src1, a->imm);
tcg_gen_qemu_ld_i64(dest, t0, ctx->mem_idx, mop);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPULoongArchState, lladdr));
- tcg_gen_st_tl(dest, cpu_env, offsetof(CPULoongArchState, llval));
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPULoongArchState, lladdr));
+ tcg_gen_st_tl(dest, tcg_env, offsetof(CPULoongArchState, llval));
gen_set_gpr(a->rd, dest, EXT_NONE);
return true;
diff --git a/target/loongarch/insn_trans/trans_branch.c.inc b/target/loongarch/insn_trans/trans_branch.c.inc
index a4fd209..221e515 100644
--- a/target/loongarch/insn_trans/trans_branch.c.inc
+++ b/target/loongarch/insn_trans/trans_branch.c.inc
@@ -66,7 +66,7 @@
TCGv src1 = tcg_temp_new();
TCGv src2 = tcg_constant_tl(0);
- tcg_gen_ld8u_tl(src1, cpu_env,
+ tcg_gen_ld8u_tl(src1, tcg_env,
offsetof(CPULoongArchState, cf[a->cj]));
gen_bc(ctx, src1, src2, a->offs, cond);
return true;
diff --git a/target/loongarch/insn_trans/trans_extra.c.inc b/target/loongarch/insn_trans/trans_extra.c.inc
index dd5d02e..cfa361f 100644
--- a/target/loongarch/insn_trans/trans_extra.c.inc
+++ b/target/loongarch/insn_trans/trans_extra.c.inc
@@ -24,7 +24,7 @@
return false;
}
- gen_helper_asrtle_d(cpu_env, src1, src2);
+ gen_helper_asrtle_d(tcg_env, src1, src2);
return true;
}
@@ -37,7 +37,7 @@
return false;
}
- gen_helper_asrtgt_d(cpu_env, src1, src2);
+ gen_helper_asrtgt_d(tcg_env, src1, src2);
return true;
}
@@ -48,11 +48,11 @@
TCGv dst2 = gpr_dst(ctx, a->rj, EXT_NONE);
translator_io_start(&ctx->base);
- gen_helper_rdtime_d(dst1, cpu_env);
+ gen_helper_rdtime_d(dst1, tcg_env);
if (word) {
tcg_gen_sextract_tl(dst1, dst1, high ? 32 : 0, 32);
}
- tcg_gen_ld_i64(dst2, cpu_env, offsetof(CPULoongArchState, CSR_TID));
+ tcg_gen_ld_i64(dst2, tcg_env, offsetof(CPULoongArchState, CSR_TID));
return true;
}
@@ -77,7 +77,7 @@
TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE);
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
- gen_helper_cpucfg(dest, cpu_env, src1);
+ gen_helper_cpucfg(dest, tcg_env, src1);
gen_set_gpr(a->rd, dest, EXT_NONE);
return true;
diff --git a/target/loongarch/insn_trans/trans_farith.c.inc b/target/loongarch/insn_trans/trans_farith.c.inc
index a7ced99..f4a0dea 100644
--- a/target/loongarch/insn_trans/trans_farith.c.inc
+++ b/target/loongarch/insn_trans/trans_farith.c.inc
@@ -23,7 +23,7 @@
CHECK_FPE;
- func(dest, cpu_env, src1, src2);
+ func(dest, tcg_env, src1, src2);
set_fpr(a->fd, dest);
return true;
@@ -37,7 +37,7 @@
CHECK_FPE;
- func(dest, cpu_env, src);
+ func(dest, tcg_env, src);
set_fpr(a->fd, dest);
return true;
@@ -55,7 +55,7 @@
CHECK_FPE;
- func(dest, cpu_env, src1, src2, src3, tflag);
+ func(dest, tcg_env, src1, src2, src3, tflag);
set_fpr(a->fd, dest);
return true;
diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/insn_trans/trans_fcmp.c.inc
index 43d5866..3babf69 100644
--- a/target/loongarch/insn_trans/trans_fcmp.c.inc
+++ b/target/loongarch/insn_trans/trans_fcmp.c.inc
@@ -41,9 +41,9 @@
fn = (a->fcond & 1 ? gen_helper_fcmp_s_s : gen_helper_fcmp_c_s);
flags = get_fcmp_flags(a->fcond >> 1);
- fn(var, cpu_env, src1, src2, tcg_constant_i32(flags));
+ fn(var, tcg_env, src1, src2, tcg_constant_i32(flags));
- tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd]));
+ tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd]));
return true;
}
@@ -65,8 +65,8 @@
fn = (a->fcond & 1 ? gen_helper_fcmp_s_d : gen_helper_fcmp_c_d);
flags = get_fcmp_flags(a->fcond >> 1);
- fn(var, cpu_env, src1, src2, tcg_constant_i32(flags));
+ fn(var, tcg_env, src1, src2, tcg_constant_i32(flags));
- tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd]));
+ tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd]));
return true;
}
diff --git a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/insn_trans/trans_fmemory.c.inc
index 5ddb8a4..13452bc 100644
--- a/target/loongarch/insn_trans/trans_fmemory.c.inc
+++ b/target/loongarch/insn_trans/trans_fmemory.c.inc
@@ -81,7 +81,7 @@
CHECK_FPE;
- gen_helper_asrtgt_d(cpu_env, src1, src2);
+ gen_helper_asrtgt_d(tcg_env, src1, src2);
addr = make_address_x(ctx, src1, src2);
tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
maybe_nanbox_load(dest, mop);
@@ -99,7 +99,7 @@
CHECK_FPE;
- gen_helper_asrtgt_d(cpu_env, src1, src2);
+ gen_helper_asrtgt_d(tcg_env, src1, src2);
addr = make_address_x(ctx, src1, src2);
tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop);
@@ -115,7 +115,7 @@
CHECK_FPE;
- gen_helper_asrtle_d(cpu_env, src1, src2);
+ gen_helper_asrtle_d(tcg_env, src1, src2);
addr = make_address_x(ctx, src1, src2);
tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
maybe_nanbox_load(dest, mop);
@@ -133,7 +133,7 @@
CHECK_FPE;
- gen_helper_asrtle_d(cpu_env, src1, src2);
+ gen_helper_asrtle_d(tcg_env, src1, src2);
addr = make_address_x(ctx, src1, src2);
tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop);
diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/insn_trans/trans_fmov.c.inc
index 928e127..5cbd9d3 100644
--- a/target/loongarch/insn_trans/trans_fmov.c.inc
+++ b/target/loongarch/insn_trans/trans_fmov.c.inc
@@ -22,7 +22,7 @@
CHECK_FPE;
cond = tcg_temp_new();
- tcg_gen_ld8u_tl(cond, cpu_env, offsetof(CPULoongArchState, cf[a->ca]));
+ tcg_gen_ld8u_tl(cond, tcg_env, offsetof(CPULoongArchState, cf[a->ca]));
tcg_gen_movcond_tl(TCG_COND_EQ, dest, cond, zero, src1, src2);
set_fpr(a->fd, dest);
@@ -94,17 +94,17 @@
CHECK_FPE;
if (mask == UINT32_MAX) {
- tcg_gen_st32_i64(Rj, cpu_env, offsetof(CPULoongArchState, fcsr0));
+ tcg_gen_st32_i64(Rj, tcg_env, offsetof(CPULoongArchState, fcsr0));
} else {
TCGv_i32 fcsr0 = tcg_temp_new_i32();
TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_ld_i32(fcsr0, cpu_env, offsetof(CPULoongArchState, fcsr0));
+ tcg_gen_ld_i32(fcsr0, tcg_env, offsetof(CPULoongArchState, fcsr0));
tcg_gen_extrl_i64_i32(temp, Rj);
tcg_gen_andi_i32(temp, temp, mask);
tcg_gen_andi_i32(fcsr0, fcsr0, ~mask);
tcg_gen_or_i32(fcsr0, fcsr0, temp);
- tcg_gen_st_i32(fcsr0, cpu_env, offsetof(CPULoongArchState, fcsr0));
+ tcg_gen_st_i32(fcsr0, tcg_env, offsetof(CPULoongArchState, fcsr0));
}
/*
@@ -112,7 +112,7 @@
* Note that FCSR3 is exactly the rounding mode field.
*/
if (mask & FCSR0_M3) {
- gen_helper_set_rounding_mode(cpu_env);
+ gen_helper_set_rounding_mode(tcg_env);
}
return true;
}
@@ -127,7 +127,7 @@
CHECK_FPE;
- tcg_gen_ld32u_i64(dest, cpu_env, offsetof(CPULoongArchState, fcsr0));
+ tcg_gen_ld32u_i64(dest, tcg_env, offsetof(CPULoongArchState, fcsr0));
tcg_gen_andi_i64(dest, dest, fcsr_mask[a->fcsrs]);
gen_set_gpr(a->rd, dest, EXT_NONE);
@@ -162,7 +162,7 @@
t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, src, 0x1);
- tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7]));
+ tcg_gen_st8_tl(t0, tcg_env, offsetof(CPULoongArchState, cf[a->cd & 0x7]));
return true;
}
@@ -177,7 +177,7 @@
CHECK_FPE;
- tcg_gen_ld8u_tl(dest, cpu_env,
+ tcg_gen_ld8u_tl(dest, tcg_env,
offsetof(CPULoongArchState, cf[a->cj & 0x7]));
set_fpr(a->fd, dest);
@@ -196,7 +196,7 @@
t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, gpr_src(ctx, a->rj, EXT_NONE), 0x1);
- tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7]));
+ tcg_gen_st8_tl(t0, tcg_env, offsetof(CPULoongArchState, cf[a->cd & 0x7]));
return true;
}
@@ -209,7 +209,7 @@
CHECK_FPE;
- tcg_gen_ld8u_tl(gpr_dst(ctx, a->rd, EXT_NONE), cpu_env,
+ tcg_gen_ld8u_tl(gpr_dst(ctx, a->rd, EXT_NONE), tcg_env,
offsetof(CPULoongArchState, cf[a->cj & 0x7]));
return true;
}
diff --git a/target/loongarch/insn_trans/trans_memory.c.inc b/target/loongarch/insn_trans/trans_memory.c.inc
index d9d0622..c3de140 100644
--- a/target/loongarch/insn_trans/trans_memory.c.inc
+++ b/target/loongarch/insn_trans/trans_memory.c.inc
@@ -57,7 +57,7 @@
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
- gen_helper_asrtgt_d(cpu_env, src1, src2);
+ gen_helper_asrtgt_d(tcg_env, src1, src2);
src1 = make_address_i(ctx, src1, 0);
tcg_gen_qemu_ld_tl(dest, src1, ctx->mem_idx, mop);
gen_set_gpr(a->rd, dest, EXT_NONE);
@@ -71,7 +71,7 @@
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
- gen_helper_asrtle_d(cpu_env, src1, src2);
+ gen_helper_asrtle_d(tcg_env, src1, src2);
src1 = make_address_i(ctx, src1, 0);
tcg_gen_qemu_ld_tl(dest, src1, ctx->mem_idx, mop);
gen_set_gpr(a->rd, dest, EXT_NONE);
@@ -85,7 +85,7 @@
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
- gen_helper_asrtgt_d(cpu_env, src1, src2);
+ gen_helper_asrtgt_d(tcg_env, src1, src2);
src1 = make_address_i(ctx, src1, 0);
tcg_gen_qemu_st_tl(data, src1, ctx->mem_idx, mop);
@@ -98,7 +98,7 @@
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
- gen_helper_asrtle_d(cpu_env, src1, src2);
+ gen_helper_asrtle_d(tcg_env, src1, src2);
src1 = make_address_i(ctx, src1, 0);
tcg_gen_qemu_st_tl(data, src1, ctx->mem_idx, mop);
diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/insn_trans/trans_privileged.c.inc
index 4cb701b..01d4572 100644
--- a/target/loongarch/insn_trans/trans_privileged.c.inc
+++ b/target/loongarch/insn_trans/trans_privileged.c.inc
@@ -203,9 +203,9 @@
check_csr_flags(ctx, csr, false);
dest = gpr_dst(ctx, a->rd, EXT_NONE);
if (csr->readfn) {
- csr->readfn(dest, cpu_env);
+ csr->readfn(dest, tcg_env);
} else {
- tcg_gen_ld_tl(dest, cpu_env, csr->offset);
+ tcg_gen_ld_tl(dest, tcg_env, csr->offset);
}
}
gen_set_gpr(a->rd, dest, EXT_NONE);
@@ -233,11 +233,11 @@
src1 = gpr_src(ctx, a->rd, EXT_NONE);
if (csr->writefn) {
dest = gpr_dst(ctx, a->rd, EXT_NONE);
- csr->writefn(dest, cpu_env, src1);
+ csr->writefn(dest, tcg_env, src1);
} else {
dest = tcg_temp_new();
- tcg_gen_ld_tl(dest, cpu_env, csr->offset);
- tcg_gen_st_tl(src1, cpu_env, csr->offset);
+ tcg_gen_ld_tl(dest, tcg_env, csr->offset);
+ tcg_gen_st_tl(src1, tcg_env, csr->offset);
}
gen_set_gpr(a->rd, dest, EXT_NONE);
return true;
@@ -272,15 +272,15 @@
newv = tcg_temp_new();
temp = tcg_temp_new();
- tcg_gen_ld_tl(oldv, cpu_env, csr->offset);
+ tcg_gen_ld_tl(oldv, tcg_env, csr->offset);
tcg_gen_and_tl(newv, src1, mask);
tcg_gen_andc_tl(temp, oldv, mask);
tcg_gen_or_tl(newv, newv, temp);
if (csr->writefn) {
- csr->writefn(oldv, cpu_env, newv);
+ csr->writefn(oldv, tcg_env, newv);
} else {
- tcg_gen_st_tl(newv, cpu_env, csr->offset);
+ tcg_gen_st_tl(newv, tcg_env, csr->offset);
}
gen_set_gpr(a->rd, oldv, EXT_NONE);
return true;
@@ -295,7 +295,7 @@
if (check_plv(ctx)) {
return false;
}
- func(dest, cpu_env, src1);
+ func(dest, tcg_env, src1);
return true;
}
@@ -308,7 +308,7 @@
if (check_plv(ctx)) {
return false;
}
- func(cpu_env, addr, val);
+ func(tcg_env, addr, val);
return true;
}
@@ -334,7 +334,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_tlbsrch(cpu_env);
+ gen_helper_tlbsrch(tcg_env);
return true;
}
@@ -343,7 +343,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_tlbrd(cpu_env);
+ gen_helper_tlbrd(tcg_env);
return true;
}
@@ -352,7 +352,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_tlbwr(cpu_env);
+ gen_helper_tlbwr(tcg_env);
check_mmu_idx(ctx);
return true;
}
@@ -362,7 +362,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_tlbfill(cpu_env);
+ gen_helper_tlbfill(tcg_env);
check_mmu_idx(ctx);
return true;
}
@@ -372,7 +372,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_tlbclr(cpu_env);
+ gen_helper_tlbclr(tcg_env);
check_mmu_idx(ctx);
return true;
}
@@ -382,7 +382,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_tlbflush(cpu_env);
+ gen_helper_tlbflush(tcg_env);
check_mmu_idx(ctx);
return true;
}
@@ -399,22 +399,22 @@
switch (a->imm) {
case 0:
case 1:
- gen_helper_invtlb_all(cpu_env);
+ gen_helper_invtlb_all(tcg_env);
break;
case 2:
- gen_helper_invtlb_all_g(cpu_env, tcg_constant_i32(1));
+ gen_helper_invtlb_all_g(tcg_env, tcg_constant_i32(1));
break;
case 3:
- gen_helper_invtlb_all_g(cpu_env, tcg_constant_i32(0));
+ gen_helper_invtlb_all_g(tcg_env, tcg_constant_i32(0));
break;
case 4:
- gen_helper_invtlb_all_asid(cpu_env, rj);
+ gen_helper_invtlb_all_asid(tcg_env, rj);
break;
case 5:
- gen_helper_invtlb_page_asid(cpu_env, rj, rk);
+ gen_helper_invtlb_page_asid(tcg_env, rj, rk);
break;
case 6:
- gen_helper_invtlb_page_asid_or_g(cpu_env, rj, rk);
+ gen_helper_invtlb_page_asid_or_g(tcg_env, rj, rk);
break;
default:
return false;
@@ -444,7 +444,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_ldpte(cpu_env, src1, tcg_constant_tl(a->imm), mem_idx);
+ gen_helper_ldpte(tcg_env, src1, tcg_constant_tl(a->imm), mem_idx);
return true;
}
@@ -461,7 +461,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_lddir(dest, cpu_env, src, tcg_constant_tl(a->imm), mem_idx);
+ gen_helper_lddir(dest, tcg_env, src, tcg_constant_tl(a->imm), mem_idx);
return true;
}
@@ -470,7 +470,7 @@
if (check_plv(ctx)) {
return false;
}
- gen_helper_ertn(cpu_env);
+ gen_helper_ertn(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
return true;
}
@@ -491,7 +491,7 @@
}
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next + 4);
- gen_helper_idle(cpu_env);
+ gen_helper_idle(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
return true;
}
diff --git a/target/loongarch/insn_trans/trans_vec.c.inc b/target/loongarch/insn_trans/trans_vec.c.inc
index c647137..98f856b 100644
--- a/target/loongarch/insn_trans/trans_vec.c.inc
+++ b/target/loongarch/insn_trans/trans_vec.c.inc
@@ -41,7 +41,7 @@
vec_full_offset(a->vj),
vec_full_offset(a->vk),
vec_full_offset(a->va),
- cpu_env,
+ tcg_env,
oprsz, ctx->vl / 8, 0, fn);
return true;
}
@@ -94,7 +94,7 @@
tcg_gen_gvec_3_ptr(vec_full_offset(a->vd),
vec_full_offset(a->vj),
vec_full_offset(a->vk),
- cpu_env,
+ tcg_env,
oprsz, ctx->vl / 8, 0, fn);
return true;
}
@@ -144,7 +144,7 @@
tcg_gen_gvec_2_ptr(vec_full_offset(a->vd),
vec_full_offset(a->vj),
- cpu_env,
+ tcg_env,
oprsz, ctx->vl / 8, 0, fn);
return true;
}
@@ -219,7 +219,7 @@
TCGv_i32 cd = tcg_constant_i32(a->cd);
TCGv_i32 oprsz = tcg_constant_i32(sz);
- func(cpu_env, oprsz, cd, vj);
+ func(tcg_env, oprsz, cd, vj);
return true;
}
@@ -4679,7 +4679,7 @@
fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s);
flags = get_fcmp_flags(a->fcond >> 1);
- fn(cpu_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
+ fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
return true;
}
@@ -4699,7 +4699,7 @@
fn = (a->fcond & 1 ? gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d);
flags = get_fcmp_flags(a->fcond >> 1);
- fn(cpu_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
+ fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
return true;
}
@@ -4772,7 +4772,7 @@
\
tcg_gen_or_i64(t1, al, ah); \
tcg_gen_setcondi_i64(COND, t1, t1, 0); \
- tcg_gen_st8_tl(t1, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \
+ tcg_gen_st8_tl(t1, tcg_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \
\
return true; \
}
@@ -4818,7 +4818,7 @@
tcg_gen_or_i64(t2, d[2], d[3]); \
tcg_gen_or_i64(t1, t2, t1); \
tcg_gen_setcondi_i64(COND, t1, t1, 0); \
- tcg_gen_st8_tl(t1, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \
+ tcg_gen_st8_tl(t1, tcg_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \
\
return true; \
}
@@ -4844,7 +4844,7 @@
return true;
}
- func(src, cpu_env, vec_reg_offset(a->vd, a->imm, mop));
+ func(src, tcg_env, vec_reg_offset(a->vd, a->imm, mop));
return true;
}
@@ -4877,7 +4877,7 @@
return true;
}
- func(dst, cpu_env, vec_reg_offset(a->vj, a->imm, mop));
+ func(dst, tcg_env, vec_reg_offset(a->vj, a->imm, mop));
return true;
}
@@ -5026,7 +5026,7 @@
}
tcg_gen_trunc_i64_ptr(t1, t0);
- tcg_gen_add_ptr(t1, t1, cpu_env);
+ tcg_gen_add_ptr(t1, t1, tcg_env);
for (i = 0; i < oprsz; i += 16) {
func(t2, t1, vec_full_offset(a->vj) + i);
@@ -5422,7 +5422,7 @@
val = tcg_temp_new_i64();
addr = make_address_i(ctx, addr, a->imm);
- tcg_gen_ld_i64(val, cpu_env, vec_reg_offset(a->vd, a->imm2, mop));
+ tcg_gen_ld_i64(val, tcg_env, vec_reg_offset(a->vd, a->imm2, mop));
tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, mop);
return true;
}
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
index f6038fc..21f4db6 100644
--- a/target/loongarch/translate.c
+++ b/target/loongarch/translate.c
@@ -51,13 +51,13 @@
static inline void get_vreg64(TCGv_i64 dest, int regno, int index)
{
- tcg_gen_ld_i64(dest, cpu_env,
+ tcg_gen_ld_i64(dest, tcg_env,
offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
}
static inline void set_vreg64(TCGv_i64 src, int regno, int index)
{
- tcg_gen_st_i64(src, cpu_env,
+ tcg_gen_st_i64(src, tcg_env,
offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
}
@@ -93,7 +93,7 @@
void generate_exception(DisasContext *ctx, int excp)
{
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -117,7 +117,7 @@
CPUState *cs)
{
int64_t bound;
- CPULoongArchState *env = cs->env_ptr;
+ CPULoongArchState *env = cpu_env(cs);
DisasContext *ctx = container_of(dcbase, DisasContext, base);
ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
@@ -221,14 +221,14 @@
static TCGv get_fpr(DisasContext *ctx, int reg_num)
{
TCGv t = tcg_temp_new();
- tcg_gen_ld_i64(t, cpu_env,
+ tcg_gen_ld_i64(t, tcg_env,
offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
return t;
}
static void set_fpr(int reg_num, TCGv val)
{
- tcg_gen_st_i64(val, cpu_env,
+ tcg_gen_st_i64(val, tcg_env,
offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
}
@@ -282,7 +282,7 @@
static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
- CPULoongArchState *env = cs->env_ptr;
+ CPULoongArchState *env = cpu_env(cs);
DisasContext *ctx = container_of(dcbase, DisasContext, base);
ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next);
@@ -357,14 +357,14 @@
cpu_gpr[0] = NULL;
for (i = 1; i < 32; i++) {
- cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ cpu_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPULoongArchState, gpr[i]),
regnames[i]);
}
- cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPULoongArchState, pc), "pc");
- cpu_lladdr = tcg_global_mem_new(cpu_env,
+ cpu_pc = tcg_global_mem_new(tcg_env, offsetof(CPULoongArchState, pc), "pc");
+ cpu_lladdr = tcg_global_mem_new(tcg_env,
offsetof(CPULoongArchState, lladdr), "lladdr");
- cpu_llval = tcg_global_mem_new(cpu_env,
+ cpu_llval = tcg_global_mem_new(tcg_env,
offsetof(CPULoongArchState, llval), "llval");
}
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 70d5847..538d947 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -327,13 +327,6 @@
mcc->parent_realize(dev, errp);
}
-static void m68k_cpu_initfn(Object *obj)
-{
- M68kCPU *cpu = M68K_CPU(obj);
-
- cpu_set_cpustate_pointers(cpu);
-}
-
#if !defined(CONFIG_USER_ONLY)
static bool fpu_needed(void *opaque)
{
@@ -611,7 +604,7 @@
.name = TYPE_M68K_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(M68kCPU),
- .instance_init = m68k_cpu_initfn,
+ .instance_align = __alignof(M68kCPU),
.abstract = true,
.class_size = sizeof(M68kCPUClass),
.class_init = m68k_cpu_class_init,
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index cf70282..20afb0c 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -168,7 +168,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUM68KState env;
};
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 9e224fe..4d0110d 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -70,19 +70,19 @@
int i;
#define DEFO32(name, offset) \
- QREG_##name = tcg_global_mem_new_i32(cpu_env, \
+ QREG_##name = tcg_global_mem_new_i32(tcg_env, \
offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
- QREG_##name = tcg_global_mem_new_i64(cpu_env, \
+ QREG_##name = tcg_global_mem_new_i64(tcg_env, \
offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64
- cpu_halted = tcg_global_mem_new_i32(cpu_env,
+ cpu_halted = tcg_global_mem_new_i32(tcg_env,
-offsetof(M68kCPU, env) +
offsetof(CPUState, halted), "HALTED");
- cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
+ cpu_exception_index = tcg_global_mem_new_i32(tcg_env,
-offsetof(M68kCPU, env) +
offsetof(CPUState, exception_index),
"EXCEPTION");
@@ -90,23 +90,23 @@
p = cpu_reg_names;
for (i = 0; i < 8; i++) {
sprintf(p, "D%d", i);
- cpu_dregs[i] = tcg_global_mem_new(cpu_env,
+ cpu_dregs[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUM68KState, dregs[i]), p);
p += 3;
sprintf(p, "A%d", i);
- cpu_aregs[i] = tcg_global_mem_new(cpu_env,
+ cpu_aregs[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUM68KState, aregs[i]), p);
p += 3;
}
for (i = 0; i < 4; i++) {
sprintf(p, "ACC%d", i);
- cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_macc[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUM68KState, macc[i]), p);
p += 5;
}
- NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
- store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
+ NULL_QREG = tcg_global_mem_new(tcg_env, -4, "NULL");
+ store_dummy = tcg_global_mem_new(tcg_env, -8, "NULL");
}
/* internal defines */
@@ -264,7 +264,7 @@
static void gen_raise_exception(int nr)
{
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(nr));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(nr));
}
static void gen_raise_exception_format2(DisasContext *s, int nr,
@@ -276,7 +276,7 @@
* Re-use mmu.ar for the purpose, since that's only valid
* after tlb_fill.
*/
- tcg_gen_st_i32(tcg_constant_i32(this_pc), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(this_pc), tcg_env,
offsetof(CPUM68KState, mmu.ar));
gen_raise_exception(nr);
s->base.is_jmp = DISAS_NORETURN;
@@ -602,12 +602,12 @@
break;
case CC_OP_DYNAMIC:
- gen_helper_flush_flags(cpu_env, QREG_CC_OP);
+ gen_helper_flush_flags(tcg_env, QREG_CC_OP);
s->cc_op_synced = 1;
break;
default:
- gen_helper_flush_flags(cpu_env, tcg_constant_i32(s->cc_op));
+ gen_helper_flush_flags(tcg_env, tcg_constant_i32(s->cc_op));
s->cc_op_synced = 1;
break;
}
@@ -824,7 +824,7 @@
reg = get_areg(s, reg0);
result = gen_ldst(s, opsize, reg, val, what, index);
if (what == EA_STORE || !addrp) {
- TCGv tmp = tcg_temp_new();
+ tmp = tcg_temp_new();
if (reg0 == 7 && opsize == OS_BYTE &&
m68k_feature(s->env, M68K_FEATURE_M68K)) {
tcg_gen_addi_i32(tmp, reg, 2);
@@ -916,14 +916,14 @@
static TCGv_ptr gen_fp_ptr(int freg)
{
TCGv_ptr fp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
+ tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fregs[freg]));
return fp;
}
static TCGv_ptr gen_fp_result_ptr(void)
{
TCGv_ptr fp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
+ tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fp_result));
return fp;
}
@@ -954,15 +954,15 @@
case OS_WORD:
case OS_LONG:
tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
- gen_helper_exts32(cpu_env, fp, tmp);
+ gen_helper_exts32(tcg_env, fp, tmp);
break;
case OS_SINGLE:
tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
- gen_helper_extf32(cpu_env, fp, tmp);
+ gen_helper_extf32(tcg_env, fp, tmp);
break;
case OS_DOUBLE:
tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
- gen_helper_extf64(cpu_env, fp, t64);
+ gen_helper_extf64(tcg_env, fp, t64);
break;
case OS_EXTENDED:
if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
@@ -1000,15 +1000,15 @@
case OS_BYTE:
case OS_WORD:
case OS_LONG:
- gen_helper_reds32(tmp, cpu_env, fp);
+ gen_helper_reds32(tmp, tcg_env, fp);
tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
break;
case OS_SINGLE:
- gen_helper_redf32(tmp, cpu_env, fp);
+ gen_helper_redf32(tmp, tcg_env, fp);
tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
break;
case OS_DOUBLE:
- gen_helper_redf64(t64, cpu_env, fp);
+ gen_helper_redf64(t64, tcg_env, fp);
tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
break;
case OS_EXTENDED:
@@ -1060,10 +1060,10 @@
case OS_BYTE:
case OS_WORD:
case OS_LONG:
- gen_helper_reds32(reg, cpu_env, fp);
+ gen_helper_reds32(reg, tcg_env, fp);
break;
case OS_SINGLE:
- gen_helper_redf32(reg, cpu_env, fp);
+ gen_helper_redf32(reg, tcg_env, fp);
break;
default:
g_assert_not_reached();
@@ -1073,17 +1073,17 @@
switch (opsize) {
case OS_BYTE:
tcg_gen_ext8s_i32(tmp, reg);
- gen_helper_exts32(cpu_env, fp, tmp);
+ gen_helper_exts32(tcg_env, fp, tmp);
break;
case OS_WORD:
tcg_gen_ext16s_i32(tmp, reg);
- gen_helper_exts32(cpu_env, fp, tmp);
+ gen_helper_exts32(tcg_env, fp, tmp);
break;
case OS_LONG:
- gen_helper_exts32(cpu_env, fp, reg);
+ gen_helper_exts32(tcg_env, fp, reg);
break;
case OS_SINGLE:
- gen_helper_extf32(cpu_env, fp, reg);
+ gen_helper_extf32(tcg_env, fp, reg);
break;
default:
g_assert_not_reached();
@@ -1132,23 +1132,23 @@
switch (opsize) {
case OS_BYTE:
tmp = tcg_constant_i32((int8_t)read_im8(env, s));
- gen_helper_exts32(cpu_env, fp, tmp);
+ gen_helper_exts32(tcg_env, fp, tmp);
break;
case OS_WORD:
tmp = tcg_constant_i32((int16_t)read_im16(env, s));
- gen_helper_exts32(cpu_env, fp, tmp);
+ gen_helper_exts32(tcg_env, fp, tmp);
break;
case OS_LONG:
tmp = tcg_constant_i32(read_im32(env, s));
- gen_helper_exts32(cpu_env, fp, tmp);
+ gen_helper_exts32(tcg_env, fp, tmp);
break;
case OS_SINGLE:
tmp = tcg_constant_i32(read_im32(env, s));
- gen_helper_extf32(cpu_env, fp, tmp);
+ gen_helper_extf32(tcg_env, fp, tmp);
break;
case OS_DOUBLE:
t64 = tcg_constant_i64(read_im64(env, s));
- gen_helper_extf64(cpu_env, fp, t64);
+ gen_helper_extf64(tcg_env, fp, t64);
break;
case OS_EXTENDED:
if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
@@ -1516,9 +1516,9 @@
destr = tcg_constant_i32(REG(insn, 9));
ilen = tcg_constant_i32(s->pc - s->base.pc_next);
if (sign) {
- gen_helper_divsw(cpu_env, destr, src, ilen);
+ gen_helper_divsw(tcg_env, destr, src, ilen);
} else {
- gen_helper_divuw(cpu_env, destr, src, ilen);
+ gen_helper_divuw(tcg_env, destr, src, ilen);
}
set_cc_op(s, CC_OP_FLAGS);
@@ -1547,9 +1547,9 @@
reg = tcg_constant_i32(REG(ext, 0));
ilen = tcg_constant_i32(s->pc - s->base.pc_next);
if (sign) {
- gen_helper_divsll(cpu_env, num, reg, den, ilen);
+ gen_helper_divsll(tcg_env, num, reg, den, ilen);
} else {
- gen_helper_divull(cpu_env, num, reg, den, ilen);
+ gen_helper_divull(tcg_env, num, reg, den, ilen);
}
set_cc_op(s, CC_OP_FLAGS);
return;
@@ -1563,9 +1563,9 @@
reg = tcg_constant_i32(REG(ext, 0));
ilen = tcg_constant_i32(s->pc - s->base.pc_next);
if (sign) {
- gen_helper_divsl(cpu_env, num, reg, den, ilen);
+ gen_helper_divsl(tcg_env, num, reg, den, ilen);
} else {
- gen_helper_divul(cpu_env, num, reg, den, ilen);
+ gen_helper_divul(tcg_env, num, reg, den, ilen);
}
set_cc_op(s, CC_OP_FLAGS);
@@ -2126,7 +2126,7 @@
update_cc_op(s);
dest = tcg_temp_new();
- gen_helper_get_ccr(dest, cpu_env);
+ gen_helper_get_ccr(dest, tcg_env);
return dest;
}
@@ -2153,7 +2153,7 @@
} else {
/* Must writeback before changing security state. */
do_writebacks(s);
- gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
+ gen_helper_set_sr(tcg_env, tcg_constant_i32(val));
}
set_cc_op(s, CC_OP_FLAGS);
}
@@ -2161,11 +2161,11 @@
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
if (ccr_only) {
- gen_helper_set_ccr(cpu_env, val);
+ gen_helper_set_ccr(tcg_env, val);
} else {
/* Must writeback before changing security state. */
do_writebacks(s);
- gen_helper_set_sr(cpu_env, val);
+ gen_helper_set_sr(tcg_env, val);
}
set_cc_op(s, CC_OP_FLAGS);
}
@@ -2388,13 +2388,13 @@
*/
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
} else {
TCGv regs = tcg_constant_i32(REG(ext2, 6) |
(REG(ext1, 6) << 3) |
(REG(ext2, 0) << 6) |
(REG(ext1, 0) << 9));
- gen_helper_cas2w(cpu_env, regs, addr1, addr2);
+ gen_helper_cas2w(tcg_env, regs, addr1, addr2);
}
/* Note that cas2w also assigned to env->cc_op. */
@@ -2442,9 +2442,9 @@
(REG(ext2, 0) << 6) |
(REG(ext1, 0) << 9));
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
- gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
+ gen_helper_cas2l_parallel(tcg_env, regs, addr1, addr2);
} else {
- gen_helper_cas2l(cpu_env, regs, addr1, addr2);
+ gen_helper_cas2l(tcg_env, regs, addr1, addr2);
}
/* Note that cas2l also assigned to env->cc_op. */
@@ -2837,7 +2837,7 @@
return;
}
- gen_helper_reset(cpu_env);
+ gen_helper_reset(tcg_env);
}
#endif
@@ -3971,11 +3971,11 @@
}
if (is_sign) {
- gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
+ gen_helper_bfexts_mem(dest, tcg_env, addr, ofs, len);
tcg_gen_mov_i32(QREG_CC_N, dest);
} else {
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
+ gen_helper_bfextu_mem(tmp, tcg_env, addr, ofs, len);
tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
}
set_cc_op(s, CC_OP_LOGIC);
@@ -4093,21 +4093,21 @@
switch (insn & 0x0f00) {
case 0x0a00: /* bfchg */
- gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ gen_helper_bfchg_mem(QREG_CC_N, tcg_env, addr, ofs, len);
break;
case 0x0c00: /* bfclr */
- gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ gen_helper_bfclr_mem(QREG_CC_N, tcg_env, addr, ofs, len);
break;
case 0x0d00: /* bfffo */
t64 = tcg_temp_new_i64();
- gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
+ gen_helper_bfffo_mem(t64, tcg_env, addr, ofs, len);
tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
break;
case 0x0e00: /* bfset */
- gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ gen_helper_bfset_mem(QREG_CC_N, tcg_env, addr, ofs, len);
break;
case 0x0800: /* bftst */
- gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ gen_helper_bfexts_mem(QREG_CC_N, tcg_env, addr, ofs, len);
break;
default:
g_assert_not_reached();
@@ -4208,7 +4208,7 @@
ofs = tcg_constant_i32(extract32(ext, 6, 5));
}
- gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
+ gen_helper_bfins_mem(QREG_CC_N, tcg_env, addr, src, ofs, len);
set_cc_op(s, CC_OP_LOGIC);
}
@@ -4243,7 +4243,7 @@
reg = gen_extend(s, DREG(insn, 9), opsize, 1);
gen_flush_flags(s);
- gen_helper_chk(cpu_env, reg, src);
+ gen_helper_chk(tcg_env, reg, src);
}
DISAS_INSN(chk2)
@@ -4288,7 +4288,7 @@
}
gen_flush_flags(s);
- gen_helper_chk2(cpu_env, reg, bound1, bound2);
+ gen_helper_chk2(tcg_env, reg, bound1, bound2);
}
static void m68k_copy_line(TCGv dst, TCGv src, int index)
@@ -4462,7 +4462,7 @@
gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
return;
}
- tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
+ tcg_gen_ld_i32(AREG(insn, 0), tcg_env,
offsetof(CPUM68KState, sp[M68K_USP]));
}
@@ -4472,7 +4472,7 @@
gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
return;
}
- tcg_gen_st_i32(AREG(insn, 0), cpu_env,
+ tcg_gen_st_i32(AREG(insn, 0), tcg_env,
offsetof(CPUM68KState, sp[M68K_USP]));
}
@@ -4528,7 +4528,7 @@
} else {
reg = DREG(ext, 12);
}
- gen_helper_cf_movec_to(cpu_env, tcg_constant_i32(ext & 0xfff), reg);
+ gen_helper_cf_movec_to(tcg_env, tcg_constant_i32(ext & 0xfff), reg);
gen_exit_tb(s);
}
@@ -4551,9 +4551,9 @@
}
creg = tcg_constant_i32(ext & 0xfff);
if (insn & 1) {
- gen_helper_m68k_movec_to(cpu_env, creg, reg);
+ gen_helper_m68k_movec_to(tcg_env, creg, reg);
} else {
- gen_helper_m68k_movec_from(reg, cpu_env, creg);
+ gen_helper_m68k_movec_from(reg, tcg_env, creg);
}
gen_exit_tb(s);
}
@@ -4605,7 +4605,7 @@
}
opmode = tcg_constant_i32((insn >> 3) & 3);
- gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
+ gen_helper_pflush(tcg_env, AREG(insn, 0), opmode);
}
DISAS_INSN(ptest)
@@ -4617,7 +4617,7 @@
return;
}
is_read = tcg_constant_i32((insn >> 5) & 1);
- gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
+ gen_helper_ptest(tcg_env, AREG(insn, 0), is_read);
}
#endif
@@ -4703,10 +4703,10 @@
tcg_gen_movi_i32(res, 0);
break;
case M68K_FPSR:
- tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
+ tcg_gen_ld_i32(res, tcg_env, offsetof(CPUM68KState, fpsr));
break;
case M68K_FPCR:
- tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
+ tcg_gen_ld_i32(res, tcg_env, offsetof(CPUM68KState, fpcr));
break;
}
}
@@ -4717,10 +4717,10 @@
case M68K_FPIAR:
break;
case M68K_FPSR:
- tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
+ tcg_gen_st_i32(val, tcg_env, offsetof(CPUM68KState, fpsr));
break;
case M68K_FPCR:
- gen_helper_set_fpcr(cpu_env, val);
+ gen_helper_set_fpcr(tcg_env, val);
break;
}
}
@@ -4877,23 +4877,23 @@
* only available to store register to memory
*/
if (opsize == OS_EXTENDED) {
- gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
+ gen_helper_fmovemx_st_predec(tmp, tcg_env, addr, tmp);
} else {
- gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
+ gen_helper_fmovemd_st_predec(tmp, tcg_env, addr, tmp);
}
} else {
/* postincrement addressing mode */
if (opsize == OS_EXTENDED) {
if (is_load) {
- gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
+ gen_helper_fmovemx_ld_postinc(tmp, tcg_env, addr, tmp);
} else {
- gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
+ gen_helper_fmovemx_st_postinc(tmp, tcg_env, addr, tmp);
}
} else {
if (is_load) {
- gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
+ gen_helper_fmovemd_ld_postinc(tmp, tcg_env, addr, tmp);
} else {
- gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
+ gen_helper_fmovemd_st_postinc(tmp, tcg_env, addr, tmp);
}
}
}
@@ -4925,7 +4925,7 @@
/* fmovecr */
TCGv rom_offset = tcg_constant_i32(opmode);
cpu_dest = gen_fp_ptr(REG(ext, 7));
- gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
+ gen_helper_fconst(tcg_env, cpu_dest, rom_offset);
return;
}
break;
@@ -4936,7 +4936,7 @@
EA_STORE, IS_USER(s)) == -1) {
gen_addr_fault(s);
}
- gen_helper_ftst(cpu_env, cpu_src);
+ gen_helper_ftst(tcg_env, cpu_src);
return;
case 4: /* fmove to control register. */
case 5: /* fmove from control register. */
@@ -4970,172 +4970,172 @@
gen_fp_move(cpu_dest, cpu_src);
break;
case 0x40: /* fsmove */
- gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fsround(tcg_env, cpu_dest, cpu_src);
break;
case 0x44: /* fdmove */
- gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fdround(tcg_env, cpu_dest, cpu_src);
break;
case 1: /* fint */
- gen_helper_firound(cpu_env, cpu_dest, cpu_src);
+ gen_helper_firound(tcg_env, cpu_dest, cpu_src);
break;
case 2: /* fsinh */
- gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fsinh(tcg_env, cpu_dest, cpu_src);
break;
case 3: /* fintrz */
- gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fitrunc(tcg_env, cpu_dest, cpu_src);
break;
case 4: /* fsqrt */
- gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fsqrt(tcg_env, cpu_dest, cpu_src);
break;
case 0x41: /* fssqrt */
- gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fssqrt(tcg_env, cpu_dest, cpu_src);
break;
case 0x45: /* fdsqrt */
- gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fdsqrt(tcg_env, cpu_dest, cpu_src);
break;
case 0x06: /* flognp1 */
- gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
+ gen_helper_flognp1(tcg_env, cpu_dest, cpu_src);
break;
case 0x08: /* fetoxm1 */
- gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fetoxm1(tcg_env, cpu_dest, cpu_src);
break;
case 0x09: /* ftanh */
- gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
+ gen_helper_ftanh(tcg_env, cpu_dest, cpu_src);
break;
case 0x0a: /* fatan */
- gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fatan(tcg_env, cpu_dest, cpu_src);
break;
case 0x0c: /* fasin */
- gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fasin(tcg_env, cpu_dest, cpu_src);
break;
case 0x0d: /* fatanh */
- gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fatanh(tcg_env, cpu_dest, cpu_src);
break;
case 0x0e: /* fsin */
- gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fsin(tcg_env, cpu_dest, cpu_src);
break;
case 0x0f: /* ftan */
- gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
+ gen_helper_ftan(tcg_env, cpu_dest, cpu_src);
break;
case 0x10: /* fetox */
- gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fetox(tcg_env, cpu_dest, cpu_src);
break;
case 0x11: /* ftwotox */
- gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
+ gen_helper_ftwotox(tcg_env, cpu_dest, cpu_src);
break;
case 0x12: /* ftentox */
- gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
+ gen_helper_ftentox(tcg_env, cpu_dest, cpu_src);
break;
case 0x14: /* flogn */
- gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
+ gen_helper_flogn(tcg_env, cpu_dest, cpu_src);
break;
case 0x15: /* flog10 */
- gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
+ gen_helper_flog10(tcg_env, cpu_dest, cpu_src);
break;
case 0x16: /* flog2 */
- gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
+ gen_helper_flog2(tcg_env, cpu_dest, cpu_src);
break;
case 0x18: /* fabs */
- gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fabs(tcg_env, cpu_dest, cpu_src);
break;
case 0x58: /* fsabs */
- gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fsabs(tcg_env, cpu_dest, cpu_src);
break;
case 0x5c: /* fdabs */
- gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fdabs(tcg_env, cpu_dest, cpu_src);
break;
case 0x19: /* fcosh */
- gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fcosh(tcg_env, cpu_dest, cpu_src);
break;
case 0x1a: /* fneg */
- gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fneg(tcg_env, cpu_dest, cpu_src);
break;
case 0x5a: /* fsneg */
- gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fsneg(tcg_env, cpu_dest, cpu_src);
break;
case 0x5e: /* fdneg */
- gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fdneg(tcg_env, cpu_dest, cpu_src);
break;
case 0x1c: /* facos */
- gen_helper_facos(cpu_env, cpu_dest, cpu_src);
+ gen_helper_facos(tcg_env, cpu_dest, cpu_src);
break;
case 0x1d: /* fcos */
- gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fcos(tcg_env, cpu_dest, cpu_src);
break;
case 0x1e: /* fgetexp */
- gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fgetexp(tcg_env, cpu_dest, cpu_src);
break;
case 0x1f: /* fgetman */
- gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
+ gen_helper_fgetman(tcg_env, cpu_dest, cpu_src);
break;
case 0x20: /* fdiv */
- gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x60: /* fsdiv */
- gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fsdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x64: /* fddiv */
- gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fddiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x21: /* fmod */
- gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fmod(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x22: /* fadd */
- gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x62: /* fsadd */
- gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fsadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x66: /* fdadd */
- gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fdadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x23: /* fmul */
- gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x63: /* fsmul */
- gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fsmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x67: /* fdmul */
- gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fdmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x24: /* fsgldiv */
- gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fsgldiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x25: /* frem */
- gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_frem(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x26: /* fscale */
- gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fscale(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x27: /* fsglmul */
- gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fsglmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x28: /* fsub */
- gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x68: /* fssub */
- gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fssub(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x6c: /* fdsub */
- gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ gen_helper_fdsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
break;
case 0x30: case 0x31: case 0x32:
case 0x33: case 0x34: case 0x35:
case 0x36: case 0x37: {
TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
- gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
+ gen_helper_fsincos(tcg_env, cpu_dest, cpu_dest2, cpu_src);
}
break;
case 0x38: /* fcmp */
- gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
+ gen_helper_fcmp(tcg_env, cpu_src, cpu_dest);
return;
case 0x3a: /* ftst */
- gen_helper_ftst(cpu_env, cpu_src);
+ gen_helper_ftst(tcg_env, cpu_src);
return;
default:
goto undef;
}
- gen_helper_ftst(cpu_env, cpu_dest);
+ gen_helper_ftst(tcg_env, cpu_dest);
return;
undef:
/* FIXME: Is this right for offset addressing modes? */
@@ -5466,12 +5466,12 @@
ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
}
if (s->env->macsr & MACSR_FI) {
- gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
+ gen_helper_macmulf(s->mactmp, tcg_env, rx, ry);
} else {
if (s->env->macsr & MACSR_SU)
- gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
+ gen_helper_macmuls(s->mactmp, tcg_env, rx, ry);
else
- gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
+ gen_helper_macmulu(s->mactmp, tcg_env, rx, ry);
switch ((ext >> 9) & 3) {
case 1:
tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
@@ -5507,11 +5507,11 @@
tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
if (s->env->macsr & MACSR_FI)
- gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
+ gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
else if (s->env->macsr & MACSR_SU)
- gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
+ gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
else
- gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
+ gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));
#if 0
/* Disabled because conditional branches clobber temporary vars. */
@@ -5539,18 +5539,18 @@
else
tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
if (s->env->macsr & MACSR_FI)
- gen_helper_macsatf(cpu_env, tcg_constant_i32(acc));
+ gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
else if (s->env->macsr & MACSR_SU)
- gen_helper_macsats(cpu_env, tcg_constant_i32(acc));
+ gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
else
- gen_helper_macsatu(cpu_env, tcg_constant_i32(acc));
+ gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));
#if 0
/* Disabled because conditional branches clobber temporary vars. */
if (l1 != -1)
gen_set_label(l1);
#endif
}
- gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(acc));
+ gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(acc));
if (insn & 0x30) {
TCGv rw;
@@ -5580,7 +5580,7 @@
accnum = (insn >> 9) & 3;
acc = MACREG(accnum);
if (s->env->macsr & MACSR_FI) {
- gen_helper_get_macf(rx, cpu_env, acc);
+ gen_helper_get_macf(rx, tcg_env, acc);
} else if ((s->env->macsr & MACSR_OMC) == 0) {
tcg_gen_extrl_i64_i32(rx, acc);
} else if (s->env->macsr & MACSR_SU) {
@@ -5601,9 +5601,9 @@
TCGv dest;
src = insn & 3;
dest = tcg_constant_i32((insn >> 9) & 3);
- gen_helper_mac_move(cpu_env, dest, tcg_constant_i32(src));
+ gen_helper_mac_move(tcg_env, dest, tcg_constant_i32(src));
gen_mac_clear_flags();
- gen_helper_mac_set_flags(cpu_env, dest);
+ gen_helper_mac_set_flags(tcg_env, dest);
}
DISAS_INSN(from_macsr)
@@ -5628,9 +5628,9 @@
reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
if (s->env->macsr & MACSR_FI)
- gen_helper_get_mac_extf(reg, cpu_env, acc);
+ gen_helper_get_mac_extf(reg, tcg_env, acc);
else
- gen_helper_get_mac_exti(reg, cpu_env, acc);
+ gen_helper_get_mac_exti(reg, tcg_env, acc);
}
DISAS_INSN(macsr_to_ccr)
@@ -5639,7 +5639,7 @@
/* Note that X and C are always cleared. */
tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
- gen_helper_set_ccr(cpu_env, tmp);
+ gen_helper_set_ccr(tcg_env, tmp);
set_cc_op(s, CC_OP_FLAGS);
}
@@ -5661,14 +5661,14 @@
}
tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
gen_mac_clear_flags();
- gen_helper_mac_set_flags(cpu_env, tcg_constant_i32(accnum));
+ gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(accnum));
}
DISAS_INSN(to_macsr)
{
TCGv val;
SRC_EA(env, val, OS_LONG, 0, NULL);
- gen_helper_set_macsr(cpu_env, val);
+ gen_helper_set_macsr(tcg_env, val);
gen_exit_tb(s);
}
@@ -5686,11 +5686,11 @@
SRC_EA(env, val, OS_LONG, 0, NULL);
acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
if (s->env->macsr & MACSR_FI)
- gen_helper_set_mac_extf(cpu_env, val, acc);
+ gen_helper_set_mac_extf(tcg_env, val, acc);
else if (s->env->macsr & MACSR_SU)
- gen_helper_set_mac_exts(cpu_env, val, acc);
+ gen_helper_set_mac_exts(tcg_env, val, acc);
else
- gen_helper_set_mac_extu(cpu_env, val, acc);
+ gen_helper_set_mac_extu(tcg_env, val, acc);
}
static disas_proc opcode_table[65536];
@@ -5990,7 +5990,7 @@
static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUM68KState *env = cpu->env_ptr;
+ CPUM68KState *env = cpu_env(cpu);
dc->env = env;
dc->pc = dc->base.pc_first;
@@ -6021,7 +6021,7 @@
static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUM68KState *env = cpu->env_ptr;
+ CPUM68KState *env = cpu_env(cpu);
uint16_t insn = read_im16(env, dc);
opcode_table[insn](env, dc, insn);
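
For reference, the translate.c hunks above all follow one mechanical pattern: the per-target cpu_env pointer is replaced by the common tcg_env global wherever the generated code calls a helper or loads/stores an env field. A minimal sketch of the resulting idiom -- CPUFooState, foo_result and gen_helper_foo_op are invented names used purely for illustration, not part of this series:

    /*
     * Illustrative sketch only: an invented target and helper,
     * showing the tcg_env-based idiom the hunks converge on.
     */
    static void gen_foo_op(TCGv_i32 dest, TCGv_i32 src)
    {
        /* helpers that need CPU state take the global tcg_env pointer */
        gen_helper_foo_op(dest, tcg_env, src);

        /* direct accesses to env fields are likewise based on tcg_env */
        tcg_gen_st_i32(dest, tcg_env, offsetof(CPUFooState, foo_result));
    }
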
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 03c2c4d..bbb3335 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -296,7 +296,6 @@
MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
CPUMBState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect,
mb_cpu_gdb_write_stack_protect, 2,
"microblaze-stack-protect.xml", 0);
@@ -439,6 +438,7 @@
.name = TYPE_MICROBLAZE_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(MicroBlazeCPU),
+ .instance_align = __alignof(MicroBlazeCPU),
.instance_init = mb_cpu_initfn,
.class_size = sizeof(MicroBlazeCPUClass),
.class_init = mb_cpu_class_init,
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index f6cab6c..e43c49d 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -345,15 +345,15 @@
struct ArchCPU {
/*< private >*/
CPUState parent_obj;
-
/*< public >*/
+
+ CPUMBState env;
+
bool ns_axi_dp;
bool ns_axi_ip;
bool ns_axi_dc;
bool ns_axi_ic;
- CPUNegativeOffsetState neg;
- CPUMBState env;
MicroBlazeCPUConfig cfg;
};
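
The cpu.c/cpu.h hunks above share a second pattern: the CPUNegativeOffsetState member is dropped, the env member moves to sit immediately after the CPUState parent, and the TypeInfo gains an instance_align. A sketch of the shape each target converges on, with FooCPU/CPUFooState/TYPE_FOO_CPU/foo_cpu_initfn as stand-in names rather than anything defined in this series:

    struct ArchCPU {
        /*< private >*/
        CPUState parent_obj;
        /*< public >*/

        CPUFooState env;        /* placed right after parent_obj */

        /* other target-specific members follow */
    };

    static const TypeInfo foo_cpu_type_info = {
        .name           = TYPE_FOO_CPU,
        .parent         = TYPE_CPU,
        .instance_size  = sizeof(FooCPU),
        .instance_align = __alignof(FooCPU),  /* keep env suitably aligned */
        .instance_init  = foo_cpu_initfn,
    };

With env at a fixed offset from the CPUState parent, generic code can locate it directly, which is presumably why the cpu_set_cpustate_pointers() calls disappear and later hunks replace cs->env_ptr readers with cpu_env(cs).
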
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index d02c162..49bfb4a 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -102,7 +102,7 @@
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
dc->base.is_jmp = DISAS_NORETURN;
}
@@ -116,7 +116,7 @@
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
TCGv_i32 tmp = tcg_constant_i32(esr_ec);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
+ tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));
gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
@@ -295,11 +295,11 @@
#define ENV_WRAPPER2(NAME, HELPER) \
static void NAME(TCGv_i32 out, TCGv_i32 ina) \
- { HELPER(out, cpu_env, ina); }
+ { HELPER(out, tcg_env, ina); }
#define ENV_WRAPPER3(NAME, HELPER) \
static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
- { HELPER(out, cpu_env, ina, inb); }
+ { HELPER(out, tcg_env, ina, inb); }
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
@@ -472,12 +472,12 @@
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
- gen_helper_divs(out, cpu_env, inb, ina);
+ gen_helper_divs(out, tcg_env, inb, ina);
}
static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
- gen_helper_divu(out, cpu_env, inb, ina);
+ gen_helper_divu(out, tcg_env, inb, ina);
}
DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
@@ -643,7 +643,7 @@
}
if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
- gen_helper_stackprot(cpu_env, ret);
+ gen_helper_stackprot(tcg_env, ret);
}
return ret;
}
@@ -662,7 +662,7 @@
}
if (ra == 1 && dc->cfg->stackprot) {
- gen_helper_stackprot(cpu_env, ret);
+ gen_helper_stackprot(tcg_env, ret);
}
return ret;
}
@@ -1232,7 +1232,7 @@
t_sync_flags(dc);
- tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
-offsetof(MicroBlazeCPU, env)
+offsetof(CPUState, halted));
@@ -1381,13 +1381,13 @@
tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
break;
case SR_FSR:
- tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
+ tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
break;
case 0x800:
- tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
+ tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
break;
case 0x802:
- tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
+ tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
break;
case 0x1000: /* PID */
@@ -1400,7 +1400,7 @@
TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);
- gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
+ gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
}
break;
@@ -1422,7 +1422,7 @@
case SR_EAR:
{
TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
+ tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
tcg_gen_extrh_i64_i32(dest, t64);
}
return true;
@@ -1452,27 +1452,27 @@
case SR_EAR:
{
TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
+ tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
tcg_gen_extrl_i64_i32(dest, t64);
}
break;
case SR_ESR:
- tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
+ tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
break;
case SR_FSR:
- tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
+ tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
break;
case SR_BTR:
- tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
+ tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
break;
case SR_EDR:
- tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
+ tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
break;
case 0x800:
- tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
+ tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
break;
case 0x802:
- tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
+ tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
break;
#ifndef CONFIG_USER_ONLY
@@ -1486,13 +1486,13 @@
TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);
- gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
+ gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
}
break;
#endif
case 0x2000 ... 0x200c:
- tcg_gen_ld_i32(dest, cpu_env,
+ tcg_gen_ld_i32(dest, tcg_env,
offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
- offsetof(MicroBlazeCPU, env));
break;
@@ -1630,7 +1630,7 @@
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
DisasContext *dc = container_of(dcb, DisasContext, base);
- CPUMBState *env = cs->env_ptr;
+ CPUMBState *env = cpu_env(cs);
uint32_t ir;
/* TODO: This should raise an exception, not terminate qemu. */
@@ -1882,9 +1882,9 @@
for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
*i32s[i].var =
- tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
+ tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
}
cpu_res_addr =
- tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
+ tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index 63da194..a0023ed 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -504,7 +504,6 @@
CPUMIPSState *env = &cpu->env;
MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(obj);
- cpu_set_cpustate_pointers(cpu);
cpu->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, cpu, 0);
cpu->count_div = clock_new(OBJECT(obj), "clk-div-count");
env->count_clock = clock_new(OBJECT(obj), "clk-count");
@@ -600,6 +599,7 @@
.name = TYPE_MIPS_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(MIPSCPU),
+ .instance_align = __alignof(MIPSCPU),
.instance_init = mips_cpu_initfn,
.abstract = true,
.class_size = sizeof(MIPSCPUClass),
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index 6d6af1f..67f8e8b 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -1213,10 +1213,10 @@
CPUState parent_obj;
/*< public >*/
+ CPUMIPSState env;
+
Clock *clock;
Clock *count_div; /* Divider for CP0_Count clock */
- CPUNegativeOffsetState neg;
- CPUMIPSState env;
};
diff --git a/target/mips/tcg/lcsr_translate.c b/target/mips/tcg/lcsr_translate.c
index 9f2a5f4..352b0f4 100644
--- a/target/mips/tcg/lcsr_translate.c
+++ b/target/mips/tcg/lcsr_translate.c
@@ -22,7 +22,7 @@
TCGv src1 = tcg_temp_new();
gen_load_gpr(src1, a->rs);
- gen_helper_lcsr_cpucfg(dest, cpu_env, src1);
+ gen_helper_lcsr_cpucfg(dest, tcg_env, src1);
gen_store_gpr(dest, a->rd);
return true;
@@ -37,7 +37,7 @@
check_cp0_enabled(ctx);
gen_load_gpr(src1, a->rs);
- func(dest, cpu_env, src1);
+ func(dest, tcg_env, src1);
gen_store_gpr(dest, a->rd);
return true;
@@ -52,7 +52,7 @@
check_cp0_enabled(ctx);
gen_load_gpr(addr, a->rs);
gen_load_gpr(val, a->rd);
- func(cpu_env, addr, val);
+ func(tcg_env, addr, val);
return true;
}
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index 211d102..7510831 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -710,17 +710,17 @@
save_cpu_state(ctx, 1);
switch (opc) {
case LWM32:
- gen_helper_lwm(cpu_env, t0, t1, t2);
+ gen_helper_lwm(tcg_env, t0, t1, t2);
break;
case SWM32:
- gen_helper_swm(cpu_env, t0, t1, t2);
+ gen_helper_swm(tcg_env, t0, t1, t2);
break;
#ifdef TARGET_MIPS64
case LDM:
- gen_helper_ldm(cpu_env, t0, t1, t2);
+ gen_helper_ldm(tcg_env, t0, t1, t2);
break;
case SDM:
- gen_helper_sdm(cpu_env, t0, t1, t2);
+ gen_helper_sdm(tcg_env, t0, t1, t2);
break;
#endif
}
@@ -1271,7 +1271,7 @@
TCGv t0 = tcg_temp_new();
save_cpu_state(ctx, 1);
- gen_helper_di(t0, cpu_env);
+ gen_helper_di(t0, tcg_env);
gen_store_gpr(t0, rs);
/*
* Stop translation as we may have switched the execution
@@ -1286,7 +1286,7 @@
TCGv t0 = tcg_temp_new();
save_cpu_state(ctx, 1);
- gen_helper_ei(t0, cpu_env);
+ gen_helper_ei(t0, tcg_env);
gen_store_gpr(t0, rs);
/*
* DISAS_STOP isn't sufficient, we need to ensure we break out
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index c314a74..7a8dbad 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -7432,15 +7432,15 @@
#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \
do { \
- float_status *status = &env->active_tc.msa_fp_status; \
+ float_status *status_ = &env->active_tc.msa_fp_status; \
int c; \
\
- set_float_exception_flags(0, status); \
- DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+ set_float_exception_flags(0, status_); \
+ DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status_); \
c = update_msacsr(env, 0, 0); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status_) >> 6) << 6) | c; \
} \
} while (0)
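
The msa_helper.c hunk above only renames the macro-local from status to status_; the point is to keep the macro body from shadowing a same-named variable at its expansion site. A small plain-C illustration of that hazard (nothing QEMU-specific, names invented):

    #include <stdio.h>

    /*
     * If an argument expression mentions "status", it binds to the
     * macro's own local after expansion -- hence the "_" suffix trick.
     */
    #define SQUARE_INTO(dest, x)                                    \
        do {                                                        \
            int status = 0;   /* shadows the caller's "status" */   \
            (dest) = (x) * (x);                                     \
        } while (0)

    int main(void)
    {
        int status = 42;
        int out;

        SQUARE_INTO(out, status);  /* "status" here is the macro's local... */
        printf("%d\n", out);       /* ...so this prints 0, not 1764 */
        return 0;
    }
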
diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c
index b5b66fb..75cf80a 100644
--- a/target/mips/tcg/msa_translate.c
+++ b/target/mips/tcg/msa_translate.c
@@ -140,7 +140,7 @@
off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]);
msa_wr_d[i * 2 + 1] =
- tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2 + 1]);
+ tcg_global_mem_new_i64(tcg_env, off, msaregnames[i * 2 + 1]);
}
}
@@ -288,7 +288,7 @@
return true;
}
- gen_msa_i8(cpu_env,
+ gen_msa_i8(tcg_env,
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
tcg_constant_i32(a->sa));
@@ -314,7 +314,7 @@
return true;
}
- gen_helper_msa_shf_df(cpu_env,
+ gen_helper_msa_shf_df(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
@@ -330,7 +330,7 @@
return true;
}
- gen_msa_i5(cpu_env,
+ gen_msa_i5(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
@@ -357,7 +357,7 @@
return true;
}
- gen_helper_msa_ldi_df(cpu_env,
+ gen_helper_msa_ldi_df(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->sa));
@@ -376,7 +376,7 @@
return true;
}
- gen_msa_bit(cpu_env,
+ gen_msa_bit(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
@@ -405,7 +405,7 @@
return true;
}
- gen_msa_3rf(cpu_env,
+ gen_msa_3rf(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
@@ -425,7 +425,7 @@
return true;
}
- gen_msa_3r(cpu_env,
+ gen_msa_3r(tcg_env,
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
tcg_constant_i32(a->wt));
@@ -519,7 +519,7 @@
return true;
}
- gen_helper_msa_move_v(cpu_env,
+ gen_helper_msa_move_v(tcg_env,
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws));
@@ -537,7 +537,7 @@
telm = tcg_temp_new();
gen_load_gpr(telm, a->ws);
- gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd));
+ gen_helper_msa_ctcmsa(tcg_env, telm, tcg_constant_i32(a->wd));
return true;
}
@@ -552,7 +552,7 @@
telm = tcg_temp_new();
- gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws));
+ gen_helper_msa_cfcmsa(telm, tcg_env, tcg_constant_i32(a->ws));
gen_store_gpr(telm, a->wd);
return true;
@@ -569,7 +569,7 @@
return true;
}
- gen_msa_elm_df(cpu_env,
+ gen_msa_elm_df(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
@@ -593,7 +593,7 @@
return true;
}
- gen_msa_elm[a->df](cpu_env,
+ gen_msa_elm[a->df](tcg_env,
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws),
tcg_constant_i32(a->n));
@@ -698,7 +698,7 @@
return true;
}
- gen_msa_2r(cpu_env, tcg_constant_i32(a->wd), tcg_constant_i32(a->ws));
+ gen_msa_2r(tcg_env, tcg_constant_i32(a->wd), tcg_constant_i32(a->ws));
return true;
}
@@ -718,7 +718,7 @@
return true;
}
- gen_helper_msa_fill_df(cpu_env,
+ gen_helper_msa_fill_df(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws));
@@ -733,7 +733,7 @@
return true;
}
- gen_msa_2rf(cpu_env,
+ gen_msa_2rf(tcg_env,
tcg_constant_i32(a->df),
tcg_constant_i32(a->wd),
tcg_constant_i32(a->ws));
@@ -770,7 +770,7 @@
taddr = tcg_temp_new();
gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df);
- gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr);
+ gen_msa_ldst(tcg_env, tcg_constant_i32(a->wd), taddr);
return true;
}
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
index cfcd8ac..c517258 100644
--- a/target/mips/tcg/mxu_translate.c
+++ b/target/mips/tcg/mxu_translate.c
@@ -617,12 +617,12 @@
void mxu_translate_init(void)
{
for (unsigned i = 0; i < NUMBER_OF_MXU_REGISTERS - 1; i++) {
- mxu_gpr[i] = tcg_global_mem_new(cpu_env,
+ mxu_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState, active_tc.mxu_gpr[i]),
mxuregnames[i]);
}
- mxu_CR = tcg_global_mem_new(cpu_env,
+ mxu_CR = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState, active_tc.mxu_cr),
mxuregnames[NUMBER_OF_MXU_REGISTERS - 1]);
}
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index a98dde0..b4b746d 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -1006,8 +1006,8 @@
}
gen_store_gpr(tmp1, reg1);
gen_store_gpr(tmp2, reg2);
- tcg_gen_st_i64(tval, cpu_env, offsetof(CPUMIPSState, llval_wp));
- tcg_gen_st_tl(taddr, cpu_env, offsetof(CPUMIPSState, lladdr));
+ tcg_gen_st_i64(tval, tcg_env, offsetof(CPUMIPSState, llval_wp));
+ tcg_gen_st_tl(taddr, tcg_env, offsetof(CPUMIPSState, lladdr));
}
static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
@@ -1025,7 +1025,7 @@
gen_base_offset_addr(ctx, taddr, base, offset);
- tcg_gen_ld_tl(lladdr, cpu_env, offsetof(CPUMIPSState, lladdr));
+ tcg_gen_ld_tl(lladdr, tcg_env, offsetof(CPUMIPSState, lladdr));
tcg_gen_brcond_tl(TCG_COND_NE, taddr, lladdr, lab_fail);
gen_load_gpr(tmp1, reg1);
@@ -1037,7 +1037,7 @@
tcg_gen_concat_tl_i64(tval, tmp1, tmp2);
}
- tcg_gen_ld_i64(llval, cpu_env, offsetof(CPUMIPSState, llval_wp));
+ tcg_gen_ld_i64(llval, tcg_env, offsetof(CPUMIPSState, llval_wp));
tcg_gen_atomic_cmpxchg_i64(val, taddr, llval, tval,
eva ? MIPS_HFLAG_UM : ctx->mem_idx,
MO_64 | MO_ALIGN);
@@ -1053,7 +1053,7 @@
}
gen_set_label(lab_done);
tcg_gen_movi_tl(lladdr, -1);
- tcg_gen_st_tl(lladdr, cpu_env, offsetof(CPUMIPSState, lladdr));
+ tcg_gen_st_tl(lladdr, tcg_env, offsetof(CPUMIPSState, lladdr));
}
static void gen_adjust_sp(DisasContext *ctx, int u)
@@ -1335,14 +1335,14 @@
case NM_DVP:
if (ctx->vp) {
check_cp0_enabled(ctx);
- gen_helper_dvp(t0, cpu_env);
+ gen_helper_dvp(t0, tcg_env);
gen_store_gpr(t0, rt);
}
break;
case NM_EVP:
if (ctx->vp) {
check_cp0_enabled(ctx);
- gen_helper_evp(t0, cpu_env);
+ gen_helper_evp(t0, tcg_env);
gen_store_gpr(t0, rt);
}
break;
@@ -1428,7 +1428,7 @@
} else if (rs == 0) {
/* DVPE */
check_cp0_mt(ctx);
- gen_helper_dvpe(t0, cpu_env);
+ gen_helper_dvpe(t0, tcg_env);
gen_store_gpr(t0, rt);
} else {
gen_reserved_instruction(ctx);
@@ -1443,7 +1443,7 @@
} else if (rs == 0) {
/* EVPE */
check_cp0_mt(ctx);
- gen_helper_evpe(t0, cpu_env);
+ gen_helper_evpe(t0, tcg_env);
gen_store_gpr(t0, rt);
} else {
gen_reserved_instruction(ctx);
@@ -1485,7 +1485,7 @@
TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rs);
- gen_helper_yield(t0, cpu_env, t0);
+ gen_helper_yield(t0, tcg_env, t0);
gen_store_gpr(t0, rt);
}
break;
@@ -1517,19 +1517,19 @@
switch (opc) {
case NM_MAQ_S_W_PHR:
check_dsp(ctx);
- gen_helper_maq_s_w_phr(t0, v1_t, v0_t, cpu_env);
+ gen_helper_maq_s_w_phr(t0, v1_t, v0_t, tcg_env);
break;
case NM_MAQ_S_W_PHL:
check_dsp(ctx);
- gen_helper_maq_s_w_phl(t0, v1_t, v0_t, cpu_env);
+ gen_helper_maq_s_w_phl(t0, v1_t, v0_t, tcg_env);
break;
case NM_MAQ_SA_W_PHR:
check_dsp(ctx);
- gen_helper_maq_sa_w_phr(t0, v1_t, v0_t, cpu_env);
+ gen_helper_maq_sa_w_phr(t0, v1_t, v0_t, tcg_env);
break;
case NM_MAQ_SA_W_PHL:
check_dsp(ctx);
- gen_helper_maq_sa_w_phl(t0, v1_t, v0_t, cpu_env);
+ gen_helper_maq_sa_w_phl(t0, v1_t, v0_t, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@@ -1571,11 +1571,11 @@
switch (extract32(ctx->opcode, 12, 2)) {
case NM_MTHLIP:
tcg_gen_movi_tl(t0, v2 >> 3);
- gen_helper_mthlip(t0, v0_t, cpu_env);
+ gen_helper_mthlip(t0, v0_t, tcg_env);
break;
case NM_SHILOV:
tcg_gen_movi_tl(t0, v2 >> 3);
- gen_helper_shilo(t0, v0_t, cpu_env);
+ gen_helper_shilo(t0, v0_t, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@@ -1588,24 +1588,24 @@
switch (extract32(ctx->opcode, 12, 2)) {
case NM_RDDSP:
tcg_gen_movi_tl(t0, imm);
- gen_helper_rddsp(t0, t0, cpu_env);
+ gen_helper_rddsp(t0, t0, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_WRDSP:
gen_load_gpr(t0, ret);
tcg_gen_movi_tl(t1, imm);
- gen_helper_wrdsp(t0, t1, cpu_env);
+ gen_helper_wrdsp(t0, t1, tcg_env);
break;
case NM_EXTP:
tcg_gen_movi_tl(t0, v2 >> 3);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extp(t0, t0, t1, cpu_env);
+ gen_helper_extp(t0, t0, t1, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTPDP:
tcg_gen_movi_tl(t0, v2 >> 3);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extpdp(t0, t0, t1, cpu_env);
+ gen_helper_extpdp(t0, t0, t1, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -1615,7 +1615,7 @@
tcg_gen_movi_tl(t0, v2 >> 2);
switch (extract32(ctx->opcode, 12, 1)) {
case NM_SHLL_QB:
- gen_helper_shll_qb(t0, t0, v0_t, cpu_env);
+ gen_helper_shll_qb(t0, t0, v0_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_SHRL_QB:
@@ -1634,19 +1634,19 @@
tcg_gen_movi_tl(t1, v1);
switch (extract32(ctx->opcode, 12, 2)) {
case NM_EXTR_W:
- gen_helper_extr_w(t0, t0, t1, cpu_env);
+ gen_helper_extr_w(t0, t0, t1, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_R_W:
- gen_helper_extr_r_w(t0, t0, t1, cpu_env);
+ gen_helper_extr_r_w(t0, t0, t1, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_RS_W:
- gen_helper_extr_rs_w(t0, t0, t1, cpu_env);
+ gen_helper_extr_rs_w(t0, t0, t1, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_EXTR_S_H:
- gen_helper_extr_s_h(t0, t0, t1, cpu_env);
+ gen_helper_extr_s_h(t0, t0, t1, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -1671,19 +1671,19 @@
switch (extract32(ctx->opcode, 9, 3)) {
case NM_DPA_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpa_w_ph(t0, v1, v0, cpu_env);
+ gen_helper_dpa_w_ph(t0, v1, v0, tcg_env);
break;
case NM_DPAQ_S_W_PH:
check_dsp(ctx);
- gen_helper_dpaq_s_w_ph(t0, v1, v0, cpu_env);
+ gen_helper_dpaq_s_w_ph(t0, v1, v0, tcg_env);
break;
case NM_DPS_W_PH:
check_dsp_r2(ctx);
- gen_helper_dps_w_ph(t0, v1, v0, cpu_env);
+ gen_helper_dps_w_ph(t0, v1, v0, tcg_env);
break;
case NM_DPSQ_S_W_PH:
check_dsp(ctx);
- gen_helper_dpsq_s_w_ph(t0, v1, v0, cpu_env);
+ gen_helper_dpsq_s_w_ph(t0, v1, v0, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@@ -1694,19 +1694,19 @@
switch (extract32(ctx->opcode, 9, 3)) {
case NM_DPAX_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpax_w_ph(t0, v0, v1, cpu_env);
+ gen_helper_dpax_w_ph(t0, v0, v1, tcg_env);
break;
case NM_DPAQ_SA_L_W:
check_dsp(ctx);
- gen_helper_dpaq_sa_l_w(t0, v0, v1, cpu_env);
+ gen_helper_dpaq_sa_l_w(t0, v0, v1, tcg_env);
break;
case NM_DPSX_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpsx_w_ph(t0, v0, v1, cpu_env);
+ gen_helper_dpsx_w_ph(t0, v0, v1, tcg_env);
break;
case NM_DPSQ_SA_L_W:
check_dsp(ctx);
- gen_helper_dpsq_sa_l_w(t0, v0, v1, cpu_env);
+ gen_helper_dpsq_sa_l_w(t0, v0, v1, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@@ -1717,23 +1717,23 @@
switch (extract32(ctx->opcode, 9, 3)) {
case NM_DPAU_H_QBL:
check_dsp(ctx);
- gen_helper_dpau_h_qbl(t0, v0, v1, cpu_env);
+ gen_helper_dpau_h_qbl(t0, v0, v1, tcg_env);
break;
case NM_DPAQX_S_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpaqx_s_w_ph(t0, v0, v1, cpu_env);
+ gen_helper_dpaqx_s_w_ph(t0, v0, v1, tcg_env);
break;
case NM_DPSU_H_QBL:
check_dsp(ctx);
- gen_helper_dpsu_h_qbl(t0, v0, v1, cpu_env);
+ gen_helper_dpsu_h_qbl(t0, v0, v1, tcg_env);
break;
case NM_DPSQX_S_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpsqx_s_w_ph(t0, v0, v1, cpu_env);
+ gen_helper_dpsqx_s_w_ph(t0, v0, v1, tcg_env);
break;
case NM_MULSA_W_PH:
check_dsp_r2(ctx);
- gen_helper_mulsa_w_ph(t0, v0, v1, cpu_env);
+ gen_helper_mulsa_w_ph(t0, v0, v1, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@@ -1744,23 +1744,23 @@
switch (extract32(ctx->opcode, 9, 3)) {
case NM_DPAU_H_QBR:
check_dsp(ctx);
- gen_helper_dpau_h_qbr(t0, v1, v0, cpu_env);
+ gen_helper_dpau_h_qbr(t0, v1, v0, tcg_env);
break;
case NM_DPAQX_SA_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpaqx_sa_w_ph(t0, v1, v0, cpu_env);
+ gen_helper_dpaqx_sa_w_ph(t0, v1, v0, tcg_env);
break;
case NM_DPSU_H_QBR:
check_dsp(ctx);
- gen_helper_dpsu_h_qbr(t0, v1, v0, cpu_env);
+ gen_helper_dpsu_h_qbr(t0, v1, v0, tcg_env);
break;
case NM_DPSQX_SA_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpsqx_sa_w_ph(t0, v1, v0, cpu_env);
+ gen_helper_dpsqx_sa_w_ph(t0, v1, v0, tcg_env);
break;
case NM_MULSAQ_S_W_PH:
check_dsp(ctx);
- gen_helper_mulsaq_s_w_ph(t0, v1, v0, cpu_env);
+ gen_helper_mulsaq_s_w_ph(t0, v1, v0, tcg_env);
break;
default:
gen_reserved_instruction(ctx);
@@ -1849,7 +1849,7 @@
check_dsp(ctx);
gen_load_gpr(v1_t, rs);
tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_w(t0, t0, v1_t, cpu_env);
+ gen_helper_extr_w(t0, t0, v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -1904,7 +1904,7 @@
case NM_EXTRV_R_W:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_r_w(t0, t0, v1_t, cpu_env);
+ gen_helper_extr_r_w(t0, t0, v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
default:
@@ -1924,7 +1924,7 @@
case NM_EXTPV:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extp(t0, t0, v1_t, cpu_env);
+ gen_helper_extp(t0, t0, v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_MSUB:
@@ -1948,7 +1948,7 @@
case NM_EXTRV_RS_W:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_rs_w(t0, t0, v1_t, cpu_env);
+ gen_helper_extr_rs_w(t0, t0, v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -1965,7 +1965,7 @@
case NM_EXTPDPV:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extpdp(t0, t0, v1_t, cpu_env);
+ gen_helper_extpdp(t0, t0, v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
case NM_MSUBU:
@@ -1991,7 +1991,7 @@
case NM_EXTRV_S_H:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd >> 3);
- gen_helper_extr_s_h(t0, t0, v1_t, cpu_env);
+ gen_helper_extr_s_h(t0, t0, v1_t, tcg_env);
gen_store_gpr(t0, ret);
break;
}
@@ -2014,17 +2014,17 @@
switch (opc) {
case NM_ABSQ_S_QB:
check_dsp_r2(ctx);
- gen_helper_absq_s_qb(v0_t, v0_t, cpu_env);
+ gen_helper_absq_s_qb(v0_t, v0_t, tcg_env);
gen_store_gpr(v0_t, ret);
break;
case NM_ABSQ_S_PH:
check_dsp(ctx);
- gen_helper_absq_s_ph(v0_t, v0_t, cpu_env);
+ gen_helper_absq_s_ph(v0_t, v0_t, tcg_env);
gen_store_gpr(v0_t, ret);
break;
case NM_ABSQ_S_W:
check_dsp(ctx);
- gen_helper_absq_s_w(v0_t, v0_t, cpu_env);
+ gen_helper_absq_s_w(v0_t, v0_t, tcg_env);
gen_store_gpr(v0_t, ret);
break;
case NM_PRECEQ_W_PHL:
@@ -2109,7 +2109,7 @@
TCGv tv0 = tcg_temp_new();
gen_load_gpr(tv0, rt);
- gen_helper_insv(v0_t, cpu_env, v0_t, tv0);
+ gen_helper_insv(v0_t, tcg_env, v0_t, tv0);
gen_store_gpr(v0_t, ret);
}
break;
@@ -2243,7 +2243,7 @@
TCGv t0 = tcg_temp_new();
save_cpu_state(ctx, 1);
- gen_helper_di(t0, cpu_env);
+ gen_helper_di(t0, tcg_env);
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -2255,7 +2255,7 @@
TCGv t0 = tcg_temp_new();
save_cpu_state(ctx, 1);
- gen_helper_ei(t0, cpu_env);
+ gen_helper_ei(t0, tcg_env);
gen_store_gpr(t0, rt);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -3036,27 +3036,27 @@
switch (opc) {
case NM_CMP_EQ_PH:
check_dsp(ctx);
- gen_helper_cmp_eq_ph(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_eq_ph(v1_t, v2_t, tcg_env);
break;
case NM_CMP_LT_PH:
check_dsp(ctx);
- gen_helper_cmp_lt_ph(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_lt_ph(v1_t, v2_t, tcg_env);
break;
case NM_CMP_LE_PH:
check_dsp(ctx);
- gen_helper_cmp_le_ph(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_le_ph(v1_t, v2_t, tcg_env);
break;
case NM_CMPU_EQ_QB:
check_dsp(ctx);
- gen_helper_cmpu_eq_qb(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_eq_qb(v1_t, v2_t, tcg_env);
break;
case NM_CMPU_LT_QB:
check_dsp(ctx);
- gen_helper_cmpu_lt_qb(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_lt_qb(v1_t, v2_t, tcg_env);
break;
case NM_CMPU_LE_QB:
check_dsp(ctx);
- gen_helper_cmpu_le_qb(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_le_qb(v1_t, v2_t, tcg_env);
break;
case NM_CMPGU_EQ_QB:
check_dsp(ctx);
@@ -3098,32 +3098,32 @@
break;
case NM_PICK_QB:
check_dsp(ctx);
- gen_helper_pick_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_pick_qb(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_PICK_PH:
check_dsp(ctx);
- gen_helper_pick_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_pick_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_ADDQ_S_W:
check_dsp(ctx);
- gen_helper_addq_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addq_s_w(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_SUBQ_S_W:
check_dsp(ctx);
- gen_helper_subq_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_subq_s_w(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_ADDSC:
check_dsp(ctx);
- gen_helper_addsc(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addsc(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_ADDWC:
check_dsp(ctx);
- gen_helper_addwc(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addwc(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_ADDQ_S_PH:
@@ -3131,12 +3131,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* ADDQ_PH */
- gen_helper_addq_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addq_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* ADDQ_S_PH */
- gen_helper_addq_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addq_s_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3176,12 +3176,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* ADDU_QB */
- gen_helper_addu_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addu_qb(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* ADDU_S_QB */
- gen_helper_addu_s_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addu_s_qb(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3191,12 +3191,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* ADDU_PH */
- gen_helper_addu_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addu_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* ADDU_S_PH */
- gen_helper_addu_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_addu_s_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3251,12 +3251,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* SUBQ_PH */
- gen_helper_subq_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_subq_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* SUBQ_S_PH */
- gen_helper_subq_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_subq_s_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3296,12 +3296,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* SUBU_QB */
- gen_helper_subu_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_subu_qb(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* SUBU_S_QB */
- gen_helper_subu_s_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_subu_s_qb(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3311,12 +3311,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* SUBU_PH */
- gen_helper_subu_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_subu_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* SUBU_S_PH */
- gen_helper_subu_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_subu_s_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3341,12 +3341,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* SHLLV_PH */
- gen_helper_shll_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_shll_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* SHLLV_S_PH */
- gen_helper_shll_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_shll_s_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3376,32 +3376,32 @@
break;
case NM_MULEU_S_PH_QBL:
check_dsp(ctx);
- gen_helper_muleu_s_ph_qbl(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_muleu_s_ph_qbl(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_MULEU_S_PH_QBR:
check_dsp(ctx);
- gen_helper_muleu_s_ph_qbr(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_muleu_s_ph_qbr(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_MULQ_RS_PH:
check_dsp(ctx);
- gen_helper_mulq_rs_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_mulq_rs_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_MULQ_S_PH:
check_dsp_r2(ctx);
- gen_helper_mulq_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_mulq_s_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_MULQ_RS_W:
check_dsp_r2(ctx);
- gen_helper_mulq_rs_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_mulq_rs_w(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_MULQ_S_W:
check_dsp_r2(ctx);
- gen_helper_mulq_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_mulq_s_w(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_APPEND:
@@ -3434,12 +3434,12 @@
break;
case NM_SHLLV_QB:
check_dsp(ctx);
- gen_helper_shll_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_shll_qb(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_SHLLV_S_W:
check_dsp(ctx);
- gen_helper_shll_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_shll_s_w(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_SHILO:
@@ -3451,17 +3451,17 @@
tcg_gen_movi_tl(tv0, rd >> 3);
tcg_gen_movi_tl(tv1, imm);
- gen_helper_shilo(tv0, tv1, cpu_env);
+ gen_helper_shilo(tv0, tv1, tcg_env);
}
break;
case NM_MULEQ_S_W_PHL:
check_dsp(ctx);
- gen_helper_muleq_s_w_phl(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_muleq_s_w_phl(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_MULEQ_S_W_PHR:
check_dsp(ctx);
- gen_helper_muleq_s_w_phr(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_muleq_s_w_phr(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_MUL_S_PH:
@@ -3469,12 +3469,12 @@
switch (extract32(ctx->opcode, 10, 1)) {
case 0:
/* MUL_PH */
- gen_helper_mul_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_mul_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case 1:
/* MUL_S_PH */
- gen_helper_mul_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_mul_s_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
}
@@ -3496,12 +3496,12 @@
break;
case NM_PRECRQ_RS_PH_W:
check_dsp(ctx);
- gen_helper_precrq_rs_ph_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_precrq_rs_ph_w(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_PRECRQU_S_QB_PH:
check_dsp(ctx);
- gen_helper_precrqu_s_qb_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_helper_precrqu_s_qb_ph(v1_t, v1_t, v2_t, tcg_env);
gen_store_gpr(v1_t, ret);
break;
case NM_SHRA_R_W:
@@ -3532,12 +3532,12 @@
switch (extract32(ctx->opcode, 10, 2)) {
case 0:
/* SHLL_PH */
- gen_helper_shll_ph(v1_t, t0, v1_t, cpu_env);
+ gen_helper_shll_ph(v1_t, t0, v1_t, tcg_env);
gen_store_gpr(v1_t, rt);
break;
case 2:
/* SHLL_S_PH */
- gen_helper_shll_s_ph(v1_t, t0, v1_t, cpu_env);
+ gen_helper_shll_s_ph(v1_t, t0, v1_t, tcg_env);
gen_store_gpr(v1_t, rt);
break;
default:
@@ -3548,7 +3548,7 @@
case NM_SHLL_S_W:
check_dsp(ctx);
tcg_gen_movi_tl(t0, rd);
- gen_helper_shll_s_w(v1_t, t0, v1_t, cpu_env);
+ gen_helper_shll_s_w(v1_t, t0, v1_t, tcg_env);
gen_store_gpr(v1_t, rt);
break;
case NM_REPL_PH:
@@ -4407,8 +4407,8 @@
case NM_BPOSGE32C:
check_dsp_r3(ctx);
{
- int32_t imm = extract32(ctx->opcode, 1, 13) |
- extract32(ctx->opcode, 0, 1) << 13;
+ imm = extract32(ctx->opcode, 1, 13)
+ | extract32(ctx->opcode, 0, 1) << 13;
gen_compute_branch_nm(ctx, OPC_BPOSGE32, 4, -1, -2,
imm << 1);
@@ -4503,7 +4503,7 @@
/* make sure instructions are on a halfword boundary */
if (ctx->base.pc_next & 0x1) {
TCGv tmp = tcg_constant_tl(ctx->base.pc_next);
- tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ tcg_gen_st_tl(tmp, tcg_env, offsetof(CPUMIPSState, CP0_BadVAddr));
generate_exception_end(ctx, EXCP_AdEL);
return 2;
}
@@ -4635,7 +4635,7 @@
break;
case NM_LI16:
{
- int imm = extract32(ctx->opcode, 0, 7);
+ imm = extract32(ctx->opcode, 0, 7);
imm = (imm == 0x7f ? -1 : imm);
if (rt != 0) {
tcg_gen_movi_tl(cpu_gpr[rt], imm);
diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c
index f3735df..b3e4e49 100644
--- a/target/mips/tcg/sysemu/mips-semi.c
+++ b/target/mips/tcg/sysemu/mips-semi.c
@@ -126,7 +126,7 @@
static void uhi_cb(CPUState *cs, uint64_t ret, int err)
{
- CPUMIPSState *env = cs->env_ptr;
+ CPUMIPSState *env = cpu_env(cs);
#define E(N) case E##N: err = UHI_E##N; break
@@ -167,7 +167,7 @@
QEMU_BUILD_BUG_ON(sizeof(UHIStat) < sizeof(struct gdb_stat));
if (!err) {
- CPUMIPSState *env = cs->env_ptr;
+ CPUMIPSState *env = cpu_env(cs);
target_ulong addr = env->active_tc.gpr[5];
UHIStat *dst = lock_user(VERIFY_WRITE, addr, sizeof(UHIStat), 1);
struct gdb_stat s;
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 9bb40f1..13e43fa 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -1268,12 +1268,12 @@
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_ptr addr = tcg_temp_new_ptr();
- tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
+ tcg_gen_ld_i32(t2, tcg_env, offsetof(CPUMIPSState, CP0_SRSCtl));
tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
tcg_gen_andi_i32(t2, t2, 0xf);
tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
tcg_gen_ext_i32_ptr(addr, t2);
- tcg_gen_add_ptr(addr, cpu_env, addr);
+ tcg_gen_add_ptr(addr, tcg_env, addr);
tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from);
}
@@ -1288,12 +1288,12 @@
TCGv_ptr addr = tcg_temp_new_ptr();
gen_load_gpr(t0, from);
- tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
+ tcg_gen_ld_i32(t2, tcg_env, offsetof(CPUMIPSState, CP0_SRSCtl));
tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
tcg_gen_andi_i32(t2, t2, 0xf);
tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
tcg_gen_ext_i32_ptr(addr, t2);
- tcg_gen_add_ptr(addr, cpu_env, addr);
+ tcg_gen_add_ptr(addr, tcg_env, addr);
tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to);
}
@@ -1344,14 +1344,14 @@
void generate_exception_err(DisasContext *ctx, int excp, int err)
{
save_cpu_state(ctx, 1);
- gen_helper_raise_exception_err(cpu_env, tcg_constant_i32(excp),
+ gen_helper_raise_exception_err(tcg_env, tcg_constant_i32(excp),
tcg_constant_i32(err));
ctx->base.is_jmp = DISAS_NORETURN;
}
void generate_exception(DisasContext *ctx, int excp)
{
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
}
void generate_exception_end(DisasContext *ctx, int excp)
@@ -1363,7 +1363,7 @@
{
#ifdef CONFIG_USER_ONLY
/* Pass the break code along to cpu_loop. */
- tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
offsetof(CPUMIPSState, error_code));
#endif
generate_exception_end(ctx, EXCP_BREAK);
@@ -1868,70 +1868,70 @@
gen_ldcmp_fpr ## bits(ctx, fp1, ft); \
switch (n) { \
case 0: \
- gen_helper_r6_cmp_ ## fmt ## _af(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _af(fp0, tcg_env, fp0, fp1); \
break; \
case 1: \
- gen_helper_r6_cmp_ ## fmt ## _un(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _un(fp0, tcg_env, fp0, fp1); \
break; \
case 2: \
- gen_helper_r6_cmp_ ## fmt ## _eq(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _eq(fp0, tcg_env, fp0, fp1); \
break; \
case 3: \
- gen_helper_r6_cmp_ ## fmt ## _ueq(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _ueq(fp0, tcg_env, fp0, fp1); \
break; \
case 4: \
- gen_helper_r6_cmp_ ## fmt ## _lt(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _lt(fp0, tcg_env, fp0, fp1); \
break; \
case 5: \
- gen_helper_r6_cmp_ ## fmt ## _ult(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _ult(fp0, tcg_env, fp0, fp1); \
break; \
case 6: \
- gen_helper_r6_cmp_ ## fmt ## _le(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _le(fp0, tcg_env, fp0, fp1); \
break; \
case 7: \
- gen_helper_r6_cmp_ ## fmt ## _ule(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _ule(fp0, tcg_env, fp0, fp1); \
break; \
case 8: \
- gen_helper_r6_cmp_ ## fmt ## _saf(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _saf(fp0, tcg_env, fp0, fp1); \
break; \
case 9: \
- gen_helper_r6_cmp_ ## fmt ## _sun(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sun(fp0, tcg_env, fp0, fp1); \
break; \
case 10: \
- gen_helper_r6_cmp_ ## fmt ## _seq(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _seq(fp0, tcg_env, fp0, fp1); \
break; \
case 11: \
- gen_helper_r6_cmp_ ## fmt ## _sueq(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sueq(fp0, tcg_env, fp0, fp1); \
break; \
case 12: \
- gen_helper_r6_cmp_ ## fmt ## _slt(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _slt(fp0, tcg_env, fp0, fp1); \
break; \
case 13: \
- gen_helper_r6_cmp_ ## fmt ## _sult(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sult(fp0, tcg_env, fp0, fp1); \
break; \
case 14: \
- gen_helper_r6_cmp_ ## fmt ## _sle(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sle(fp0, tcg_env, fp0, fp1); \
break; \
case 15: \
- gen_helper_r6_cmp_ ## fmt ## _sule(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sule(fp0, tcg_env, fp0, fp1); \
break; \
case 17: \
- gen_helper_r6_cmp_ ## fmt ## _or(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _or(fp0, tcg_env, fp0, fp1); \
break; \
case 18: \
- gen_helper_r6_cmp_ ## fmt ## _une(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _une(fp0, tcg_env, fp0, fp1); \
break; \
case 19: \
- gen_helper_r6_cmp_ ## fmt ## _ne(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _ne(fp0, tcg_env, fp0, fp1); \
break; \
case 25: \
- gen_helper_r6_cmp_ ## fmt ## _sor(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sor(fp0, tcg_env, fp0, fp1); \
break; \
case 26: \
- gen_helper_r6_cmp_ ## fmt ## _sune(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sune(fp0, tcg_env, fp0, fp1); \
break; \
case 27: \
- gen_helper_r6_cmp_ ## fmt ## _sne(fp0, cpu_env, fp0, fp1); \
+ gen_helper_r6_cmp_ ## fmt ## _sne(fp0, tcg_env, fp0, fp1); \
break; \
default: \
abort(); \
@@ -1954,15 +1954,15 @@
TCGv t0 = tcg_temp_new(); \
tcg_gen_mov_tl(t0, arg1); \
tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, memop); \
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
- tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPUMIPSState, lladdr)); \
+ tcg_gen_st_tl(ret, tcg_env, offsetof(CPUMIPSState, llval)); \
}
#else
#define OP_LD_ATOMIC(insn, fname) \
static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
DisasContext *ctx) \
{ \
- gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx)); \
+ gen_helper_##insn(ret, tcg_env, arg1, tcg_constant_i32(mem_idx)); \
}
#endif
OP_LD_ATOMIC(ll, MO_TESL);
@@ -4499,7 +4499,7 @@
/* Always trap */
#ifdef CONFIG_USER_ONLY
/* Pass the break code along to cpu_loop. */
- tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
offsetof(CPUMIPSState, error_code));
#endif
generate_exception_end(ctx, EXCP_TRAP);
@@ -4544,7 +4544,7 @@
}
#ifdef CONFIG_USER_ONLY
/* Pass the break code along to cpu_loop. */
- tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
offsetof(CPUMIPSState, error_code));
#endif
/* Like save_cpu_state, only don't update saved values. */
@@ -5053,13 +5053,13 @@
TCGv_i64 t1 = tcg_temp_new_i64();
tcg_gen_ext_tl_i64(t0, arg);
- tcg_gen_ld_i64(t1, cpu_env, off);
+ tcg_gen_ld_i64(t1, tcg_env, off);
#if defined(TARGET_MIPS64)
tcg_gen_deposit_i64(t1, t1, t0, 30, 32);
#else
tcg_gen_concat32_i64(t1, t1, t0);
#endif
- tcg_gen_st_i64(t1, cpu_env, off);
+ tcg_gen_st_i64(t1, tcg_env, off);
}
static inline void gen_mthc0_store64(TCGv arg, target_ulong off)
@@ -5068,16 +5068,16 @@
TCGv_i64 t1 = tcg_temp_new_i64();
tcg_gen_ext_tl_i64(t0, arg);
- tcg_gen_ld_i64(t1, cpu_env, off);
+ tcg_gen_ld_i64(t1, tcg_env, off);
tcg_gen_concat32_i64(t1, t1, t0);
- tcg_gen_st_i64(t1, cpu_env, off);
+ tcg_gen_st_i64(t1, tcg_env, off);
}
static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off)
{
TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_ld_i64(t0, cpu_env, off);
+ tcg_gen_ld_i64(t0, tcg_env, off);
#if defined(TARGET_MIPS64)
tcg_gen_shri_i64(t0, t0, 30);
#else
@@ -5090,7 +5090,7 @@
{
TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_ld_i64(t0, cpu_env, off);
+ tcg_gen_ld_i64(t0, tcg_env, off);
tcg_gen_shri_i64(t0, t0, 32 + shift);
gen_move_low32(arg, t0);
}
@@ -5099,13 +5099,13 @@
{
TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_ld_i32(t0, cpu_env, off);
+ tcg_gen_ld_i32(t0, tcg_env, off);
tcg_gen_ext_i32_tl(arg, t0);
}
static inline void gen_mfc0_load64(TCGv arg, target_ulong off)
{
- tcg_gen_ld_tl(arg, cpu_env, off);
+ tcg_gen_ld_tl(arg, tcg_env, off);
tcg_gen_ext32s_tl(arg, arg);
}
@@ -5114,7 +5114,7 @@
TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t0, arg);
- tcg_gen_st_i32(t0, cpu_env, off);
+ tcg_gen_st_i32(t0, tcg_env, off);
}
#define CP0_CHECK(c) \
@@ -5155,7 +5155,7 @@
switch (sel) {
case CP0_REG09__SAAR:
CP0_CHECK(ctx->saar);
- gen_helper_mfhc0_saar(arg, cpu_env);
+ gen_helper_mfhc0_saar(arg, tcg_env);
register_name = "SAAR";
break;
default:
@@ -5171,7 +5171,7 @@
break;
case CP0_REG17__MAAR:
CP0_CHECK(ctx->mrp);
- gen_helper_mfhc0_maar(arg, cpu_env);
+ gen_helper_mfhc0_maar(arg, tcg_env);
register_name = "MAAR";
break;
default:
@@ -5256,7 +5256,7 @@
switch (sel) {
case CP0_REG09__SAAR:
CP0_CHECK(ctx->saar);
- gen_helper_mthc0_saar(cpu_env, arg);
+ gen_helper_mthc0_saar(tcg_env, arg);
register_name = "SAAR";
break;
default:
@@ -5276,7 +5276,7 @@
break;
case CP0_REG17__MAAR:
CP0_CHECK(ctx->mrp);
- gen_helper_mthc0_maar(cpu_env, arg);
+ gen_helper_mthc0_maar(tcg_env, arg);
register_name = "MAAR";
break;
default:
@@ -5353,17 +5353,17 @@
break;
case CP0_REG00__MVPCONTROL:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_mvpcontrol(arg, cpu_env);
+ gen_helper_mfc0_mvpcontrol(arg, tcg_env);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_mvpconf0(arg, cpu_env);
+ gen_helper_mfc0_mvpconf0(arg, tcg_env);
register_name = "MVPConf0";
break;
case CP0_REG00__MVPCONF1:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_mvpconf1(arg, cpu_env);
+ gen_helper_mfc0_mvpconf1(arg, tcg_env);
register_name = "MVPConf1";
break;
case CP0_REG00__VPCONTROL:
@@ -5379,7 +5379,7 @@
switch (sel) {
case CP0_REG01__RANDOM:
CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
- gen_helper_mfc0_random(arg, cpu_env);
+ gen_helper_mfc0_random(arg, tcg_env);
register_name = "Random";
break;
case CP0_REG01__VPECONTROL:
@@ -5426,7 +5426,7 @@
case CP0_REG02__ENTRYLO0:
{
TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_ld_i64(tmp, cpu_env,
+ tcg_gen_ld_i64(tmp, tcg_env,
offsetof(CPUMIPSState, CP0_EntryLo0));
#if defined(TARGET_MIPS64)
if (ctx->rxi) {
@@ -5441,37 +5441,37 @@
break;
case CP0_REG02__TCSTATUS:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tcstatus(arg, cpu_env);
+ gen_helper_mfc0_tcstatus(arg, tcg_env);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tcbind(arg, cpu_env);
+ gen_helper_mfc0_tcbind(arg, tcg_env);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tcrestart(arg, cpu_env);
+ gen_helper_mfc0_tcrestart(arg, tcg_env);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tchalt(arg, cpu_env);
+ gen_helper_mfc0_tchalt(arg, tcg_env);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tccontext(arg, cpu_env);
+ gen_helper_mfc0_tccontext(arg, tcg_env);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tcschedule(arg, cpu_env);
+ gen_helper_mfc0_tcschedule(arg, tcg_env);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tcschefback(arg, cpu_env);
+ gen_helper_mfc0_tcschefback(arg, tcg_env);
register_name = "TCScheFBack";
break;
default:
@@ -5483,7 +5483,7 @@
case CP0_REG03__ENTRYLO1:
{
TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_ld_i64(tmp, cpu_env,
+ tcg_gen_ld_i64(tmp, tcg_env,
offsetof(CPUMIPSState, CP0_EntryLo1));
#if defined(TARGET_MIPS64)
if (ctx->rxi) {
@@ -5508,7 +5508,7 @@
case CP0_REGISTER_04:
switch (sel) {
case CP0_REG04__CONTEXT:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_Context));
tcg_gen_ext32s_tl(arg, arg);
register_name = "Context";
break;
@@ -5519,14 +5519,14 @@
goto cp0_unimplemented;
case CP0_REG04__USERLOCAL:
CP0_CHECK(ctx->ulri);
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
tcg_gen_ext32s_tl(arg, arg);
register_name = "UserLocal";
break;
case CP0_REG04__MMID:
CP0_CHECK(ctx->mi);
- gen_helper_mtc0_memorymapid(cpu_env, arg);
+ gen_helper_mtc0_memorymapid(tcg_env, arg);
register_name = "MMID";
break;
default:
@@ -5546,19 +5546,19 @@
break;
case CP0_REG05__SEGCTL0:
CP0_CHECK(ctx->sc);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_SegCtl0));
tcg_gen_ext32s_tl(arg, arg);
register_name = "SegCtl0";
break;
case CP0_REG05__SEGCTL1:
CP0_CHECK(ctx->sc);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_SegCtl1));
tcg_gen_ext32s_tl(arg, arg);
register_name = "SegCtl1";
break;
case CP0_REG05__SEGCTL2:
CP0_CHECK(ctx->sc);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_SegCtl2));
tcg_gen_ext32s_tl(arg, arg);
register_name = "SegCtl2";
break;
@@ -5635,7 +5635,7 @@
case CP0_REGISTER_08:
switch (sel) {
case CP0_REG08__BADVADDR:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_BadVAddr));
tcg_gen_ext32s_tl(arg, arg);
register_name = "BadVAddr";
break;
@@ -5665,7 +5665,7 @@
/* Mark as an IO operation because we read the time. */
translator_io_start(&ctx->base);
- gen_helper_mfc0_count(arg, cpu_env);
+ gen_helper_mfc0_count(arg, tcg_env);
/*
* Break the TB to be able to take timer interrupts immediately
* after reading count. DISAS_STOP isn't sufficient, we need to
@@ -5682,7 +5682,7 @@
break;
case CP0_REG09__SAAR:
CP0_CHECK(ctx->saar);
- gen_helper_mfc0_saar(arg, cpu_env);
+ gen_helper_mfc0_saar(arg, tcg_env);
register_name = "SAAR";
break;
default:
@@ -5692,7 +5692,7 @@
case CP0_REGISTER_10:
switch (sel) {
case CP0_REG10__ENTRYHI:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EntryHi));
tcg_gen_ext32s_tl(arg, arg);
register_name = "EntryHi";
break;
@@ -5749,7 +5749,7 @@
case CP0_REGISTER_14:
switch (sel) {
case CP0_REG14__EPC:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EPC));
tcg_gen_ext32s_tl(arg, arg);
register_name = "EPC";
break;
@@ -5765,14 +5765,14 @@
break;
case CP0_REG15__EBASE:
check_insn(ctx, ISA_MIPS_R2);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EBase));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EBase));
tcg_gen_ext32s_tl(arg, arg);
register_name = "EBase";
break;
case CP0_REG15__CMGCRBASE:
check_insn(ctx, ISA_MIPS_R2);
CP0_CHECK(ctx->cmgcr);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
tcg_gen_ext32s_tl(arg, arg);
register_name = "CMGCRBase";
break;
@@ -5822,12 +5822,12 @@
case CP0_REGISTER_17:
switch (sel) {
case CP0_REG17__LLADDR:
- gen_helper_mfc0_lladdr(arg, cpu_env);
+ gen_helper_mfc0_lladdr(arg, tcg_env);
register_name = "LLAddr";
break;
case CP0_REG17__MAAR:
CP0_CHECK(ctx->mrp);
- gen_helper_mfc0_maar(arg, cpu_env);
+ gen_helper_mfc0_maar(arg, tcg_env);
register_name = "MAAR";
break;
case CP0_REG17__MAARI:
@@ -5880,7 +5880,7 @@
case CP0_REG20__XCONTEXT:
#if defined(TARGET_MIPS64)
check_insn(ctx, ISA_MIPS3);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_XContext));
tcg_gen_ext32s_tl(arg, arg);
register_name = "XContext";
break;
@@ -5908,7 +5908,7 @@
case CP0_REGISTER_23:
switch (sel) {
case CP0_REG23__DEBUG:
- gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
+ gen_helper_mfc0_debug(arg, tcg_env); /* EJTAG support */
register_name = "Debug";
break;
case CP0_REG23__TRACECONTROL:
@@ -5944,7 +5944,7 @@
switch (sel) {
case CP0_REG24__DEPC:
/* EJTAG support */
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_DEPC));
tcg_gen_ext32s_tl(arg, arg);
register_name = "DEPC";
break;
@@ -6018,7 +6018,7 @@
case CP0_REG28__TAGLO3:
{
TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo));
+ tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUMIPSState, CP0_TagLo));
gen_move_low32(arg, tmp);
}
register_name = "TagLo";
@@ -6057,7 +6057,7 @@
case CP0_REGISTER_30:
switch (sel) {
case CP0_REG30__ERROREPC:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
tcg_gen_ext32s_tl(arg, arg);
register_name = "ErrorEPC";
break;
@@ -6079,7 +6079,7 @@
case CP0_REG31__KSCRATCH5:
case CP0_REG31__KSCRATCH6:
CP0_CHECK(ctx->kscrexist & (1 << sel));
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
tcg_gen_ext32s_tl(arg, arg);
register_name = "KScratch";
@@ -6115,12 +6115,12 @@
case CP0_REGISTER_00:
switch (sel) {
case CP0_REG00__INDEX:
- gen_helper_mtc0_index(cpu_env, arg);
+ gen_helper_mtc0_index(tcg_env, arg);
register_name = "Index";
break;
case CP0_REG00__MVPCONTROL:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_mvpcontrol(cpu_env, arg);
+ gen_helper_mtc0_mvpcontrol(tcg_env, arg);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
@@ -6150,39 +6150,39 @@
break;
case CP0_REG01__VPECONTROL:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpecontrol(cpu_env, arg);
+ gen_helper_mtc0_vpecontrol(tcg_env, arg);
register_name = "VPEControl";
break;
case CP0_REG01__VPECONF0:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpeconf0(cpu_env, arg);
+ gen_helper_mtc0_vpeconf0(tcg_env, arg);
register_name = "VPEConf0";
break;
case CP0_REG01__VPECONF1:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpeconf1(cpu_env, arg);
+ gen_helper_mtc0_vpeconf1(tcg_env, arg);
register_name = "VPEConf1";
break;
case CP0_REG01__YQMASK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_yqmask(cpu_env, arg);
+ gen_helper_mtc0_yqmask(tcg_env, arg);
register_name = "YQMask";
break;
case CP0_REG01__VPESCHEDULE:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPESchedule));
register_name = "VPESchedule";
break;
case CP0_REG01__VPESCHEFBACK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPEScheFBack));
register_name = "VPEScheFBack";
break;
case CP0_REG01__VPEOPT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpeopt(cpu_env, arg);
+ gen_helper_mtc0_vpeopt(tcg_env, arg);
register_name = "VPEOpt";
break;
default:
@@ -6192,42 +6192,42 @@
case CP0_REGISTER_02:
switch (sel) {
case CP0_REG02__ENTRYLO0:
- gen_helper_mtc0_entrylo0(cpu_env, arg);
+ gen_helper_mtc0_entrylo0(tcg_env, arg);
register_name = "EntryLo0";
break;
case CP0_REG02__TCSTATUS:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcstatus(cpu_env, arg);
+ gen_helper_mtc0_tcstatus(tcg_env, arg);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcbind(cpu_env, arg);
+ gen_helper_mtc0_tcbind(tcg_env, arg);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcrestart(cpu_env, arg);
+ gen_helper_mtc0_tcrestart(tcg_env, arg);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tchalt(cpu_env, arg);
+ gen_helper_mtc0_tchalt(tcg_env, arg);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tccontext(cpu_env, arg);
+ gen_helper_mtc0_tccontext(tcg_env, arg);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcschedule(cpu_env, arg);
+ gen_helper_mtc0_tcschedule(tcg_env, arg);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcschefback(cpu_env, arg);
+ gen_helper_mtc0_tcschefback(tcg_env, arg);
register_name = "TCScheFBack";
break;
default:
@@ -6237,7 +6237,7 @@
case CP0_REGISTER_03:
switch (sel) {
case CP0_REG03__ENTRYLO1:
- gen_helper_mtc0_entrylo1(cpu_env, arg);
+ gen_helper_mtc0_entrylo1(tcg_env, arg);
register_name = "EntryLo1";
break;
case CP0_REG03__GLOBALNUM:
@@ -6252,7 +6252,7 @@
case CP0_REGISTER_04:
switch (sel) {
case CP0_REG04__CONTEXT:
- gen_helper_mtc0_context(cpu_env, arg);
+ gen_helper_mtc0_context(tcg_env, arg);
register_name = "Context";
break;
case CP0_REG04__CONTEXTCONFIG:
@@ -6262,7 +6262,7 @@
goto cp0_unimplemented;
case CP0_REG04__USERLOCAL:
CP0_CHECK(ctx->ulri);
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
register_name = "UserLocal";
break;
@@ -6278,28 +6278,28 @@
case CP0_REGISTER_05:
switch (sel) {
case CP0_REG05__PAGEMASK:
- gen_helper_mtc0_pagemask(cpu_env, arg);
+ gen_helper_mtc0_pagemask(tcg_env, arg);
register_name = "PageMask";
break;
case CP0_REG05__PAGEGRAIN:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_pagegrain(cpu_env, arg);
+ gen_helper_mtc0_pagegrain(tcg_env, arg);
register_name = "PageGrain";
ctx->base.is_jmp = DISAS_STOP;
break;
case CP0_REG05__SEGCTL0:
CP0_CHECK(ctx->sc);
- gen_helper_mtc0_segctl0(cpu_env, arg);
+ gen_helper_mtc0_segctl0(tcg_env, arg);
register_name = "SegCtl0";
break;
case CP0_REG05__SEGCTL1:
CP0_CHECK(ctx->sc);
- gen_helper_mtc0_segctl1(cpu_env, arg);
+ gen_helper_mtc0_segctl1(tcg_env, arg);
register_name = "SegCtl1";
break;
case CP0_REG05__SEGCTL2:
CP0_CHECK(ctx->sc);
- gen_helper_mtc0_segctl2(cpu_env, arg);
+ gen_helper_mtc0_segctl2(tcg_env, arg);
register_name = "SegCtl2";
break;
case CP0_REG05__PWBASE:
@@ -6309,12 +6309,12 @@
break;
case CP0_REG05__PWFIELD:
check_pw(ctx);
- gen_helper_mtc0_pwfield(cpu_env, arg);
+ gen_helper_mtc0_pwfield(tcg_env, arg);
register_name = "PWField";
break;
case CP0_REG05__PWSIZE:
check_pw(ctx);
- gen_helper_mtc0_pwsize(cpu_env, arg);
+ gen_helper_mtc0_pwsize(tcg_env, arg);
register_name = "PWSize";
break;
default:
@@ -6324,37 +6324,37 @@
case CP0_REGISTER_06:
switch (sel) {
case CP0_REG06__WIRED:
- gen_helper_mtc0_wired(cpu_env, arg);
+ gen_helper_mtc0_wired(tcg_env, arg);
register_name = "Wired";
break;
case CP0_REG06__SRSCONF0:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf0(cpu_env, arg);
+ gen_helper_mtc0_srsconf0(tcg_env, arg);
register_name = "SRSConf0";
break;
case CP0_REG06__SRSCONF1:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf1(cpu_env, arg);
+ gen_helper_mtc0_srsconf1(tcg_env, arg);
register_name = "SRSConf1";
break;
case CP0_REG06__SRSCONF2:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf2(cpu_env, arg);
+ gen_helper_mtc0_srsconf2(tcg_env, arg);
register_name = "SRSConf2";
break;
case CP0_REG06__SRSCONF3:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf3(cpu_env, arg);
+ gen_helper_mtc0_srsconf3(tcg_env, arg);
register_name = "SRSConf3";
break;
case CP0_REG06__SRSCONF4:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf4(cpu_env, arg);
+ gen_helper_mtc0_srsconf4(tcg_env, arg);
register_name = "SRSConf4";
break;
case CP0_REG06__PWCTL:
check_pw(ctx);
- gen_helper_mtc0_pwctl(cpu_env, arg);
+ gen_helper_mtc0_pwctl(tcg_env, arg);
register_name = "PWCtl";
break;
default:
@@ -6365,7 +6365,7 @@
switch (sel) {
case CP0_REG07__HWRENA:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_hwrena(cpu_env, arg);
+ gen_helper_mtc0_hwrena(tcg_env, arg);
ctx->base.is_jmp = DISAS_STOP;
register_name = "HWREna";
break;
@@ -6398,17 +6398,17 @@
case CP0_REGISTER_09:
switch (sel) {
case CP0_REG09__COUNT:
- gen_helper_mtc0_count(cpu_env, arg);
+ gen_helper_mtc0_count(tcg_env, arg);
register_name = "Count";
break;
case CP0_REG09__SAARI:
CP0_CHECK(ctx->saar);
- gen_helper_mtc0_saari(cpu_env, arg);
+ gen_helper_mtc0_saari(tcg_env, arg);
register_name = "SAARI";
break;
case CP0_REG09__SAAR:
CP0_CHECK(ctx->saar);
- gen_helper_mtc0_saar(cpu_env, arg);
+ gen_helper_mtc0_saar(tcg_env, arg);
register_name = "SAAR";
break;
default:
@@ -6418,7 +6418,7 @@
case CP0_REGISTER_10:
switch (sel) {
case CP0_REG10__ENTRYHI:
- gen_helper_mtc0_entryhi(cpu_env, arg);
+ gen_helper_mtc0_entryhi(tcg_env, arg);
register_name = "EntryHi";
break;
default:
@@ -6428,7 +6428,7 @@
case CP0_REGISTER_11:
switch (sel) {
case CP0_REG11__COMPARE:
- gen_helper_mtc0_compare(cpu_env, arg);
+ gen_helper_mtc0_compare(tcg_env, arg);
register_name = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -6440,7 +6440,7 @@
switch (sel) {
case CP0_REG12__STATUS:
save_cpu_state(ctx, 1);
- gen_helper_mtc0_status(cpu_env, arg);
+ gen_helper_mtc0_status(tcg_env, arg);
/* DISAS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->base.pc_next + 4);
ctx->base.is_jmp = DISAS_EXIT;
@@ -6448,14 +6448,14 @@
break;
case CP0_REG12__INTCTL:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_intctl(cpu_env, arg);
+ gen_helper_mtc0_intctl(tcg_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "IntCtl";
break;
case CP0_REG12__SRSCTL:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsctl(cpu_env, arg);
+ gen_helper_mtc0_srsctl(tcg_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "SRSCtl";
@@ -6475,7 +6475,7 @@
switch (sel) {
case CP0_REG13__CAUSE:
save_cpu_state(ctx, 1);
- gen_helper_mtc0_cause(cpu_env, arg);
+ gen_helper_mtc0_cause(tcg_env, arg);
/*
* Stop translation as we may have triggered an interrupt.
* DISAS_STOP isn't sufficient, we need to ensure we break out of
@@ -6492,7 +6492,7 @@
case CP0_REGISTER_14:
switch (sel) {
case CP0_REG14__EPC:
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EPC));
register_name = "EPC";
break;
default:
@@ -6507,7 +6507,7 @@
break;
case CP0_REG15__EBASE:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_ebase(cpu_env, arg);
+ gen_helper_mtc0_ebase(tcg_env, arg);
register_name = "EBase";
break;
default:
@@ -6517,7 +6517,7 @@
case CP0_REGISTER_16:
switch (sel) {
case CP0_REG16__CONFIG:
- gen_helper_mtc0_config0(cpu_env, arg);
+ gen_helper_mtc0_config0(tcg_env, arg);
register_name = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -6527,24 +6527,24 @@
register_name = "Config1";
break;
case CP0_REG16__CONFIG2:
- gen_helper_mtc0_config2(cpu_env, arg);
+ gen_helper_mtc0_config2(tcg_env, arg);
register_name = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
break;
case CP0_REG16__CONFIG3:
- gen_helper_mtc0_config3(cpu_env, arg);
+ gen_helper_mtc0_config3(tcg_env, arg);
register_name = "Config3";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
break;
case CP0_REG16__CONFIG4:
- gen_helper_mtc0_config4(cpu_env, arg);
+ gen_helper_mtc0_config4(tcg_env, arg);
register_name = "Config4";
ctx->base.is_jmp = DISAS_STOP;
break;
case CP0_REG16__CONFIG5:
- gen_helper_mtc0_config5(cpu_env, arg);
+ gen_helper_mtc0_config5(tcg_env, arg);
register_name = "Config5";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -6566,17 +6566,17 @@
case CP0_REGISTER_17:
switch (sel) {
case CP0_REG17__LLADDR:
- gen_helper_mtc0_lladdr(cpu_env, arg);
+ gen_helper_mtc0_lladdr(tcg_env, arg);
register_name = "LLAddr";
break;
case CP0_REG17__MAAR:
CP0_CHECK(ctx->mrp);
- gen_helper_mtc0_maar(cpu_env, arg);
+ gen_helper_mtc0_maar(tcg_env, arg);
register_name = "MAAR";
break;
case CP0_REG17__MAARI:
CP0_CHECK(ctx->mrp);
- gen_helper_mtc0_maari(cpu_env, arg);
+ gen_helper_mtc0_maari(tcg_env, arg);
register_name = "MAARI";
break;
default:
@@ -6624,7 +6624,7 @@
case CP0_REG20__XCONTEXT:
#if defined(TARGET_MIPS64)
check_insn(ctx, ISA_MIPS3);
- gen_helper_mtc0_xcontext(cpu_env, arg);
+ gen_helper_mtc0_xcontext(tcg_env, arg);
register_name = "XContext";
break;
#endif
@@ -6637,7 +6637,7 @@
CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
switch (sel) {
case 0:
- gen_helper_mtc0_framemask(cpu_env, arg);
+ gen_helper_mtc0_framemask(tcg_env, arg);
register_name = "Framemask";
break;
default:
@@ -6651,7 +6651,7 @@
case CP0_REGISTER_23:
switch (sel) {
case CP0_REG23__DEBUG:
- gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
+ gen_helper_mtc0_debug(tcg_env, arg); /* EJTAG support */
/* DISAS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->base.pc_next + 4);
ctx->base.is_jmp = DISAS_EXIT;
@@ -6659,14 +6659,14 @@
break;
case CP0_REG23__TRACECONTROL:
/* PDtrace support */
- /* gen_helper_mtc0_tracecontrol(cpu_env, arg); */
+ /* gen_helper_mtc0_tracecontrol(tcg_env, arg); */
register_name = "TraceControl";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
goto cp0_unimplemented;
case CP0_REG23__TRACECONTROL2:
/* PDtrace support */
- /* gen_helper_mtc0_tracecontrol2(cpu_env, arg); */
+ /* gen_helper_mtc0_tracecontrol2(tcg_env, arg); */
register_name = "TraceControl2";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -6675,21 +6675,21 @@
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
/* PDtrace support */
- /* gen_helper_mtc0_usertracedata1(cpu_env, arg);*/
+ /* gen_helper_mtc0_usertracedata1(tcg_env, arg);*/
register_name = "UserTraceData";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
goto cp0_unimplemented;
case CP0_REG23__TRACEIBPC:
/* PDtrace support */
- /* gen_helper_mtc0_traceibpc(cpu_env, arg); */
+ /* gen_helper_mtc0_traceibpc(tcg_env, arg); */
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "TraceIBPC";
goto cp0_unimplemented;
case CP0_REG23__TRACEDBPC:
/* PDtrace support */
- /* gen_helper_mtc0_tracedbpc(cpu_env, arg); */
+ /* gen_helper_mtc0_tracedbpc(tcg_env, arg); */
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "TraceDBPC";
@@ -6702,7 +6702,7 @@
switch (sel) {
case CP0_REG24__DEPC:
/* EJTAG support */
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_DEPC));
register_name = "DEPC";
break;
default:
@@ -6712,7 +6712,7 @@
case CP0_REGISTER_25:
switch (sel) {
case CP0_REG25__PERFCTL0:
- gen_helper_mtc0_performance0(cpu_env, arg);
+ gen_helper_mtc0_performance0(tcg_env, arg);
register_name = "Performance0";
break;
case CP0_REG25__PERFCNT0:
@@ -6750,7 +6750,7 @@
case CP0_REGISTER_26:
switch (sel) {
case CP0_REG26__ERRCTL:
- gen_helper_mtc0_errctl(cpu_env, arg);
+ gen_helper_mtc0_errctl(tcg_env, arg);
ctx->base.is_jmp = DISAS_STOP;
register_name = "ErrCtl";
break;
@@ -6774,14 +6774,14 @@
case CP0_REG28__TAGLO1:
case CP0_REG28__TAGLO2:
case CP0_REG28__TAGLO3:
- gen_helper_mtc0_taglo(cpu_env, arg);
+ gen_helper_mtc0_taglo(tcg_env, arg);
register_name = "TagLo";
break;
case CP0_REG28__DATALO:
case CP0_REG28__DATALO1:
case CP0_REG28__DATALO2:
case CP0_REG28__DATALO3:
- gen_helper_mtc0_datalo(cpu_env, arg);
+ gen_helper_mtc0_datalo(tcg_env, arg);
register_name = "DataLo";
break;
default:
@@ -6794,14 +6794,14 @@
case CP0_REG29__TAGHI1:
case CP0_REG29__TAGHI2:
case CP0_REG29__TAGHI3:
- gen_helper_mtc0_taghi(cpu_env, arg);
+ gen_helper_mtc0_taghi(tcg_env, arg);
register_name = "TagHi";
break;
case CP0_REG29__DATAHI:
case CP0_REG29__DATAHI1:
case CP0_REG29__DATAHI2:
case CP0_REG29__DATAHI3:
- gen_helper_mtc0_datahi(cpu_env, arg);
+ gen_helper_mtc0_datahi(tcg_env, arg);
register_name = "DataHi";
break;
default:
@@ -6812,7 +6812,7 @@
case CP0_REGISTER_30:
switch (sel) {
case CP0_REG30__ERROREPC:
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
register_name = "ErrorEPC";
break;
default:
@@ -6833,7 +6833,7 @@
case CP0_REG31__KSCRATCH5:
case CP0_REG31__KSCRATCH6:
CP0_CHECK(ctx->kscrexist & (1 << sel));
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
register_name = "KScratch";
break;
@@ -6880,17 +6880,17 @@
break;
case CP0_REG00__MVPCONTROL:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_mvpcontrol(arg, cpu_env);
+ gen_helper_mfc0_mvpcontrol(arg, tcg_env);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_mvpconf0(arg, cpu_env);
+ gen_helper_mfc0_mvpconf0(arg, tcg_env);
register_name = "MVPConf0";
break;
case CP0_REG00__MVPCONF1:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_mvpconf1(arg, cpu_env);
+ gen_helper_mfc0_mvpconf1(arg, tcg_env);
register_name = "MVPConf1";
break;
case CP0_REG00__VPCONTROL:
@@ -6906,7 +6906,7 @@
switch (sel) {
case CP0_REG01__RANDOM:
CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
- gen_helper_mfc0_random(arg, cpu_env);
+ gen_helper_mfc0_random(arg, tcg_env);
register_name = "Random";
break;
case CP0_REG01__VPECONTROL:
@@ -6926,19 +6926,19 @@
break;
case CP0_REG01__YQMASK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_YQMask));
register_name = "YQMask";
break;
case CP0_REG01__VPESCHEDULE:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPESchedule));
register_name = "VPESchedule";
break;
case CP0_REG01__VPESCHEFBACK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPEScheFBack));
register_name = "VPEScheFBack";
break;
@@ -6954,43 +6954,43 @@
case CP0_REGISTER_02:
switch (sel) {
case CP0_REG02__ENTRYLO0:
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_EntryLo0));
register_name = "EntryLo0";
break;
case CP0_REG02__TCSTATUS:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tcstatus(arg, cpu_env);
+ gen_helper_mfc0_tcstatus(arg, tcg_env);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mfc0_tcbind(arg, cpu_env);
+ gen_helper_mfc0_tcbind(arg, tcg_env);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_dmfc0_tcrestart(arg, cpu_env);
+ gen_helper_dmfc0_tcrestart(arg, tcg_env);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_dmfc0_tchalt(arg, cpu_env);
+ gen_helper_dmfc0_tchalt(arg, tcg_env);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_dmfc0_tccontext(arg, cpu_env);
+ gen_helper_dmfc0_tccontext(arg, tcg_env);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_dmfc0_tcschedule(arg, cpu_env);
+ gen_helper_dmfc0_tcschedule(arg, tcg_env);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_dmfc0_tcschefback(arg, cpu_env);
+ gen_helper_dmfc0_tcschefback(arg, tcg_env);
register_name = "TCScheFBack";
break;
default:
@@ -7000,7 +7000,7 @@
case CP0_REGISTER_03:
switch (sel) {
case CP0_REG03__ENTRYLO1:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EntryLo1));
register_name = "EntryLo1";
break;
case CP0_REG03__GLOBALNUM:
@@ -7015,7 +7015,7 @@
case CP0_REGISTER_04:
switch (sel) {
case CP0_REG04__CONTEXT:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_Context));
register_name = "Context";
break;
case CP0_REG04__CONTEXTCONFIG:
@@ -7025,13 +7025,13 @@
goto cp0_unimplemented;
case CP0_REG04__USERLOCAL:
CP0_CHECK(ctx->ulri);
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
register_name = "UserLocal";
break;
case CP0_REG04__MMID:
CP0_CHECK(ctx->mi);
- gen_helper_mtc0_memorymapid(cpu_env, arg);
+ gen_helper_mtc0_memorymapid(tcg_env, arg);
register_name = "MMID";
break;
default:
@@ -7051,32 +7051,32 @@
break;
case CP0_REG05__SEGCTL0:
CP0_CHECK(ctx->sc);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_SegCtl0));
register_name = "SegCtl0";
break;
case CP0_REG05__SEGCTL1:
CP0_CHECK(ctx->sc);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_SegCtl1));
register_name = "SegCtl1";
break;
case CP0_REG05__SEGCTL2:
CP0_CHECK(ctx->sc);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_SegCtl2));
register_name = "SegCtl2";
break;
case CP0_REG05__PWBASE:
check_pw(ctx);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWBase));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_PWBase));
register_name = "PWBase";
break;
case CP0_REG05__PWFIELD:
check_pw(ctx);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWField));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_PWField));
register_name = "PWField";
break;
case CP0_REG05__PWSIZE:
check_pw(ctx);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWSize));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_PWSize));
register_name = "PWSize";
break;
default:
@@ -7137,7 +7137,7 @@
case CP0_REGISTER_08:
switch (sel) {
case CP0_REG08__BADVADDR:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_BadVAddr));
register_name = "BadVAddr";
break;
case CP0_REG08__BADINSTR:
@@ -7165,7 +7165,7 @@
case CP0_REG09__COUNT:
/* Mark as an IO operation because we read the time. */
translator_io_start(&ctx->base);
- gen_helper_mfc0_count(arg, cpu_env);
+ gen_helper_mfc0_count(arg, tcg_env);
/*
* Break the TB to be able to take timer interrupts immediately
* after reading count. DISAS_STOP isn't sufficient, we need to
@@ -7182,7 +7182,7 @@
break;
case CP0_REG09__SAAR:
CP0_CHECK(ctx->saar);
- gen_helper_dmfc0_saar(arg, cpu_env);
+ gen_helper_dmfc0_saar(arg, tcg_env);
register_name = "SAAR";
break;
default:
@@ -7192,7 +7192,7 @@
case CP0_REGISTER_10:
switch (sel) {
case CP0_REG10__ENTRYHI:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EntryHi));
register_name = "EntryHi";
break;
default:
@@ -7248,7 +7248,7 @@
case CP0_REGISTER_14:
switch (sel) {
case CP0_REG14__EPC:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EPC));
register_name = "EPC";
break;
default:
@@ -7263,13 +7263,13 @@
break;
case CP0_REG15__EBASE:
check_insn(ctx, ISA_MIPS_R2);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EBase));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EBase));
register_name = "EBase";
break;
case CP0_REG15__CMGCRBASE:
check_insn(ctx, ISA_MIPS_R2);
CP0_CHECK(ctx->cmgcr);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
register_name = "CMGCRBase";
break;
default:
@@ -7318,12 +7318,12 @@
case CP0_REGISTER_17:
switch (sel) {
case CP0_REG17__LLADDR:
- gen_helper_dmfc0_lladdr(arg, cpu_env);
+ gen_helper_dmfc0_lladdr(arg, tcg_env);
register_name = "LLAddr";
break;
case CP0_REG17__MAAR:
CP0_CHECK(ctx->mrp);
- gen_helper_dmfc0_maar(arg, cpu_env);
+ gen_helper_dmfc0_maar(arg, tcg_env);
register_name = "MAAR";
break;
case CP0_REG17__MAARI:
@@ -7375,7 +7375,7 @@
switch (sel) {
case CP0_REG20__XCONTEXT:
check_insn(ctx, ISA_MIPS3);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_XContext));
register_name = "XContext";
break;
default:
@@ -7401,32 +7401,32 @@
case CP0_REGISTER_23:
switch (sel) {
case CP0_REG23__DEBUG:
- gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
+ gen_helper_mfc0_debug(arg, tcg_env); /* EJTAG support */
register_name = "Debug";
break;
case CP0_REG23__TRACECONTROL:
/* PDtrace support */
- /* gen_helper_dmfc0_tracecontrol(arg, cpu_env); */
+ /* gen_helper_dmfc0_tracecontrol(arg, tcg_env); */
register_name = "TraceControl";
goto cp0_unimplemented;
case CP0_REG23__TRACECONTROL2:
/* PDtrace support */
- /* gen_helper_dmfc0_tracecontrol2(arg, cpu_env); */
+ /* gen_helper_dmfc0_tracecontrol2(arg, tcg_env); */
register_name = "TraceControl2";
goto cp0_unimplemented;
case CP0_REG23__USERTRACEDATA1:
/* PDtrace support */
- /* gen_helper_dmfc0_usertracedata1(arg, cpu_env);*/
+ /* gen_helper_dmfc0_usertracedata1(arg, tcg_env);*/
register_name = "UserTraceData1";
goto cp0_unimplemented;
case CP0_REG23__TRACEIBPC:
/* PDtrace support */
- /* gen_helper_dmfc0_traceibpc(arg, cpu_env); */
+ /* gen_helper_dmfc0_traceibpc(arg, tcg_env); */
register_name = "TraceIBPC";
goto cp0_unimplemented;
case CP0_REG23__TRACEDBPC:
/* PDtrace support */
- /* gen_helper_dmfc0_tracedbpc(arg, cpu_env); */
+ /* gen_helper_dmfc0_tracedbpc(arg, tcg_env); */
register_name = "TraceDBPC";
goto cp0_unimplemented;
default:
@@ -7437,7 +7437,7 @@
switch (sel) {
case CP0_REG24__DEPC:
/* EJTAG support */
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_DEPC));
register_name = "DEPC";
break;
default:
@@ -7546,7 +7546,7 @@
case CP0_REGISTER_30:
switch (sel) {
case CP0_REG30__ERROREPC:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
register_name = "ErrorEPC";
break;
default:
@@ -7567,7 +7567,7 @@
case CP0_REG31__KSCRATCH5:
case CP0_REG31__KSCRATCH6:
CP0_CHECK(ctx->kscrexist & (1 << sel));
- tcg_gen_ld_tl(arg, cpu_env,
+ tcg_gen_ld_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
register_name = "KScratch";
break;
@@ -7602,12 +7602,12 @@
case CP0_REGISTER_00:
switch (sel) {
case CP0_REG00__INDEX:
- gen_helper_mtc0_index(cpu_env, arg);
+ gen_helper_mtc0_index(tcg_env, arg);
register_name = "Index";
break;
case CP0_REG00__MVPCONTROL:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_mvpcontrol(cpu_env, arg);
+ gen_helper_mtc0_mvpcontrol(tcg_env, arg);
register_name = "MVPControl";
break;
case CP0_REG00__MVPCONF0:
@@ -7637,39 +7637,39 @@
break;
case CP0_REG01__VPECONTROL:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpecontrol(cpu_env, arg);
+ gen_helper_mtc0_vpecontrol(tcg_env, arg);
register_name = "VPEControl";
break;
case CP0_REG01__VPECONF0:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpeconf0(cpu_env, arg);
+ gen_helper_mtc0_vpeconf0(tcg_env, arg);
register_name = "VPEConf0";
break;
case CP0_REG01__VPECONF1:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpeconf1(cpu_env, arg);
+ gen_helper_mtc0_vpeconf1(tcg_env, arg);
register_name = "VPEConf1";
break;
case CP0_REG01__YQMASK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_yqmask(cpu_env, arg);
+ gen_helper_mtc0_yqmask(tcg_env, arg);
register_name = "YQMask";
break;
case CP0_REG01__VPESCHEDULE:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPESchedule));
register_name = "VPESchedule";
break;
case CP0_REG01__VPESCHEFBACK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_VPEScheFBack));
register_name = "VPEScheFBack";
break;
case CP0_REG01__VPEOPT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_vpeopt(cpu_env, arg);
+ gen_helper_mtc0_vpeopt(tcg_env, arg);
register_name = "VPEOpt";
break;
default:
@@ -7679,42 +7679,42 @@
case CP0_REGISTER_02:
switch (sel) {
case CP0_REG02__ENTRYLO0:
- gen_helper_dmtc0_entrylo0(cpu_env, arg);
+ gen_helper_dmtc0_entrylo0(tcg_env, arg);
register_name = "EntryLo0";
break;
case CP0_REG02__TCSTATUS:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcstatus(cpu_env, arg);
+ gen_helper_mtc0_tcstatus(tcg_env, arg);
register_name = "TCStatus";
break;
case CP0_REG02__TCBIND:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcbind(cpu_env, arg);
+ gen_helper_mtc0_tcbind(tcg_env, arg);
register_name = "TCBind";
break;
case CP0_REG02__TCRESTART:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcrestart(cpu_env, arg);
+ gen_helper_mtc0_tcrestart(tcg_env, arg);
register_name = "TCRestart";
break;
case CP0_REG02__TCHALT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tchalt(cpu_env, arg);
+ gen_helper_mtc0_tchalt(tcg_env, arg);
register_name = "TCHalt";
break;
case CP0_REG02__TCCONTEXT:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tccontext(cpu_env, arg);
+ gen_helper_mtc0_tccontext(tcg_env, arg);
register_name = "TCContext";
break;
case CP0_REG02__TCSCHEDULE:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcschedule(cpu_env, arg);
+ gen_helper_mtc0_tcschedule(tcg_env, arg);
register_name = "TCSchedule";
break;
case CP0_REG02__TCSCHEFBACK:
CP0_CHECK(ctx->insn_flags & ASE_MT);
- gen_helper_mtc0_tcschefback(cpu_env, arg);
+ gen_helper_mtc0_tcschefback(tcg_env, arg);
register_name = "TCScheFBack";
break;
default:
@@ -7724,7 +7724,7 @@
case CP0_REGISTER_03:
switch (sel) {
case CP0_REG03__ENTRYLO1:
- gen_helper_dmtc0_entrylo1(cpu_env, arg);
+ gen_helper_dmtc0_entrylo1(tcg_env, arg);
register_name = "EntryLo1";
break;
case CP0_REG03__GLOBALNUM:
@@ -7739,7 +7739,7 @@
case CP0_REGISTER_04:
switch (sel) {
case CP0_REG04__CONTEXT:
- gen_helper_mtc0_context(cpu_env, arg);
+ gen_helper_mtc0_context(tcg_env, arg);
register_name = "Context";
break;
case CP0_REG04__CONTEXTCONFIG:
@@ -7749,7 +7749,7 @@
goto cp0_unimplemented;
case CP0_REG04__USERLOCAL:
CP0_CHECK(ctx->ulri);
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
register_name = "UserLocal";
break;
@@ -7765,42 +7765,42 @@
case CP0_REGISTER_05:
switch (sel) {
case CP0_REG05__PAGEMASK:
- gen_helper_mtc0_pagemask(cpu_env, arg);
+ gen_helper_mtc0_pagemask(tcg_env, arg);
register_name = "PageMask";
break;
case CP0_REG05__PAGEGRAIN:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_pagegrain(cpu_env, arg);
+ gen_helper_mtc0_pagegrain(tcg_env, arg);
register_name = "PageGrain";
break;
case CP0_REG05__SEGCTL0:
CP0_CHECK(ctx->sc);
- gen_helper_mtc0_segctl0(cpu_env, arg);
+ gen_helper_mtc0_segctl0(tcg_env, arg);
register_name = "SegCtl0";
break;
case CP0_REG05__SEGCTL1:
CP0_CHECK(ctx->sc);
- gen_helper_mtc0_segctl1(cpu_env, arg);
+ gen_helper_mtc0_segctl1(tcg_env, arg);
register_name = "SegCtl1";
break;
case CP0_REG05__SEGCTL2:
CP0_CHECK(ctx->sc);
- gen_helper_mtc0_segctl2(cpu_env, arg);
+ gen_helper_mtc0_segctl2(tcg_env, arg);
register_name = "SegCtl2";
break;
case CP0_REG05__PWBASE:
check_pw(ctx);
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWBase));
+ tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_PWBase));
register_name = "PWBase";
break;
case CP0_REG05__PWFIELD:
check_pw(ctx);
- gen_helper_mtc0_pwfield(cpu_env, arg);
+ gen_helper_mtc0_pwfield(tcg_env, arg);
register_name = "PWField";
break;
case CP0_REG05__PWSIZE:
check_pw(ctx);
- gen_helper_mtc0_pwsize(cpu_env, arg);
+ gen_helper_mtc0_pwsize(tcg_env, arg);
register_name = "PWSize";
break;
default:
@@ -7810,37 +7810,37 @@
case CP0_REGISTER_06:
switch (sel) {
case CP0_REG06__WIRED:
- gen_helper_mtc0_wired(cpu_env, arg);
+ gen_helper_mtc0_wired(tcg_env, arg);
register_name = "Wired";
break;
case CP0_REG06__SRSCONF0:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf0(cpu_env, arg);
+ gen_helper_mtc0_srsconf0(tcg_env, arg);
register_name = "SRSConf0";
break;
case CP0_REG06__SRSCONF1:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf1(cpu_env, arg);
+ gen_helper_mtc0_srsconf1(tcg_env, arg);
register_name = "SRSConf1";
break;
case CP0_REG06__SRSCONF2:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf2(cpu_env, arg);
+ gen_helper_mtc0_srsconf2(tcg_env, arg);
register_name = "SRSConf2";
break;
case CP0_REG06__SRSCONF3:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf3(cpu_env, arg);
+ gen_helper_mtc0_srsconf3(tcg_env, arg);
register_name = "SRSConf3";
break;
case CP0_REG06__SRSCONF4:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsconf4(cpu_env, arg);
+ gen_helper_mtc0_srsconf4(tcg_env, arg);
register_name = "SRSConf4";
break;
case CP0_REG06__PWCTL:
check_pw(ctx);
- gen_helper_mtc0_pwctl(cpu_env, arg);
+ gen_helper_mtc0_pwctl(tcg_env, arg);
register_name = "PWCtl";
break;
default:
@@ -7851,7 +7851,7 @@
switch (sel) {
case CP0_REG07__HWRENA:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_hwrena(cpu_env, arg);
+ gen_helper_mtc0_hwrena(tcg_env, arg);
ctx->base.is_jmp = DISAS_STOP;
register_name = "HWREna";
break;
@@ -7884,17 +7884,17 @@
case CP0_REGISTER_09:
switch (sel) {
case CP0_REG09__COUNT:
- gen_helper_mtc0_count(cpu_env, arg);
+ gen_helper_mtc0_count(tcg_env, arg);
register_name = "Count";
break;
case CP0_REG09__SAARI:
CP0_CHECK(ctx->saar);
- gen_helper_mtc0_saari(cpu_env, arg);
+ gen_helper_mtc0_saari(tcg_env, arg);
register_name = "SAARI";
break;
case CP0_REG09__SAAR:
CP0_CHECK(ctx->saar);
- gen_helper_mtc0_saar(cpu_env, arg);
+ gen_helper_mtc0_saar(tcg_env, arg);
register_name = "SAAR";
break;
default:
@@ -7906,7 +7906,7 @@
case CP0_REGISTER_10:
switch (sel) {
case CP0_REG10__ENTRYHI:
- gen_helper_mtc0_entryhi(cpu_env, arg);
+ gen_helper_mtc0_entryhi(tcg_env, arg);
register_name = "EntryHi";
break;
default:
@@ -7916,7 +7916,7 @@
case CP0_REGISTER_11:
switch (sel) {
case CP0_REG11__COMPARE:
- gen_helper_mtc0_compare(cpu_env, arg);
+ gen_helper_mtc0_compare(tcg_env, arg);
register_name = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -7930,7 +7930,7 @@
switch (sel) {
case CP0_REG12__STATUS:
save_cpu_state(ctx, 1);
- gen_helper_mtc0_status(cpu_env, arg);
+ gen_helper_mtc0_status(tcg_env, arg);
/* DISAS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->base.pc_next + 4);
ctx->base.is_jmp = DISAS_EXIT;
@@ -7938,14 +7938,14 @@
break;
case CP0_REG12__INTCTL:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_intctl(cpu_env, arg);
+ gen_helper_mtc0_intctl(tcg_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "IntCtl";
break;
case CP0_REG12__SRSCTL:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_srsctl(cpu_env, arg);
+ gen_helper_mtc0_srsctl(tcg_env, arg);
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "SRSCtl";
@@ -7965,7 +7965,7 @@
switch (sel) {
case CP0_REG13__CAUSE:
save_cpu_state(ctx, 1);
- gen_helper_mtc0_cause(cpu_env, arg);
+ gen_helper_mtc0_cause(tcg_env, arg);
/*
* Stop translation as we may have triggered an interrupt.
* DISAS_STOP isn't sufficient, we need to ensure we break out of
@@ -7982,7 +7982,7 @@
case CP0_REGISTER_14:
switch (sel) {
case CP0_REG14__EPC:
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_EPC));
register_name = "EPC";
break;
default:
@@ -7997,7 +7997,7 @@
break;
case CP0_REG15__EBASE:
check_insn(ctx, ISA_MIPS_R2);
- gen_helper_mtc0_ebase(cpu_env, arg);
+ gen_helper_mtc0_ebase(tcg_env, arg);
register_name = "EBase";
break;
default:
@@ -8007,7 +8007,7 @@
case CP0_REGISTER_16:
switch (sel) {
case CP0_REG16__CONFIG:
- gen_helper_mtc0_config0(cpu_env, arg);
+ gen_helper_mtc0_config0(tcg_env, arg);
register_name = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -8017,13 +8017,13 @@
register_name = "Config1";
break;
case CP0_REG16__CONFIG2:
- gen_helper_mtc0_config2(cpu_env, arg);
+ gen_helper_mtc0_config2(tcg_env, arg);
register_name = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
break;
case CP0_REG16__CONFIG3:
- gen_helper_mtc0_config3(cpu_env, arg);
+ gen_helper_mtc0_config3(tcg_env, arg);
register_name = "Config3";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -8033,7 +8033,7 @@
register_name = "Config4";
break;
case CP0_REG16__CONFIG5:
- gen_helper_mtc0_config5(cpu_env, arg);
+ gen_helper_mtc0_config5(tcg_env, arg);
register_name = "Config5";
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
@@ -8047,17 +8047,17 @@
case CP0_REGISTER_17:
switch (sel) {
case CP0_REG17__LLADDR:
- gen_helper_mtc0_lladdr(cpu_env, arg);
+ gen_helper_mtc0_lladdr(tcg_env, arg);
register_name = "LLAddr";
break;
case CP0_REG17__MAAR:
CP0_CHECK(ctx->mrp);
- gen_helper_mtc0_maar(cpu_env, arg);
+ gen_helper_mtc0_maar(tcg_env, arg);
register_name = "MAAR";
break;
case CP0_REG17__MAARI:
CP0_CHECK(ctx->mrp);
- gen_helper_mtc0_maari(cpu_env, arg);
+ gen_helper_mtc0_maari(tcg_env, arg);
register_name = "MAARI";
break;
default:
@@ -8104,7 +8104,7 @@
switch (sel) {
case CP0_REG20__XCONTEXT:
check_insn(ctx, ISA_MIPS3);
- gen_helper_mtc0_xcontext(cpu_env, arg);
+ gen_helper_mtc0_xcontext(tcg_env, arg);
register_name = "XContext";
break;
default:
@@ -8116,7 +8116,7 @@
CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
switch (sel) {
case 0:
- gen_helper_mtc0_framemask(cpu_env, arg);
+ gen_helper_mtc0_framemask(tcg_env, arg);
register_name = "Framemask";
break;
default:
@@ -8130,7 +8130,7 @@
case CP0_REGISTER_23:
switch (sel) {
case CP0_REG23__DEBUG:
- gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
+ gen_helper_mtc0_debug(tcg_env, arg); /* EJTAG support */
/* DISAS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->base.pc_next + 4);
ctx->base.is_jmp = DISAS_EXIT;
@@ -8138,35 +8138,35 @@
break;
case CP0_REG23__TRACECONTROL:
/* PDtrace support */
- /* gen_helper_mtc0_tracecontrol(cpu_env, arg); */
+ /* gen_helper_mtc0_tracecontrol(tcg_env, arg); */
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "TraceControl";
goto cp0_unimplemented;
case CP0_REG23__TRACECONTROL2:
/* PDtrace support */
- /* gen_helper_mtc0_tracecontrol2(cpu_env, arg); */
+ /* gen_helper_mtc0_tracecontrol2(tcg_env, arg); */
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "TraceControl2";
goto cp0_unimplemented;
case CP0_REG23__USERTRACEDATA1:
/* PDtrace support */
- /* gen_helper_mtc0_usertracedata1(cpu_env, arg);*/
+ /* gen_helper_mtc0_usertracedata1(tcg_env, arg);*/
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "UserTraceData1";
goto cp0_unimplemented;
case CP0_REG23__TRACEIBPC:
/* PDtrace support */
- /* gen_helper_mtc0_traceibpc(cpu_env, arg); */
+ /* gen_helper_mtc0_traceibpc(tcg_env, arg); */
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "TraceIBPC";
goto cp0_unimplemented;
case CP0_REG23__TRACEDBPC:
/* PDtrace support */
- /* gen_helper_mtc0_tracedbpc(cpu_env, arg); */
+ /* gen_helper_mtc0_tracedbpc(tcg_env, arg); */
/* Stop translation as we may have switched the execution mode */
ctx->base.is_jmp = DISAS_STOP;
register_name = "TraceDBPC";
@@ -8179,7 +8179,7 @@
switch (sel) {
case CP0_REG24__DEPC:
/* EJTAG support */
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_DEPC));
register_name = "DEPC";
break;
default:
@@ -8189,35 +8189,35 @@
case CP0_REGISTER_25:
switch (sel) {
case CP0_REG25__PERFCTL0:
- gen_helper_mtc0_performance0(cpu_env, arg);
+ gen_helper_mtc0_performance0(tcg_env, arg);
register_name = "Performance0";
break;
case CP0_REG25__PERFCNT0:
- /* gen_helper_mtc0_performance1(cpu_env, arg); */
+ /* gen_helper_mtc0_performance1(tcg_env, arg); */
register_name = "Performance1";
goto cp0_unimplemented;
case CP0_REG25__PERFCTL1:
- /* gen_helper_mtc0_performance2(cpu_env, arg); */
+ /* gen_helper_mtc0_performance2(tcg_env, arg); */
register_name = "Performance2";
goto cp0_unimplemented;
case CP0_REG25__PERFCNT1:
- /* gen_helper_mtc0_performance3(cpu_env, arg); */
+ /* gen_helper_mtc0_performance3(tcg_env, arg); */
register_name = "Performance3";
goto cp0_unimplemented;
case CP0_REG25__PERFCTL2:
- /* gen_helper_mtc0_performance4(cpu_env, arg); */
+ /* gen_helper_mtc0_performance4(tcg_env, arg); */
register_name = "Performance4";
goto cp0_unimplemented;
case CP0_REG25__PERFCNT2:
- /* gen_helper_mtc0_performance5(cpu_env, arg); */
+ /* gen_helper_mtc0_performance5(tcg_env, arg); */
register_name = "Performance5";
goto cp0_unimplemented;
case CP0_REG25__PERFCTL3:
- /* gen_helper_mtc0_performance6(cpu_env, arg); */
+ /* gen_helper_mtc0_performance6(tcg_env, arg); */
register_name = "Performance6";
goto cp0_unimplemented;
case CP0_REG25__PERFCNT3:
- /* gen_helper_mtc0_performance7(cpu_env, arg); */
+ /* gen_helper_mtc0_performance7(tcg_env, arg); */
register_name = "Performance7";
goto cp0_unimplemented;
default:
@@ -8227,7 +8227,7 @@
case CP0_REGISTER_26:
switch (sel) {
case CP0_REG26__ERRCTL:
- gen_helper_mtc0_errctl(cpu_env, arg);
+ gen_helper_mtc0_errctl(tcg_env, arg);
ctx->base.is_jmp = DISAS_STOP;
register_name = "ErrCtl";
break;
@@ -8251,14 +8251,14 @@
case CP0_REG28__TAGLO1:
case CP0_REG28__TAGLO2:
case CP0_REG28__TAGLO3:
- gen_helper_mtc0_taglo(cpu_env, arg);
+ gen_helper_mtc0_taglo(tcg_env, arg);
register_name = "TagLo";
break;
case CP0_REG28__DATALO:
case CP0_REG28__DATALO1:
case CP0_REG28__DATALO2:
case CP0_REG28__DATALO3:
- gen_helper_mtc0_datalo(cpu_env, arg);
+ gen_helper_mtc0_datalo(tcg_env, arg);
register_name = "DataLo";
break;
default:
@@ -8271,14 +8271,14 @@
case CP0_REG29__TAGHI1:
case CP0_REG29__TAGHI2:
case CP0_REG29__TAGHI3:
- gen_helper_mtc0_taghi(cpu_env, arg);
+ gen_helper_mtc0_taghi(tcg_env, arg);
register_name = "TagHi";
break;
case CP0_REG29__DATAHI:
case CP0_REG29__DATAHI1:
case CP0_REG29__DATAHI2:
case CP0_REG29__DATAHI3:
- gen_helper_mtc0_datahi(cpu_env, arg);
+ gen_helper_mtc0_datahi(tcg_env, arg);
register_name = "DataHi";
break;
default:
@@ -8289,7 +8289,7 @@
case CP0_REGISTER_30:
switch (sel) {
case CP0_REG30__ERROREPC:
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
register_name = "ErrorEPC";
break;
default:
@@ -8310,7 +8310,7 @@
case CP0_REG31__KSCRATCH5:
case CP0_REG31__KSCRATCH6:
CP0_CHECK(ctx->kscrexist & (1 << sel));
- tcg_gen_st_tl(arg, cpu_env,
+ tcg_gen_st_tl(arg, tcg_env,
offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
register_name = "KScratch";
break;
@@ -8358,10 +8358,10 @@
case 1:
switch (sel) {
case 1:
- gen_helper_mftc0_vpecontrol(t0, cpu_env);
+ gen_helper_mftc0_vpecontrol(t0, tcg_env);
break;
case 2:
- gen_helper_mftc0_vpeconf0(t0, cpu_env);
+ gen_helper_mftc0_vpeconf0(t0, tcg_env);
break;
default:
goto die;
@@ -8371,25 +8371,25 @@
case 2:
switch (sel) {
case 1:
- gen_helper_mftc0_tcstatus(t0, cpu_env);
+ gen_helper_mftc0_tcstatus(t0, tcg_env);
break;
case 2:
- gen_helper_mftc0_tcbind(t0, cpu_env);
+ gen_helper_mftc0_tcbind(t0, tcg_env);
break;
case 3:
- gen_helper_mftc0_tcrestart(t0, cpu_env);
+ gen_helper_mftc0_tcrestart(t0, tcg_env);
break;
case 4:
- gen_helper_mftc0_tchalt(t0, cpu_env);
+ gen_helper_mftc0_tchalt(t0, tcg_env);
break;
case 5:
- gen_helper_mftc0_tccontext(t0, cpu_env);
+ gen_helper_mftc0_tccontext(t0, tcg_env);
break;
case 6:
- gen_helper_mftc0_tcschedule(t0, cpu_env);
+ gen_helper_mftc0_tcschedule(t0, tcg_env);
break;
case 7:
- gen_helper_mftc0_tcschefback(t0, cpu_env);
+ gen_helper_mftc0_tcschefback(t0, tcg_env);
break;
default:
gen_mfc0(ctx, t0, rt, sel);
@@ -8399,7 +8399,7 @@
case 10:
switch (sel) {
case 0:
- gen_helper_mftc0_entryhi(t0, cpu_env);
+ gen_helper_mftc0_entryhi(t0, tcg_env);
break;
default:
gen_mfc0(ctx, t0, rt, sel);
@@ -8409,7 +8409,7 @@
case 12:
switch (sel) {
case 0:
- gen_helper_mftc0_status(t0, cpu_env);
+ gen_helper_mftc0_status(t0, tcg_env);
break;
default:
gen_mfc0(ctx, t0, rt, sel);
@@ -8419,7 +8419,7 @@
case 13:
switch (sel) {
case 0:
- gen_helper_mftc0_cause(t0, cpu_env);
+ gen_helper_mftc0_cause(t0, tcg_env);
break;
default:
goto die;
@@ -8429,7 +8429,7 @@
case 14:
switch (sel) {
case 0:
- gen_helper_mftc0_epc(t0, cpu_env);
+ gen_helper_mftc0_epc(t0, tcg_env);
break;
default:
goto die;
@@ -8439,7 +8439,7 @@
case 15:
switch (sel) {
case 1:
- gen_helper_mftc0_ebase(t0, cpu_env);
+ gen_helper_mftc0_ebase(t0, tcg_env);
break;
default:
goto die;
@@ -8456,7 +8456,7 @@
case 5:
case 6:
case 7:
- gen_helper_mftc0_configx(t0, cpu_env, tcg_constant_tl(sel));
+ gen_helper_mftc0_configx(t0, tcg_env, tcg_constant_tl(sel));
break;
default:
goto die;
@@ -8466,7 +8466,7 @@
case 23:
switch (sel) {
case 0:
- gen_helper_mftc0_debug(t0, cpu_env);
+ gen_helper_mftc0_debug(t0, tcg_env);
break;
default:
gen_mfc0(ctx, t0, rt, sel);
@@ -8522,7 +8522,7 @@
gen_helper_1e0i(mftacx, t0, 3);
break;
case 16:
- gen_helper_mftdsp(t0, cpu_env);
+ gen_helper_mftdsp(t0, tcg_env);
break;
default:
goto die;
@@ -8585,10 +8585,10 @@
case 1:
switch (sel) {
case 1:
- gen_helper_mttc0_vpecontrol(cpu_env, t0);
+ gen_helper_mttc0_vpecontrol(tcg_env, t0);
break;
case 2:
- gen_helper_mttc0_vpeconf0(cpu_env, t0);
+ gen_helper_mttc0_vpeconf0(tcg_env, t0);
break;
default:
goto die;
@@ -8598,25 +8598,25 @@
case 2:
switch (sel) {
case 1:
- gen_helper_mttc0_tcstatus(cpu_env, t0);
+ gen_helper_mttc0_tcstatus(tcg_env, t0);
break;
case 2:
- gen_helper_mttc0_tcbind(cpu_env, t0);
+ gen_helper_mttc0_tcbind(tcg_env, t0);
break;
case 3:
- gen_helper_mttc0_tcrestart(cpu_env, t0);
+ gen_helper_mttc0_tcrestart(tcg_env, t0);
break;
case 4:
- gen_helper_mttc0_tchalt(cpu_env, t0);
+ gen_helper_mttc0_tchalt(tcg_env, t0);
break;
case 5:
- gen_helper_mttc0_tccontext(cpu_env, t0);
+ gen_helper_mttc0_tccontext(tcg_env, t0);
break;
case 6:
- gen_helper_mttc0_tcschedule(cpu_env, t0);
+ gen_helper_mttc0_tcschedule(tcg_env, t0);
break;
case 7:
- gen_helper_mttc0_tcschefback(cpu_env, t0);
+ gen_helper_mttc0_tcschefback(tcg_env, t0);
break;
default:
gen_mtc0(ctx, t0, rd, sel);
@@ -8626,7 +8626,7 @@
case 10:
switch (sel) {
case 0:
- gen_helper_mttc0_entryhi(cpu_env, t0);
+ gen_helper_mttc0_entryhi(tcg_env, t0);
break;
default:
gen_mtc0(ctx, t0, rd, sel);
@@ -8636,7 +8636,7 @@
case 12:
switch (sel) {
case 0:
- gen_helper_mttc0_status(cpu_env, t0);
+ gen_helper_mttc0_status(tcg_env, t0);
break;
default:
gen_mtc0(ctx, t0, rd, sel);
@@ -8646,7 +8646,7 @@
case 13:
switch (sel) {
case 0:
- gen_helper_mttc0_cause(cpu_env, t0);
+ gen_helper_mttc0_cause(tcg_env, t0);
break;
default:
goto die;
@@ -8656,7 +8656,7 @@
case 15:
switch (sel) {
case 1:
- gen_helper_mttc0_ebase(cpu_env, t0);
+ gen_helper_mttc0_ebase(tcg_env, t0);
break;
default:
goto die;
@@ -8666,7 +8666,7 @@
case 23:
switch (sel) {
case 0:
- gen_helper_mttc0_debug(cpu_env, t0);
+ gen_helper_mttc0_debug(tcg_env, t0);
break;
default:
gen_mtc0(ctx, t0, rd, sel);
@@ -8722,7 +8722,7 @@
gen_helper_0e1i(mttacx, t0, 3);
break;
case 16:
- gen_helper_mttdsp(cpu_env, t0);
+ gen_helper_mttdsp(tcg_env, t0);
break;
default:
goto die;
@@ -8849,7 +8849,7 @@
if (!env->tlb->helper_tlbwi) {
goto die;
}
- gen_helper_tlbwi(cpu_env);
+ gen_helper_tlbwi(tcg_env);
break;
case OPC_TLBINV:
opn = "tlbinv";
@@ -8857,7 +8857,7 @@
if (!env->tlb->helper_tlbinv) {
goto die;
}
- gen_helper_tlbinv(cpu_env);
+ gen_helper_tlbinv(tcg_env);
} /* treat as nop if TLBINV not supported */
break;
case OPC_TLBINVF:
@@ -8866,7 +8866,7 @@
if (!env->tlb->helper_tlbinvf) {
goto die;
}
- gen_helper_tlbinvf(cpu_env);
+ gen_helper_tlbinvf(tcg_env);
} /* treat as nop if TLBINV not supported */
break;
case OPC_TLBWR:
@@ -8874,21 +8874,21 @@
if (!env->tlb->helper_tlbwr) {
goto die;
}
- gen_helper_tlbwr(cpu_env);
+ gen_helper_tlbwr(tcg_env);
break;
case OPC_TLBP:
opn = "tlbp";
if (!env->tlb->helper_tlbp) {
goto die;
}
- gen_helper_tlbp(cpu_env);
+ gen_helper_tlbp(tcg_env);
break;
case OPC_TLBR:
opn = "tlbr";
if (!env->tlb->helper_tlbr) {
goto die;
}
- gen_helper_tlbr(cpu_env);
+ gen_helper_tlbr(tcg_env);
break;
case OPC_ERET: /* OPC_ERETNC */
if ((ctx->insn_flags & ISA_MIPS_R6) &&
@@ -8900,12 +8900,12 @@
/* OPC_ERETNC */
opn = "eretnc";
check_insn(ctx, ISA_MIPS_R5);
- gen_helper_eretnc(cpu_env);
+ gen_helper_eretnc(tcg_env);
} else {
/* OPC_ERET */
opn = "eret";
check_insn(ctx, ISA_MIPS2);
- gen_helper_eret(cpu_env);
+ gen_helper_eret(tcg_env);
}
ctx->base.is_jmp = DISAS_EXIT;
}
@@ -8921,7 +8921,7 @@
MIPS_INVAL(opn);
gen_reserved_instruction(ctx);
} else {
- gen_helper_deret(cpu_env);
+ gen_helper_deret(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
}
break;
@@ -8936,7 +8936,7 @@
ctx->base.pc_next += 4;
save_cpu_state(ctx, 1);
ctx->base.pc_next -= 4;
- gen_helper_wait(cpu_env);
+ gen_helper_wait(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
break;
default:
@@ -9557,7 +9557,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_add_s(fp0, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9568,7 +9568,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_sub_s(fp0, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9579,7 +9579,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_mul_s(fp0, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9590,7 +9590,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_div_s(fp0, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9599,7 +9599,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
+ gen_helper_float_sqrt_s(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9645,9 +9645,9 @@
gen_load_fpr32(ctx, fp32, fs);
if (ctx->nan2008) {
- gen_helper_float_round_2008_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_round_2008_l_s(fp64, tcg_env, fp32);
} else {
- gen_helper_float_round_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_round_l_s(fp64, tcg_env, fp32);
}
gen_store_fpr64(ctx, fp64, fd);
}
@@ -9660,9 +9660,9 @@
gen_load_fpr32(ctx, fp32, fs);
if (ctx->nan2008) {
- gen_helper_float_trunc_2008_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_trunc_2008_l_s(fp64, tcg_env, fp32);
} else {
- gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_trunc_l_s(fp64, tcg_env, fp32);
}
gen_store_fpr64(ctx, fp64, fd);
}
@@ -9675,9 +9675,9 @@
gen_load_fpr32(ctx, fp32, fs);
if (ctx->nan2008) {
- gen_helper_float_ceil_2008_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_ceil_2008_l_s(fp64, tcg_env, fp32);
} else {
- gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_ceil_l_s(fp64, tcg_env, fp32);
}
gen_store_fpr64(ctx, fp64, fd);
}
@@ -9690,9 +9690,9 @@
gen_load_fpr32(ctx, fp32, fs);
if (ctx->nan2008) {
- gen_helper_float_floor_2008_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_floor_2008_l_s(fp64, tcg_env, fp32);
} else {
- gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_floor_l_s(fp64, tcg_env, fp32);
}
gen_store_fpr64(ctx, fp64, fd);
}
@@ -9703,9 +9703,9 @@
gen_load_fpr32(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_round_2008_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_round_2008_w_s(fp0, tcg_env, fp0);
} else {
- gen_helper_float_round_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_round_w_s(fp0, tcg_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
}
@@ -9716,9 +9716,9 @@
gen_load_fpr32(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_trunc_2008_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_trunc_2008_w_s(fp0, tcg_env, fp0);
} else {
- gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_trunc_w_s(fp0, tcg_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
}
@@ -9729,9 +9729,9 @@
gen_load_fpr32(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_ceil_2008_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_ceil_2008_w_s(fp0, tcg_env, fp0);
} else {
- gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_ceil_w_s(fp0, tcg_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
}
@@ -9742,9 +9742,9 @@
gen_load_fpr32(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_floor_2008_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_floor_2008_w_s(fp0, tcg_env, fp0);
} else {
- gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_floor_w_s(fp0, tcg_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
}
@@ -9800,7 +9800,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_recip_s(fp0, cpu_env, fp0);
+ gen_helper_float_recip_s(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9809,7 +9809,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
+ gen_helper_float_rsqrt_s(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9822,7 +9822,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fd);
- gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_maddf_s(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
}
break;
@@ -9835,7 +9835,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fd);
- gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_msubf_s(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
}
break;
@@ -9844,7 +9844,7 @@
{
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_rint_s(fp0, cpu_env, fp0);
+ gen_helper_float_rint_s(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9853,7 +9853,7 @@
{
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_class_s(fp0, cpu_env, fp0);
+ gen_helper_float_class_s(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -9865,7 +9865,7 @@
TCGv_i32 fp2 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
+ gen_helper_float_min_s(fp2, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp2, fd);
} else {
/* OPC_RECIP2_S */
@@ -9876,7 +9876,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_recip2_s(fp0, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp0, fd);
}
}
@@ -9889,7 +9889,7 @@
TCGv_i32 fp2 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
+ gen_helper_float_mina_s(fp2, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp2, fd);
} else {
/* OPC_RECIP1_S */
@@ -9898,7 +9898,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_recip1_s(fp0, cpu_env, fp0);
+ gen_helper_float_recip1_s(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
}
@@ -9910,7 +9910,7 @@
TCGv_i32 fp1 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
+ gen_helper_float_max_s(fp1, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp1, fd);
} else {
/* OPC_RSQRT1_S */
@@ -9919,7 +9919,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
+ gen_helper_float_rsqrt1_s(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
}
@@ -9931,7 +9931,7 @@
TCGv_i32 fp1 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
+ gen_helper_float_maxa_s(fp1, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp1, fd);
} else {
/* OPC_RSQRT2_S */
@@ -9942,7 +9942,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
- gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_rsqrt2_s(fp0, tcg_env, fp0, fp1);
gen_store_fpr32(ctx, fp0, fd);
}
}
@@ -9954,7 +9954,7 @@
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(ctx, fp32, fs);
- gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
+ gen_helper_float_cvtd_s(fp64, tcg_env, fp32);
gen_store_fpr64(ctx, fp64, fd);
}
break;
@@ -9964,9 +9964,9 @@
gen_load_fpr32(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_cvt_2008_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_cvt_2008_w_s(fp0, tcg_env, fp0);
} else {
- gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
+ gen_helper_float_cvt_w_s(fp0, tcg_env, fp0);
}
gen_store_fpr32(ctx, fp0, fd);
}
@@ -9979,9 +9979,9 @@
gen_load_fpr32(ctx, fp32, fs);
if (ctx->nan2008) {
- gen_helper_float_cvt_2008_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_cvt_2008_l_s(fp64, tcg_env, fp32);
} else {
- gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
+ gen_helper_float_cvt_l_s(fp64, tcg_env, fp32);
}
gen_store_fpr64(ctx, fp64, fd);
}
@@ -10030,7 +10030,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_add_d(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10042,7 +10042,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_sub_d(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10054,7 +10054,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_mul_d(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10066,7 +10066,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_div_d(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10076,7 +10076,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
+ gen_helper_float_sqrt_d(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10124,9 +10124,9 @@
gen_load_fpr64(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_round_2008_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_round_2008_l_d(fp0, tcg_env, fp0);
} else {
- gen_helper_float_round_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_round_l_d(fp0, tcg_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
}
@@ -10138,9 +10138,9 @@
gen_load_fpr64(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_trunc_2008_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_trunc_2008_l_d(fp0, tcg_env, fp0);
} else {
- gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_trunc_l_d(fp0, tcg_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
}
@@ -10152,9 +10152,9 @@
gen_load_fpr64(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_ceil_2008_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_ceil_2008_l_d(fp0, tcg_env, fp0);
} else {
- gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_ceil_l_d(fp0, tcg_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
}
@@ -10166,9 +10166,9 @@
gen_load_fpr64(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_floor_2008_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_floor_2008_l_d(fp0, tcg_env, fp0);
} else {
- gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_floor_l_d(fp0, tcg_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
}
@@ -10181,9 +10181,9 @@
gen_load_fpr64(ctx, fp64, fs);
if (ctx->nan2008) {
- gen_helper_float_round_2008_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_round_2008_w_d(fp32, tcg_env, fp64);
} else {
- gen_helper_float_round_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_round_w_d(fp32, tcg_env, fp64);
}
gen_store_fpr32(ctx, fp32, fd);
}
@@ -10196,9 +10196,9 @@
gen_load_fpr64(ctx, fp64, fs);
if (ctx->nan2008) {
- gen_helper_float_trunc_2008_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_trunc_2008_w_d(fp32, tcg_env, fp64);
} else {
- gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_trunc_w_d(fp32, tcg_env, fp64);
}
gen_store_fpr32(ctx, fp32, fd);
}
@@ -10211,9 +10211,9 @@
gen_load_fpr64(ctx, fp64, fs);
if (ctx->nan2008) {
- gen_helper_float_ceil_2008_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_ceil_2008_w_d(fp32, tcg_env, fp64);
} else {
- gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_ceil_w_d(fp32, tcg_env, fp64);
}
gen_store_fpr32(ctx, fp32, fd);
}
@@ -10226,9 +10226,9 @@
gen_load_fpr64(ctx, fp64, fs);
if (ctx->nan2008) {
- gen_helper_float_floor_2008_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_floor_2008_w_d(fp32, tcg_env, fp64);
} else {
- gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_floor_w_d(fp32, tcg_env, fp64);
}
gen_store_fpr32(ctx, fp32, fd);
}
@@ -10285,7 +10285,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_recip_d(fp0, cpu_env, fp0);
+ gen_helper_float_recip_d(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10295,7 +10295,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
+ gen_helper_float_rsqrt_d(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10308,7 +10308,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fd);
- gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_maddf_d(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -10321,7 +10321,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fd);
- gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_msubf_d(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -10330,7 +10330,7 @@
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rint_d(fp0, cpu_env, fp0);
+ gen_helper_float_rint_d(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10339,7 +10339,7 @@
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_class_d(fp0, cpu_env, fp0);
+ gen_helper_float_class_d(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10350,7 +10350,7 @@
TCGv_i64 fp1 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
+ gen_helper_float_min_d(fp1, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
} else {
/* OPC_RECIP2_D */
@@ -10361,7 +10361,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_recip2_d(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
}
@@ -10373,7 +10373,7 @@
TCGv_i64 fp1 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
+ gen_helper_float_mina_d(fp1, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
} else {
/* OPC_RECIP1_D */
@@ -10382,7 +10382,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_recip1_d(fp0, cpu_env, fp0);
+ gen_helper_float_recip1_d(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
}
@@ -10394,7 +10394,7 @@
TCGv_i64 fp1 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
+ gen_helper_float_max_d(fp1, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
} else {
/* OPC_RSQRT1_D */
@@ -10403,7 +10403,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
+ gen_helper_float_rsqrt1_d(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
}
@@ -10415,7 +10415,7 @@
TCGv_i64 fp1 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
+ gen_helper_float_maxa_d(fp1, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp1, fd);
} else {
/* OPC_RSQRT2_D */
@@ -10426,7 +10426,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_rsqrt2_d(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
}
@@ -10461,7 +10461,7 @@
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_cvts_d(fp32, cpu_env, fp64);
+ gen_helper_float_cvts_d(fp32, tcg_env, fp64);
gen_store_fpr32(ctx, fp32, fd);
}
break;
@@ -10473,9 +10473,9 @@
gen_load_fpr64(ctx, fp64, fs);
if (ctx->nan2008) {
- gen_helper_float_cvt_2008_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_cvt_2008_w_d(fp32, tcg_env, fp64);
} else {
- gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
+ gen_helper_float_cvt_w_d(fp32, tcg_env, fp64);
}
gen_store_fpr32(ctx, fp32, fd);
}
@@ -10487,9 +10487,9 @@
gen_load_fpr64(ctx, fp0, fs);
if (ctx->nan2008) {
- gen_helper_float_cvt_2008_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_cvt_2008_l_d(fp0, tcg_env, fp0);
} else {
- gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
+ gen_helper_float_cvt_l_d(fp0, tcg_env, fp0);
}
gen_store_fpr64(ctx, fp0, fd);
}
@@ -10499,7 +10499,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_cvts_w(fp0, cpu_env, fp0);
+ gen_helper_float_cvts_w(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -10510,7 +10510,7 @@
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(ctx, fp32, fs);
- gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
+ gen_helper_float_cvtd_w(fp64, tcg_env, fp32);
gen_store_fpr64(ctx, fp64, fd);
}
break;
@@ -10521,7 +10521,7 @@
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_cvts_l(fp32, cpu_env, fp64);
+ gen_helper_float_cvts_l(fp32, tcg_env, fp64);
gen_store_fpr32(ctx, fp32, fd);
}
break;
@@ -10531,7 +10531,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
+ gen_helper_float_cvtd_l(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10541,7 +10541,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtps_pw(fp0, cpu_env, fp0);
+ gen_helper_float_cvtps_pw(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10553,7 +10553,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_add_ps(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10565,7 +10565,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_sub_ps(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10577,7 +10577,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_mul_ps(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10652,7 +10652,7 @@
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
- gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_addr_ps(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10664,7 +10664,7 @@
gen_load_fpr64(ctx, fp0, ft);
gen_load_fpr64(ctx, fp1, fs);
- gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_mulr_ps(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10676,7 +10676,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_recip2_ps(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10686,7 +10686,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_recip1_ps(fp0, cpu_env, fp0);
+ gen_helper_float_recip1_ps(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10696,7 +10696,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0);
+ gen_helper_float_rsqrt1_ps(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10708,7 +10708,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
- gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1);
+ gen_helper_float_rsqrt2_ps(fp0, tcg_env, fp0, fp1);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10718,7 +10718,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32h(ctx, fp0, fs);
- gen_helper_float_cvts_pu(fp0, cpu_env, fp0);
+ gen_helper_float_cvts_pu(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -10728,7 +10728,7 @@
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0);
+ gen_helper_float_cvtpw_ps(fp0, tcg_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
}
break;
@@ -10738,7 +10738,7 @@
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_cvts_pl(fp0, cpu_env, fp0);
+ gen_helper_float_cvts_pl(fp0, tcg_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
}
break;
@@ -10943,7 +10943,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
- gen_helper_float_madd_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_madd_s(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
}
break;
@@ -10958,7 +10958,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_madd_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_madd_d(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -10972,7 +10972,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_madd_ps(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_madd_ps(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -10986,7 +10986,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
- gen_helper_float_msub_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_msub_s(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
}
break;
@@ -11001,7 +11001,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_msub_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_msub_d(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -11015,7 +11015,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_msub_ps(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_msub_ps(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -11029,7 +11029,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
- gen_helper_float_nmadd_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_nmadd_s(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
}
break;
@@ -11044,7 +11044,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmadd_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_nmadd_d(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -11058,7 +11058,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmadd_ps(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_nmadd_ps(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -11072,7 +11072,7 @@
gen_load_fpr32(ctx, fp0, fs);
gen_load_fpr32(ctx, fp1, ft);
gen_load_fpr32(ctx, fp2, fr);
- gen_helper_float_nmsub_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_nmsub_s(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr32(ctx, fp2, fd);
}
break;
@@ -11087,7 +11087,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmsub_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_nmsub_d(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -11101,7 +11101,7 @@
gen_load_fpr64(ctx, fp0, fs);
gen_load_fpr64(ctx, fp1, ft);
gen_load_fpr64(ctx, fp2, fr);
- gen_helper_float_nmsub_ps(fp2, cpu_env, fp0, fp1, fp2);
+ gen_helper_float_nmsub_ps(fp2, tcg_env, fp0, fp1, fp2);
gen_store_fpr64(ctx, fp2, fd);
}
break;
@@ -11127,16 +11127,16 @@
switch (rd) {
case 0:
- gen_helper_rdhwr_cpunum(t0, cpu_env);
+ gen_helper_rdhwr_cpunum(t0, tcg_env);
gen_store_gpr(t0, rt);
break;
case 1:
- gen_helper_rdhwr_synci_step(t0, cpu_env);
+ gen_helper_rdhwr_synci_step(t0, tcg_env);
gen_store_gpr(t0, rt);
break;
case 2:
translator_io_start(&ctx->base);
- gen_helper_rdhwr_cc(t0, cpu_env);
+ gen_helper_rdhwr_cc(t0, tcg_env);
gen_store_gpr(t0, rt);
/*
* Break the TB to be able to take timer interrupts immediately
@@ -11147,7 +11147,7 @@
ctx->base.is_jmp = DISAS_EXIT;
break;
case 3:
- gen_helper_rdhwr_ccres(t0, cpu_env);
+ gen_helper_rdhwr_ccres(t0, tcg_env);
gen_store_gpr(t0, rt);
break;
case 4:
@@ -11159,24 +11159,24 @@
*/
generate_exception(ctx, EXCP_RI);
}
- gen_helper_rdhwr_performance(t0, cpu_env);
+ gen_helper_rdhwr_performance(t0, tcg_env);
gen_store_gpr(t0, rt);
break;
case 5:
check_insn(ctx, ISA_MIPS_R6);
- gen_helper_rdhwr_xnp(t0, cpu_env);
+ gen_helper_rdhwr_xnp(t0, tcg_env);
gen_store_gpr(t0, rt);
break;
case 29:
#if defined(CONFIG_USER_ONLY)
- tcg_gen_ld_tl(t0, cpu_env,
+ tcg_gen_ld_tl(t0, tcg_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, rt);
break;
#else
if ((ctx->hflags & MIPS_HFLAG_CP0) ||
(ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) {
- tcg_gen_ld_tl(t0, cpu_env,
+ tcg_gen_ld_tl(t0, tcg_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, rt);
} else {
@@ -11212,7 +11212,6 @@
/* Branches completion */
clear_branch_hflags(ctx);
ctx->base.is_jmp = DISAS_NORETURN;
- /* FIXME: Need to clear can_do_io. */
switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
case MIPS_HFLAG_FBNSLOT:
gen_goto_tb(ctx, 0, ctx->base.pc_next + insn_bytes);
@@ -11514,7 +11513,7 @@
TCGv_i32 t0 = tcg_constant_i32(op);
TCGv t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t1, base, offset);
- gen_helper_cache(cpu_env, t1, t0);
+ gen_helper_cache(tcg_env, t1, t0);
}
static inline bool is_uhi(DisasContext *ctx, int sdbbp_code)
@@ -11711,15 +11710,15 @@
switch (op2) {
case OPC_ABSQ_S_QB:
check_dsp_r2(ctx);
- gen_helper_absq_s_qb(cpu_gpr[ret], v2_t, cpu_env);
+ gen_helper_absq_s_qb(cpu_gpr[ret], v2_t, tcg_env);
break;
case OPC_ABSQ_S_PH:
check_dsp(ctx);
- gen_helper_absq_s_ph(cpu_gpr[ret], v2_t, cpu_env);
+ gen_helper_absq_s_ph(cpu_gpr[ret], v2_t, tcg_env);
break;
case OPC_ABSQ_S_W:
check_dsp(ctx);
- gen_helper_absq_s_w(cpu_gpr[ret], v2_t, cpu_env);
+ gen_helper_absq_s_w(cpu_gpr[ret], v2_t, tcg_env);
break;
case OPC_PRECEQ_W_PHL:
check_dsp(ctx);
@@ -11770,67 +11769,67 @@
switch (op2) {
case OPC_ADDQ_PH:
check_dsp(ctx);
- gen_helper_addq_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addq_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDQ_S_PH:
check_dsp(ctx);
- gen_helper_addq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addq_s_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDQ_S_W:
check_dsp(ctx);
- gen_helper_addq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addq_s_w(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_QB:
check_dsp(ctx);
- gen_helper_addu_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_qb(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_S_QB:
check_dsp(ctx);
- gen_helper_addu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_s_qb(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_PH:
check_dsp_r2(ctx);
- gen_helper_addu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_S_PH:
check_dsp_r2(ctx);
- gen_helper_addu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_s_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBQ_PH:
check_dsp(ctx);
- gen_helper_subq_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subq_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBQ_S_PH:
check_dsp(ctx);
- gen_helper_subq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subq_s_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBQ_S_W:
check_dsp(ctx);
- gen_helper_subq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subq_s_w(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_QB:
check_dsp(ctx);
- gen_helper_subu_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_qb(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_S_QB:
check_dsp(ctx);
- gen_helper_subu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_s_qb(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_PH:
check_dsp_r2(ctx);
- gen_helper_subu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_S_PH:
check_dsp_r2(ctx);
- gen_helper_subu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_s_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDSC:
check_dsp(ctx);
- gen_helper_addsc(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addsc(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDWC:
check_dsp(ctx);
- gen_helper_addwc(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addwc(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MODSUB:
check_dsp(ctx);
@@ -11874,11 +11873,11 @@
break;
case OPC_PRECRQ_RS_PH_W:
check_dsp(ctx);
- gen_helper_precrq_rs_ph_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_precrq_rs_ph_w(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_PRECRQU_S_QB_PH:
check_dsp(ctx);
- gen_helper_precrqu_s_qb_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_precrqu_s_qb_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
}
break;
@@ -11943,15 +11942,15 @@
break;
case OPC_ABSQ_S_OB:
check_dsp_r2(ctx);
- gen_helper_absq_s_ob(cpu_gpr[ret], v2_t, cpu_env);
+ gen_helper_absq_s_ob(cpu_gpr[ret], v2_t, tcg_env);
break;
case OPC_ABSQ_S_PW:
check_dsp(ctx);
- gen_helper_absq_s_pw(cpu_gpr[ret], v2_t, cpu_env);
+ gen_helper_absq_s_pw(cpu_gpr[ret], v2_t, tcg_env);
break;
case OPC_ABSQ_S_QH:
check_dsp(ctx);
- gen_helper_absq_s_qh(cpu_gpr[ret], v2_t, cpu_env);
+ gen_helper_absq_s_qh(cpu_gpr[ret], v2_t, tcg_env);
break;
}
break;
@@ -11963,35 +11962,35 @@
break;
case OPC_SUBQ_PW:
check_dsp(ctx);
- gen_helper_subq_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subq_pw(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBQ_S_PW:
check_dsp(ctx);
- gen_helper_subq_s_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subq_s_pw(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBQ_QH:
check_dsp(ctx);
- gen_helper_subq_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subq_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBQ_S_QH:
check_dsp(ctx);
- gen_helper_subq_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subq_s_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_OB:
check_dsp(ctx);
- gen_helper_subu_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_S_OB:
check_dsp(ctx);
- gen_helper_subu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_s_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_QH:
check_dsp_r2(ctx);
- gen_helper_subu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBU_S_QH:
check_dsp_r2(ctx);
- gen_helper_subu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_subu_s_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SUBUH_OB:
check_dsp_r2(ctx);
@@ -12003,35 +12002,35 @@
break;
case OPC_ADDQ_PW:
check_dsp(ctx);
- gen_helper_addq_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addq_pw(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDQ_S_PW:
check_dsp(ctx);
- gen_helper_addq_s_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addq_s_pw(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDQ_QH:
check_dsp(ctx);
- gen_helper_addq_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addq_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDQ_S_QH:
check_dsp(ctx);
- gen_helper_addq_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addq_s_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_OB:
check_dsp(ctx);
- gen_helper_addu_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_S_OB:
check_dsp(ctx);
- gen_helper_addu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_s_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_QH:
check_dsp_r2(ctx);
- gen_helper_addu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDU_S_QH:
check_dsp_r2(ctx);
- gen_helper_addu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_addu_s_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_ADDUH_OB:
check_dsp_r2(ctx);
@@ -12077,11 +12076,11 @@
break;
case OPC_PRECRQ_RS_QH_PW:
check_dsp(ctx);
- gen_helper_precrq_rs_qh_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_precrq_rs_qh_pw(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_PRECRQU_S_OB_QH:
check_dsp(ctx);
- gen_helper_precrqu_s_ob_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_precrqu_s_ob_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
}
break;
@@ -12117,35 +12116,35 @@
switch (op2) {
case OPC_SHLL_QB:
check_dsp(ctx);
- gen_helper_shll_qb(cpu_gpr[ret], t0, v2_t, cpu_env);
+ gen_helper_shll_qb(cpu_gpr[ret], t0, v2_t, tcg_env);
break;
case OPC_SHLLV_QB:
check_dsp(ctx);
- gen_helper_shll_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_shll_qb(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SHLL_PH:
check_dsp(ctx);
- gen_helper_shll_ph(cpu_gpr[ret], t0, v2_t, cpu_env);
+ gen_helper_shll_ph(cpu_gpr[ret], t0, v2_t, tcg_env);
break;
case OPC_SHLLV_PH:
check_dsp(ctx);
- gen_helper_shll_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_shll_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SHLL_S_PH:
check_dsp(ctx);
- gen_helper_shll_s_ph(cpu_gpr[ret], t0, v2_t, cpu_env);
+ gen_helper_shll_s_ph(cpu_gpr[ret], t0, v2_t, tcg_env);
break;
case OPC_SHLLV_S_PH:
check_dsp(ctx);
- gen_helper_shll_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_shll_s_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SHLL_S_W:
check_dsp(ctx);
- gen_helper_shll_s_w(cpu_gpr[ret], t0, v2_t, cpu_env);
+ gen_helper_shll_s_w(cpu_gpr[ret], t0, v2_t, tcg_env);
break;
case OPC_SHLLV_S_W:
check_dsp(ctx);
- gen_helper_shll_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_shll_s_w(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_SHRL_QB:
check_dsp(ctx);
@@ -12216,43 +12215,43 @@
switch (op2) {
case OPC_SHLL_PW:
check_dsp(ctx);
- gen_helper_shll_pw(cpu_gpr[ret], v2_t, t0, cpu_env);
+ gen_helper_shll_pw(cpu_gpr[ret], v2_t, t0, tcg_env);
break;
case OPC_SHLLV_PW:
check_dsp(ctx);
- gen_helper_shll_pw(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ gen_helper_shll_pw(cpu_gpr[ret], v2_t, v1_t, tcg_env);
break;
case OPC_SHLL_S_PW:
check_dsp(ctx);
- gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, t0, cpu_env);
+ gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, t0, tcg_env);
break;
case OPC_SHLLV_S_PW:
check_dsp(ctx);
- gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, v1_t, tcg_env);
break;
case OPC_SHLL_OB:
check_dsp(ctx);
- gen_helper_shll_ob(cpu_gpr[ret], v2_t, t0, cpu_env);
+ gen_helper_shll_ob(cpu_gpr[ret], v2_t, t0, tcg_env);
break;
case OPC_SHLLV_OB:
check_dsp(ctx);
- gen_helper_shll_ob(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ gen_helper_shll_ob(cpu_gpr[ret], v2_t, v1_t, tcg_env);
break;
case OPC_SHLL_QH:
check_dsp(ctx);
- gen_helper_shll_qh(cpu_gpr[ret], v2_t, t0, cpu_env);
+ gen_helper_shll_qh(cpu_gpr[ret], v2_t, t0, tcg_env);
break;
case OPC_SHLLV_QH:
check_dsp(ctx);
- gen_helper_shll_qh(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ gen_helper_shll_qh(cpu_gpr[ret], v2_t, v1_t, tcg_env);
break;
case OPC_SHLL_S_QH:
check_dsp(ctx);
- gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, t0, cpu_env);
+ gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, t0, tcg_env);
break;
case OPC_SHLLV_S_QH:
check_dsp(ctx);
- gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, v1_t, tcg_env);
break;
case OPC_SHRA_OB:
check_dsp_r2(ctx);
@@ -12357,16 +12356,16 @@
check_dsp_r2(ctx);
switch (op2) {
case OPC_MUL_PH:
- gen_helper_mul_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_mul_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MUL_S_PH:
- gen_helper_mul_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_mul_s_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULQ_S_W:
- gen_helper_mulq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_mulq_s_w(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULQ_RS_W:
- gen_helper_mulq_rs_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_mulq_rs_w(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
}
break;
@@ -12374,91 +12373,91 @@
switch (op2) {
case OPC_DPAU_H_QBL:
check_dsp(ctx);
- gen_helper_dpau_h_qbl(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpau_h_qbl(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPAU_H_QBR:
check_dsp(ctx);
- gen_helper_dpau_h_qbr(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpau_h_qbr(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPSU_H_QBL:
check_dsp(ctx);
- gen_helper_dpsu_h_qbl(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpsu_h_qbl(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPSU_H_QBR:
check_dsp(ctx);
- gen_helper_dpsu_h_qbr(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpsu_h_qbr(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPA_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpa_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpa_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPAX_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpax_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpax_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPAQ_S_W_PH:
check_dsp(ctx);
- gen_helper_dpaq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpaq_s_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPAQX_S_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpaqx_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpaqx_s_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPAQX_SA_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpaqx_sa_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpaqx_sa_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPS_W_PH:
check_dsp_r2(ctx);
- gen_helper_dps_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dps_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPSX_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpsx_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpsx_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPSQ_S_W_PH:
check_dsp(ctx);
- gen_helper_dpsq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpsq_s_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPSQX_S_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpsqx_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpsqx_s_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPSQX_SA_W_PH:
check_dsp_r2(ctx);
- gen_helper_dpsqx_sa_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpsqx_sa_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_MULSAQ_S_W_PH:
check_dsp(ctx);
- gen_helper_mulsaq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_mulsaq_s_w_ph(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPAQ_SA_L_W:
check_dsp(ctx);
- gen_helper_dpaq_sa_l_w(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpaq_sa_l_w(t0, v1_t, v2_t, tcg_env);
break;
case OPC_DPSQ_SA_L_W:
check_dsp(ctx);
- gen_helper_dpsq_sa_l_w(t0, v1_t, v2_t, cpu_env);
+ gen_helper_dpsq_sa_l_w(t0, v1_t, v2_t, tcg_env);
break;
case OPC_MAQ_S_W_PHL:
check_dsp(ctx);
- gen_helper_maq_s_w_phl(t0, v1_t, v2_t, cpu_env);
+ gen_helper_maq_s_w_phl(t0, v1_t, v2_t, tcg_env);
break;
case OPC_MAQ_S_W_PHR:
check_dsp(ctx);
- gen_helper_maq_s_w_phr(t0, v1_t, v2_t, cpu_env);
+ gen_helper_maq_s_w_phr(t0, v1_t, v2_t, tcg_env);
break;
case OPC_MAQ_SA_W_PHL:
check_dsp(ctx);
- gen_helper_maq_sa_w_phl(t0, v1_t, v2_t, cpu_env);
+ gen_helper_maq_sa_w_phl(t0, v1_t, v2_t, tcg_env);
break;
case OPC_MAQ_SA_W_PHR:
check_dsp(ctx);
- gen_helper_maq_sa_w_phr(t0, v1_t, v2_t, cpu_env);
+ gen_helper_maq_sa_w_phr(t0, v1_t, v2_t, tcg_env);
break;
case OPC_MULSA_W_PH:
check_dsp_r2(ctx);
- gen_helper_mulsa_w_ph(t0, v1_t, v2_t, cpu_env);
+ gen_helper_mulsa_w_ph(t0, v1_t, v2_t, tcg_env);
break;
}
break;
@@ -12471,107 +12470,107 @@
switch (op2) {
case OPC_DMADD:
check_dsp(ctx);
- gen_helper_dmadd(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dmadd(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DMADDU:
check_dsp(ctx);
- gen_helper_dmaddu(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dmaddu(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DMSUB:
check_dsp(ctx);
- gen_helper_dmsub(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dmsub(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DMSUBU:
check_dsp(ctx);
- gen_helper_dmsubu(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dmsubu(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPA_W_QH:
check_dsp_r2(ctx);
- gen_helper_dpa_w_qh(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpa_w_qh(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPAQ_S_W_QH:
check_dsp(ctx);
- gen_helper_dpaq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpaq_s_w_qh(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPAQ_SA_L_PW:
check_dsp(ctx);
- gen_helper_dpaq_sa_l_pw(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpaq_sa_l_pw(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPAU_H_OBL:
check_dsp(ctx);
- gen_helper_dpau_h_obl(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpau_h_obl(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPAU_H_OBR:
check_dsp(ctx);
- gen_helper_dpau_h_obr(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpau_h_obr(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPS_W_QH:
check_dsp_r2(ctx);
- gen_helper_dps_w_qh(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dps_w_qh(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPSQ_S_W_QH:
check_dsp(ctx);
- gen_helper_dpsq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpsq_s_w_qh(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPSQ_SA_L_PW:
check_dsp(ctx);
- gen_helper_dpsq_sa_l_pw(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpsq_sa_l_pw(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPSU_H_OBL:
check_dsp(ctx);
- gen_helper_dpsu_h_obl(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpsu_h_obl(v1_t, v2_t, t0, tcg_env);
break;
case OPC_DPSU_H_OBR:
check_dsp(ctx);
- gen_helper_dpsu_h_obr(v1_t, v2_t, t0, cpu_env);
+ gen_helper_dpsu_h_obr(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_S_L_PWL:
check_dsp(ctx);
- gen_helper_maq_s_l_pwl(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_s_l_pwl(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_S_L_PWR:
check_dsp(ctx);
- gen_helper_maq_s_l_pwr(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_s_l_pwr(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_S_W_QHLL:
check_dsp(ctx);
- gen_helper_maq_s_w_qhll(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_s_w_qhll(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_SA_W_QHLL:
check_dsp(ctx);
- gen_helper_maq_sa_w_qhll(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_sa_w_qhll(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_S_W_QHLR:
check_dsp(ctx);
- gen_helper_maq_s_w_qhlr(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_s_w_qhlr(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_SA_W_QHLR:
check_dsp(ctx);
- gen_helper_maq_sa_w_qhlr(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_sa_w_qhlr(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_S_W_QHRL:
check_dsp(ctx);
- gen_helper_maq_s_w_qhrl(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_s_w_qhrl(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_SA_W_QHRL:
check_dsp(ctx);
- gen_helper_maq_sa_w_qhrl(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_sa_w_qhrl(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_S_W_QHRR:
check_dsp(ctx);
- gen_helper_maq_s_w_qhrr(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_s_w_qhrr(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MAQ_SA_W_QHRR:
check_dsp(ctx);
- gen_helper_maq_sa_w_qhrr(v1_t, v2_t, t0, cpu_env);
+ gen_helper_maq_sa_w_qhrr(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MULSAQ_S_L_PW:
check_dsp(ctx);
- gen_helper_mulsaq_s_l_pw(v1_t, v2_t, t0, cpu_env);
+ gen_helper_mulsaq_s_l_pw(v1_t, v2_t, t0, tcg_env);
break;
case OPC_MULSAQ_S_W_QH:
check_dsp(ctx);
- gen_helper_mulsaq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ gen_helper_mulsaq_s_w_qh(v1_t, v2_t, t0, tcg_env);
break;
}
}
@@ -12581,27 +12580,27 @@
switch (op2) {
case OPC_MULEU_S_PH_QBL:
check_dsp(ctx);
- gen_helper_muleu_s_ph_qbl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleu_s_ph_qbl(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULEU_S_PH_QBR:
check_dsp(ctx);
- gen_helper_muleu_s_ph_qbr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleu_s_ph_qbr(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULQ_RS_PH:
check_dsp(ctx);
- gen_helper_mulq_rs_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_mulq_rs_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULEQ_S_W_PHL:
check_dsp(ctx);
- gen_helper_muleq_s_w_phl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleq_s_w_phl(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULEQ_S_W_PHR:
check_dsp(ctx);
- gen_helper_muleq_s_w_phr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleq_s_w_phr(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULQ_S_PH:
check_dsp_r2(ctx);
- gen_helper_mulq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_mulq_s_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
}
break;
@@ -12610,23 +12609,23 @@
switch (op2) {
case OPC_MULEQ_S_PW_QHL:
check_dsp(ctx);
- gen_helper_muleq_s_pw_qhl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleq_s_pw_qhl(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULEQ_S_PW_QHR:
check_dsp(ctx);
- gen_helper_muleq_s_pw_qhr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleq_s_pw_qhr(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULEU_S_QH_OBL:
check_dsp(ctx);
- gen_helper_muleu_s_qh_obl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleu_s_qh_obl(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULEU_S_QH_OBR:
check_dsp(ctx);
- gen_helper_muleu_s_qh_obr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_muleu_s_qh_obr(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_MULQ_RS_QH:
check_dsp(ctx);
- gen_helper_mulq_rs_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_mulq_rs_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
}
break;
@@ -12795,15 +12794,15 @@
switch (op2) {
case OPC_CMPU_EQ_QB:
check_dsp(ctx);
- gen_helper_cmpu_eq_qb(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_eq_qb(v1_t, v2_t, tcg_env);
break;
case OPC_CMPU_LT_QB:
check_dsp(ctx);
- gen_helper_cmpu_lt_qb(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_lt_qb(v1_t, v2_t, tcg_env);
break;
case OPC_CMPU_LE_QB:
check_dsp(ctx);
- gen_helper_cmpu_le_qb(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_le_qb(v1_t, v2_t, tcg_env);
break;
case OPC_CMPGU_EQ_QB:
check_dsp(ctx);
@@ -12843,23 +12842,23 @@
break;
case OPC_CMP_EQ_PH:
check_dsp(ctx);
- gen_helper_cmp_eq_ph(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_eq_ph(v1_t, v2_t, tcg_env);
break;
case OPC_CMP_LT_PH:
check_dsp(ctx);
- gen_helper_cmp_lt_ph(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_lt_ph(v1_t, v2_t, tcg_env);
break;
case OPC_CMP_LE_PH:
check_dsp(ctx);
- gen_helper_cmp_le_ph(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_le_ph(v1_t, v2_t, tcg_env);
break;
case OPC_PICK_QB:
check_dsp(ctx);
- gen_helper_pick_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_pick_qb(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_PICK_PH:
check_dsp(ctx);
- gen_helper_pick_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_pick_ph(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_PACKRL_PH:
check_dsp(ctx);
@@ -12872,39 +12871,39 @@
switch (op2) {
case OPC_CMP_EQ_PW:
check_dsp(ctx);
- gen_helper_cmp_eq_pw(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_eq_pw(v1_t, v2_t, tcg_env);
break;
case OPC_CMP_LT_PW:
check_dsp(ctx);
- gen_helper_cmp_lt_pw(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_lt_pw(v1_t, v2_t, tcg_env);
break;
case OPC_CMP_LE_PW:
check_dsp(ctx);
- gen_helper_cmp_le_pw(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_le_pw(v1_t, v2_t, tcg_env);
break;
case OPC_CMP_EQ_QH:
check_dsp(ctx);
- gen_helper_cmp_eq_qh(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_eq_qh(v1_t, v2_t, tcg_env);
break;
case OPC_CMP_LT_QH:
check_dsp(ctx);
- gen_helper_cmp_lt_qh(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_lt_qh(v1_t, v2_t, tcg_env);
break;
case OPC_CMP_LE_QH:
check_dsp(ctx);
- gen_helper_cmp_le_qh(v1_t, v2_t, cpu_env);
+ gen_helper_cmp_le_qh(v1_t, v2_t, tcg_env);
break;
case OPC_CMPGDU_EQ_OB:
check_dsp_r2(ctx);
- gen_helper_cmpgdu_eq_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_cmpgdu_eq_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_CMPGDU_LT_OB:
check_dsp_r2(ctx);
- gen_helper_cmpgdu_lt_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_cmpgdu_lt_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_CMPGDU_LE_OB:
check_dsp_r2(ctx);
- gen_helper_cmpgdu_le_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_cmpgdu_le_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_CMPGU_EQ_OB:
check_dsp(ctx);
@@ -12920,15 +12919,15 @@
break;
case OPC_CMPU_EQ_OB:
check_dsp(ctx);
- gen_helper_cmpu_eq_ob(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_eq_ob(v1_t, v2_t, tcg_env);
break;
case OPC_CMPU_LT_OB:
check_dsp(ctx);
- gen_helper_cmpu_lt_ob(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_lt_ob(v1_t, v2_t, tcg_env);
break;
case OPC_CMPU_LE_OB:
check_dsp(ctx);
- gen_helper_cmpu_le_ob(v1_t, v2_t, cpu_env);
+ gen_helper_cmpu_le_ob(v1_t, v2_t, tcg_env);
break;
case OPC_PACKRL_PW:
check_dsp(ctx);
@@ -12936,15 +12935,15 @@
break;
case OPC_PICK_OB:
check_dsp(ctx);
- gen_helper_pick_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_pick_ob(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_PICK_PW:
check_dsp(ctx);
- gen_helper_pick_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_pick_pw(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
case OPC_PICK_QH:
check_dsp(ctx);
- gen_helper_pick_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ gen_helper_pick_qh(cpu_gpr[ret], v1_t, v2_t, tcg_env);
break;
}
break;
@@ -13066,80 +13065,80 @@
case OPC_EXTR_W:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extr_w(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_extr_w(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_EXTR_R_W:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extr_r_w(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_extr_r_w(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_EXTR_RS_W:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extr_rs_w(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_extr_rs_w(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_EXTR_S_H:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extr_s_h(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_extr_s_h(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_EXTRV_S_H:
tcg_gen_movi_tl(t0, v2);
- gen_helper_extr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_extr_s_h(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_EXTRV_W:
tcg_gen_movi_tl(t0, v2);
- gen_helper_extr_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_extr_w(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_EXTRV_R_W:
tcg_gen_movi_tl(t0, v2);
- gen_helper_extr_r_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_extr_r_w(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_EXTRV_RS_W:
tcg_gen_movi_tl(t0, v2);
- gen_helper_extr_rs_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_extr_rs_w(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_EXTP:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extp(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_extp(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_EXTPV:
tcg_gen_movi_tl(t0, v2);
- gen_helper_extp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_extp(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_EXTPDP:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_extpdp(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_extpdp(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_EXTPDPV:
tcg_gen_movi_tl(t0, v2);
- gen_helper_extpdp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_extpdp(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_SHILO:
imm = (ctx->opcode >> 20) & 0x3F;
tcg_gen_movi_tl(t0, ret);
tcg_gen_movi_tl(t1, imm);
- gen_helper_shilo(t0, t1, cpu_env);
+ gen_helper_shilo(t0, t1, tcg_env);
break;
case OPC_SHILOV:
tcg_gen_movi_tl(t0, ret);
- gen_helper_shilo(t0, v1_t, cpu_env);
+ gen_helper_shilo(t0, v1_t, tcg_env);
break;
case OPC_MTHLIP:
tcg_gen_movi_tl(t0, ret);
- gen_helper_mthlip(t0, v1_t, cpu_env);
+ gen_helper_mthlip(t0, v1_t, tcg_env);
break;
case OPC_WRDSP:
imm = (ctx->opcode >> 11) & 0x3FF;
tcg_gen_movi_tl(t0, imm);
- gen_helper_wrdsp(v1_t, t0, cpu_env);
+ gen_helper_wrdsp(v1_t, t0, tcg_env);
break;
case OPC_RDDSP:
imm = (ctx->opcode >> 16) & 0x03FF;
tcg_gen_movi_tl(t0, imm);
- gen_helper_rddsp(cpu_gpr[ret], t0, cpu_env);
+ gen_helper_rddsp(cpu_gpr[ret], t0, tcg_env);
break;
}
break;
@@ -13149,7 +13148,7 @@
switch (op2) {
case OPC_DMTHLIP:
tcg_gen_movi_tl(t0, ret);
- gen_helper_dmthlip(v1_t, t0, cpu_env);
+ gen_helper_dmthlip(v1_t, t0, tcg_env);
break;
case OPC_DSHILO:
{
@@ -13157,97 +13156,97 @@
int ac = (ctx->opcode >> 11) & 0x03;
tcg_gen_movi_tl(t0, shift);
tcg_gen_movi_tl(t1, ac);
- gen_helper_dshilo(t0, t1, cpu_env);
+ gen_helper_dshilo(t0, t1, tcg_env);
break;
}
case OPC_DSHILOV:
{
int ac = (ctx->opcode >> 11) & 0x03;
tcg_gen_movi_tl(t0, ac);
- gen_helper_dshilo(v1_t, t0, cpu_env);
+ gen_helper_dshilo(v1_t, t0, tcg_env);
break;
}
case OPC_DEXTP:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextp(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextp(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTPV:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextp(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTPDP:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextpdp(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextpdp(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTPDPV:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextpdp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextpdp(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTR_L:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextr_l(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextr_l(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTR_R_L:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextr_r_l(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextr_r_l(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTR_RS_L:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextr_rs_l(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextr_rs_l(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTR_W:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextr_w(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextr_w(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTR_R_W:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextr_r_w(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextr_r_w(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTR_RS_W:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextr_rs_w(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextr_rs_w(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTR_S_H:
tcg_gen_movi_tl(t0, v2);
tcg_gen_movi_tl(t1, v1);
- gen_helper_dextr_s_h(cpu_gpr[ret], t0, t1, cpu_env);
+ gen_helper_dextr_s_h(cpu_gpr[ret], t0, t1, tcg_env);
break;
case OPC_DEXTRV_S_H:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextr_s_h(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTRV_L:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextr_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextr_l(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTRV_R_L:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextr_r_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextr_r_l(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTRV_RS_L:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextr_rs_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextr_rs_l(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTRV_W:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextr_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextr_w(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTRV_R_W:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextr_r_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextr_r_w(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
case OPC_DEXTRV_RS_W:
tcg_gen_movi_tl(t0, v2);
- gen_helper_dextr_rs_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ gen_helper_dextr_rs_w(cpu_gpr[ret], t0, v1_t, tcg_env);
break;
}
break;
@@ -13579,7 +13578,7 @@
MIPS_INVAL("PMON / selsl");
gen_reserved_instruction(ctx);
#else
- gen_helper_pmon(cpu_env, tcg_constant_i32(sa));
+ gen_helper_pmon(tcg_env, tcg_constant_i32(sa));
#endif
break;
case OPC_SYSCALL:
@@ -14102,7 +14101,7 @@
gen_load_gpr(t0, rt);
gen_load_gpr(t1, rs);
- gen_helper_insv(cpu_gpr[rt], cpu_env, t1, t0);
+ gen_helper_insv(cpu_gpr[rt], tcg_env, t1, t0);
break;
}
default: /* Invalid */
@@ -14371,7 +14370,7 @@
gen_load_gpr(t0, rt);
gen_load_gpr(t1, rs);
- gen_helper_dinsv(cpu_gpr[rt], cpu_env, t1, t0);
+ gen_helper_dinsv(cpu_gpr[rt], tcg_env, t1, t0);
break;
}
default: /* Invalid */
@@ -14606,7 +14605,7 @@
TCGv t0 = tcg_temp_new();
gen_load_gpr(t0, rs);
- gen_helper_yield(t0, cpu_env, t0);
+ gen_helper_yield(t0, tcg_env, t0);
gen_store_gpr(t0, rd);
}
break;
@@ -14797,32 +14796,32 @@
break;
case OPC_DVPE:
check_cp0_mt(ctx);
- gen_helper_dvpe(t0, cpu_env);
+ gen_helper_dvpe(t0, tcg_env);
gen_store_gpr(t0, rt);
break;
case OPC_EVPE:
check_cp0_mt(ctx);
- gen_helper_evpe(t0, cpu_env);
+ gen_helper_evpe(t0, tcg_env);
gen_store_gpr(t0, rt);
break;
case OPC_DVP:
check_insn(ctx, ISA_MIPS_R6);
if (ctx->vp) {
- gen_helper_dvp(t0, cpu_env);
+ gen_helper_dvp(t0, tcg_env);
gen_store_gpr(t0, rt);
}
break;
case OPC_EVP:
check_insn(ctx, ISA_MIPS_R6);
if (ctx->vp) {
- gen_helper_evp(t0, cpu_env);
+ gen_helper_evp(t0, tcg_env);
gen_store_gpr(t0, rt);
}
break;
case OPC_DI:
check_insn(ctx, ISA_MIPS_R2);
save_cpu_state(ctx, 1);
- gen_helper_di(t0, cpu_env);
+ gen_helper_di(t0, tcg_env);
gen_store_gpr(t0, rt);
/*
* Stop translation as we may have switched
@@ -14833,7 +14832,7 @@
case OPC_EI:
check_insn(ctx, ISA_MIPS_R2);
save_cpu_state(ctx, 1);
- gen_helper_ei(t0, cpu_env);
+ gen_helper_ei(t0, tcg_env);
gen_store_gpr(t0, rt);
/*
* DISAS_STOP isn't sufficient, we need to ensure we break
@@ -15377,7 +15376,7 @@
static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUMIPSState *env = cs->env_ptr;
+ CPUMIPSState *env = cpu_env(cs);
ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
ctx->saved_pc = -1;
@@ -15448,7 +15447,7 @@
static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
- CPUMIPSState *env = cs->env_ptr;
+ CPUMIPSState *env = cpu_env(cs);
DisasContext *ctx = container_of(dcbase, DisasContext, base);
int insn_bytes;
int is_slot;
@@ -15564,11 +15563,9 @@
void mips_tcg_init(void)
{
- int i;
-
cpu_gpr[0] = NULL;
- for (i = 1; i < 32; i++)
- cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ for (unsigned i = 1; i < 32; i++)
+ cpu_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState,
active_tc.gpr[i]),
regnames[i]);
@@ -15578,48 +15575,48 @@
for (unsigned i = 1; i < 32; i++) {
g_autofree char *rname = g_strdup_printf("%s[hi]", regnames[i]);
- cpu_gpr_hi[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_gpr_hi[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUMIPSState,
active_tc.gpr_hi[i]),
rname);
}
#endif /* !TARGET_MIPS64 */
- for (i = 0; i < 32; i++) {
+ for (unsigned i = 0; i < 32; i++) {
int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
- fpu_f64[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]);
+ fpu_f64[i] = tcg_global_mem_new_i64(tcg_env, off, fregnames[i]);
}
msa_translate_init();
- cpu_PC = tcg_global_mem_new(cpu_env,
+ cpu_PC = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState, active_tc.PC), "PC");
- for (i = 0; i < MIPS_DSP_ACC; i++) {
- cpu_HI[i] = tcg_global_mem_new(cpu_env,
+ for (unsigned i = 0; i < MIPS_DSP_ACC; i++) {
+ cpu_HI[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState, active_tc.HI[i]),
regnames_HI[i]);
- cpu_LO[i] = tcg_global_mem_new(cpu_env,
+ cpu_LO[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState, active_tc.LO[i]),
regnames_LO[i]);
}
- cpu_dspctrl = tcg_global_mem_new(cpu_env,
+ cpu_dspctrl = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState,
active_tc.DSPControl),
"DSPControl");
- bcond = tcg_global_mem_new(cpu_env,
+ bcond = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState, bcond), "bcond");
- btarget = tcg_global_mem_new(cpu_env,
+ btarget = tcg_global_mem_new(tcg_env,
offsetof(CPUMIPSState, btarget), "btarget");
- hflags = tcg_global_mem_new_i32(cpu_env,
+ hflags = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUMIPSState, hflags), "hflags");
- fpu_fcr0 = tcg_global_mem_new_i32(cpu_env,
+ fpu_fcr0 = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUMIPSState, active_fpu.fcr0),
"fcr0");
- fpu_fcr31 = tcg_global_mem_new_i32(cpu_env,
+ fpu_fcr31 = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUMIPSState, active_fpu.fcr31),
"fcr31");
- cpu_lladdr = tcg_global_mem_new(cpu_env, offsetof(CPUMIPSState, lladdr),
+ cpu_lladdr = tcg_global_mem_new(tcg_env, offsetof(CPUMIPSState, lladdr),
"lladdr");
- cpu_llval = tcg_global_mem_new(cpu_env, offsetof(CPUMIPSState, llval),
+ cpu_llval = tcg_global_mem_new(tcg_env, offsetof(CPUMIPSState, llval),
"llval");
if (TARGET_LONG_BITS == 32) {
diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h
index db3dc93..cffcfea 100644
--- a/target/mips/tcg/translate.h
+++ b/target/mips/tcg/translate.h
@@ -123,15 +123,15 @@
};
#define gen_helper_0e1i(name, arg1, arg2) do { \
- gen_helper_##name(cpu_env, arg1, tcg_constant_i32(arg2)); \
+ gen_helper_##name(tcg_env, arg1, tcg_constant_i32(arg2)); \
} while (0)
#define gen_helper_1e0i(name, ret, arg1) do { \
- gen_helper_##name(ret, cpu_env, tcg_constant_i32(arg1)); \
+ gen_helper_##name(ret, tcg_env, tcg_constant_i32(arg1)); \
} while (0)
#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \
- gen_helper_##name(cpu_env, arg1, arg2, tcg_constant_i32(arg3));\
+ gen_helper_##name(tcg_env, arg1, arg2, tcg_constant_i32(arg3));\
} while (0)
void generate_exception(DisasContext *ctx, int excp);
diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c
index 2c1f6cc..c877ede 100644
--- a/target/mips/tcg/vr54xx_translate.c
+++ b/target/mips/tcg/vr54xx_translate.c
@@ -43,7 +43,7 @@
gen_load_gpr(t0, a->rs);
gen_load_gpr(t1, a->rt);
- gen_helper_mult_acc(t0, cpu_env, t0, t1);
+ gen_helper_mult_acc(t0, tcg_env, t0, t1);
gen_store_gpr(t0, a->rd);
return true;
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index bc5cbf8..15e499f 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -113,11 +113,9 @@
static void nios2_cpu_initfn(Object *obj)
{
+#if !defined(CONFIG_USER_ONLY)
Nios2CPU *cpu = NIOS2_CPU(obj);
- cpu_set_cpustate_pointers(cpu);
-
-#if !defined(CONFIG_USER_ONLY)
mmu_init(&cpu->env);
#endif
}
@@ -400,6 +398,7 @@
.name = TYPE_NIOS2_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(Nios2CPU),
+ .instance_align = __alignof(Nios2CPU),
.instance_init = nios2_cpu_initfn,
.class_size = sizeof(Nios2CPUClass),
.class_init = nios2_cpu_class_init,
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
index 477a316..70b6377 100644
--- a/target/nios2/cpu.h
+++ b/target/nios2/cpu.h
@@ -218,7 +218,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUNios2State env;
bool diverr_present;
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index dfc546d..e806623 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -209,7 +209,7 @@
{
/* Note that PC is advanced for all hardware exceptions. */
tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(index));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
dc->base.is_jmp = DISAS_NORETURN;
}
@@ -244,7 +244,7 @@
tcg_gen_lookup_and_goto_ptr();
gen_set_label(l);
- tcg_gen_st_tl(dest, cpu_env, offsetof(CPUNios2State, ctrl[CR_BADADDR]));
+ tcg_gen_st_tl(dest, tcg_env, offsetof(CPUNios2State, ctrl[CR_BADADDR]));
t_gen_helper_raise_exception(dc, EXCP_UNALIGND);
dc->base.is_jmp = DISAS_NORETURN;
@@ -414,7 +414,7 @@
#else
I_TYPE(instr, code);
TCGv dest = dest_gpr(dc, instr.b);
- gen_helper_rdprs(dest, cpu_env, tcg_constant_i32(instr.a));
+ gen_helper_rdprs(dest, tcg_env, tcg_constant_i32(instr.a));
tcg_gen_addi_tl(dest, dest, instr.imm16.s);
#endif
}
@@ -508,10 +508,10 @@
#else
if (FIELD_EX32(dc->tb_flags, TBFLAGS, CRS0)) {
TCGv tmp = tcg_temp_new();
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_ESTATUS]));
- gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_EA));
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPUNios2State, ctrl[CR_ESTATUS]));
+ gen_helper_eret(tcg_env, tmp, load_gpr(dc, R_EA));
} else {
- gen_helper_eret(cpu_env, load_gpr(dc, R_SSTATUS), load_gpr(dc, R_EA));
+ gen_helper_eret(tcg_env, load_gpr(dc, R_SSTATUS), load_gpr(dc, R_EA));
}
dc->base.is_jmp = DISAS_NORETURN;
#endif
@@ -537,8 +537,8 @@
g_assert_not_reached();
#else
TCGv tmp = tcg_temp_new();
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_BSTATUS]));
- gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_BA));
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPUNios2State, ctrl[CR_BSTATUS]));
+ gen_helper_eret(tcg_env, tmp, load_gpr(dc, R_BA));
dc->base.is_jmp = DISAS_NORETURN;
#endif
@@ -602,12 +602,12 @@
*/
t1 = tcg_temp_new();
t2 = tcg_temp_new();
- tcg_gen_ld_tl(t1, cpu_env, offsetof(CPUNios2State, ctrl[CR_IPENDING]));
- tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUNios2State, ctrl[CR_IENABLE]));
+ tcg_gen_ld_tl(t1, tcg_env, offsetof(CPUNios2State, ctrl[CR_IPENDING]));
+ tcg_gen_ld_tl(t2, tcg_env, offsetof(CPUNios2State, ctrl[CR_IENABLE]));
tcg_gen_and_tl(dest, t1, t2);
break;
default:
- tcg_gen_ld_tl(dest, cpu_env,
+ tcg_gen_ld_tl(dest, tcg_env,
offsetof(CPUNios2State, ctrl[instr.imm5]));
break;
}
@@ -637,13 +637,13 @@
switch (instr.imm5) {
case CR_PTEADDR:
- gen_helper_mmu_write_pteaddr(cpu_env, v);
+ gen_helper_mmu_write_pteaddr(tcg_env, v);
break;
case CR_TLBACC:
- gen_helper_mmu_write_tlbacc(cpu_env, v);
+ gen_helper_mmu_write_tlbacc(tcg_env, v);
break;
case CR_TLBMISC:
- gen_helper_mmu_write_tlbmisc(cpu_env, v);
+ gen_helper_mmu_write_tlbmisc(tcg_env, v);
break;
case CR_STATUS:
case CR_IENABLE:
@@ -653,7 +653,7 @@
default:
if (wr == -1) {
/* The register is entirely writable. */
- tcg_gen_st_tl(v, cpu_env, ofs);
+ tcg_gen_st_tl(v, tcg_env, ofs);
} else {
/*
* The register is partially read-only or reserved:
@@ -665,12 +665,12 @@
if (ro != 0) {
TCGv o = tcg_temp_new();
- tcg_gen_ld_tl(o, cpu_env, ofs);
+ tcg_gen_ld_tl(o, tcg_env, ofs);
tcg_gen_andi_tl(o, o, ro);
tcg_gen_or_tl(n, n, o);
}
- tcg_gen_st_tl(n, cpu_env, ofs);
+ tcg_gen_st_tl(n, tcg_env, ofs);
}
break;
}
@@ -692,7 +692,7 @@
g_assert_not_reached();
#else
R_TYPE(instr, code);
- gen_helper_wrprs(cpu_env, tcg_constant_i32(instr.c),
+ gen_helper_wrprs(tcg_env, tcg_constant_i32(instr.c),
load_gpr(dc, instr.a));
/*
* The expected write to PRS[r0] is 0, from CRS[r0].
@@ -789,14 +789,14 @@
static void divs(DisasContext *dc, uint32_t code, uint32_t flags)
{
R_TYPE(instr, (code));
- gen_helper_divs(dest_gpr(dc, instr.c), cpu_env,
+ gen_helper_divs(dest_gpr(dc, instr.c), tcg_env,
load_gpr(dc, instr.a), load_gpr(dc, instr.b));
}
static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
{
R_TYPE(instr, (code));
- gen_helper_divu(dest_gpr(dc, instr.c), cpu_env,
+ gen_helper_divu(dest_gpr(dc, instr.c), tcg_env,
load_gpr(dc, instr.a), load_gpr(dc, instr.b));
}
@@ -809,7 +809,7 @@
* things easier for cpu_loop if we pop this into env->error_code.
*/
R_TYPE(instr, code);
- tcg_gen_st_i32(tcg_constant_i32(instr.imm5), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(instr.imm5), tcg_env,
offsetof(CPUNios2State, error_code));
#endif
t_gen_helper_raise_exception(dc, EXCP_TRAP);
@@ -944,7 +944,7 @@
static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUNios2State *env = cs->env_ptr;
+ CPUNios2State *env = cpu_env(cs);
Nios2CPU *cpu = env_archcpu(env);
int page_insns;
@@ -970,7 +970,7 @@
static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUNios2State *env = cs->env_ptr;
+ CPUNios2State *env = cpu_env(cs);
const Nios2Instruction *instr;
uint32_t code, pc;
uint8_t op;
@@ -1084,7 +1084,7 @@
void nios2_tcg_init(void)
{
#ifndef CONFIG_USER_ONLY
- TCGv_ptr crs = tcg_global_mem_new_ptr(cpu_env,
+ TCGv_ptr crs = tcg_global_mem_new_ptr(tcg_env,
offsetof(CPUNios2State, regs), "crs");
for (int i = 0; i < NUM_GP_REGS; i++) {
@@ -1097,12 +1097,12 @@
#endif
for (int i = 0; i < NUM_GP_REGS; i++) {
- cpu_R[i] = tcg_global_mem_new(cpu_env, offsetof_regs0(i),
+ cpu_R[i] = tcg_global_mem_new(tcg_env, offsetof_regs0(i),
gr_regnames[i]);
}
#undef offsetof_regs0
- cpu_pc = tcg_global_mem_new(cpu_env,
+ cpu_pc = tcg_global_mem_new(tcg_env,
offsetof(CPUNios2State, pc), "pc");
}
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index 61d748c..f5a3d52 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -149,12 +149,8 @@
static void openrisc_cpu_initfn(Object *obj)
{
- OpenRISCCPU *cpu = OPENRISC_CPU(obj);
-
- cpu_set_cpustate_pointers(cpu);
-
#ifndef CONFIG_USER_ONLY
- qdev_init_gpio_in_named(DEVICE(cpu), openrisc_cpu_set_irq, "IRQ", NR_IRQS);
+ qdev_init_gpio_in_named(DEVICE(obj), openrisc_cpu_set_irq, "IRQ", NR_IRQS);
#endif
}
@@ -314,6 +310,7 @@
.name = TYPE_OPENRISC_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(OpenRISCCPU),
+ .instance_align = __alignof(OpenRISCCPU),
.instance_init = openrisc_cpu_initfn,
.abstract = true,
.class_size = sizeof(OpenRISCCPUClass),
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index ce4d605..334997e 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -305,7 +305,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUOpenRISCState env;
};
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index d657584..ecff441 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -95,37 +95,37 @@
};
int i;
- cpu_sr = tcg_global_mem_new(cpu_env,
+ cpu_sr = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, sr), "sr");
- cpu_dflag = tcg_global_mem_new_i32(cpu_env,
+ cpu_dflag = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, dflag),
"dflag");
- cpu_pc = tcg_global_mem_new(cpu_env,
+ cpu_pc = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, pc), "pc");
- cpu_ppc = tcg_global_mem_new(cpu_env,
+ cpu_ppc = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, ppc), "ppc");
- jmp_pc = tcg_global_mem_new(cpu_env,
+ jmp_pc = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
- cpu_sr_f = tcg_global_mem_new(cpu_env,
+ cpu_sr_f = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, sr_f), "sr_f");
- cpu_sr_cy = tcg_global_mem_new(cpu_env,
+ cpu_sr_cy = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
- cpu_sr_ov = tcg_global_mem_new(cpu_env,
+ cpu_sr_ov = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
- cpu_lock_addr = tcg_global_mem_new(cpu_env,
+ cpu_lock_addr = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, lock_addr),
"lock_addr");
- cpu_lock_value = tcg_global_mem_new(cpu_env,
+ cpu_lock_value = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState, lock_value),
"lock_value");
- fpcsr = tcg_global_mem_new_i32(cpu_env,
+ fpcsr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUOpenRISCState, fpcsr),
"fpcsr");
- cpu_mac = tcg_global_mem_new_i64(cpu_env,
+ cpu_mac = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUOpenRISCState, mac),
"mac");
for (i = 0; i < 32; i++) {
- cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ cpu_regs[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUOpenRISCState,
shadow_gpr[0][i]),
regnames[i]);
@@ -134,7 +134,7 @@
static void gen_exception(DisasContext *dc, unsigned int excp)
{
- gen_helper_exception(cpu_env, tcg_constant_i32(excp));
+ gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
static void gen_illegal_exception(DisasContext *dc)
@@ -182,21 +182,21 @@
static void gen_ove_cy(DisasContext *dc)
{
if (dc->tb_flags & SR_OVE) {
- gen_helper_ove_cy(cpu_env);
+ gen_helper_ove_cy(tcg_env);
}
}
static void gen_ove_ov(DisasContext *dc)
{
if (dc->tb_flags & SR_OVE) {
- gen_helper_ove_ov(cpu_env);
+ gen_helper_ove_ov(tcg_env);
}
}
static void gen_ove_cyov(DisasContext *dc)
{
if (dc->tb_flags & SR_OVE) {
- gen_helper_ove_cyov(cpu_env);
+ gen_helper_ove_cyov(tcg_env);
}
}
@@ -835,7 +835,7 @@
}
tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
- gen_helper_mfspr(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d), spr);
+ gen_helper_mfspr(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->d), spr);
return true;
}
@@ -860,7 +860,7 @@
dc->base.is_jmp = DISAS_EXIT;
tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
- gen_helper_mtspr(cpu_env, spr, cpu_R(dc, a->b));
+ gen_helper_mtspr(tcg_env, spr, cpu_R(dc, a->b));
return true;
}
@@ -1102,7 +1102,7 @@
if (is_user(dc)) {
gen_illegal_exception(dc);
} else {
- gen_helper_rfe(cpu_env);
+ gen_helper_rfe(tcg_env);
dc->base.is_jmp = DISAS_EXIT;
}
return true;
@@ -1115,8 +1115,8 @@
return false;
}
check_r0_write(dc, a->d);
- fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a));
- gen_helper_update_fpcsr(cpu_env);
+ fn(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->a));
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1127,8 +1127,8 @@
return false;
}
check_r0_write(dc, a->d);
- fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
- gen_helper_update_fpcsr(cpu_env);
+ fn(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1140,14 +1140,14 @@
return false;
}
if (swap) {
- fn(cpu_sr_f, cpu_env, cpu_R(dc, a->b), cpu_R(dc, a->a));
+ fn(cpu_sr_f, tcg_env, cpu_R(dc, a->b), cpu_R(dc, a->a));
} else {
- fn(cpu_sr_f, cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ fn(cpu_sr_f, tcg_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
}
if (inv) {
tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
}
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1193,9 +1193,9 @@
return false;
}
check_r0_write(dc, a->d);
- gen_helper_float_madd_s(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d),
+ gen_helper_float_madd_s(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->d),
cpu_R(dc, a->a), cpu_R(dc, a->b));
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1309,10 +1309,10 @@
t1 = tcg_temp_new_i64();
load_pair(dc, t0, a->a, a->ap);
load_pair(dc, t1, a->b, a->bp);
- fn(t0, cpu_env, t0, t1);
+ fn(t0, tcg_env, t0, t1);
save_pair(dc, t0, a->d, a->dp);
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1330,10 +1330,10 @@
t0 = tcg_temp_new_i64();
load_pair(dc, t0, a->a, a->ap);
- fn(t0, cpu_env, t0);
+ fn(t0, tcg_env, t0);
save_pair(dc, t0, a->d, a->dp);
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1354,15 +1354,15 @@
load_pair(dc, t0, a->a, a->ap);
load_pair(dc, t1, a->b, a->bp);
if (swap) {
- fn(cpu_sr_f, cpu_env, t1, t0);
+ fn(cpu_sr_f, tcg_env, t1, t0);
} else {
- fn(cpu_sr_f, cpu_env, t0, t1);
+ fn(cpu_sr_f, tcg_env, t0, t1);
}
if (inv) {
tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
}
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1412,10 +1412,10 @@
check_r0_write(dc, a->d);
t0 = tcg_temp_new_i64();
- gen_helper_stod(t0, cpu_env, cpu_R(dc, a->a));
+ gen_helper_stod(t0, tcg_env, cpu_R(dc, a->a));
save_pair(dc, t0, a->d, a->dp);
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1431,9 +1431,9 @@
t0 = tcg_temp_new_i64();
load_pair(dc, t0, a->a, a->ap);
- gen_helper_dtos(cpu_R(dc, a->d), cpu_env, t0);
+ gen_helper_dtos(cpu_R(dc, a->d), tcg_env, t0);
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1455,10 +1455,10 @@
load_pair(dc, t0, a->d, a->dp);
load_pair(dc, t1, a->a, a->ap);
load_pair(dc, t2, a->b, a->bp);
- gen_helper_float_madd_d(t0, cpu_env, t0, t1, t2);
+ gen_helper_float_madd_d(t0, tcg_env, t0, t1, t2);
save_pair(dc, t0, a->d, a->dp);
- gen_helper_update_fpcsr(cpu_env);
+ gen_helper_update_fpcsr(tcg_env);
return true;
}
@@ -1525,7 +1525,7 @@
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
DisasContext *dc = container_of(dcb, DisasContext, base);
- CPUOpenRISCState *env = cs->env_ptr;
+ CPUOpenRISCState *env = cpu_env(cs);
int bound;
dc->mem_idx = cpu_mmu_index(env, false);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index d703a5f..30392eb 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1317,7 +1317,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUPPCState env;
int vcpu_id;
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index c62bf0e..40fe14a 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -7246,7 +7246,6 @@
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
cpu->vcpu_id = UNASSIGNED_CPU_INDEX;
env->msr_mask = pcc->msr_mask;
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 99099cb..7926114 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -3189,7 +3189,7 @@
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
uint32_t insn;
/* Restore state and reload the insn we executed, for filling in DSISR. */
@@ -3220,7 +3220,7 @@
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr)
{
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
switch (env->excp_model) {
#if defined(TARGET_PPC64)
@@ -3264,7 +3264,7 @@
void ppc_cpu_debug_excp_handler(CPUState *cs)
{
#if defined(TARGET_PPC64)
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
if (env->insns_flags2 & PPC2_ISA207S) {
if (cs->watchpoint_hit) {
@@ -3286,7 +3286,7 @@
bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
{
#if defined(TARGET_PPC64)
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
if (env->insns_flags2 & PPC2_ISA207S) {
target_ulong priv;
@@ -3313,7 +3313,7 @@
bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
#if defined(TARGET_PPC64)
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
if (env->insns_flags2 & PPC2_ISA207S) {
if (wp == env->dawr0_watchpoint) {
diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc
index 75513db..4956a8b 100644
--- a/target/ppc/power8-pmu-regs.c.inc
+++ b/target/ppc/power8-pmu-regs.c.inc
@@ -106,7 +106,7 @@
* translator_io_start() beforehand.
*/
translator_io_start(&ctx->base);
- gen_helper_store_mmcr0(cpu_env, val);
+ gen_helper_store_mmcr0(tcg_env, val);
/*
* End the translation block because MMCR0 writes can change
@@ -180,7 +180,7 @@
TCGv_i32 t_sprn = tcg_constant_i32(sprn);
translator_io_start(&ctx->base);
- gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn);
+ gen_helper_read_pmc(cpu_gpr[gprn], tcg_env, t_sprn);
}
void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
@@ -213,7 +213,7 @@
TCGv_i32 t_sprn = tcg_constant_i32(sprn);
translator_io_start(&ctx->base);
- gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]);
+ gen_helper_store_pmc(tcg_env, t_sprn, cpu_gpr[gprn]);
}
void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
@@ -249,7 +249,7 @@
void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_mmcr1(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_mmcr1(tcg_env, cpu_gpr[gprn]);
}
#else
void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 5c28afb..329da4d 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -92,7 +92,7 @@
for (i = 0; i < 8; i++) {
snprintf(p, cpu_reg_names_size, "crf%d", i);
- cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_crf[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUPPCState, crf[i]), p);
p += 5;
cpu_reg_names_size -= 5;
@@ -100,67 +100,67 @@
for (i = 0; i < 32; i++) {
snprintf(p, cpu_reg_names_size, "r%d", i);
- cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ cpu_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, gpr[i]), p);
p += (i < 10) ? 3 : 4;
cpu_reg_names_size -= (i < 10) ? 3 : 4;
snprintf(p, cpu_reg_names_size, "r%dH", i);
- cpu_gprh[i] = tcg_global_mem_new(cpu_env,
+ cpu_gprh[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, gprh[i]), p);
p += (i < 10) ? 4 : 5;
cpu_reg_names_size -= (i < 10) ? 4 : 5;
}
- cpu_nip = tcg_global_mem_new(cpu_env,
+ cpu_nip = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, nip), "nip");
- cpu_msr = tcg_global_mem_new(cpu_env,
+ cpu_msr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, msr), "msr");
- cpu_ctr = tcg_global_mem_new(cpu_env,
+ cpu_ctr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ctr), "ctr");
- cpu_lr = tcg_global_mem_new(cpu_env,
+ cpu_lr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, lr), "lr");
#if defined(TARGET_PPC64)
- cpu_cfar = tcg_global_mem_new(cpu_env,
+ cpu_cfar = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, cfar), "cfar");
#endif
- cpu_xer = tcg_global_mem_new(cpu_env,
+ cpu_xer = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, xer), "xer");
- cpu_so = tcg_global_mem_new(cpu_env,
+ cpu_so = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, so), "SO");
- cpu_ov = tcg_global_mem_new(cpu_env,
+ cpu_ov = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ov), "OV");
- cpu_ca = tcg_global_mem_new(cpu_env,
+ cpu_ca = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ca), "CA");
- cpu_ov32 = tcg_global_mem_new(cpu_env,
+ cpu_ov32 = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ov32), "OV32");
- cpu_ca32 = tcg_global_mem_new(cpu_env,
+ cpu_ca32 = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ca32), "CA32");
- cpu_reserve = tcg_global_mem_new(cpu_env,
+ cpu_reserve = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, reserve_addr),
"reserve_addr");
- cpu_reserve_length = tcg_global_mem_new(cpu_env,
+ cpu_reserve_length = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState,
reserve_length),
"reserve_length");
- cpu_reserve_val = tcg_global_mem_new(cpu_env,
+ cpu_reserve_val = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, reserve_val),
"reserve_val");
#if defined(TARGET_PPC64)
- cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
+ cpu_reserve_val2 = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, reserve_val2),
"reserve_val2");
#endif
- cpu_fpscr = tcg_global_mem_new(cpu_env,
+ cpu_fpscr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, fpscr), "fpscr");
- cpu_access_type = tcg_global_mem_new_i32(cpu_env,
+ cpu_access_type = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUPPCState, access_type),
"access_type");
}
@@ -240,7 +240,7 @@
{
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
/* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
return false;
}
@@ -261,12 +261,12 @@
/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
- tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}
static inline void gen_store_spr(int reg, TCGv t)
{
- tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+ tcg_gen_st_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
@@ -296,7 +296,7 @@
gen_update_nip(ctx, ctx->cia);
t0 = tcg_constant_i32(excp);
t1 = tcg_constant_i32(error);
- gen_helper_raise_exception_err(cpu_env, t0, t1);
+ gen_helper_raise_exception_err(tcg_env, t0, t1);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -310,7 +310,7 @@
*/
gen_update_nip(ctx, ctx->cia);
t0 = tcg_constant_i32(excp);
- gen_helper_raise_exception(cpu_env, t0);
+ gen_helper_raise_exception(tcg_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -321,7 +321,7 @@
gen_update_nip(ctx, nip);
t0 = tcg_constant_i32(excp);
- gen_helper_raise_exception(cpu_env, t0);
+ gen_helper_raise_exception(tcg_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -329,7 +329,7 @@
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
translator_io_start(&ctx->base);
- gen_helper_ppc_maybe_interrupt(cpu_env);
+ gen_helper_ppc_maybe_interrupt(tcg_env);
}
#endif
@@ -355,14 +355,14 @@
gen_load_spr(t0, SPR_BOOKE_DBSR);
tcg_gen_ori_tl(t0, t0, dbsr);
gen_store_spr(SPR_BOOKE_DBSR, t0);
- gen_helper_raise_exception(cpu_env,
+ gen_helper_raise_exception(tcg_env,
tcg_constant_i32(POWERPC_EXCP_DEBUG));
ctx->base.is_jmp = DISAS_NORETURN;
} else {
if (!rfi_type) { /* BookS does not single step rfi type instructions */
TCGv t0 = tcg_temp_new();
tcg_gen_movi_tl(t0, ctx->cia);
- gen_helper_book3s_trace(cpu_env, t0);
+ gen_helper_book3s_trace(tcg_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
}
@@ -407,7 +407,7 @@
{
#ifdef PPC_DUMP_SPR_ACCESSES
TCGv_i32 t0 = tcg_constant_i32(sprn);
- gen_helper_load_dump_spr(cpu_env, t0);
+ gen_helper_load_dump_spr(tcg_env, t0);
#endif
}
@@ -421,7 +421,7 @@
{
#ifdef PPC_DUMP_SPR_ACCESSES
TCGv_i32 t0 = tcg_constant_i32(sprn);
- gen_helper_store_dump_spr(cpu_env, t0);
+ gen_helper_store_dump_spr(tcg_env, t0);
#endif
}
@@ -454,7 +454,7 @@
return;
}
- gen_helper_spr_core_write_generic(cpu_env, tcg_constant_i32(sprn),
+ gen_helper_spr_core_write_generic(tcg_env, tcg_constant_i32(sprn),
cpu_gpr[gprn]);
spr_store_dump_spr(sprn);
}
@@ -482,7 +482,7 @@
return;
}
- gen_helper_spr_write_CTRL(cpu_env, tcg_constant_i32(sprn),
+ gen_helper_spr_write_CTRL(tcg_env, tcg_constant_i32(sprn),
cpu_gpr[gprn]);
out:
spr_store_dump_spr(sprn);
@@ -578,20 +578,20 @@
void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_ciabr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_ciabr(tcg_env, cpu_gpr[gprn]);
}
/* Watchpoint */
void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_dawr0(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_dawr0(tcg_env, cpu_gpr[gprn]);
}
void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_dawrx0(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
@@ -630,13 +630,13 @@
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
translator_io_start(&ctx->base);
- gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_decr(cpu_gpr[gprn], tcg_env);
}
void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_decr(tcg_env, cpu_gpr[gprn]);
}
#endif
@@ -645,90 +645,90 @@
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
translator_io_start(&ctx->base);
- gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_tbl(cpu_gpr[gprn], tcg_env);
}
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
translator_io_start(&ctx->base);
- gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_tbu(cpu_gpr[gprn], tcg_env);
}
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_atbl(cpu_gpr[gprn], tcg_env);
}
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_atbu(cpu_gpr[gprn], tcg_env);
}
#if !defined(CONFIG_USER_ONLY)
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_tbl(tcg_env, cpu_gpr[gprn]);
}
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_tbu(tcg_env, cpu_gpr[gprn]);
}
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_atbl(tcg_env, cpu_gpr[gprn]);
}
void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_atbu(tcg_env, cpu_gpr[gprn]);
}
#if defined(TARGET_PPC64)
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
translator_io_start(&ctx->base);
- gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_purr(cpu_gpr[gprn], tcg_env);
}
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_purr(tcg_env, cpu_gpr[gprn]);
}
/* HDECR */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
translator_io_start(&ctx->base);
- gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_hdecr(cpu_gpr[gprn], tcg_env);
}
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_hdecr(tcg_env, cpu_gpr[gprn]);
}
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
translator_io_start(&ctx->base);
- gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_vtb(cpu_gpr[gprn], tcg_env);
}
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_vtb(tcg_env, cpu_gpr[gprn]);
}
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_tbu40(tcg_env, cpu_gpr[gprn]);
}
#endif
@@ -739,14 +739,14 @@
/* IBAT0L...IBAT7L */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}
void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}
@@ -754,39 +754,39 @@
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
- gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
- gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
- gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
- gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}
/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}
void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}
@@ -794,31 +794,31 @@
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
- gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
- gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
- gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
- gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}
/* SDR1 */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_sdr1(tcg_env, cpu_gpr[gprn]);
}
#if defined(TARGET_PPC64)
@@ -826,33 +826,33 @@
/* PIDR */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_pidr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_lpidr(tcg_env, cpu_gpr[gprn]);
}
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env, offsetof(CPUPPCState, excp_prefix));
}
void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
}
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_ptcr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_pcr(tcg_env, cpu_gpr[gprn]);
}
/* DPDES */
@@ -862,7 +862,7 @@
return;
}
- gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_dpdes(cpu_gpr[gprn], tcg_env);
}
void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
@@ -871,7 +871,7 @@
return;
}
- gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_dpdes(tcg_env, cpu_gpr[gprn]);
}
#endif
#endif
@@ -881,20 +881,20 @@
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
translator_io_start(&ctx->base);
- gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_40x_pit(cpu_gpr[gprn], tcg_env);
}
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_40x_pit(tcg_env, cpu_gpr[gprn]);
}
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
gen_store_spr(sprn, cpu_gpr[gprn]);
- gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_40x_dbcr0(tcg_env, cpu_gpr[gprn]);
/* We must stop translation as we may have rebooted */
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
@@ -902,38 +902,38 @@
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_40x_sler(tcg_env, cpu_gpr[gprn]);
}
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_40x_tcr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_40x_tsr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
- gen_helper_store_40x_pid(cpu_env, t0);
+ gen_helper_store_40x_pid(tcg_env, t0);
}
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_booke_tcr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_booke_tsr(tcg_env, cpu_gpr[gprn]);
}
#endif
@@ -951,7 +951,7 @@
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
+ tcg_gen_ld_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}
@@ -959,7 +959,7 @@
{
TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
- tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
+ tcg_gen_st_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
}
#if !defined(CONFIG_USER_ONLY)
@@ -967,9 +967,9 @@
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
+ tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivpr_mask));
tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
gen_store_spr(sprn, t0);
}
@@ -991,9 +991,9 @@
}
TCGv t0 = tcg_temp_new();
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
+ tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivor_mask));
tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
gen_store_spr(sprn, t0);
}
#endif
@@ -1090,7 +1090,7 @@
#ifndef CONFIG_USER_ONLY
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_fixup_thrm(cpu_env);
+ gen_helper_fixup_thrm(tcg_env);
gen_load_spr(cpu_gpr[gprn], sprn);
spr_load_dump_spr(sprn);
}
@@ -1124,23 +1124,23 @@
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
+ gen_helper_booke206_tlbflush(tcg_env, cpu_gpr[gprn]);
}
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_constant_i32(sprn);
- gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
+ gen_helper_booke_setpid(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
+ gen_helper_booke_set_eplc(tcg_env, cpu_gpr[gprn]);
}
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
+ gen_helper_booke_set_epsc(tcg_env, cpu_gpr[gprn]);
}
#endif
@@ -1175,7 +1175,7 @@
TCGv_i32 t2 = tcg_constant_i32(sprn);
TCGv_i32 t3 = tcg_constant_i32(cause);
- gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
+ gen_helper_fscr_facility_check(tcg_env, t1, t2, t3);
}
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
@@ -1185,7 +1185,7 @@
TCGv_i32 t2 = tcg_constant_i32(sprn);
TCGv_i32 t3 = tcg_constant_i32(cause);
- gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
+ gen_helper_msr_facility_check(tcg_env, t1, t2, t3);
}
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
@@ -1220,18 +1220,18 @@
void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_load_tfmr(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_tfmr(cpu_gpr[gprn], tcg_env);
}
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_tfmr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_tfmr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
translator_io_start(&ctx->base);
- gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]);
}
#endif /* !defined(CONFIG_USER_ONLY) */
@@ -1812,7 +1812,7 @@
static void gen_##name(DisasContext *ctx) \
{ \
TCGv_i32 t0 = tcg_constant_i32(compute_ov); \
- gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
+ gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], tcg_env, \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
@@ -2317,7 +2317,7 @@
static void gen_pause(DisasContext *ctx)
{
TCGv_i32 t0 = tcg_constant_i32(0);
- tcg_gen_st_i32(t0, cpu_env,
+ tcg_gen_st_i32(t0, tcg_env,
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
/* Stop translation, this gives other CPUs a chance to run */
@@ -2912,7 +2912,7 @@
/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
- gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], tcg_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
@@ -2995,7 +2995,7 @@
/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
- gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ gen_helper_srad(cpu_gpr[rA(ctx->opcode)], tcg_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
@@ -3360,7 +3360,7 @@
t0 = tcg_temp_new();
t1 = tcg_constant_i32(rD(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
- gen_helper_lmw(cpu_env, t0, t1);
+ gen_helper_lmw(tcg_env, t0, t1);
}
/* stmw */
@@ -3377,7 +3377,7 @@
t0 = tcg_temp_new();
t1 = tcg_constant_i32(rS(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
- gen_helper_stmw(cpu_env, t0, t1);
+ gen_helper_stmw(tcg_env, t0, t1);
}
/*** Integer load and store strings ***/
@@ -3415,7 +3415,7 @@
gen_addr_register(ctx, t0);
t1 = tcg_constant_i32(nb);
t2 = tcg_constant_i32(start);
- gen_helper_lsw(cpu_env, t0, t1, t2);
+ gen_helper_lsw(tcg_env, t0, t1, t2);
}
/* lswx */
@@ -3434,7 +3434,7 @@
t1 = tcg_constant_i32(rD(ctx->opcode));
t2 = tcg_constant_i32(rA(ctx->opcode));
t3 = tcg_constant_i32(rB(ctx->opcode));
- gen_helper_lswx(cpu_env, t0, t1, t2, t3);
+ gen_helper_lswx(tcg_env, t0, t1, t2, t3);
}
/* stswi */
@@ -3456,7 +3456,7 @@
}
t1 = tcg_constant_i32(nb);
t2 = tcg_constant_i32(rS(ctx->opcode));
- gen_helper_stsw(cpu_env, t0, t1, t2);
+ gen_helper_stsw(tcg_env, t0, t1, t2);
}
/* stswx */
@@ -3476,7 +3476,7 @@
tcg_gen_trunc_tl_i32(t1, cpu_xer);
tcg_gen_andi_i32(t1, t1, 0x7F);
t2 = tcg_constant_i32(rS(ctx->opcode));
- gen_helper_stsw(cpu_env, t0, t1, t2);
+ gen_helper_stsw(tcg_env, t0, t1, t2);
}
/*** Memory synchronisation ***/
@@ -3543,12 +3543,12 @@
}
l = gen_new_label();
t = tcg_temp_new_i32();
- tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ld_i32(t, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
if (global) {
- gen_helper_check_tlb_flush_global(cpu_env);
+ gen_helper_check_tlb_flush_global(tcg_env);
} else {
- gen_helper_check_tlb_flush_local(cpu_env);
+ gen_helper_check_tlb_flush_local(tcg_env);
}
gen_set_label(l);
}
@@ -3710,7 +3710,7 @@
if (need_serial) {
/* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
}
@@ -3766,7 +3766,7 @@
case 24: /* Store twin */
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
/* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
} else {
TCGv t = tcg_temp_new();
@@ -3876,8 +3876,8 @@
tcg_gen_mov_tl(cpu_reserve, EA);
tcg_gen_movi_tl(cpu_reserve_length, 16);
- tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
- tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
+ tcg_gen_st_tl(hi, tcg_env, offsetof(CPUPPCState, reserve_val));
+ tcg_gen_st_tl(lo, tcg_env, offsetof(CPUPPCState, reserve_val2));
}
/* stqcx. */
@@ -4011,7 +4011,7 @@
*/
if (wc == 0) {
TCGv_i32 t0 = tcg_constant_i32(1);
- tcg_gen_st_i32(t0, cpu_env,
+ tcg_gen_st_i32(t0, tcg_env,
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -4058,7 +4058,7 @@
CHK_HV(ctx);
translator_io_start(&ctx->base);
t = tcg_constant_i32(PPC_PM_DOZE);
- gen_helper_pminsn(cpu_env, t);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4074,7 +4074,7 @@
CHK_HV(ctx);
translator_io_start(&ctx->base);
t = tcg_constant_i32(PPC_PM_NAP);
- gen_helper_pminsn(cpu_env, t);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4090,7 +4090,7 @@
CHK_HV(ctx);
translator_io_start(&ctx->base);
t = tcg_constant_i32(PPC_PM_STOP);
- gen_helper_pminsn(cpu_env, t);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4106,7 +4106,7 @@
CHK_HV(ctx);
translator_io_start(&ctx->base);
t = tcg_constant_i32(PPC_PM_SLEEP);
- gen_helper_pminsn(cpu_env, t);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4122,7 +4122,7 @@
CHK_HV(ctx);
translator_io_start(&ctx->base);
t = tcg_constant_i32(PPC_PM_RVWINKLE);
- gen_helper_pminsn(cpu_env, t);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4172,12 +4172,12 @@
/* Check for overflow, if it's enabled */
if (ctx->mmcr0_pmcjce) {
tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
- gen_helper_handle_pmc5_overflow(cpu_env);
+ gen_helper_handle_pmc5_overflow(tcg_env);
}
gen_set_label(l);
} else {
- gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns));
+ gen_helper_insns_inc(tcg_env, tcg_constant_i32(ctx->base.num_insns));
}
#else
/*
@@ -4477,7 +4477,7 @@
CHK_SV(ctx);
translator_io_start(&ctx->base);
gen_update_cfar(ctx, ctx->cia);
- gen_helper_rfi(cpu_env);
+ gen_helper_rfi(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4492,7 +4492,7 @@
CHK_SV(ctx);
translator_io_start(&ctx->base);
gen_update_cfar(ctx, ctx->cia);
- gen_helper_rfid(cpu_env);
+ gen_helper_rfid(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4507,7 +4507,7 @@
CHK_SV(ctx);
translator_io_start(&ctx->base);
gen_update_cfar(ctx, ctx->cia);
- gen_helper_rfscv(cpu_env);
+ gen_helper_rfscv(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4521,7 +4521,7 @@
/* Restore CPU state */
CHK_HV(ctx);
translator_io_start(&ctx->base);
- gen_helper_hrfid(cpu_env);
+ gen_helper_hrfid(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4554,7 +4554,7 @@
/* Set the PC back to the faulting instruction. */
gen_update_nip(ctx, ctx->cia);
- gen_helper_scv(cpu_env, tcg_constant_i32(lev));
+ gen_helper_scv(tcg_env, tcg_constant_i32(lev));
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -4587,7 +4587,7 @@
return;
}
t0 = tcg_constant_i32(TO(ctx->opcode));
- gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
}
@@ -4602,7 +4602,7 @@
}
t0 = tcg_constant_tl(SIMM(ctx->opcode));
t1 = tcg_constant_i32(TO(ctx->opcode));
- gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#if defined(TARGET_PPC64)
@@ -4615,7 +4615,7 @@
return;
}
t0 = tcg_constant_i32(TO(ctx->opcode));
- gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
}
@@ -4630,7 +4630,7 @@
}
t0 = tcg_constant_tl(SIMM(ctx->opcode));
t1 = tcg_constant_i32(TO(ctx->opcode));
- gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#endif
@@ -4856,7 +4856,7 @@
tcg_gen_andi_tl(t1, cpu_msr, ~mask);
tcg_gen_or_tl(t0, t0, t1);
- gen_helper_store_msr(cpu_env, t0);
+ gen_helper_store_msr(tcg_env, t0);
/* Must stop the translation as machine state (may have) changed */
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
@@ -4895,7 +4895,7 @@
tcg_gen_andi_tl(t1, cpu_msr, ~mask);
tcg_gen_or_tl(t0, t0, t1);
- gen_helper_store_msr(cpu_env, t0);
+ gen_helper_store_msr(tcg_env, t0);
/* Must stop the translation as machine state (may have) changed */
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
@@ -5108,7 +5108,7 @@
tcgv_addr = tcg_temp_new();
tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
+ gen_helper_dcbz(tcg_env, tcgv_addr, tcgv_op);
}
/* dcbzep */
@@ -5121,7 +5121,7 @@
tcgv_addr = tcg_temp_new();
tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
+ gen_helper_dcbzep(tcg_env, tcgv_addr, tcgv_op);
}
/* dst / dstt */
@@ -5158,7 +5158,7 @@
gen_set_access_type(ctx, ACCESS_CACHE);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_icbi(cpu_env, t0);
+ gen_helper_icbi(tcg_env, t0);
}
/* icbiep */
@@ -5168,7 +5168,7 @@
gen_set_access_type(ctx, ACCESS_CACHE);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_icbiep(cpu_env, t0);
+ gen_helper_icbiep(tcg_env, t0);
}
/* Optional: */
@@ -5195,7 +5195,7 @@
CHK_SV(ctx);
t0 = tcg_constant_tl(SR(ctx->opcode));
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5210,7 +5210,7 @@
CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5224,7 +5224,7 @@
CHK_SV(ctx);
t0 = tcg_constant_tl(SR(ctx->opcode));
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5239,7 +5239,7 @@
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5256,7 +5256,7 @@
CHK_SV(ctx);
t0 = tcg_constant_tl(SR(ctx->opcode));
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5271,7 +5271,7 @@
CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5285,7 +5285,7 @@
CHK_SV(ctx);
t0 = tcg_constant_tl(SR(ctx->opcode));
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5300,7 +5300,7 @@
CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5317,7 +5317,7 @@
#else
CHK_HV(ctx);
- gen_helper_tlbia(cpu_env);
+ gen_helper_tlbia(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5377,7 +5377,7 @@
GEN_PRIV(ctx);
#else
CHK_SV(ctx);
- gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_6xx_tlbd(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5388,7 +5388,7 @@
GEN_PRIV(ctx);
#else
CHK_SV(ctx);
- gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_6xx_tlbi(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5412,7 +5412,7 @@
CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_tlbiva(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5639,7 +5639,7 @@
CHK_SV(ctx);
dcrn = tcg_constant_tl(SPR(ctx->opcode));
- gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5653,7 +5653,7 @@
CHK_SV(ctx);
dcrn = tcg_constant_tl(SPR(ctx->opcode));
- gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
+ gen_helper_store_dcr(tcg_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5665,7 +5665,7 @@
GEN_PRIV(ctx);
#else
CHK_SV(ctx);
- gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env,
cpu_gpr[rA(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
@@ -5679,7 +5679,7 @@
GEN_PRIV(ctx);
#else
CHK_SV(ctx);
- gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ gen_helper_store_dcr(tcg_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
@@ -5742,7 +5742,7 @@
#else
CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_40x_rfci(cpu_env);
+ gen_helper_40x_rfci(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5754,7 +5754,7 @@
#else
CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_rfci(cpu_env);
+ gen_helper_rfci(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5769,7 +5769,7 @@
#else
CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_rfdi(cpu_env);
+ gen_helper_rfdi(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5782,7 +5782,7 @@
#else
CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_rfmci(cpu_env);
+ gen_helper_rfmci(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5798,11 +5798,11 @@
CHK_SV(ctx);
switch (rB(ctx->opcode)) {
case 0:
- gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], tcg_env,
cpu_gpr[rA(ctx->opcode)]);
break;
case 1:
- gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], tcg_env,
cpu_gpr[rA(ctx->opcode)]);
break;
default:
@@ -5823,7 +5823,7 @@
CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
if (Rc(ctx->opcode)) {
TCGLabel *l1 = gen_new_label();
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
@@ -5844,11 +5844,11 @@
switch (rB(ctx->opcode)) {
case 0:
- gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ gen_helper_4xx_tlbwe_hi(tcg_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
break;
case 1:
- gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ gen_helper_4xx_tlbwe_lo(tcg_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
break;
default:
@@ -5874,7 +5874,7 @@
case 2:
{
TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
- gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], tcg_env,
t0, cpu_gpr[rA(ctx->opcode)]);
}
break;
@@ -5896,7 +5896,7 @@
CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
if (Rc(ctx->opcode)) {
TCGLabel *l1 = gen_new_label();
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
@@ -5920,7 +5920,7 @@
case 2:
{
TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
- gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
+ gen_helper_440_tlbwe(tcg_env, t0, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
}
break;
@@ -5940,7 +5940,7 @@
GEN_PRIV(ctx);
#else
CHK_SV(ctx);
- gen_helper_booke206_tlbre(cpu_env);
+ gen_helper_booke206_tlbre(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5959,7 +5959,7 @@
} else {
t0 = cpu_gpr[rB(ctx->opcode)];
}
- gen_helper_booke206_tlbsx(cpu_env, t0);
+ gen_helper_booke206_tlbsx(tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5970,7 +5970,7 @@
GEN_PRIV(ctx);
#else
CHK_SV(ctx);
- gen_helper_booke206_tlbwe(cpu_env);
+ gen_helper_booke206_tlbwe(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5984,7 +5984,7 @@
CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_booke206_tlbivax(cpu_env, t0);
+ gen_helper_booke206_tlbivax(tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6001,13 +6001,13 @@
switch ((ctx->opcode >> 21) & 0x3) {
case 0:
- gen_helper_booke206_tlbilx0(cpu_env, t0);
+ gen_helper_booke206_tlbilx0(tcg_env, t0);
break;
case 1:
- gen_helper_booke206_tlbilx1(cpu_env, t0);
+ gen_helper_booke206_tlbilx1(tcg_env, t0);
break;
case 3:
- gen_helper_booke206_tlbilx3(cpu_env, t0);
+ gen_helper_booke206_tlbilx3(tcg_env, t0);
break;
default:
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
@@ -6062,7 +6062,7 @@
static void gen_dlmzb(DisasContext *ctx)
{
TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
- gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], tcg_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
}
@@ -6129,7 +6129,7 @@
gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
return;
}
- gen_helper_tbegin(cpu_env);
+ gen_helper_tbegin(tcg_env);
}
#define GEN_TM_NOOP(name) \
@@ -6225,12 +6225,12 @@
static inline void get_fpr(TCGv_i64 dst, int regno)
{
- tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
+ tcg_gen_ld_i64(dst, tcg_env, fpr_offset(regno));
}
static inline void set_fpr(int regno, TCGv_i64 src)
{
- tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
+ tcg_gen_st_i64(src, tcg_env, fpr_offset(regno));
/*
* Before PowerISA v3.1 the result of doubleword 1 of the VSR
* corresponding to the target FPR was undefined. However,
@@ -6238,17 +6238,17 @@
* Starting at ISA v3.1, the result for doubleword 1 is now defined
* to be 0.
*/
- tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
+ tcg_gen_st_i64(tcg_constant_i64(0), tcg_env, vsr64_offset(regno, false));
}
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
- tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
+ tcg_gen_ld_i64(dst, tcg_env, avr64_offset(regno, high));
}
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
- tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
+ tcg_gen_st_i64(src, tcg_env, avr64_offset(regno, high));
}
/*
@@ -7320,7 +7320,7 @@
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
uint32_t hflags = ctx->base.tb->flags;
ctx->spr_cb = env->spr_cb;
@@ -7384,7 +7384,7 @@
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
target_ulong pc;
uint32_t insn;
bool ok;
diff --git a/target/ppc/translate/branch-impl.c.inc b/target/ppc/translate/branch-impl.c.inc
index f9931b9..fb0fcf3 100644
--- a/target/ppc/translate/branch-impl.c.inc
+++ b/target/ppc/translate/branch-impl.c.inc
@@ -18,7 +18,7 @@
translator_io_start(&ctx->base);
gen_update_cfar(ctx, ctx->cia);
- gen_helper_rfebb(cpu_env, cpu_gpr[arg->s]);
+ gen_helper_rfebb(tcg_env, cpu_gpr[arg->s]);
ctx->base.is_jmp = DISAS_CHAIN;
diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc
index 62911e0..3710765 100644
--- a/target/ppc/translate/dfp-impl.c.inc
+++ b/target/ppc/translate/dfp-impl.c.inc
@@ -3,7 +3,7 @@
static inline TCGv_ptr gen_fprp_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, vsr[reg].u64[0]));
+ tcg_gen_addi_ptr(r, tcg_env, offsetof(CPUPPCState, vsr[reg].u64[0]));
return r;
}
@@ -16,7 +16,7 @@
rt = gen_fprp_ptr(a->rt); \
ra = gen_fprp_ptr(a->ra); \
rb = gen_fprp_ptr(a->rb); \
- gen_helper_##NAME(cpu_env, rt, ra, rb); \
+ gen_helper_##NAME(tcg_env, rt, ra, rb); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
@@ -32,7 +32,7 @@
ra = gen_fprp_ptr(a->ra); \
rb = gen_fprp_ptr(a->rb); \
gen_helper_##NAME(cpu_crf[a->bf], \
- cpu_env, ra, rb); \
+ tcg_env, ra, rb); \
return true; \
}
@@ -44,7 +44,7 @@
REQUIRE_FPU(ctx); \
rb = gen_fprp_ptr(a->rb); \
gen_helper_##NAME(cpu_crf[a->bf], \
- cpu_env, tcg_constant_i32(a->uim), rb);\
+ tcg_env, tcg_constant_i32(a->uim), rb);\
return true; \
}
@@ -56,7 +56,7 @@
REQUIRE_FPU(ctx); \
ra = gen_fprp_ptr(a->fra); \
gen_helper_##NAME(cpu_crf[a->bf], \
- cpu_env, ra, tcg_constant_i32(a->dm)); \
+ tcg_env, ra, tcg_constant_i32(a->dm)); \
return true; \
}
@@ -68,7 +68,7 @@
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->frt); \
rb = gen_fprp_ptr(a->frb); \
- gen_helper_##NAME(cpu_env, rt, rb, \
+ gen_helper_##NAME(tcg_env, rt, rb, \
tcg_constant_i32(a->U32F1), \
tcg_constant_i32(a->U32F2)); \
if (unlikely(a->rc)) { \
@@ -86,7 +86,7 @@
rt = gen_fprp_ptr(a->frt); \
ra = gen_fprp_ptr(a->fra); \
rb = gen_fprp_ptr(a->frb); \
- gen_helper_##NAME(cpu_env, rt, ra, rb, \
+ gen_helper_##NAME(tcg_env, rt, ra, rb, \
tcg_constant_i32(a->I32FLD)); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
@@ -102,7 +102,7 @@
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->rt); \
rb = gen_fprp_ptr(a->rb); \
- gen_helper_##NAME(cpu_env, rt, rb); \
+ gen_helper_##NAME(tcg_env, rt, rb); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
@@ -117,7 +117,7 @@
REQUIRE_FPU(ctx); \
rt = gen_fprp_ptr(a->rt); \
rx = gen_fprp_ptr(a->FPRFLD); \
- gen_helper_##NAME(cpu_env, rt, rx, \
+ gen_helper_##NAME(tcg_env, rt, rx, \
tcg_constant_i32(a->I32FLD)); \
if (unlikely(a->rc)) { \
gen_set_cr1_from_fpscr(ctx); \
@@ -188,7 +188,7 @@
rt = gen_fprp_ptr(a->frtp);
rb = gen_avr_ptr(a->vrb);
- gen_helper_DCFFIXQQ(cpu_env, rt, rb);
+ gen_helper_DCFFIXQQ(tcg_env, rt, rb);
return true;
}
@@ -203,7 +203,7 @@
rt = gen_avr_ptr(a->vrt);
rb = gen_fprp_ptr(a->frbp);
- gen_helper_DCTFIXQQ(cpu_env, rt, rb);
+ gen_helper_DCTFIXQQ(tcg_env, rt, rb);
return true;
}
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
index 7ff7e1e..51c6fa7 100644
--- a/target/ppc/translate/fixedpoint-impl.c.inc
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -517,7 +517,7 @@
}
ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt));
- helper(cpu_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]);
+ helper(tcg_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]);
return true;
}
diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc
index 874774e..189cd8c 100644
--- a/target/ppc/translate/fp-impl.c.inc
+++ b/target/ppc/translate/fp-impl.c.inc
@@ -6,13 +6,13 @@
static inline void gen_reset_fpstatus(void)
{
- gen_helper_reset_fpstatus(cpu_env);
+ gen_helper_reset_fpstatus(tcg_env);
}
static inline void gen_compute_fprf_float64(TCGv_i64 arg)
{
- gen_helper_compute_fprf_float64(cpu_env, arg);
- gen_helper_float_check_status(cpu_env);
+ gen_helper_compute_fprf_float64(tcg_env, arg);
+ gen_helper_float_check_status(tcg_env);
}
#if defined(TARGET_PPC64)
@@ -49,7 +49,7 @@
get_fpr(t0, rA(ctx->opcode)); \
get_fpr(t1, rC(ctx->opcode)); \
get_fpr(t2, rB(ctx->opcode)); \
- gen_helper_f##name(t3, cpu_env, t0, t1, t2); \
+ gen_helper_f##name(t3, tcg_env, t0, t1, t2); \
set_fpr(rD(ctx->opcode), t3); \
if (set_fprf) { \
gen_compute_fprf_float64(t3); \
@@ -79,7 +79,7 @@
gen_reset_fpstatus(); \
get_fpr(t0, rA(ctx->opcode)); \
get_fpr(t1, rB(ctx->opcode)); \
- gen_helper_f##name(t2, cpu_env, t0, t1); \
+ gen_helper_f##name(t2, tcg_env, t0, t1); \
set_fpr(rD(ctx->opcode), t2); \
if (set_fprf) { \
gen_compute_fprf_float64(t2); \
@@ -108,7 +108,7 @@
gen_reset_fpstatus(); \
get_fpr(t0, rA(ctx->opcode)); \
get_fpr(t1, rC(ctx->opcode)); \
- gen_helper_f##name(t2, cpu_env, t0, t1); \
+ gen_helper_f##name(t2, tcg_env, t0, t1); \
set_fpr(rD(ctx->opcode), t2); \
if (set_fprf) { \
gen_compute_fprf_float64(t2); \
@@ -134,12 +134,12 @@
t1 = tcg_temp_new_i64(); \
gen_reset_fpstatus(); \
get_fpr(t0, rB(ctx->opcode)); \
- gen_helper_f##name(t1, cpu_env, t0); \
+ gen_helper_f##name(t1, tcg_env, t0); \
set_fpr(rD(ctx->opcode), t1); \
if (set_fprf) { \
- gen_helper_compute_fprf_float64(cpu_env, t1); \
+ gen_helper_compute_fprf_float64(tcg_env, t1); \
} \
- gen_helper_float_check_status(cpu_env); \
+ gen_helper_float_check_status(tcg_env); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
@@ -158,7 +158,7 @@
t1 = tcg_temp_new_i64(); \
gen_reset_fpstatus(); \
get_fpr(t0, rB(ctx->opcode)); \
- gen_helper_f##name(t1, cpu_env, t0); \
+ gen_helper_f##name(t1, tcg_env, t0); \
set_fpr(rD(ctx->opcode), t1); \
if (set_fprf) { \
gen_compute_fprf_float64(t1); \
@@ -197,7 +197,7 @@
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
get_fpr(t0, rB(ctx->opcode));
- gen_helper_frsqrtes(t1, cpu_env, t0);
+ gen_helper_frsqrtes(t1, tcg_env, t0);
set_fpr(rD(ctx->opcode), t1);
gen_compute_fprf_float64(t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
@@ -245,7 +245,7 @@
gen_reset_fpstatus();
get_fpr(t0, a->frb);
- helper(t1, cpu_env, t0);
+ helper(t1, tcg_env, t0);
set_fpr(a->frt, t1);
gen_compute_fprf_float64(t1);
if (unlikely(a->rc != 0)) {
@@ -351,8 +351,8 @@
crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
- gen_helper_fcmpo(cpu_env, t0, t1, crf);
- gen_helper_float_check_status(cpu_env);
+ gen_helper_fcmpo(tcg_env, t0, t1, crf);
+ gen_helper_float_check_status(tcg_env);
}
/* fcmpu */
@@ -371,8 +371,8 @@
crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
- gen_helper_fcmpu(cpu_env, t0, t1, crf);
- gen_helper_float_check_status(cpu_env);
+ gen_helper_fcmpu(tcg_env, t0, t1, crf);
+ gen_helper_float_check_status(tcg_env);
}
/*** Floating-point move ***/
@@ -542,7 +542,7 @@
~((0xF << shift) & FP_EX_CLEAR_BITS));
/* FEX and VX need to be updated, so don't set fpscr directly */
tmask = tcg_constant_i32(1 << nibble);
- gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
+ gen_helper_store_fpscr(tcg_env, tnew_fpscr, tmask);
}
static TCGv_i64 place_from_fpscr(int rt, uint64_t mask)
@@ -565,7 +565,7 @@
tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask);
tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask);
- gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask);
+ gen_helper_store_fpscr(tcg_env, fpscr_masked, st_mask);
}
static bool trans_MFFS_ISA207(DisasContext *ctx, arg_X_t_rc *a)
@@ -691,7 +691,7 @@
crb = 31 - crbD(ctx->opcode);
gen_reset_fpstatus();
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
- gen_helper_fpscr_clrbit(cpu_env, tcg_constant_i32(crb));
+ gen_helper_fpscr_clrbit(tcg_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
@@ -711,14 +711,14 @@
crb = 31 - crbD(ctx->opcode);
/* XXX: we pretend we can only do IEEE floating-point computations */
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
- gen_helper_fpscr_setbit(cpu_env, tcg_constant_i32(crb));
+ gen_helper_fpscr_setbit(tcg_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
}
/* We can raise a deferred exception */
- gen_helper_fpscr_check_status(cpu_env);
+ gen_helper_fpscr_check_status(tcg_env);
}
/* mtfsf */
@@ -748,13 +748,13 @@
}
t1 = tcg_temp_new_i64();
get_fpr(t1, rB(ctx->opcode));
- gen_helper_store_fpscr(cpu_env, t1, t0);
+ gen_helper_store_fpscr(tcg_env, t1, t0);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
}
/* We can raise a deferred exception */
- gen_helper_fpscr_check_status(cpu_env);
+ gen_helper_fpscr_check_status(tcg_env);
}
/* mtfsfi */
@@ -777,13 +777,13 @@
sh = (8 * w) + 7 - bf;
t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
t1 = tcg_constant_i32(1 << sh);
- gen_helper_store_fpscr(cpu_env, t0, t1);
+ gen_helper_store_fpscr(tcg_env, t0, t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
}
/* We can raise a deferred exception */
- gen_helper_fpscr_check_status(cpu_env);
+ gen_helper_fpscr_check_status(tcg_env);
}
static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
diff --git a/target/ppc/translate/processor-ctrl-impl.c.inc b/target/ppc/translate/processor-ctrl-impl.c.inc
index cc7a50d..0142801 100644
--- a/target/ppc/translate/processor-ctrl-impl.c.inc
+++ b/target/ppc/translate/processor-ctrl-impl.c.inc
@@ -35,9 +35,9 @@
#if !defined(CONFIG_USER_ONLY)
if (is_book3s_arch2x(ctx)) {
- gen_helper_book3s_msgclr(cpu_env, cpu_gpr[a->rb]);
+ gen_helper_book3s_msgclr(tcg_env, cpu_gpr[a->rb]);
} else {
- gen_helper_msgclr(cpu_env, cpu_gpr[a->rb]);
+ gen_helper_msgclr(tcg_env, cpu_gpr[a->rb]);
}
#else
qemu_build_not_reached();
@@ -75,7 +75,7 @@
REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_book3s_msgclrp(cpu_env, cpu_gpr[a->rb]);
+ gen_helper_book3s_msgclrp(tcg_env, cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
@@ -88,7 +88,7 @@
REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_book3s_msgsndp(cpu_env, cpu_gpr[a->rb]);
+ gen_helper_book3s_msgsndp(tcg_env, cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
diff --git a/target/ppc/translate/spe-impl.c.inc b/target/ppc/translate/spe-impl.c.inc
index f4a8584..454dac8 100644
--- a/target/ppc/translate/spe-impl.c.inc
+++ b/target/ppc/translate/spe-impl.c.inc
@@ -22,7 +22,7 @@
cpu_gprh[rA(ctx->opcode)]);
/* spe_acc := tmp */
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUPPCState, spe_acc));
/* rD := rA */
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
@@ -457,7 +457,7 @@
/* acc := rD */
gen_load_gpr64(tmp, rD(ctx->opcode));
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUPPCState, spe_acc));
}
static inline void gen_evmwumiaa(DisasContext *ctx)
@@ -479,13 +479,13 @@
gen_load_gpr64(tmp, rD(ctx->opcode));
/* Load acc */
- tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_ld_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* acc := tmp + acc */
tcg_gen_add_i64(acc, acc, tmp);
/* Store acc */
- tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_st_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* rD := acc */
gen_store_gpr64(rD(ctx->opcode), acc);
@@ -529,7 +529,7 @@
/* acc := rD */
gen_load_gpr64(tmp, rD(ctx->opcode));
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUPPCState, spe_acc));
}
static inline void gen_evmwsmiaa(DisasContext *ctx)
@@ -551,13 +551,13 @@
gen_load_gpr64(tmp, rD(ctx->opcode));
/* Load acc */
- tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_ld_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* acc := tmp + acc */
tcg_gen_add_i64(acc, acc, tmp);
/* Store acc */
- tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_st_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* rD := acc */
gen_store_gpr64(rD(ctx->opcode), acc);
@@ -878,7 +878,7 @@
{ \
TCGv_i32 t0 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t0); \
+ gen_helper_##name(t0, tcg_env, t0); \
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
}
#define GEN_SPEFPUOP_CONV_32_64(name) \
@@ -893,7 +893,7 @@
t0 = tcg_temp_new_i64(); \
t1 = tcg_temp_new_i32(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(t1, cpu_env, t0); \
+ gen_helper_##name(t1, tcg_env, t0); \
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \
}
#define GEN_SPEFPUOP_CONV_64_32(name) \
@@ -908,7 +908,7 @@
t0 = tcg_temp_new_i64(); \
t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t1); \
+ gen_helper_##name(t0, tcg_env, t1); \
gen_store_gpr64(rD(ctx->opcode), t0); \
}
#define GEN_SPEFPUOP_CONV_64_64(name) \
@@ -921,7 +921,7 @@
} \
t0 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(t0, cpu_env, t0); \
+ gen_helper_##name(t0, tcg_env, t0); \
gen_store_gpr64(rD(ctx->opcode), t0); \
}
#define GEN_SPEFPUOP_ARITH2_32_32(name) \
@@ -931,7 +931,7 @@
TCGv_i32 t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t0, t1); \
+ gen_helper_##name(t0, tcg_env, t0, t1); \
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
}
#define GEN_SPEFPUOP_ARITH2_64_64(name) \
@@ -946,7 +946,7 @@
t1 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rA(ctx->opcode)); \
gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(t0, cpu_env, t0, t1); \
+ gen_helper_##name(t0, tcg_env, t0, t1); \
gen_store_gpr64(rD(ctx->opcode), t0); \
}
#define GEN_SPEFPUOP_COMP_32(name) \
@@ -957,7 +957,7 @@
\
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], tcg_env, t0, t1); \
}
#define GEN_SPEFPUOP_COMP_64(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -971,7 +971,7 @@
t1 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rA(ctx->opcode)); \
gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], tcg_env, t0, t1); \
}
/* Single precision floating-point vectors operations */
diff --git a/target/ppc/translate/storage-ctrl-impl.c.inc b/target/ppc/translate/storage-ctrl-impl.c.inc
index faa7b04..74c23a4 100644
--- a/target/ppc/translate/storage-ctrl-impl.c.inc
+++ b/target/ppc/translate/storage-ctrl-impl.c.inc
@@ -30,7 +30,7 @@
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_SLBIE(cpu_env, cpu_gpr[a->rb]);
+ gen_helper_SLBIE(tcg_env, cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
@@ -44,7 +44,7 @@
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_SLBIEG(cpu_env, cpu_gpr[a->rb]);
+ gen_helper_SLBIEG(tcg_env, cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
@@ -58,7 +58,7 @@
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_SLBIA(cpu_env, tcg_constant_i32(a->ih));
+ gen_helper_SLBIA(tcg_env, tcg_constant_i32(a->ih));
#else
qemu_build_not_reached();
#endif
@@ -72,7 +72,7 @@
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_SLBIAG(cpu_env, cpu_gpr[a->rs], tcg_constant_i32(a->l));
+ gen_helper_SLBIAG(tcg_env, cpu_gpr[a->rs], tcg_constant_i32(a->l));
#else
qemu_build_not_reached();
#endif
@@ -86,7 +86,7 @@
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_SLBMTE(cpu_env, cpu_gpr[a->rb], cpu_gpr[a->rt]);
+ gen_helper_SLBMTE(tcg_env, cpu_gpr[a->rb], cpu_gpr[a->rt]);
#else
qemu_build_not_reached();
#endif
@@ -100,7 +100,7 @@
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_SLBMFEV(cpu_gpr[a->rt], cpu_env, cpu_gpr[a->rb]);
+ gen_helper_SLBMFEV(cpu_gpr[a->rt], tcg_env, cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
@@ -114,7 +114,7 @@
REQUIRE_SV(ctx);
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
- gen_helper_SLBMFEE(cpu_gpr[a->rt], cpu_env, cpu_gpr[a->rb]);
+ gen_helper_SLBMFEE(cpu_gpr[a->rt], tcg_env, cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
@@ -137,7 +137,7 @@
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return true;
}
- gen_helper_SLBFEE(cpu_gpr[a->rt], cpu_env,
+ gen_helper_SLBFEE(cpu_gpr[a->rt], tcg_env,
cpu_gpr[a->rb]);
l1 = gen_new_label();
l2 = gen_new_label();
@@ -211,7 +211,7 @@
if (!local && NARROW_MODE(ctx)) {
TCGv t0 = tcg_temp_new();
tcg_gen_ext32u_tl(t0, cpu_gpr[rb]);
- gen_helper_tlbie(cpu_env, t0);
+ gen_helper_tlbie(tcg_env, t0);
#if defined(TARGET_PPC64)
/*
@@ -219,7 +219,7 @@
* otherwise the results are undefined.
*/
} else if (a->r) {
- gen_helper_tlbie_isa300(cpu_env, cpu_gpr[rb], cpu_gpr[a->rs],
+ gen_helper_tlbie_isa300(tcg_env, cpu_gpr[rb], cpu_gpr[a->rs],
tcg_constant_i32(a->ric << TLBIE_F_RIC_SHIFT |
a->prs << TLBIE_F_PRS_SHIFT |
a->r << TLBIE_F_R_SHIFT |
@@ -228,7 +228,7 @@
#endif
} else {
- gen_helper_tlbie(cpu_env, cpu_gpr[rb]);
+ gen_helper_tlbie(tcg_env, cpu_gpr[rb]);
}
if (local) {
@@ -236,9 +236,9 @@
}
t1 = tcg_temp_new_i32();
- tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ld_i32(t1, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
- tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_st_i32(t1, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
return true;
#endif
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 5cdf53a..4b91c34 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -10,7 +10,7 @@
static inline TCGv_ptr gen_avr_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
+ tcg_gen_addi_ptr(r, tcg_env, avr_full_offset(reg));
return r;
}
@@ -96,7 +96,7 @@
tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
} \
rs = gen_avr_ptr(rS(ctx->opcode)); \
- gen_helper_lve##name(cpu_env, rs, EA); \
+ gen_helper_lve##name(tcg_env, rs, EA); \
}
#define GEN_VR_STVE(name, opc2, opc3, size) \
@@ -115,7 +115,7 @@
tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
} \
rs = gen_avr_ptr(rS(ctx->opcode)); \
- gen_helper_stve##name(cpu_env, rs, EA); \
+ gen_helper_stve##name(tcg_env, rs, EA); \
}
GEN_VR_LDX(lvx, 0x07, 0x03);
@@ -146,7 +146,7 @@
tcg_gen_movi_i64(avr, 0);
set_avr64(rD(ctx->opcode), avr, true);
t = tcg_temp_new_i32();
- gen_helper_mfvscr(t, cpu_env);
+ gen_helper_mfvscr(t, tcg_env);
tcg_gen_extu_i32_i64(avr, t);
set_avr64(rD(ctx->opcode), avr, false);
}
@@ -167,8 +167,8 @@
bofs += 3 * 4;
#endif
- tcg_gen_ld_i32(val, cpu_env, bofs);
- gen_helper_mtvscr(cpu_env, val);
+ tcg_gen_ld_i32(val, tcg_env, bofs);
+ gen_helper_mtvscr(tcg_env, val);
}
static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
@@ -287,7 +287,7 @@
ra = gen_avr_ptr(rA(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, ra, rb); \
+ gen_helper_##name(tcg_env, rd, ra, rb); \
}
#define GEN_VXFORM3(name, opc2, opc3) \
@@ -689,10 +689,10 @@
/* Perform count for every word element using tcg_gen_clzi_i32. */
for (i = 0; i < 4; i++) {
- tcg_gen_ld_i32(tmp, cpu_env,
+ tcg_gen_ld_i32(tmp, tcg_env,
offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
tcg_gen_clzi_i32(tmp, tmp, 32);
- tcg_gen_st_i32(tmp, cpu_env,
+ tcg_gen_st_i32(tmp, tcg_env,
offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
}
}
@@ -1174,7 +1174,7 @@
ra = gen_avr_ptr(rA(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##opname(cpu_env, rd, ra, rb); \
+ gen_helper_##opname(tcg_env, rd, ra, rb); \
}
#define GEN_VXRFORM(name, opc2, opc3) \
@@ -1478,7 +1478,7 @@
} \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, rb); \
+ gen_helper_##name(tcg_env, rd, rb); \
}
#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
@@ -1625,7 +1625,7 @@
uimm = tcg_constant_i32(UIMM5(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, rb, uimm); \
+ gen_helper_##name(tcg_env, rd, rb, uimm); \
}
#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
@@ -1813,7 +1813,7 @@
if (right) {
tcg_gen_subfi_tl(rc, 32 - size, rc);
}
- gen_helper(cpu_env, vrt, vra, vrb, rc);
+ gen_helper(tcg_env, vrt, vra, vrb, rc);
return true;
}
@@ -1841,7 +1841,7 @@
tcg_gen_subfi_tl(idx, 16 - size, idx);
}
- gen_helper(cpu_env, t, rb, idx);
+ gen_helper(tcg_env, t, rb, idx);
return true;
}
@@ -2349,9 +2349,9 @@
rc = gen_avr_ptr(rC(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
if (Rc(ctx->opcode)) { \
- gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
+ gen_helper_##name1(tcg_env, rd, ra, rb, rc); \
} else { \
- gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
+ gen_helper_##name0(tcg_env, rd, ra, rb, rc); \
} \
}
@@ -2437,7 +2437,7 @@
vra = gen_avr_ptr(a->vra);
vrb = gen_avr_ptr(a->vrb);
vrc = gen_avr_ptr(a->rc);
- gen_helper(cpu_env, vrt, vra, vrb, vrc);
+ gen_helper(tcg_env, vrt, vra, vrb, vrc);
return true;
}
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 0f5b005..6db87ab 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -2,25 +2,25 @@
static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
{
- tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high));
+ tcg_gen_ld_i64(dst, tcg_env, vsr64_offset(n, high));
}
static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
{
- tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high));
+ tcg_gen_st_i64(src, tcg_env, vsr64_offset(n, high));
}
static inline TCGv_ptr gen_vsr_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, vsr_full_offset(reg));
+ tcg_gen_addi_ptr(r, tcg_env, vsr_full_offset(reg));
return r;
}
static inline TCGv_ptr gen_acc_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, acc_full_offset(reg));
+ tcg_gen_addi_ptr(r, tcg_env, acc_full_offset(reg));
return r;
}
@@ -257,7 +257,7 @@
xt = gen_vsr_ptr(xT(ctx->opcode)); \
gen_set_access_type(ctx, ACCESS_INT); \
gen_addr_register(ctx, EA); \
- gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(tcg_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
}
VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
@@ -801,10 +801,10 @@
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
if ((ctx->opcode >> (31 - 21)) & 1) { \
- gen_helper_##name(cpu_crf[6], cpu_env, xt, xa, xb); \
+ gen_helper_##name(cpu_crf[6], tcg_env, xt, xa, xb); \
} else { \
ignored = tcg_temp_new_i32(); \
- gen_helper_##name(ignored, cpu_env, xt, xa, xb); \
+ gen_helper_##name(ignored, tcg_env, xt, xa, xb); \
} \
}
@@ -829,7 +829,7 @@
xt = gen_avr_ptr(a->rt);
xb = gen_avr_ptr(a->rb);
- gen_helper_XSCVQPDP(cpu_env, ro, xt, xb);
+ gen_helper_XSCVQPDP(tcg_env, ro, xt, xb);
return true;
}
@@ -843,7 +843,7 @@
xt = gen_avr_ptr(a->rt);
xb = gen_avr_ptr(a->rb);
- gen_helper(cpu_env, xt, xb);
+ gen_helper(tcg_env, xt, xb);
return true;
}
@@ -861,7 +861,7 @@
return; \
} \
opc = tcg_constant_i32(ctx->opcode); \
- gen_helper_##name(cpu_env, opc); \
+ gen_helper_##name(tcg_env, opc); \
}
#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \
@@ -875,7 +875,7 @@
xt = gen_vsr_ptr(xT(ctx->opcode)); \
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, xt, xa, xb); \
+ gen_helper_##name(tcg_env, xt, xa, xb); \
}
#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \
@@ -888,7 +888,7 @@
} \
xt = gen_vsr_ptr(xT(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, xt, xb); \
+ gen_helper_##name(tcg_env, xt, xb); \
}
#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \
@@ -903,7 +903,7 @@
opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, opc, xa, xb); \
+ gen_helper_##name(tcg_env, opc, xa, xb); \
}
#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \
@@ -917,7 +917,7 @@
} \
opc = tcg_constant_i32(ctx->opcode); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, opc, xb); \
+ gen_helper_##name(tcg_env, opc, xb); \
}
#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \
@@ -933,7 +933,7 @@
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
- gen_helper_##name(cpu_env, opc, xt, xa, xb); \
+ gen_helper_##name(tcg_env, opc, xt, xa, xb); \
}
#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \
@@ -948,7 +948,7 @@
opc = tcg_constant_i32(ctx->opcode); \
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
- gen_helper_##name(cpu_env, opc, xt, xb); \
+ gen_helper_##name(tcg_env, opc, xt, xb); \
}
#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \
@@ -963,7 +963,7 @@
opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
- gen_helper_##name(cpu_env, opc, xa, xb); \
+ gen_helper_##name(tcg_env, opc, xa, xb); \
}
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
@@ -978,7 +978,7 @@
t0 = tcg_temp_new_i64(); \
t1 = tcg_temp_new_i64(); \
get_cpu_vsr(t0, xB(ctx->opcode), true); \
- gen_helper_##name(t1, cpu_env, t0); \
+ gen_helper_##name(t1, tcg_env, t0); \
set_cpu_vsr(xT(ctx->opcode), t1, true); \
set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
}
@@ -1191,7 +1191,7 @@
REQUIRE_VSX(ctx);
xb = vsr ? gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb);
- gen_helper(cpu_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb);
+ gen_helper(tcg_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb);
return true;
}
@@ -1420,7 +1420,7 @@
s2 = gen_vsr_ptr(src2);
s3 = gen_vsr_ptr(src3);
- gen_helper(cpu_env, t, s1, s2, s3);
+ gen_helper(tcg_env, t, s1, s2, s3);
return true;
}
@@ -1500,7 +1500,7 @@
s2 = gen_vsr_ptr(xT(ctx->opcode)); \
s3 = gen_vsr_ptr(xB(ctx->opcode)); \
} \
- gen_helper_##name(cpu_env, xt, s1, s2, s3); \
+ gen_helper_##name(tcg_env, xt, s1, s2, s3); \
}
GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
@@ -1728,9 +1728,9 @@
imm = tcg_constant_i32(a->si);
- tcg_gen_st_i32(imm, cpu_env,
+ tcg_gen_st_i32(imm, tcg_env,
offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
- tcg_gen_st_i32(imm, cpu_env,
+ tcg_gen_st_i32(imm, tcg_env,
offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));
return true;
@@ -2720,7 +2720,7 @@
xa = gen_vsr_ptr(a->xa);
xb = gen_vsr_ptr(a->xb);
- helper(cpu_env, xt, xa, xb);
+ helper(tcg_env, xt, xa, xb);
return true;
}
@@ -2741,7 +2741,7 @@
ra = gen_avr_ptr(a->ra);
rb = gen_avr_ptr(a->rb);
- helper(cpu_env, rt, ra, rb);
+ helper(tcg_env, rt, ra, rb);
return true;
}
@@ -2770,7 +2770,7 @@
xt = gen_vsr_ptr(a->xt);
xb = gen_vsr_ptr(a->xb);
- gen_helper_XVCVSPBF16(cpu_env, xt, xb);
+ gen_helper_XVCVSPBF16(tcg_env, xt, xb);
return true;
}
@@ -2833,7 +2833,7 @@
xb = gen_vsr_ptr(a->xb);
mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk);
- helper(cpu_env, xa, xb, xt, tcg_constant_i32(mask));
+ helper(tcg_env, xa, xb, xt, tcg_constant_i32(mask));
return true;
}
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index f227c76..ac2b94b 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -704,7 +704,7 @@
CSR_MPMMASK,
};
- for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
+ for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
int csrno = dump_csrs[i];
target_ulong val = 0;
RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
@@ -747,7 +747,7 @@
CSR_VTYPE,
CSR_VLENB,
};
- for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
+ for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
int csrno = dump_rvv_csrs[i];
target_ulong val = 0;
RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
@@ -1649,12 +1649,8 @@
static void riscv_cpu_init(Object *obj)
{
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- cpu_set_cpustate_pointers(cpu);
-
#ifndef CONFIG_USER_ONLY
- qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
+ qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}
@@ -2314,7 +2310,7 @@
.name = TYPE_RISCV_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(RISCVCPU),
- .instance_align = __alignof__(RISCVCPU),
+ .instance_align = __alignof(RISCVCPU),
.instance_init = riscv_cpu_init,
.abstract = true,
.class_size = sizeof(RISCVCPUClass),
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 6316cbc..ef9cf21 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -388,7 +388,7 @@
/* < private > */
CPUState parent_obj;
/* < public > */
- CPUNegativeOffsetState neg;
+
CPURISCVState env;
char *dyn_csr_xml;
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index dc14d7f..620ab54 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -78,7 +78,7 @@
if (has_ext(ctx, RVS)) {
decode_save_opc(ctx);
translator_io_start(&ctx->base);
- gen_helper_sret(cpu_pc, cpu_env);
+ gen_helper_sret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
} else {
@@ -95,7 +95,7 @@
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
translator_io_start(&ctx->base);
- gen_helper_mret(cpu_pc, cpu_env);
+ gen_helper_mret(cpu_pc, tcg_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
return true;
@@ -109,7 +109,7 @@
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
gen_update_pc(ctx, ctx->cur_insn_len);
- gen_helper_wfi(cpu_env);
+ gen_helper_wfi(tcg_env);
return true;
#else
return false;
@@ -120,7 +120,7 @@
{
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
- gen_helper_tlb_flush(cpu_env);
+ gen_helper_tlb_flush(tcg_env);
return true;
#endif
return false;
diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc
index 911bc29..4e39c00 100644
--- a/target/riscv/insn_trans/trans_rvbf16.c.inc
+++ b/target/riscv/insn_trans/trans_rvbf16.c.inc
@@ -43,7 +43,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_bf16_s(dest, cpu_env, src1);
+ gen_helper_fcvt_bf16_s(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -58,7 +58,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_s_bf16(dest, cpu_env, src1);
+ gen_helper_fcvt_s_bf16(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -82,7 +82,7 @@
data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
- vreg_ofs(ctx, a->rs2), cpu_env,
+ vreg_ofs(ctx, a->rs2), tcg_env,
ctx->cfg_ptr->vlen / 8,
ctx->cfg_ptr->vlen / 8, data,
gen_helper_vfncvtbf16_f_f_w);
@@ -111,7 +111,7 @@
data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
- vreg_ofs(ctx, a->rs2), cpu_env,
+ vreg_ofs(ctx, a->rs2), tcg_env,
ctx->cfg_ptr->vlen / 8,
ctx->cfg_ptr->vlen / 8, data,
gen_helper_vfwcvtbf16_f_f_v);
@@ -142,7 +142,7 @@
data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
tcg_gen_gvec_4_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
vreg_ofs(ctx, a->rs1),
- vreg_ofs(ctx, a->rs2), cpu_env,
+ vreg_ofs(ctx, a->rs2), tcg_env,
ctx->cfg_ptr->vlen / 8,
ctx->cfg_ptr->vlen / 8, data,
gen_helper_vfwmaccbf16_vv);
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
index 6bdb55e..d9ce9e4 100644
--- a/target/riscv/insn_trans/trans_rvd.c.inc
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
@@ -91,7 +91,7 @@
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fmadd_d(dest, cpu_env, src1, src2, src3);
+ gen_helper_fmadd_d(dest, tcg_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -109,7 +109,7 @@
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fmsub_d(dest, cpu_env, src1, src2, src3);
+ gen_helper_fmsub_d(dest, tcg_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -127,7 +127,7 @@
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fnmsub_d(dest, cpu_env, src1, src2, src3);
+ gen_helper_fnmsub_d(dest, tcg_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -145,7 +145,7 @@
TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fnmadd_d(dest, cpu_env, src1, src2, src3);
+ gen_helper_fnmadd_d(dest, tcg_env, src1, src2, src3);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -162,7 +162,7 @@
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fadd_d(dest, cpu_env, src1, src2);
+ gen_helper_fadd_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -179,7 +179,7 @@
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fsub_d(dest, cpu_env, src1, src2);
+ gen_helper_fsub_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -196,7 +196,7 @@
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fmul_d(dest, cpu_env, src1, src2);
+ gen_helper_fmul_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -213,7 +213,7 @@
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fdiv_d(dest, cpu_env, src1, src2);
+ gen_helper_fdiv_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -229,7 +229,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fsqrt_d(dest, cpu_env, src1);
+ gen_helper_fsqrt_d(dest, tcg_env, src1);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -308,7 +308,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
- gen_helper_fmin_d(dest, cpu_env, src1, src2);
+ gen_helper_fmin_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -324,7 +324,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
- gen_helper_fmax_d(dest, cpu_env, src1, src2);
+ gen_helper_fmax_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -340,7 +340,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_s_d(dest, cpu_env, src1);
+ gen_helper_fcvt_s_d(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -356,7 +356,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_d_s(dest, cpu_env, src1);
+ gen_helper_fcvt_d_s(dest, tcg_env, src1);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -372,7 +372,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
- gen_helper_feq_d(dest, cpu_env, src1, src2);
+ gen_helper_feq_d(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -387,7 +387,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
- gen_helper_flt_d(dest, cpu_env, src1, src2);
+ gen_helper_flt_d(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -402,7 +402,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
- gen_helper_fle_d(dest, cpu_env, src1, src2);
+ gen_helper_fle_d(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -431,7 +431,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_w_d(dest, cpu_env, src1);
+ gen_helper_fcvt_w_d(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -446,7 +446,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_wu_d(dest, cpu_env, src1);
+ gen_helper_fcvt_wu_d(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -461,7 +461,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_d_w(dest, cpu_env, src);
+ gen_helper_fcvt_d_w(dest, tcg_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -478,7 +478,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_d_wu(dest, cpu_env, src);
+ gen_helper_fcvt_d_wu(dest, tcg_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -496,7 +496,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_l_d(dest, cpu_env, src1);
+ gen_helper_fcvt_l_d(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -512,7 +512,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_lu_d(dest, cpu_env, src1);
+ gen_helper_fcvt_lu_d(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -542,7 +542,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_d_l(dest, cpu_env, src);
+ gen_helper_fcvt_d_l(dest, tcg_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -560,7 +560,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_d_lu(dest, cpu_env, src);
+ gen_helper_fcvt_d_lu(dest, tcg_env, src);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
index e7ab84c..97a3689 100644
--- a/target/riscv/insn_trans/trans_rvf.c.inc
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
@@ -93,7 +93,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fmadd_s(dest, cpu_env, src1, src2, src3);
+ gen_helper_fmadd_s(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -110,7 +110,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fmsub_s(dest, cpu_env, src1, src2, src3);
+ gen_helper_fmsub_s(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -127,7 +127,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fnmsub_s(dest, cpu_env, src1, src2, src3);
+ gen_helper_fnmsub_s(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -144,7 +144,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fnmadd_s(dest, cpu_env, src1, src2, src3);
+ gen_helper_fnmadd_s(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -160,7 +160,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fadd_s(dest, cpu_env, src1, src2);
+ gen_helper_fadd_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -176,7 +176,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fsub_s(dest, cpu_env, src1, src2);
+ gen_helper_fsub_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -192,7 +192,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fmul_s(dest, cpu_env, src1, src2);
+ gen_helper_fmul_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -208,7 +208,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fdiv_s(dest, cpu_env, src1, src2);
+ gen_helper_fdiv_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -223,7 +223,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fsqrt_s(dest, cpu_env, src1);
+ gen_helper_fsqrt_s(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -363,7 +363,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fmin_s(dest, cpu_env, src1, src2);
+ gen_helper_fmin_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -378,7 +378,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fmax_s(dest, cpu_env, src1, src2);
+ gen_helper_fmax_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -393,7 +393,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_w_s(dest, cpu_env, src1);
+ gen_helper_fcvt_w_s(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -407,7 +407,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_wu_s(dest, cpu_env, src1);
+ gen_helper_fcvt_wu_s(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -439,7 +439,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_feq_s(dest, cpu_env, src1, src2);
+ gen_helper_feq_s(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -453,7 +453,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_flt_s(dest, cpu_env, src1, src2);
+ gen_helper_flt_s(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -467,7 +467,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fle_s(dest, cpu_env, src1, src2);
+ gen_helper_fle_s(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -480,7 +480,7 @@
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
- gen_helper_fclass_s(dest, cpu_env, src1);
+ gen_helper_fclass_s(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -494,7 +494,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_s_w(dest, cpu_env, src);
+ gen_helper_fcvt_s_w(dest, tcg_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -509,7 +509,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_s_wu(dest, cpu_env, src);
+ gen_helper_fcvt_s_wu(dest, tcg_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -541,7 +541,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_l_s(dest, cpu_env, src1);
+ gen_helper_fcvt_l_s(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -556,7 +556,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_lu_s(dest, cpu_env, src1);
+ gen_helper_fcvt_lu_s(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -571,7 +571,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_s_l(dest, cpu_env, src);
+ gen_helper_fcvt_s_l(dest, tcg_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -587,7 +587,7 @@
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_s_lu(dest, cpu_env, src);
+ gen_helper_fcvt_s_lu(dest, tcg_env, src);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
index 3e93221..aa9d41c 100644
--- a/target/riscv/insn_trans/trans_rvh.c.inc
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
@@ -45,7 +45,7 @@
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
decode_save_opc(ctx);
- func(dest, cpu_env, addr);
+ func(dest, tcg_env, addr);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -57,7 +57,7 @@
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
decode_save_opc(ctx);
- func(cpu_env, addr, data);
+ func(tcg_env, addr, data);
return true;
}
#endif /* CONFIG_USER_ONLY */
@@ -148,7 +148,7 @@
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
- gen_helper_hyp_gvma_tlb_flush(cpu_env);
+ gen_helper_hyp_gvma_tlb_flush(tcg_env);
return true;
#endif
return false;
@@ -159,7 +159,7 @@
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
- gen_helper_hyp_tlb_flush(cpu_env);
+ gen_helper_hyp_tlb_flush(tcg_env);
return true;
#endif
return false;
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index 2971422..25cb605 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -830,7 +830,7 @@
TCGv_i32 csr = tcg_constant_i32(rc);
translator_io_start(&ctx->base);
- gen_helper_csrr(dest, cpu_env, csr);
+ gen_helper_csrr(dest, tcg_env, csr);
gen_set_gpr(ctx, rd, dest);
return do_csr_post(ctx);
}
@@ -840,7 +840,7 @@
TCGv_i32 csr = tcg_constant_i32(rc);
translator_io_start(&ctx->base);
- gen_helper_csrw(cpu_env, csr, src);
+ gen_helper_csrw(tcg_env, csr, src);
return do_csr_post(ctx);
}
@@ -850,7 +850,7 @@
TCGv_i32 csr = tcg_constant_i32(rc);
translator_io_start(&ctx->base);
- gen_helper_csrrw(dest, cpu_env, csr, src, mask);
+ gen_helper_csrrw(dest, tcg_env, csr, src, mask);
gen_set_gpr(ctx, rd, dest);
return do_csr_post(ctx);
}
@@ -862,8 +862,8 @@
TCGv_i32 csr = tcg_constant_i32(rc);
translator_io_start(&ctx->base);
- gen_helper_csrr_i128(destl, cpu_env, csr);
- tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_helper_csrr_i128(destl, tcg_env, csr);
+ tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
gen_set_gpr128(ctx, rd, destl, desth);
return do_csr_post(ctx);
}
@@ -873,7 +873,7 @@
TCGv_i32 csr = tcg_constant_i32(rc);
translator_io_start(&ctx->base);
- gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
+ gen_helper_csrw_i128(tcg_env, csr, srcl, srch);
return do_csr_post(ctx);
}
@@ -885,8 +885,8 @@
TCGv_i32 csr = tcg_constant_i32(rc);
translator_io_start(&ctx->base);
- gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
- tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_helper_csrrw_i128(destl, tcg_env, csr, srcl, srch, maskl, maskh);
+ tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
gen_set_gpr128(ctx, rd, destl, desth);
return do_csr_post(ctx);
}
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
index 2f0fd1f..795f0cc 100644
--- a/target/riscv/insn_trans/trans_rvm.c.inc
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
@@ -169,8 +169,8 @@
static void gen_div_i128(TCGv rdl, TCGv rdh,
TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
{
- gen_helper_divs_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
- tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_helper_divs_i128(rdl, tcg_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, tcg_env, offsetof(CPURISCVState, retxh));
}
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
@@ -212,8 +212,8 @@
static void gen_divu_i128(TCGv rdl, TCGv rdh,
TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
{
- gen_helper_divu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
- tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_helper_divu_i128(rdl, tcg_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, tcg_env, offsetof(CPURISCVState, retxh));
}
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
@@ -244,8 +244,8 @@
static void gen_rem_i128(TCGv rdl, TCGv rdh,
TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
{
- gen_helper_rems_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
- tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_helper_rems_i128(rdl, tcg_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, tcg_env, offsetof(CPURISCVState, retxh));
}
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
@@ -289,8 +289,8 @@
static void gen_remu_i128(TCGv rdl, TCGv rdh,
TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
{
- gen_helper_remu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
- tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_helper_remu_i128(rdl, tcg_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, tcg_env, offsetof(CPURISCVState, retxh));
}
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 63404f6..78bd363 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -165,7 +165,7 @@
s1 = get_gpr(s, rs1, EXT_ZERO);
}
- gen_helper_vsetvl(dst, cpu_env, s1, s2);
+ gen_helper_vsetvl(dst, tcg_env, s1, s2);
gen_set_gpr(s, rd, dst);
mark_vs_dirty(s);
@@ -185,7 +185,7 @@
dst = dest_gpr(s, rd);
- gen_helper_vsetvl(dst, cpu_env, s1, s2);
+ gen_helper_vsetvl(dst, tcg_env, s1, s2);
gen_set_gpr(s, rd, dst);
mark_vs_dirty(s);
gen_update_pc(s, s->cur_insn_len);
@@ -633,10 +633,10 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- fn(dest, mask, base, cpu_env, desc);
+ fn(dest, mask, base, tcg_env, desc);
if (!is_store) {
mark_vs_dirty(s);
@@ -794,10 +794,10 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- fn(dest, mask, base, stride, cpu_env, desc);
+ fn(dest, mask, base, stride, tcg_env, desc);
if (!is_store) {
mark_vs_dirty(s);
@@ -900,11 +900,11 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- fn(dest, mask, base, index, cpu_env, desc);
+ fn(dest, mask, base, index, tcg_env, desc);
if (!is_store) {
mark_vs_dirty(s);
@@ -1039,10 +1039,10 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- fn(dest, mask, base, cpu_env, desc);
+ fn(dest, mask, base, tcg_env, desc);
mark_vs_dirty(s);
gen_set_label(over);
@@ -1100,9 +1100,9 @@
s->cfg_ptr->vlen / 8, data));
base = get_gpr(s, rs1, EXT_NONE);
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
- fn(dest, base, cpu_env, desc);
+ fn(dest, base, tcg_env, desc);
if (!is_store) {
mark_vs_dirty(s);
@@ -1199,7 +1199,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
- cpu_env, s->cfg_ptr->vlen / 8,
+ tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
}
mark_vs_dirty(s);
@@ -1251,11 +1251,11 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- fn(dest, mask, src1, src2, cpu_env, desc);
+ fn(dest, mask, src1, src2, tcg_env, desc);
mark_vs_dirty(s);
gen_set_label(over);
@@ -1413,11 +1413,11 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- fn(dest, mask, src1, src2, cpu_env, desc);
+ fn(dest, mask, src1, src2, tcg_env, desc);
mark_vs_dirty(s);
gen_set_label(over);
@@ -1492,7 +1492,7 @@
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2),
- cpu_env, s->cfg_ptr->vlen / 8,
+ tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8,
data, fn);
mark_vs_dirty(s);
@@ -1568,7 +1568,7 @@
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1),
vreg_ofs(s, a->rs2),
- cpu_env, s->cfg_ptr->vlen / 8,
+ tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
mark_vs_dirty(s);
gen_set_label(over);
@@ -1639,7 +1639,7 @@
data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
- vreg_ofs(s, vs2), cpu_env, s->cfg_ptr->vlen / 8,
+ vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
mark_vs_dirty(s);
gen_set_label(over);
@@ -1830,7 +1830,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew]); \
@@ -2036,7 +2036,7 @@
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
- cpu_env, s->cfg_ptr->vlen / 8,
+ tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
fns[s->sew]);
gen_set_label(over);
@@ -2084,8 +2084,8 @@
tcg_gen_ext_tl_i64(s1_i64, s1);
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
- fns[s->sew](dest, s1_i64, cpu_env, desc);
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
+ fns[s->sew](dest, s1_i64, tcg_env, desc);
}
mark_vs_dirty(s);
@@ -2123,8 +2123,8 @@
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
- fns[s->sew](dest, s1, cpu_env, desc);
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
+ fns[s->sew](dest, s1, tcg_env, desc);
mark_vs_dirty(s);
gen_set_label(over);
@@ -2274,7 +2274,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
@@ -2306,15 +2306,15 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
/* NaN-box f[rs1] */
t1 = tcg_temp_new_i64();
do_nanbox(s, t1, cpu_fpr[rs1]);
- fn(dest, mask, t1, src2, cpu_env, desc);
+ fn(dest, mask, t1, src2, tcg_env, desc);
mark_vs_dirty(s);
gen_set_label(over);
@@ -2390,7 +2390,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
@@ -2464,7 +2464,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
@@ -2580,7 +2580,7 @@
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
- vreg_ofs(s, a->rs2), cpu_env,
+ vreg_ofs(s, a->rs2), tcg_env,
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
mark_vs_dirty(s);
@@ -2693,9 +2693,9 @@
dest = tcg_temp_new_ptr();
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
- fns[s->sew - 1](dest, t1, cpu_env, desc);
+ fns[s->sew - 1](dest, t1, tcg_env, desc);
mark_vs_dirty(s);
gen_set_label(over);
@@ -2769,7 +2769,7 @@
data = FIELD_DP32(data, VDATA, VTA, s->vta); \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
@@ -2820,7 +2820,7 @@
data = FIELD_DP32(data, VDATA, VTA, s->vta); \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew]); \
@@ -2887,7 +2887,7 @@
data = FIELD_DP32(data, VDATA, VTA, s->vta); \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew - 1]); \
@@ -2936,7 +2936,7 @@
data = FIELD_DP32(data, VDATA, VTA, s->vta); \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, \
fns[s->sew]); \
@@ -3026,7 +3026,7 @@
FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
vreg_ofs(s, a->rs1), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, data, fn); \
mark_vs_dirty(s); \
@@ -3064,10 +3064,10 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc);
+ gen_helper_vcpop_m(dst, mask, src2, tcg_env, desc);
gen_set_gpr(s, a->rd, dst);
return true;
}
@@ -3093,10 +3093,10 @@
desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data));
- tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
- tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+ tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
- gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc);
+ gen_helper_vfirst_m(dst, mask, src2, tcg_env, desc);
gen_set_gpr(s, a->rd, dst);
return true;
}
@@ -3128,7 +3128,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
- cpu_env, s->cfg_ptr->vlen / 8, \
+ tcg_env, s->cfg_ptr->vlen / 8, \
s->cfg_ptr->vlen / 8, \
data, fn); \
mark_vs_dirty(s); \
@@ -3170,7 +3170,7 @@
gen_helper_viota_m_w, gen_helper_viota_m_d,
};
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
- vreg_ofs(s, a->rs2), cpu_env,
+ vreg_ofs(s, a->rs2), tcg_env,
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fns[s->sew]);
mark_vs_dirty(s);
@@ -3200,7 +3200,7 @@
gen_helper_vid_v_w, gen_helper_vid_v_d,
};
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
- cpu_env, s->cfg_ptr->vlen / 8,
+ tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8,
data, fns[s->sew]);
mark_vs_dirty(s);
@@ -3288,7 +3288,7 @@
/* Convert the index to a pointer. */
tcg_gen_ext_i32_ptr(base, ofs);
- tcg_gen_add_ptr(base, base, cpu_env);
+ tcg_gen_add_ptr(base, base, tcg_env);
/* Perform the load. */
load_element(dest, base,
@@ -3306,7 +3306,7 @@
static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
int vreg, int idx, bool sign)
{
- load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign);
+ load_element(dest, tcg_env, endian_ofs(s, vreg, idx), s->sew, sign);
}
/* Integer Scalar Move Instruction */
@@ -3340,7 +3340,7 @@
static void vec_element_storei(DisasContext *s, int vreg,
int idx, TCGv_i64 val)
{
- store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
+ store_element(val, tcg_env, endian_ofs(s, vreg, idx), s->sew);
}
/* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
@@ -3620,7 +3620,7 @@
data = FIELD_DP32(data, VDATA, VTA, s->vta);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
- cpu_env, s->cfg_ptr->vlen / 8,
+ tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
fns[s->sew]);
mark_vs_dirty(s);
@@ -3650,7 +3650,7 @@
TCGLabel *over = gen_new_label(); \
tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over); \
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
- cpu_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \
+ tcg_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \
mark_vs_dirty(s); \
gen_set_label(over); \
} \
@@ -3722,7 +3722,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
- vreg_ofs(s, a->rs2), cpu_env,
+ vreg_ofs(s, a->rs2), tcg_env,
s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data, fn);
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
index c00c70d..e691519 100644
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
@@ -170,7 +170,7 @@
data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
data, fns[s->sew]); \
mark_vs_dirty(s); \
@@ -244,7 +244,7 @@
/* save opcode for unwinding in case we throw an exception */ \
decode_save_opc(s); \
egs = tcg_constant_i32(EGS); \
- gen_helper_egs_check(egs, cpu_env); \
+ gen_helper_egs_check(egs, tcg_env); \
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
} \
\
@@ -257,9 +257,9 @@
rs2_v = tcg_temp_new_ptr(); \
desc = tcg_constant_i32( \
simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
- tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
- tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
- gen_helper_##NAME(rd_v, rs2_v, cpu_env, desc); \
+ tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \
+ tcg_gen_addi_ptr(rs2_v, tcg_env, vreg_ofs(s, a->rs2)); \
+ gen_helper_##NAME(rd_v, rs2_v, tcg_env, desc); \
mark_vs_dirty(s); \
gen_set_label(over); \
return true; \
@@ -320,7 +320,7 @@
/* save opcode for unwinding in case we throw an exception */ \
decode_save_opc(s); \
egs = tcg_constant_i32(EGS); \
- gen_helper_egs_check(egs, cpu_env); \
+ gen_helper_egs_check(egs, tcg_env); \
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
} \
\
@@ -335,9 +335,9 @@
uimm_v = tcg_constant_i32(a->rs1); \
desc = tcg_constant_i32( \
simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
- tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
- tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
- gen_helper_##NAME(rd_v, rs2_v, uimm_v, cpu_env, desc); \
+ tcg_gen_addi_ptr(rd_v, tcg_env, vreg_ofs(s, a->rd)); \
+ tcg_gen_addi_ptr(rs2_v, tcg_env, vreg_ofs(s, a->rs2)); \
+ gen_helper_##NAME(rd_v, rs2_v, uimm_v, tcg_env, desc); \
mark_vs_dirty(s); \
gen_set_label(over); \
return true; \
@@ -390,7 +390,7 @@
/* save opcode for unwinding in case we throw an exception */ \
decode_save_opc(s); \
egs = tcg_constant_i32(EGS); \
- gen_helper_egs_check(egs, cpu_env); \
+ gen_helper_egs_check(egs, tcg_env); \
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
} \
\
@@ -401,7 +401,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma); \
\
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \
- vreg_ofs(s, a->rs2), cpu_env, \
+ vreg_ofs(s, a->rs2), tcg_env, \
s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
data, gen_helper_##NAME); \
\
@@ -444,7 +444,7 @@
/* save opcode for unwinding in case we throw an exception */
decode_save_opc(s);
egs = tcg_constant_i32(ZVKNH_EGS);
- gen_helper_egs_check(egs, cpu_env);
+ gen_helper_egs_check(egs, tcg_env);
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
}
@@ -455,7 +455,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
- vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
+ vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
s->sew == MO_32 ?
gen_helper_vsha2cl32_vv : gen_helper_vsha2cl64_vv);
@@ -478,7 +478,7 @@
/* save opcode for unwinding in case we throw an exception */
decode_save_opc(s);
egs = tcg_constant_i32(ZVKNH_EGS);
- gen_helper_egs_check(egs, cpu_env);
+ gen_helper_egs_check(egs, tcg_env);
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
}
@@ -489,7 +489,7 @@
data = FIELD_DP32(data, VDATA, VMA, s->vma);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
- vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
+ vreg_ofs(s, a->rs2), tcg_env, s->cfg_ptr->vlen / 8,
s->cfg_ptr->vlen / 8, data,
s->sew == MO_32 ?
gen_helper_vsha2ch32_vv : gen_helper_vsha2ch64_vv);
diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc
index 8d8a64f..2d992e1 100644
--- a/target/riscv/insn_trans/trans_rvzce.c.inc
+++ b/target/riscv/insn_trans/trans_rvzce.c.inc
@@ -298,7 +298,7 @@
* that might come from cpu_ld*_code() in the helper.
*/
gen_update_pc(ctx, 0);
- gen_helper_cm_jalt(cpu_pc, cpu_env, tcg_constant_i32(a->index));
+ gen_helper_cm_jalt(cpu_pc, tcg_env, tcg_constant_i32(a->index));
/* c.jt vs c.jalt depends on the index. */
if (a->index >= 32) {
diff --git a/target/riscv/insn_trans/trans_rvzfa.c.inc b/target/riscv/insn_trans/trans_rvzfa.c.inc
index 0fdd269..fd7e2da 100644
--- a/target/riscv/insn_trans/trans_rvzfa.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfa.c.inc
@@ -187,7 +187,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fminm_s(dest, cpu_env, src1, src2);
+ gen_helper_fminm_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -204,7 +204,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fmaxm_s(dest, cpu_env, src1, src2);
+ gen_helper_fmaxm_s(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -221,7 +221,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
- gen_helper_fminm_d(dest, cpu_env, src1, src2);
+ gen_helper_fminm_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -238,7 +238,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
- gen_helper_fmaxm_d(dest, cpu_env, src1, src2);
+ gen_helper_fmaxm_d(dest, tcg_env, src1, src2);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -255,7 +255,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fminm_h(dest, cpu_env, src1, src2);
+ gen_helper_fminm_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -272,7 +272,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fmaxm_h(dest, cpu_env, src1, src2);
+ gen_helper_fmaxm_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -289,7 +289,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fround_s(dest, cpu_env, src1);
+ gen_helper_fround_s(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -306,7 +306,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_froundnx_s(dest, cpu_env, src1);
+ gen_helper_froundnx_s(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -323,7 +323,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fround_d(dest, cpu_env, src1);
+ gen_helper_fround_d(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -340,7 +340,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_froundnx_d(dest, cpu_env, src1);
+ gen_helper_froundnx_d(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -357,7 +357,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fround_h(dest, cpu_env, src1);
+ gen_helper_fround_h(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -374,7 +374,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_froundnx_h(dest, cpu_env, src1);
+ gen_helper_froundnx_h(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -393,7 +393,7 @@
/* Rounding mode is RTZ. */
gen_set_rm(ctx, RISCV_FRM_RTZ);
- gen_helper_fcvtmod_w_d(t1, cpu_env, src1);
+ gen_helper_fcvtmod_w_d(t1, tcg_env, src1);
tcg_gen_trunc_i64_tl(dst, t1);
gen_set_gpr(ctx, a->rd, dst);
@@ -440,7 +440,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fleq_s(dest, cpu_env, src1, src2);
+ gen_helper_fleq_s(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -455,7 +455,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fltq_s(dest, cpu_env, src1, src2);
+ gen_helper_fltq_s(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -470,7 +470,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fleq_d(dest, cpu_env, src1, src2);
+ gen_helper_fleq_d(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -485,7 +485,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fltq_d(dest, cpu_env, src1, src2);
+ gen_helper_fltq_d(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -500,7 +500,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fleq_h(dest, cpu_env, src1, src2);
+ gen_helper_fleq_h(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -515,7 +515,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fltq_h(dest, cpu_env, src1, src2);
+ gen_helper_fltq_h(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
index 4b01812..1eb458b 100644
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -95,7 +95,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fmadd_h(dest, cpu_env, src1, src2, src3);
+ gen_helper_fmadd_h(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -112,7 +112,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fmsub_h(dest, cpu_env, src1, src2, src3);
+ gen_helper_fmsub_h(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -129,7 +129,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fnmsub_h(dest, cpu_env, src1, src2, src3);
+ gen_helper_fnmsub_h(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -146,7 +146,7 @@
TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
gen_set_rm(ctx, a->rm);
- gen_helper_fnmadd_h(dest, cpu_env, src1, src2, src3);
+ gen_helper_fnmadd_h(dest, tcg_env, src1, src2, src3);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -162,7 +162,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fadd_h(dest, cpu_env, src1, src2);
+ gen_helper_fadd_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -178,7 +178,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fsub_h(dest, cpu_env, src1, src2);
+ gen_helper_fsub_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -194,7 +194,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fmul_h(dest, cpu_env, src1, src2);
+ gen_helper_fmul_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -210,7 +210,7 @@
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
gen_set_rm(ctx, a->rm);
- gen_helper_fdiv_h(dest, cpu_env, src1, src2);
+ gen_helper_fdiv_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -225,7 +225,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fsqrt_h(dest, cpu_env, src1);
+ gen_helper_fsqrt_h(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -366,7 +366,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fmin_h(dest, cpu_env, src1, src2);
+ gen_helper_fmin_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -381,7 +381,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fmax_h(dest, cpu_env, src1, src2);
+ gen_helper_fmax_h(dest, tcg_env, src1, src2);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
return true;
@@ -396,7 +396,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_s_h(dest, cpu_env, src1);
+ gen_helper_fcvt_s_h(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -414,7 +414,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_d_h(dest, cpu_env, src1);
+ gen_helper_fcvt_d_h(dest, tcg_env, src1);
gen_set_fpr_d(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -431,7 +431,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_h_s(dest, cpu_env, src1);
+ gen_helper_fcvt_h_s(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -448,7 +448,7 @@
TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_h_d(dest, cpu_env, src1);
+ gen_helper_fcvt_h_d(dest, tcg_env, src1);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -464,7 +464,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_feq_h(dest, cpu_env, src1, src2);
+ gen_helper_feq_h(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -478,7 +478,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_flt_h(dest, cpu_env, src1, src2);
+ gen_helper_flt_h(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
@@ -493,7 +493,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
- gen_helper_fle_h(dest, cpu_env, src1, src2);
+ gen_helper_fle_h(dest, tcg_env, src1, src2);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -506,7 +506,7 @@
TCGv dest = dest_gpr(ctx, a->rd);
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
- gen_helper_fclass_h(dest, cpu_env, src1);
+ gen_helper_fclass_h(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -520,7 +520,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_w_h(dest, cpu_env, src1);
+ gen_helper_fcvt_w_h(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -534,7 +534,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_wu_h(dest, cpu_env, src1);
+ gen_helper_fcvt_wu_h(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -548,7 +548,7 @@
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_h_w(dest, cpu_env, t0);
+ gen_helper_fcvt_h_w(dest, tcg_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -564,7 +564,7 @@
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_h_wu(dest, cpu_env, t0);
+ gen_helper_fcvt_h_wu(dest, tcg_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -615,7 +615,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_l_h(dest, cpu_env, src1);
+ gen_helper_fcvt_l_h(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -630,7 +630,7 @@
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_lu_h(dest, cpu_env, src1);
+ gen_helper_fcvt_lu_h(dest, tcg_env, src1);
gen_set_gpr(ctx, a->rd, dest);
return true;
}
@@ -645,7 +645,7 @@
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_h_l(dest, cpu_env, t0);
+ gen_helper_fcvt_h_l(dest, tcg_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
@@ -662,7 +662,7 @@
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
gen_set_rm(ctx, a->rm);
- gen_helper_fcvt_h_lu(dest, cpu_env, t0);
+ gen_helper_fcvt_h_lu(dest, tcg_env, t0);
gen_set_fpr_hs(ctx, a->rd, dest);
mark_fs_dirty(ctx);
diff --git a/target/riscv/insn_trans/trans_rvzicbo.c.inc b/target/riscv/insn_trans/trans_rvzicbo.c.inc
index 7df9c30..e5a7704 100644
--- a/target/riscv/insn_trans/trans_rvzicbo.c.inc
+++ b/target/riscv/insn_trans/trans_rvzicbo.c.inc
@@ -31,27 +31,27 @@
static bool trans_cbo_clean(DisasContext *ctx, arg_cbo_clean *a)
{
REQUIRE_ZICBOM(ctx);
- gen_helper_cbo_clean_flush(cpu_env, cpu_gpr[a->rs1]);
+ gen_helper_cbo_clean_flush(tcg_env, cpu_gpr[a->rs1]);
return true;
}
static bool trans_cbo_flush(DisasContext *ctx, arg_cbo_flush *a)
{
REQUIRE_ZICBOM(ctx);
- gen_helper_cbo_clean_flush(cpu_env, cpu_gpr[a->rs1]);
+ gen_helper_cbo_clean_flush(tcg_env, cpu_gpr[a->rs1]);
return true;
}
static bool trans_cbo_inval(DisasContext *ctx, arg_cbo_inval *a)
{
REQUIRE_ZICBOM(ctx);
- gen_helper_cbo_inval(cpu_env, cpu_gpr[a->rs1]);
+ gen_helper_cbo_inval(tcg_env, cpu_gpr[a->rs1]);
return true;
}
static bool trans_cbo_zero(DisasContext *ctx, arg_cbo_zero *a)
{
REQUIRE_ZICBOZ(ctx);
- gen_helper_cbo_zero(cpu_env, cpu_gpr[a->rs1]);
+ gen_helper_cbo_zero(tcg_env, cpu_gpr[a->rs1]);
return true;
}
diff --git a/target/riscv/insn_trans/trans_svinval.c.inc b/target/riscv/insn_trans/trans_svinval.c.inc
index f3cd7d5..0f692a1 100644
--- a/target/riscv/insn_trans/trans_svinval.c.inc
+++ b/target/riscv/insn_trans/trans_svinval.c.inc
@@ -29,7 +29,7 @@
REQUIRE_EXT(ctx, RVS);
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
- gen_helper_tlb_flush(cpu_env);
+ gen_helper_tlb_flush(tcg_env);
return true;
#endif
return false;
@@ -58,7 +58,7 @@
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
- gen_helper_hyp_tlb_flush(cpu_env);
+ gen_helper_hyp_tlb_flush(tcg_env);
return true;
#endif
return false;
@@ -71,7 +71,7 @@
REQUIRE_EXT(ctx, RVH);
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
- gen_helper_hyp_gvma_tlb_flush(cpu_env);
+ gen_helper_hyp_gvma_tlb_flush(tcg_env);
return true;
#endif
return false;
diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc
index da093a4..810d766 100644
--- a/target/riscv/insn_trans/trans_xthead.c.inc
+++ b/target/riscv/insn_trans/trans_xthead.c.inc
@@ -985,7 +985,7 @@
#ifndef CONFIG_USER_ONLY
REQUIRE_PRIV_MS(ctx);
- gen_helper_tlb_flush_all(cpu_env);
+ gen_helper_tlb_flush_all(tcg_env);
return true;
#else
return false;
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 7dbf173..f0be79b 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -238,13 +238,13 @@
static void generate_exception(DisasContext *ctx, int excp)
{
gen_update_pc(ctx, 0);
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
ctx->base.is_jmp = DISAS_NORETURN;
}
static void gen_exception_illegal(DisasContext *ctx)
{
- tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), tcg_env,
offsetof(CPURISCVState, bins));
if (ctx->virt_inst_excp) {
generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT);
@@ -255,7 +255,7 @@
static void gen_exception_inst_addr_mis(DisasContext *ctx, TCGv target)
{
- tcg_gen_st_tl(target, cpu_env, offsetof(CPURISCVState, badaddr));
+ tcg_gen_st_tl(target, tcg_env, offsetof(CPURISCVState, badaddr));
generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
@@ -263,7 +263,7 @@
{
#ifndef CONFIG_USER_ONLY
if (ctx->itrigger) {
- gen_helper_itrigger_match(cpu_env);
+ gen_helper_itrigger_match(tcg_env);
}
#endif
tcg_gen_lookup_and_goto_ptr();
@@ -273,7 +273,7 @@
{
#ifndef CONFIG_USER_ONLY
if (ctx->itrigger) {
- gen_helper_itrigger_match(cpu_env);
+ gen_helper_itrigger_match(tcg_env);
}
#endif
tcg_gen_exit_tb(NULL, 0);
@@ -630,14 +630,14 @@
ctx->mstatus_fs = EXT_STATUS_DIRTY;
tmp = tcg_temp_new();
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
- tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
+ tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
if (ctx->virt_enabled) {
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
- tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
}
}
}
@@ -659,14 +659,14 @@
ctx->mstatus_vs = EXT_STATUS_DIRTY;
tmp = tcg_temp_new();
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
- tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
+ tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
if (ctx->virt_enabled) {
- tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
- tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
}
}
}
@@ -688,7 +688,7 @@
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
decode_save_opc(ctx);
- gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
+ gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm));
}
static void gen_set_rm_chkfrm(DisasContext *ctx, int rm)
@@ -701,7 +701,7 @@
/* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
decode_save_opc(ctx);
- gen_helper_set_rounding_mode_chkfrm(cpu_env, tcg_constant_i32(rm));
+ gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm));
}
static int ex_plus_1(DisasContext *ctx, int nf)
@@ -1074,7 +1074,7 @@
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUState *cpu = ctx->cs;
- CPURISCVState *env = cpu->env_ptr;
+ CPURISCVState *env = cpu_env(cpu);
return cpu_ldl_code(env, pc);
}
@@ -1166,7 +1166,7 @@
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPURISCVState *env = cs->env_ptr;
+ CPURISCVState *env = cpu_env(cs);
RISCVCPU *cpu = RISCV_CPU(cs);
uint32_t tb_flags = ctx->base.tb->flags;
@@ -1219,7 +1219,7 @@
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPURISCVState *env = cpu->env_ptr;
+ CPURISCVState *env = cpu_env(cpu);
uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
ctx->ol = ctx->xl;
@@ -1306,28 +1306,28 @@
cpu_gprh[0] = NULL;
for (i = 1; i < 32; i++) {
- cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ cpu_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
- cpu_gprh[i] = tcg_global_mem_new(cpu_env,
+ cpu_gprh[i] = tcg_global_mem_new(tcg_env,
offsetof(CPURISCVState, gprh[i]), riscv_int_regnamesh[i]);
}
for (i = 0; i < 32; i++) {
- cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
}
- cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
- cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
- cpu_vstart = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vstart),
+ cpu_pc = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, pc), "pc");
+ cpu_vl = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vl), "vl");
+ cpu_vstart = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vstart),
"vstart");
- load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
+ load_res = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_res),
"load_res");
- load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
+ load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
"load_val");
/* Assign PM CSRs to tcg globals */
- pm_mask = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, cur_pmmask),
+ pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
"pmmask");
- pm_base = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, cur_pmbase),
+ pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
"pmbase");
}
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 3fb05cc..cba02c1 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -516,7 +516,7 @@
k++;
continue;
}
- target_ulong addr = base + ((i * nf + k) << log2_esz);
+ addr = base + ((i * nf + k) << log2_esz);
ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
k++;
}
@@ -4791,9 +4791,10 @@
uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
uint32_t vta = vext_vta(desc); \
uint32_t vma = vext_vma(desc); \
- target_ulong i_max, i; \
+ target_ulong i_max, i_min, i; \
\
- i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
+ i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
+ i_max = MAX(i_min, env->vstart); \
for (i = env->vstart; i < i_max; ++i) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 157e57d..4d0d3a0 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -183,12 +183,8 @@
static void rx_cpu_init(Object *obj)
{
- CPUState *cs = CPU(obj);
RXCPU *cpu = RX_CPU(obj);
- CPURXState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
- cs->env_ptr = env;
qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2);
}
@@ -248,6 +244,7 @@
.name = TYPE_RX_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(RXCPU),
+ .instance_align = __alignof(RXCPU),
.instance_init = rx_cpu_init,
.abstract = true,
.class_size = sizeof(RXCPUClass),
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index 7f03ffc..f66754e 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -111,7 +111,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPURXState env;
};
diff --git a/target/rx/translate.c b/target/rx/translate.c
index f552a03..f886083 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -237,7 +237,7 @@
{
if (FIELD_EX32(ctx->tb_flags, PSW, PM)) {
if (is_exception) {
- gen_helper_raise_privilege_violation(cpu_env);
+ gen_helper_raise_privilege_violation(tcg_env);
}
return 0;
} else {
@@ -318,7 +318,7 @@
{
switch (cr) {
case 0: /* PSW */
- gen_helper_pack_psw(ret, cpu_env);
+ gen_helper_pack_psw(ret, tcg_env);
break;
case 1: /* PC */
tcg_gen_movi_i32(ret, pc);
@@ -370,7 +370,7 @@
}
switch (cr) {
case 0: /* PSW */
- gen_helper_set_psw(cpu_env, val);
+ gen_helper_set_psw(tcg_env, val);
if (is_privileged(ctx, 0)) {
/* PSW.{I,U} may be updated here. exit TB. */
ctx->base.is_jmp = DISAS_UPDATE;
@@ -385,7 +385,7 @@
}
break;
case 3: /* FPSW */
- gen_helper_set_fpsw(cpu_env, val);
+ gen_helper_set_fpsw(tcg_env, val);
break;
case 8: /* BPSW */
tcg_gen_mov_i32(cpu_bpsw, val);
@@ -1244,12 +1244,12 @@
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
- gen_helper_div(ret, cpu_env, arg1, arg2);
+ gen_helper_div(ret, tcg_env, arg1, arg2);
}
static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
- gen_helper_divu(ret, cpu_env, arg1, arg2);
+ gen_helper_divu(ret, tcg_env, arg1, arg2);
}
/* div #imm, rd */
@@ -1644,35 +1644,35 @@
/* scmpu */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
- gen_helper_scmpu(cpu_env);
+ gen_helper_scmpu(tcg_env);
return true;
}
/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
- gen_helper_smovu(cpu_env);
+ gen_helper_smovu(tcg_env);
return true;
}
/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
- gen_helper_smovf(cpu_env);
+ gen_helper_smovf(tcg_env);
return true;
}
/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
- gen_helper_smovb(cpu_env);
+ gen_helper_smovb(tcg_env);
return true;
}
#define STRING(op) \
do { \
TCGv size = tcg_constant_i32(a->sz); \
- gen_helper_##op(cpu_env, size); \
+ gen_helper_##op(tcg_env, size); \
} while (0)
/* suntile.<bwl> */
@@ -1803,7 +1803,7 @@
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
TCGv imm = tcg_constant_i32(a->imm + 1);
- gen_helper_racw(cpu_env, imm);
+ gen_helper_racw(tcg_env, imm);
return true;
}
@@ -1825,7 +1825,7 @@
/* satr */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
- gen_helper_satr(cpu_env);
+ gen_helper_satr(tcg_env);
return true;
}
@@ -1835,7 +1835,7 @@
cat3(arg_, name, _ir) * a) \
{ \
TCGv imm = tcg_constant_i32(li(ctx, 0)); \
- gen_helper_##op(cpu_regs[a->rd], cpu_env, \
+ gen_helper_##op(cpu_regs[a->rd], tcg_env, \
cpu_regs[a->rd], imm); \
return true; \
} \
@@ -1845,7 +1845,7 @@
TCGv val, mem; \
mem = tcg_temp_new(); \
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
- gen_helper_##op(cpu_regs[a->rd], cpu_env, \
+ gen_helper_##op(cpu_regs[a->rd], tcg_env, \
cpu_regs[a->rd], val); \
return true; \
}
@@ -1856,7 +1856,7 @@
TCGv val, mem; \
mem = tcg_temp_new(); \
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
- gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \
+ gen_helper_##op(cpu_regs[a->rd], tcg_env, val); \
return true; \
}
@@ -1869,7 +1869,7 @@
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
TCGv imm = tcg_constant_i32(li(ctx, 0));
- gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
+ gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
return true;
}
@@ -1880,7 +1880,7 @@
TCGv val, mem;
mem = tcg_temp_new();
val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
- gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
+ gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
return true;
}
@@ -1894,7 +1894,7 @@
TCGv val, mem;
mem = tcg_temp_new();
val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
- gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
+ gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
return true;
}
@@ -2146,7 +2146,7 @@
psw = tcg_temp_new();
tcg_gen_mov_i32(cpu_pc, cpu_bpc);
tcg_gen_mov_i32(psw, cpu_bpsw);
- gen_helper_set_psw_rte(cpu_env, psw);
+ gen_helper_set_psw_rte(tcg_env, psw);
ctx->base.is_jmp = DISAS_EXIT;
}
return true;
@@ -2160,7 +2160,7 @@
psw = tcg_temp_new();
pop(cpu_pc);
pop(psw);
- gen_helper_set_psw_rte(cpu_env, psw);
+ gen_helper_set_psw_rte(tcg_env, psw);
ctx->base.is_jmp = DISAS_EXIT;
}
return true;
@@ -2170,7 +2170,7 @@
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
- gen_helper_rxbrk(cpu_env);
+ gen_helper_rxbrk(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
return true;
}
@@ -2183,7 +2183,7 @@
tcg_debug_assert(a->imm < 0x100);
vec = tcg_constant_i32(a->imm);
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
- gen_helper_rxint(cpu_env, vec);
+ gen_helper_rxint(tcg_env, vec);
ctx->base.is_jmp = DISAS_NORETURN;
return true;
}
@@ -2193,14 +2193,14 @@
{
if (is_privileged(ctx, 1)) {
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
- gen_helper_wait(cpu_env);
+ gen_helper_wait(tcg_env);
}
return true;
}
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
- CPURXState *env = cs->env_ptr;
+ CPURXState *env = cpu_env(cs);
DisasContext *ctx = container_of(dcbase, DisasContext, base);
ctx->env = env;
ctx->tb_flags = ctx->base.tb->flags;
@@ -2225,7 +2225,7 @@
ctx->pc = ctx->base.pc_next;
insn = decode_load(ctx);
if (!decode(ctx, insn)) {
- gen_helper_raise_illegal_instruction(cpu_env);
+ gen_helper_raise_illegal_instruction(tcg_env);
}
}
@@ -2279,7 +2279,7 @@
}
#define ALLOC_REGISTER(sym, name) \
- cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
+ cpu_##sym = tcg_global_mem_new_i32(tcg_env, \
offsetof(CPURXState, sym), name)
void rx_translate_init(void)
@@ -2291,7 +2291,7 @@
int i;
for (i = 0; i < NUM_REGS; i++) {
- cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_regs[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPURXState, regs[i]),
regnames[i]);
}
@@ -2311,6 +2311,6 @@
ALLOC_REGISTER(isp, "ISP");
ALLOC_REGISTER(fintv, "FINTV");
ALLOC_REGISTER(intb, "INTB");
- cpu_acc = tcg_global_mem_new_i64(cpu_env,
+ cpu_acc = tcg_global_mem_new_i64(tcg_env,
offsetof(CPURXState, acc), "ACC");
}
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index df16749..4f7599d 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -274,9 +274,7 @@
static void s390_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
- S390CPU *cpu = S390_CPU(obj);
- cpu_set_cpustate_pointers(cpu);
cs->exception_index = EXCP_HLT;
#if !defined(CONFIG_USER_ONLY)
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 304029e..7bea707 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -170,7 +170,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUS390XState env;
S390CPUModel *model;
/* needed for live migration */
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index dc7041e..4bae150 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -199,28 +199,28 @@
{
int i;
- psw_addr = tcg_global_mem_new_i64(cpu_env,
+ psw_addr = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUS390XState, psw.addr),
"psw_addr");
- psw_mask = tcg_global_mem_new_i64(cpu_env,
+ psw_mask = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUS390XState, psw.mask),
"psw_mask");
- gbea = tcg_global_mem_new_i64(cpu_env,
+ gbea = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUS390XState, gbea),
"gbea");
- cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
+ cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
"cc_op");
- cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
+ cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
"cc_src");
- cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
+ cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
"cc_dst");
- cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
+ cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
"cc_vr");
for (i = 0; i < 16; i++) {
snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
- regs[i] = tcg_global_mem_new(cpu_env,
+ regs[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUS390XState, regs[i]),
cpu_reg_names[i]);
}
@@ -290,7 +290,7 @@
{
TCGv_i64 r = tcg_temp_new_i64();
- tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
+ tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
return r;
}
@@ -298,7 +298,7 @@
{
TCGv_i64 r = tcg_temp_new_i64();
- tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
+ tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
return r;
}
@@ -319,7 +319,7 @@
static void store_freg(int reg, TCGv_i64 v)
{
- tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
+ tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}
static void store_reg32_i64(int reg, TCGv_i64 v)
@@ -335,7 +335,7 @@
static void store_freg32_i64(int reg, TCGv_i64 v)
{
- tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
+ tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}
static void update_psw_addr(DisasContext *s)
@@ -351,7 +351,7 @@
if (s->base.tb->flags & FLAG_MASK_PER) {
TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
- gen_helper_per_branch(cpu_env, gbea, next_pc);
+ gen_helper_per_branch(tcg_env, gbea, next_pc);
}
#endif
}
@@ -365,7 +365,7 @@
tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
tcg_gen_movi_i64(gbea, s->base.pc_next);
- gen_helper_per_branch(cpu_env, gbea, psw_addr);
+ gen_helper_per_branch(tcg_env, gbea, psw_addr);
gen_set_label(lab);
} else {
@@ -424,16 +424,16 @@
static void gen_exception(int excp)
{
- gen_helper_exception(cpu_env, tcg_constant_i32(excp));
+ gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
static void gen_program_exception(DisasContext *s, int code)
{
/* Remember what pgm exception this was. */
- tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
offsetof(CPUS390XState, int_pgm_code));
- tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
+ tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
offsetof(CPUS390XState, int_pgm_ilen));
/* update the psw */
@@ -453,7 +453,7 @@
static inline void gen_data_exception(uint8_t dxc)
{
- gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
+ gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}
static inline void gen_trap(DisasContext *s)
@@ -620,7 +620,7 @@
case CC_OP_LCBB:
case CC_OP_MULS_32:
/* 1 argument */
- gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
+ gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
break;
case CC_OP_ADDU:
case CC_OP_ICM:
@@ -636,18 +636,18 @@
case CC_OP_VC:
case CC_OP_MULS_64:
/* 2 arguments */
- gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
+ gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
break;
case CC_OP_ADD_64:
case CC_OP_SUB_64:
case CC_OP_ADD_32:
case CC_OP_SUB_32:
/* 3 arguments */
- gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
+ gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
break;
case CC_OP_DYNAMIC:
/* unknown operation - assume 3 arguments and cc_op in env */
- gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
+ gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
break;
default:
g_assert_not_reached();
@@ -1398,19 +1398,19 @@
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
- gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
- gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
- gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
+ gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
return DISAS_NEXT;
}
@@ -1546,7 +1546,7 @@
if (have_field(s, ri)) { \
if (unlikely(s->ex_value)) { \
cdest = tcg_temp_new_i64(); \
- tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
+ tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2); \
is_imm = false; \
} else { \
@@ -1734,21 +1734,21 @@
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
- gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
+ gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
- gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
+ gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
- gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
+ gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1785,7 +1785,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
+ gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1797,7 +1797,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
+ gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1809,7 +1809,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
+ gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1821,7 +1821,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
+ gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1833,7 +1833,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
+ gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1845,7 +1845,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
+ gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1857,7 +1857,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
+ gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1869,7 +1869,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
+ gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1881,7 +1881,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
+ gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1893,7 +1893,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
+ gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1905,7 +1905,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
+ gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1917,7 +1917,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
+ gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1929,7 +1929,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cegb(o->out, cpu_env, o->in2, m34);
+ gen_helper_cegb(o->out, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -1940,7 +1940,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
+ gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -1951,7 +1951,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
+ gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -1962,7 +1962,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_celgb(o->out, cpu_env, o->in2, m34);
+ gen_helper_celgb(o->out, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -1973,7 +1973,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
+ gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -1984,7 +1984,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
+ gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -1994,7 +1994,7 @@
TCGv_i128 pair = tcg_temp_new_i128();
TCGv_i64 len = tcg_temp_new_i64();
- gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
+ gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
set_cc_static(s);
tcg_gen_extr_i128_i64(o->out, len, pair);
@@ -2022,7 +2022,7 @@
return DISAS_NEXT;
default:
vl = tcg_constant_i32(l);
- gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
+ gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2042,7 +2042,7 @@
t1 = tcg_constant_i32(r1);
t2 = tcg_constant_i32(r2);
- gen_helper_clcl(cc_op, cpu_env, t1, t2);
+ gen_helper_clcl(cc_op, tcg_env, t1, t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2061,7 +2061,7 @@
t1 = tcg_constant_i32(r1);
t3 = tcg_constant_i32(r3);
- gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
+ gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2080,7 +2080,7 @@
t1 = tcg_constant_i32(r1);
t3 = tcg_constant_i32(r3);
- gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
+ gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2091,7 +2091,7 @@
TCGv_i32 t1 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t1, o->in1);
- gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
+ gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2100,7 +2100,7 @@
{
TCGv_i128 pair = tcg_temp_new_i128();
- gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
+ gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
set_cc_static(s);
@@ -2169,9 +2169,9 @@
TCGv_i32 t_r3 = tcg_constant_i32(r3);
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
- gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
+ gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
} else {
- gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
+ gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
}
set_cc_static(s);
@@ -2213,7 +2213,7 @@
tcg_gen_and_i64(cc, cc, o->in2);
tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
- gen_helper_purge(cpu_env);
+ gen_helper_purge(tcg_env);
gen_set_label(lab);
return DISAS_NEXT;
@@ -2271,22 +2271,22 @@
switch (s->insn->data) {
case 12:
- gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
+ gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
break;
case 14:
- gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
+ gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
break;
case 21:
- gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
+ gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
break;
case 24:
- gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
+ gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
break;
case 41:
- gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
+ gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
break;
case 42:
- gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
+ gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
break;
default:
g_assert_not_reached();
@@ -2303,21 +2303,21 @@
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
- gen_helper_diag(cpu_env, r1, r3, func_code);
+ gen_helper_diag(tcg_env, r1, r3, func_code);
return DISAS_NEXT;
}
#endif
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
- gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
tcg_gen_extr32_i64(o->out2, o->out, o->out);
return DISAS_NEXT;
}
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
- gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
tcg_gen_extr32_i64(o->out2, o->out, o->out);
return DISAS_NEXT;
}
@@ -2326,7 +2326,7 @@
{
TCGv_i128 t = tcg_temp_new_i128();
- gen_helper_divs64(t, cpu_env, o->in1, o->in2);
+ gen_helper_divs64(t, tcg_env, o->in1, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
return DISAS_NEXT;
}
@@ -2335,33 +2335,33 @@
{
TCGv_i128 t = tcg_temp_new_i128();
- gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
+ gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
return DISAS_NEXT;
}
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
- gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
- gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
- gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
+ gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
return DISAS_NEXT;
}
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
int r2 = get_field(s, r2);
- tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
+ tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
return DISAS_NEXT;
}
@@ -2374,7 +2374,7 @@
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
- tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
+ tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
return DISAS_NEXT;
}
@@ -2420,7 +2420,7 @@
}
ilen = tcg_constant_i32(s->ilen);
- gen_helper_ex(cpu_env, ilen, v1, o->in2);
+ gen_helper_ex(tcg_env, ilen, v1, o->in2);
return DISAS_PC_CC_UPDATED;
}
@@ -2432,7 +2432,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_fieb(o->out, cpu_env, o->in2, m34);
+ gen_helper_fieb(o->out, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -2443,7 +2443,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_fidb(o->out, cpu_env, o->in2, m34);
+ gen_helper_fidb(o->out, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -2454,7 +2454,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
+ gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
return DISAS_NEXT;
}
@@ -2575,7 +2575,7 @@
} else {
m4 = tcg_constant_i32(0);
}
- gen_helper_idte(cpu_env, o->in1, o->in2, m4);
+ gen_helper_idte(tcg_env, o->in1, o->in2, m4);
return DISAS_NEXT;
}
@@ -2588,13 +2588,13 @@
} else {
m4 = tcg_constant_i32(0);
}
- gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
+ gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
return DISAS_NEXT;
}
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
- gen_helper_iske(o->out, cpu_env, o->in2);
+ gen_helper_iske(o->out, tcg_env, o->in2);
return DISAS_NEXT;
}
#endif
@@ -2648,28 +2648,28 @@
t_r2 = tcg_constant_i32(r2);
t_r3 = tcg_constant_i32(r3);
type = tcg_constant_i32(s->insn->data);
- gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
+ gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
- gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
+ gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
- gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
+ gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
- gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
+ gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2720,7 +2720,7 @@
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
- gen_helper_ldeb(o->out, cpu_env, o->in2);
+ gen_helper_ldeb(o->out, tcg_env, o->in2);
return DISAS_NEXT;
}
@@ -2731,7 +2731,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_ledb(o->out, cpu_env, o->in2, m34);
+ gen_helper_ledb(o->out, tcg_env, o->in2, m34);
return DISAS_NEXT;
}
@@ -2742,7 +2742,7 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
+ gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
return DISAS_NEXT;
}
@@ -2753,19 +2753,19 @@
if (!m34) {
return DISAS_NORETURN;
}
- gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
+ gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
return DISAS_NEXT;
}
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
- gen_helper_lxdb(o->out_128, cpu_env, o->in2);
+ gen_helper_lxdb(o->out_128, tcg_env, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
- gen_helper_lxeb(o->out_128, cpu_env, o->in2);
+ gen_helper_lxeb(o->out_128, tcg_env, o->in2);
return DISAS_NEXT;
}
@@ -2919,7 +2919,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
- gen_helper_lctl(cpu_env, r1, o->in2, r3);
+ gen_helper_lctl(tcg_env, r1, o->in2, r3);
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
@@ -2930,7 +2930,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
- gen_helper_lctlg(cpu_env, r1, o->in2, r3);
+ gen_helper_lctlg(tcg_env, r1, o->in2, r3);
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
@@ -2938,14 +2938,14 @@
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
- gen_helper_lra(o->out, cpu_env, o->out, o->in2);
+ gen_helper_lra(o->out, tcg_env, o->out, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
- tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
+ tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
return DISAS_NEXT;
}
@@ -2965,7 +2965,7 @@
tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
- gen_helper_load_psw(cpu_env, mask, addr);
+ gen_helper_load_psw(tcg_env, mask, addr);
return DISAS_NORETURN;
}
@@ -2981,7 +2981,7 @@
MO_TEUQ | MO_ALIGN_8);
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
- gen_helper_load_psw(cpu_env, t1, t2);
+ gen_helper_load_psw(tcg_env, t1, t2);
return DISAS_NORETURN;
}
#endif
@@ -2991,7 +2991,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
- gen_helper_lam(cpu_env, r1, o->in2, r3);
+ gen_helper_lam(tcg_env, r1, o->in2, r3);
return DISAS_NEXT;
}
@@ -3185,7 +3185,7 @@
}
#if !defined(CONFIG_USER_ONLY)
- gen_helper_monitor_call(cpu_env, o->addr1,
+ gen_helper_monitor_call(tcg_env, o->addr1,
tcg_constant_i32(monitor_class));
#endif
/* Defaults to a NOP. */
@@ -3216,7 +3216,7 @@
break;
case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
if (b2) {
- tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
+ tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
} else {
tcg_gen_movi_i64(ar1, 0);
}
@@ -3226,7 +3226,7 @@
break;
}
- tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
+ tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[1]));
return DISAS_NEXT;
}
@@ -3243,13 +3243,13 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
+ gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
- gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
+ gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -3257,7 +3257,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
+ gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -3275,7 +3275,7 @@
t1 = tcg_constant_i32(r1);
t2 = tcg_constant_i32(r2);
- gen_helper_mvcl(cc_op, cpu_env, t1, t2);
+ gen_helper_mvcl(cc_op, tcg_env, t1, t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3294,7 +3294,7 @@
t1 = tcg_constant_i32(r1);
t3 = tcg_constant_i32(r3);
- gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
+ gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3313,7 +3313,7 @@
t1 = tcg_constant_i32(r1);
t3 = tcg_constant_i32(r3);
- gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
+ gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3321,7 +3321,7 @@
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
int r3 = get_field(s, r3);
- gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
+ gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3331,7 +3331,7 @@
{
int r1 = get_field(s, l1);
int r3 = get_field(s, r3);
- gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
+ gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3340,7 +3340,7 @@
{
int r1 = get_field(s, l1);
int r3 = get_field(s, r3);
- gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
+ gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3350,7 +3350,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
+ gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -3358,7 +3358,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
+ gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -3367,7 +3367,7 @@
TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
+ gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3377,7 +3377,7 @@
TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_mvst(cc_op, cpu_env, t1, t2);
+ gen_helper_mvst(cc_op, tcg_env, t1, t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3386,7 +3386,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
+ gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -3410,59 +3410,59 @@
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
- gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
- gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
- gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
- gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
+ gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
return DISAS_NEXT;
}
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
- gen_helper_mxdb(o->out_128, cpu_env, o->in1, o->in2);
+ gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
- gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
+ gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
return DISAS_NEXT;
}
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
- gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
+ gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
return DISAS_NEXT;
}
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
- gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
+ gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
return DISAS_NEXT;
}
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
- gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
+ gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
return DISAS_NEXT;
}
@@ -3499,7 +3499,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
+ gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3533,7 +3533,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
+ gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3585,7 +3585,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_pack(cpu_env, l, o->addr1, o->in2);
+ gen_helper_pack(tcg_env, l, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -3600,7 +3600,7 @@
return DISAS_NORETURN;
}
l = tcg_constant_i32(l2);
- gen_helper_pka(cpu_env, o->addr1, o->in2, l);
+ gen_helper_pka(tcg_env, o->addr1, o->in2, l);
return DISAS_NEXT;
}
@@ -3615,7 +3615,7 @@
return DISAS_NORETURN;
}
l = tcg_constant_i32(l2);
- gen_helper_pku(cpu_env, o->addr1, o->in2, l);
+ gen_helper_pku(tcg_env, o->addr1, o->in2, l);
return DISAS_NEXT;
}
@@ -3634,7 +3634,7 @@
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
- gen_helper_ptlb(cpu_env);
+ gen_helper_ptlb(tcg_env);
return DISAS_NEXT;
}
#endif
@@ -3822,14 +3822,14 @@
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
- gen_helper_rrbe(cc_op, cpu_env, o->in2);
+ gen_helper_rrbe(cc_op, tcg_env, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
- gen_helper_sacf(cpu_env, o->in2);
+ gen_helper_sacf(tcg_env, o->in2);
/* Addressing mode has changed, so end the block. */
return DISAS_TOO_MANY;
}
@@ -3872,50 +3872,50 @@
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s, r1);
- tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
+ tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
return DISAS_NEXT;
}
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
- gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
- gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
+ gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
- gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
+ gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
return DISAS_NEXT;
}
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
- gen_helper_sqeb(o->out, cpu_env, o->in2);
+ gen_helper_sqeb(o->out, tcg_env, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
- gen_helper_sqdb(o->out, cpu_env, o->in2);
+ gen_helper_sqdb(o->out, tcg_env, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
- gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
+ gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
return DISAS_NEXT;
}
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
- gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
+ gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3925,7 +3925,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
- gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
+ gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4013,13 +4013,13 @@
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
- gen_helper_sfpc(cpu_env, o->in2);
+ gen_helper_sfpc(tcg_env, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
- gen_helper_sfas(cpu_env, o->in2);
+ gen_helper_sfas(tcg_env, o->in2);
return DISAS_NEXT;
}
@@ -4027,7 +4027,7 @@
{
/* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
- gen_helper_srnm(cpu_env, o->addr1);
+ gen_helper_srnm(tcg_env, o->addr1);
return DISAS_NEXT;
}
@@ -4035,7 +4035,7 @@
{
/* Bits 0-55 are ignored. */
tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
- gen_helper_srnm(cpu_env, o->addr1);
+ gen_helper_srnm(tcg_env, o->addr1);
return DISAS_NEXT;
}
@@ -4047,9 +4047,9 @@
tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
/* No need to call a helper, we don't implement dfp */
- tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
+ tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
- tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
+ tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
return DISAS_NEXT;
}
@@ -4085,7 +4085,7 @@
tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
/* subtract CPU timer from first operand and store in GR0 */
- gen_helper_stpt(tmp, cpu_env);
+ gen_helper_stpt(tmp, tcg_env);
tcg_gen_sub_i64(regs[0], o->in1, tmp);
/* store second operand in GR1 */
@@ -4103,7 +4103,7 @@
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
- gen_helper_sske(cpu_env, o->in1, o->in2);
+ gen_helper_sske(tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
@@ -4131,14 +4131,14 @@
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
- tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
+ tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
return DISAS_NEXT;
}
#endif
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
- gen_helper_stck(o->out, cpu_env);
+ gen_helper_stck(o->out, tcg_env);
/* ??? We don't implement clock states. */
gen_op_movi_cc(s, 0);
return DISAS_NEXT;
@@ -4149,9 +4149,9 @@
TCGv_i64 c1 = tcg_temp_new_i64();
TCGv_i64 c2 = tcg_temp_new_i64();
TCGv_i64 todpr = tcg_temp_new_i64();
- gen_helper_stck(c1, cpu_env);
+ gen_helper_stck(c1, tcg_env);
/* 16 bit value stored in a uint32_t (only valid bits set) */
- tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
+ tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
/* Shift the 64-bit value into its place as a zero-extended
104-bit value. Note that "bit positions 64-103 are always
non-zero so that they compare differently to STCK"; we set
@@ -4171,26 +4171,26 @@
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
- gen_helper_sck(cc_op, cpu_env, o->in2);
+ gen_helper_sck(cc_op, tcg_env, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
- gen_helper_sckc(cpu_env, o->in2);
+ gen_helper_sckc(tcg_env, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
- gen_helper_sckpf(cpu_env, regs[0]);
+ gen_helper_sckpf(tcg_env, regs[0]);
return DISAS_NEXT;
}
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
- gen_helper_stckc(o->out, cpu_env);
+ gen_helper_stckc(o->out, tcg_env);
return DISAS_NEXT;
}
@@ -4199,7 +4199,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
- gen_helper_stctg(cpu_env, r1, o->in2, r3);
+ gen_helper_stctg(tcg_env, r1, o->in2, r3);
return DISAS_NEXT;
}
@@ -4208,98 +4208,98 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
- gen_helper_stctl(cpu_env, r1, o->in2, r3);
+ gen_helper_stctl(tcg_env, r1, o->in2, r3);
return DISAS_NEXT;
}
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
- tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
+ tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
return DISAS_NEXT;
}
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
- gen_helper_spt(cpu_env, o->in2);
+ gen_helper_spt(tcg_env, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
- gen_helper_stfl(cpu_env);
+ gen_helper_stfl(tcg_env);
return DISAS_NEXT;
}
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
- gen_helper_stpt(o->out, cpu_env);
+ gen_helper_stpt(o->out, tcg_env);
return DISAS_NEXT;
}
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
- gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
+ gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
- gen_helper_spx(cpu_env, o->in2);
+ gen_helper_spx(tcg_env, o->in2);
return DISAS_NEXT;
}
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
- gen_helper_xsch(cpu_env, regs[1]);
+ gen_helper_xsch(tcg_env, regs[1]);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
- gen_helper_csch(cpu_env, regs[1]);
+ gen_helper_csch(tcg_env, regs[1]);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
- gen_helper_hsch(cpu_env, regs[1]);
+ gen_helper_hsch(tcg_env, regs[1]);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
- gen_helper_msch(cpu_env, regs[1], o->in2);
+ gen_helper_msch(tcg_env, regs[1], o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
- gen_helper_rchp(cpu_env, regs[1]);
+ gen_helper_rchp(tcg_env, regs[1]);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
- gen_helper_rsch(cpu_env, regs[1]);
+ gen_helper_rsch(tcg_env, regs[1]);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
- gen_helper_sal(cpu_env, regs[1]);
+ gen_helper_sal(tcg_env, regs[1]);
return DISAS_NEXT;
}
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
- gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
+ gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
return DISAS_NEXT;
}
@@ -4318,49 +4318,49 @@
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
- gen_helper_ssch(cpu_env, regs[1], o->in2);
+ gen_helper_ssch(tcg_env, regs[1], o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
- gen_helper_stsch(cpu_env, regs[1], o->in2);
+ gen_helper_stsch(tcg_env, regs[1], o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
- gen_helper_stcrw(cpu_env, o->in2);
+ gen_helper_stcrw(tcg_env, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
- gen_helper_tpi(cc_op, cpu_env, o->addr1);
+ gen_helper_tpi(cc_op, tcg_env, o->addr1);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
- gen_helper_tsch(cpu_env, regs[1], o->in2);
+ gen_helper_tsch(tcg_env, regs[1], o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
- gen_helper_chsc(cpu_env, o->in2);
+ gen_helper_chsc(tcg_env, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
- tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
+ tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
return DISAS_NEXT;
}
@@ -4397,7 +4397,7 @@
if (s->base.tb->flags & FLAG_MASK_PER) {
update_psw_addr(s);
- gen_helper_per_store_real(cpu_env);
+ gen_helper_per_store_real(tcg_env);
}
return DISAS_NEXT;
}
@@ -4405,7 +4405,7 @@
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
- gen_helper_stfle(cc_op, cpu_env, o->in2);
+ gen_helper_stfle(cc_op, tcg_env, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4441,7 +4441,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
- gen_helper_stam(cpu_env, r1, o->in2, r3);
+ gen_helper_stam(tcg_env, r1, o->in2, r3);
return DISAS_NEXT;
}
@@ -4548,7 +4548,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_srst(cpu_env, r1, r2);
+ gen_helper_srst(tcg_env, r1, r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4558,7 +4558,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_srstu(cpu_env, r1, r2);
+ gen_helper_srstu(tcg_env, r1, r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4631,10 +4631,10 @@
update_cc_op(s);
t = tcg_constant_i32(get_field(s, i1) & 0xff);
- tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
+ tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));
t = tcg_constant_i32(s->ilen);
- tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
+ tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));
gen_exception(EXCP_SVC);
return DISAS_NORETURN;
@@ -4652,21 +4652,21 @@
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
- gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
+ gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
- gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
+ gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
- gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
+ gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4675,14 +4675,14 @@
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
- gen_helper_testblock(cc_op, cpu_env, o->in2);
+ gen_helper_testblock(cc_op, tcg_env, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
- gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
+ gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4693,7 +4693,7 @@
{
TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
- gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
+ gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4702,7 +4702,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_tr(cpu_env, l, o->addr1, o->in2);
+ gen_helper_tr(tcg_env, l, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4711,7 +4711,7 @@
{
TCGv_i128 pair = tcg_temp_new_i128();
- gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
+ gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, pair);
set_cc_static(s);
return DISAS_NEXT;
@@ -4721,7 +4721,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
+ gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4730,7 +4730,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
+ gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4756,7 +4756,7 @@
tcg_gen_ext16u_i32(tst, tst);
}
}
- gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
+ gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);
set_cc_static(s);
return DISAS_NEXT;
@@ -4776,7 +4776,7 @@
{
TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
- gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
+ gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -4791,7 +4791,7 @@
return DISAS_NORETURN;
}
l = tcg_constant_i32(l1);
- gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
+ gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4807,7 +4807,7 @@
return DISAS_NORETURN;
}
l = tcg_constant_i32(l1);
- gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
+ gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4860,7 +4860,7 @@
/* But in general we'll defer to a helper. */
o->in2 = get_address(s, 0, b2, d2);
t32 = tcg_constant_i32(l);
- gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
+ gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4926,7 +4926,7 @@
{
TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_clp(cpu_env, r2);
+ gen_helper_clp(tcg_env, r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4936,7 +4936,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_pcilg(cpu_env, r1, r2);
+ gen_helper_pcilg(tcg_env, r1, r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4946,7 +4946,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_pcistg(cpu_env, r1, r2);
+ gen_helper_pcistg(tcg_env, r1, r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4956,14 +4956,14 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
- gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
+ gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
- gen_helper_sic(cpu_env, o->in1, o->in2);
+ gen_helper_sic(tcg_env, o->in1, o->in2);
return DISAS_NEXT;
}
@@ -4972,7 +4972,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
- gen_helper_rpcit(cpu_env, r1, r2);
+ gen_helper_rpcit(tcg_env, r1, r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4983,7 +4983,7 @@
TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
- gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
+ gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4993,7 +4993,7 @@
TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
- gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
+ gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -6176,7 +6176,7 @@
if (unlikely(s->ex_value)) {
/* Drop the EX data now, so that it's clear on exception paths. */
- tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
+ tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
offsetof(CPUS390XState, ex_value));
/* Extract the values saved by EXECUTE. */
@@ -6310,7 +6310,7 @@
#ifndef CONFIG_USER_ONLY
if (s->base.tb->flags & FLAG_MASK_PER) {
TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
- gen_helper_per_ifetch(cpu_env, addr);
+ gen_helper_per_ifetch(tcg_env, addr);
}
#endif
@@ -6415,7 +6415,7 @@
}
/* Call the helper to check for a possible PER exception. */
- gen_helper_per_check_exception(cpu_env);
+ gen_helper_per_check_exception(tcg_env);
}
#endif
@@ -6463,7 +6463,7 @@
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
- CPUS390XState *env = cs->env_ptr;
+ CPUS390XState *env = cpu_env(cs);
DisasContext *dc = container_of(dcbase, DisasContext, base);
dc->base.is_jmp = translate_one(env, dc);
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index ec94d39..e073e5a 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -36,7 +36,7 @@
*
* CC handling:
* As gvec ool-helpers can currently not return values (besides via
- * pointers like vectors or cpu_env), whenever we have to set the CC and
+ * pointers like vectors or tcg_env), whenever we have to set the CC and
* can't conclude the value from the result vector, we will directly
* set it in "env->cc_op" and mark it as static via set_cc_static().
* Whenever this is done, the helper writes globals (cc_op).
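[Editor's note] The pattern this comment describes shows up repeatedly below (op_vtm, the vpks/vpkls cases, and others); for readers skimming the diff, here is a minimal sketch of it. op_example and gen_helper_gvec_example are placeholder names for illustration, not functions from this series.

static DisasJumpType op_example(DisasContext *s, DisasOps *o)
{
    /* the ool helper receives tcg_env so it can record CC in env->cc_op */
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                   tcg_env, 0, gen_helper_gvec_example);
    set_cc_static(s);   /* tell the translator CC now lives in env->cc_op */
    return DISAS_NEXT;
}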
@@ -69,26 +69,26 @@
switch ((unsigned)memop) {
case ES_8:
- tcg_gen_ld8u_i64(dst, cpu_env, offs);
+ tcg_gen_ld8u_i64(dst, tcg_env, offs);
break;
case ES_16:
- tcg_gen_ld16u_i64(dst, cpu_env, offs);
+ tcg_gen_ld16u_i64(dst, tcg_env, offs);
break;
case ES_32:
- tcg_gen_ld32u_i64(dst, cpu_env, offs);
+ tcg_gen_ld32u_i64(dst, tcg_env, offs);
break;
case ES_8 | MO_SIGN:
- tcg_gen_ld8s_i64(dst, cpu_env, offs);
+ tcg_gen_ld8s_i64(dst, tcg_env, offs);
break;
case ES_16 | MO_SIGN:
- tcg_gen_ld16s_i64(dst, cpu_env, offs);
+ tcg_gen_ld16s_i64(dst, tcg_env, offs);
break;
case ES_32 | MO_SIGN:
- tcg_gen_ld32s_i64(dst, cpu_env, offs);
+ tcg_gen_ld32s_i64(dst, tcg_env, offs);
break;
case ES_64:
case ES_64 | MO_SIGN:
- tcg_gen_ld_i64(dst, cpu_env, offs);
+ tcg_gen_ld_i64(dst, tcg_env, offs);
break;
default:
g_assert_not_reached();
@@ -102,20 +102,20 @@
switch (memop) {
case ES_8:
- tcg_gen_ld8u_i32(dst, cpu_env, offs);
+ tcg_gen_ld8u_i32(dst, tcg_env, offs);
break;
case ES_16:
- tcg_gen_ld16u_i32(dst, cpu_env, offs);
+ tcg_gen_ld16u_i32(dst, tcg_env, offs);
break;
case ES_8 | MO_SIGN:
- tcg_gen_ld8s_i32(dst, cpu_env, offs);
+ tcg_gen_ld8s_i32(dst, tcg_env, offs);
break;
case ES_16 | MO_SIGN:
- tcg_gen_ld16s_i32(dst, cpu_env, offs);
+ tcg_gen_ld16s_i32(dst, tcg_env, offs);
break;
case ES_32:
case ES_32 | MO_SIGN:
- tcg_gen_ld_i32(dst, cpu_env, offs);
+ tcg_gen_ld_i32(dst, tcg_env, offs);
break;
default:
g_assert_not_reached();
@@ -129,16 +129,16 @@
switch (memop) {
case ES_8:
- tcg_gen_st8_i64(src, cpu_env, offs);
+ tcg_gen_st8_i64(src, tcg_env, offs);
break;
case ES_16:
- tcg_gen_st16_i64(src, cpu_env, offs);
+ tcg_gen_st16_i64(src, tcg_env, offs);
break;
case ES_32:
- tcg_gen_st32_i64(src, cpu_env, offs);
+ tcg_gen_st32_i64(src, tcg_env, offs);
break;
case ES_64:
- tcg_gen_st_i64(src, cpu_env, offs);
+ tcg_gen_st_i64(src, tcg_env, offs);
break;
default:
g_assert_not_reached();
@@ -152,13 +152,13 @@
switch (memop) {
case ES_8:
- tcg_gen_st8_i32(src, cpu_env, offs);
+ tcg_gen_st8_i32(src, tcg_env, offs);
break;
case ES_16:
- tcg_gen_st16_i32(src, cpu_env, offs);
+ tcg_gen_st16_i32(src, tcg_env, offs);
break;
case ES_32:
- tcg_gen_st_i32(src, cpu_env, offs);
+ tcg_gen_st_i32(src, tcg_env, offs);
break;
default:
g_assert_not_reached();
@@ -173,16 +173,16 @@
/* mask off invalid parts from the element nr */
tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);
- /* convert it to an element offset relative to cpu_env (vec_reg_offset() */
+ /* convert it to an element offset relative to tcg_env (vec_reg_offset() */
tcg_gen_shli_i64(tmp, tmp, es);
#if !HOST_BIG_ENDIAN
tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
#endif
tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));
- /* generate the final ptr by adding cpu_env */
+ /* generate the final ptr by adding tcg_env */
tcg_gen_trunc_i64_ptr(ptr, tmp);
- tcg_gen_add_ptr(ptr, ptr, cpu_env);
+ tcg_gen_add_ptr(ptr, ptr, tcg_env);
}
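[Editor's note] A quick worked example of the arithmetic above may help; the numbers are purely illustrative and v stands for any vector register number. At run time tcg_env holds the CPUS390XState pointer, so the generated ops compute:

/* element 1, ES_32 (4-byte elements), little-endian host:
 *   (1 & 3) << 2                = 4     byte offset in big-endian element order
 *   4 ^ (8 - 4)                 = 0     flipped within the host doubleword
 *   0 + vec_full_reg_offset(v)          offset of the element inside the env
 *   env + that offset                   host pointer handed to the helper
 */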
#define gen_gvec_2(v1, v2, gen) \
@@ -754,8 +754,8 @@
tcg_gen_ori_i64(bytes, o->addr1, -block_size);
tcg_gen_neg_i64(bytes, bytes);
- tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
- gen_helper_vll(cpu_env, a0, o->addr1, bytes);
+ tcg_gen_addi_ptr(a0, tcg_env, v1_offs);
+ gen_helper_vll(tcg_env, a0, o->addr1, bytes);
return DISAS_NEXT;
}
@@ -812,8 +812,8 @@
/* convert highest index into an actual length */
tcg_gen_addi_i64(o->in2, o->in2, 1);
- tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
- gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
+ tcg_gen_addi_ptr(a0, tcg_env, v1_offs);
+ gen_helper_vll(tcg_env, a0, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -898,7 +898,7 @@
switch (s->fields.op2) {
case 0x97:
if (get_field(s, m5) & 0x1) {
- gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
+ gen_gvec_3_ptr(v1, v2, v3, tcg_env, 0, vpks_cc[es - 1]);
set_cc_static(s);
} else {
gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
@@ -906,7 +906,7 @@
break;
case 0x95:
if (get_field(s, m5) & 0x1) {
- gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
+ gen_gvec_3_ptr(v1, v2, v3, tcg_env, 0, vpkls_cc[es - 1]);
set_cc_static(s);
} else {
gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
@@ -1058,7 +1058,7 @@
TCGv_i64 tmp;
/* Probe write access before actually modifying memory */
- gen_helper_probe_write_access(cpu_env, o->addr1,
+ gen_helper_probe_write_access(tcg_env, o->addr1,
tcg_constant_i64(16));
tmp = tcg_temp_new_i64();
@@ -1098,7 +1098,7 @@
}
/* Probe write access before actually modifying memory */
- gen_helper_probe_write_access(cpu_env, o->addr1, tcg_constant_i64(16));
+ gen_helper_probe_write_access(tcg_env, o->addr1, tcg_constant_i64(16));
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
@@ -1169,7 +1169,7 @@
}
/* Probe write access before actually modifying memory */
- gen_helper_probe_write_access(cpu_env, o->addr1, tcg_constant_i64(16));
+ gen_helper_probe_write_access(tcg_env, o->addr1, tcg_constant_i64(16));
/* Begin with the two doublewords swapped... */
t0 = tcg_temp_new_i64();
@@ -1211,7 +1211,7 @@
}
/* Probe write access before actually modifying memory */
- gen_helper_probe_write_access(cpu_env, o->addr1,
+ gen_helper_probe_write_access(tcg_env, o->addr1,
tcg_constant_i64((v3 - v1 + 1) * 16));
tmp = tcg_temp_new_i64();
@@ -1236,8 +1236,8 @@
/* convert highest index into an actual length */
tcg_gen_addi_i64(o->in2, o->in2, 1);
- tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
- gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
+ tcg_gen_addi_ptr(a0, tcg_env, v1_offs);
+ gen_helper_vstl(tcg_env, a0, o->addr1, o->in2);
return DISAS_NEXT;
}
@@ -2479,7 +2479,7 @@
static DisasJumpType op_vtm(DisasContext *s, DisasOps *o)
{
gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
- cpu_env, 0, gen_helper_gvec_vtm);
+ tcg_env, 0, gen_helper_gvec_vtm);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2505,7 +2505,7 @@
if (extract32(m5, 0, 1)) {
gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
- get_field(s, v3), cpu_env, m5, g_cc[es]);
+ get_field(s, v3), tcg_env, m5, g_cc[es]);
set_cc_static(s);
} else {
gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
@@ -2536,7 +2536,7 @@
if (extract32(m5, 0, 1)) {
gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
- get_field(s, v3), cpu_env, m5, g_cc[es]);
+ get_field(s, v3), tcg_env, m5, g_cc[es]);
set_cc_static(s);
} else {
gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
@@ -2567,7 +2567,7 @@
if (extract32(m5, 0, 1)) {
gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
- get_field(s, v3), cpu_env, m5, g_cc[es]);
+ get_field(s, v3), tcg_env, m5, g_cc[es]);
set_cc_static(s);
} else {
gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
@@ -2598,7 +2598,7 @@
if (extract32(m5, 0, 1)) {
gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
- cpu_env, 0, g_cc[es]);
+ tcg_env, 0, g_cc[es]);
set_cc_static(s);
} else {
gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0,
@@ -2641,11 +2641,11 @@
if (extract32(m6, 2, 1)) {
gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
get_field(s, v3), get_field(s, v4),
- cpu_env, m6, g_cc_rt[es]);
+ tcg_env, m6, g_cc_rt[es]);
} else {
gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
get_field(s, v3), get_field(s, v4),
- cpu_env, m6, g_cc[es]);
+ tcg_env, m6, g_cc[es]);
}
set_cc_static(s);
} else {
@@ -2682,7 +2682,7 @@
gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
get_field(s, v3), get_field(s, v4),
- cpu_env, 0, fns[es][zs]);
+ tcg_env, 0, fns[es][zs]);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2780,7 +2780,7 @@
}
gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
- get_field(s, v3), cpu_env, m5, fn);
+ get_field(s, v3), tcg_env, m5, fn);
return DISAS_NEXT;
}
@@ -2822,7 +2822,7 @@
return DISAS_NORETURN;
}
- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn);
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), tcg_env, 0, fn);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2893,7 +2893,7 @@
}
gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3),
- cpu_env, m5, fn);
+ tcg_env, m5, fn);
if (cs) {
set_cc_static(s);
}
@@ -3007,7 +3007,7 @@
return DISAS_NORETURN;
}
- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), tcg_env,
deposit32(m4, 4, 4, erm), fn);
return DISAS_NEXT;
}
@@ -3036,7 +3036,7 @@
return DISAS_NORETURN;
}
- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn);
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), tcg_env, m4, fn);
return DISAS_NEXT;
}
@@ -3080,7 +3080,7 @@
}
gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3),
- cpu_env, deposit32(m5, 4, 4, m6), fn);
+ tcg_env, deposit32(m5, 4, 4, m6), fn);
return DISAS_NEXT;
}
@@ -3169,7 +3169,7 @@
}
gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
- get_field(s, v3), get_field(s, v4), cpu_env, m5, fn);
+ get_field(s, v3), get_field(s, v4), tcg_env, m5, fn);
return DISAS_NEXT;
}
@@ -3291,7 +3291,7 @@
return DISAS_NORETURN;
}
- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn);
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), tcg_env, m4, fn);
return DISAS_NEXT;
}
@@ -3325,7 +3325,7 @@
return DISAS_NORETURN;
}
- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), tcg_env,
deposit32(m5, 4, 12, i3), fn);
set_cc_static(s);
return DISAS_NEXT;
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 61769ff..788e41f 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -239,8 +239,6 @@
SuperHCPU *cpu = SUPERH_CPU(obj);
CPUSH4State *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
-
env->movcal_backup_tail = &(env->movcal_backup);
}
@@ -315,6 +313,7 @@
.name = TYPE_SUPERH_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(SuperHCPU),
+ .instance_align = __alignof(SuperHCPU),
.instance_init = superh_cpu_initfn,
.abstract = true,
.class_size = sizeof(SuperHCPUClass),
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 1399d38..f75a235 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -208,7 +208,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUSH4State env;
};
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
index a663335..ada41ba 100644
--- a/target/sh4/op_helper.c
+++ b/target/sh4/op_helper.c
@@ -29,7 +29,7 @@
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
- CPUSH4State *env = cs->env_ptr;
+ CPUSH4State *env = cpu_env(cs);
env->tea = addr;
switch (access_type) {
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index c1e590f..cbd8dfc 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -96,63 +96,63 @@
};
for (i = 0; i < 24; i++) {
- cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_gregs[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, gregs[i]),
gregnames[i]);
}
memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
- cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ cpu_pc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, pc), "PC");
- cpu_sr = tcg_global_mem_new_i32(cpu_env,
+ cpu_sr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, sr), "SR");
- cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
+ cpu_sr_m = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, sr_m), "SR_M");
- cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
+ cpu_sr_q = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, sr_q), "SR_Q");
- cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
+ cpu_sr_t = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, sr_t), "SR_T");
- cpu_ssr = tcg_global_mem_new_i32(cpu_env,
+ cpu_ssr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, ssr), "SSR");
- cpu_spc = tcg_global_mem_new_i32(cpu_env,
+ cpu_spc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, spc), "SPC");
- cpu_gbr = tcg_global_mem_new_i32(cpu_env,
+ cpu_gbr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, gbr), "GBR");
- cpu_vbr = tcg_global_mem_new_i32(cpu_env,
+ cpu_vbr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, vbr), "VBR");
- cpu_sgr = tcg_global_mem_new_i32(cpu_env,
+ cpu_sgr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, sgr), "SGR");
- cpu_dbr = tcg_global_mem_new_i32(cpu_env,
+ cpu_dbr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, dbr), "DBR");
- cpu_mach = tcg_global_mem_new_i32(cpu_env,
+ cpu_mach = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, mach), "MACH");
- cpu_macl = tcg_global_mem_new_i32(cpu_env,
+ cpu_macl = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, macl), "MACL");
- cpu_pr = tcg_global_mem_new_i32(cpu_env,
+ cpu_pr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, pr), "PR");
- cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
+ cpu_fpscr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, fpscr), "FPSCR");
- cpu_fpul = tcg_global_mem_new_i32(cpu_env,
+ cpu_fpul = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, fpul), "FPUL");
- cpu_flags = tcg_global_mem_new_i32(cpu_env,
+ cpu_flags = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, flags), "_flags_");
- cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
+ cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, delayed_pc),
"_delayed_pc_");
- cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
+ cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State,
delayed_cond),
"_delayed_cond_");
- cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
+ cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, lock_addr),
"_lock_addr_");
- cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
+ cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, lock_value),
"_lock_value_");
for (i = 0; i < 32; i++)
- cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_fregs[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUSH4State, fregs[i]),
fregnames[i]);
}
@@ -416,7 +416,7 @@
if (opcode != 0x0093 /* ocbi */
&& opcode != 0x00c3 /* movca.l */)
{
- gen_helper_discard_movcal_backup(cpu_env);
+ gen_helper_discard_movcal_backup(tcg_env);
ctx->has_movcal = 0;
}
}
@@ -449,7 +449,7 @@
return;
case 0x0038: /* ldtlb */
CHECK_PRIVILEGED
- gen_helper_ldtlb(cpu_env);
+ gen_helper_ldtlb(tcg_env);
return;
case 0x002b: /* rte */
CHECK_PRIVILEGED
@@ -486,7 +486,7 @@
case 0x001b: /* sleep */
CHECK_PRIVILEGED
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
- gen_helper_sleep(cpu_env);
+ gen_helper_sleep(tcg_env);
return;
}
@@ -807,7 +807,7 @@
arg1 = tcg_temp_new();
tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
- gen_helper_macl(cpu_env, arg0, arg1);
+ gen_helper_macl(tcg_env, arg0, arg1);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
}
@@ -821,7 +821,7 @@
arg1 = tcg_temp_new();
tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
- gen_helper_macw(cpu_env, arg0, arg1);
+ gen_helper_macw(tcg_env, arg0, arg1);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
}
@@ -1069,49 +1069,49 @@
gen_load_fpr64(ctx, fp1, B7_4);
switch (ctx->opcode & 0xf00f) {
case 0xf000: /* fadd Rm,Rn */
- gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
+ gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
break;
case 0xf001: /* fsub Rm,Rn */
- gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
+ gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
break;
case 0xf002: /* fmul Rm,Rn */
- gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
+ gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
break;
case 0xf003: /* fdiv Rm,Rn */
- gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
+ gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
break;
case 0xf004: /* fcmp/eq Rm,Rn */
- gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
+ gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
return;
case 0xf005: /* fcmp/gt Rm,Rn */
- gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
+ gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
return;
}
gen_store_fpr64(ctx, fp0, B11_8);
} else {
switch (ctx->opcode & 0xf00f) {
case 0xf000: /* fadd Rm,Rn */
- gen_helper_fadd_FT(FREG(B11_8), cpu_env,
+ gen_helper_fadd_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
case 0xf001: /* fsub Rm,Rn */
- gen_helper_fsub_FT(FREG(B11_8), cpu_env,
+ gen_helper_fsub_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
case 0xf002: /* fmul Rm,Rn */
- gen_helper_fmul_FT(FREG(B11_8), cpu_env,
+ gen_helper_fmul_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
case 0xf003: /* fdiv Rm,Rn */
- gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
+ gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
FREG(B11_8), FREG(B7_4));
break;
case 0xf004: /* fcmp/eq Rm,Rn */
- gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
+ gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
FREG(B11_8), FREG(B7_4));
return;
case 0xf005: /* fcmp/gt Rm,Rn */
- gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
+ gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
FREG(B11_8), FREG(B7_4));
return;
}
@@ -1121,7 +1121,7 @@
case 0xf00e: /* fmac FR0,RM,Rn */
CHECK_FPU_ENABLED
CHECK_FPSCR_PR_0
- gen_helper_fmac_FT(FREG(B11_8), cpu_env,
+ gen_helper_fmac_FT(FREG(B11_8), tcg_env,
FREG(0), FREG(B7_4), FREG(B11_8));
return;
}
@@ -1260,7 +1260,7 @@
CHECK_NOT_DELAY_SLOT
gen_save_cpu_state(ctx, true);
imm = tcg_constant_i32(B7_0);
- gen_helper_trapa(cpu_env, imm);
+ gen_helper_trapa(tcg_env, imm);
ctx->base.is_jmp = DISAS_NORETURN;
}
return;
@@ -1438,7 +1438,7 @@
LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
case 0x406a: /* lds Rm,FPSCR */
CHECK_FPU_ENABLED
- gen_helper_ld_fpscr(cpu_env, REG(B11_8));
+ gen_helper_ld_fpscr(tcg_env, REG(B11_8));
ctx->base.is_jmp = DISAS_STOP;
return;
case 0x4066: /* lds.l @Rm+,FPSCR */
@@ -1448,7 +1448,7 @@
tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
MO_TESL | MO_ALIGN);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
- gen_helper_ld_fpscr(cpu_env, addr);
+ gen_helper_ld_fpscr(tcg_env, addr);
ctx->base.is_jmp = DISAS_STOP;
}
return;
@@ -1473,7 +1473,7 @@
TCGv val = tcg_temp_new();
tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
MO_TEUL | MO_ALIGN);
- gen_helper_movcal(cpu_env, REG(B11_8), val);
+ gen_helper_movcal(tcg_env, REG(B11_8), val);
tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
MO_TEUL | MO_ALIGN);
}
@@ -1560,7 +1560,7 @@
return;
case 0x0093: /* ocbi @Rn */
{
- gen_helper_ocbi(cpu_env, REG(B11_8));
+ gen_helper_ocbi(tcg_env, REG(B11_8));
}
return;
case 0x00a3: /* ocbp @Rn */
@@ -1659,11 +1659,11 @@
goto do_illegal;
}
fp = tcg_temp_new_i64();
- gen_helper_float_DT(fp, cpu_env, cpu_fpul);
+ gen_helper_float_DT(fp, tcg_env, cpu_fpul);
gen_store_fpr64(ctx, fp, B11_8);
}
else {
- gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
+ gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
}
return;
case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
@@ -1675,10 +1675,10 @@
}
fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, B11_8);
- gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
+ gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
}
else {
- gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
+ gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
}
return;
case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
@@ -1697,16 +1697,16 @@
}
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, B11_8);
- gen_helper_fsqrt_DT(fp, cpu_env, fp);
+ gen_helper_fsqrt_DT(fp, tcg_env, fp);
gen_store_fpr64(ctx, fp, B11_8);
} else {
- gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
+ gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
}
return;
case 0xf07d: /* fsrra FRn */
CHECK_FPU_ENABLED
CHECK_FPSCR_PR_0
- gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
+ gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
break;
case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
CHECK_FPU_ENABLED
@@ -1722,7 +1722,7 @@
CHECK_FPU_ENABLED
{
TCGv_i64 fp = tcg_temp_new_i64();
- gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
+ gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
gen_store_fpr64(ctx, fp, B11_8);
}
return;
@@ -1731,7 +1731,7 @@
{
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, B11_8);
- gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
+ gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
}
return;
case 0xf0ed: /* fipr FVm,FVn */
@@ -1740,7 +1740,7 @@
{
TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
- gen_helper_fipr(cpu_env, m, n);
+ gen_helper_fipr(tcg_env, m, n);
return;
}
break;
@@ -1752,7 +1752,7 @@
goto do_illegal;
}
TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
- gen_helper_ftrv(cpu_env, n);
+ gen_helper_ftrv(tcg_env, n);
return;
}
break;
@@ -1766,10 +1766,10 @@
if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
do_illegal_slot:
gen_save_cpu_state(ctx, true);
- gen_helper_raise_slot_illegal_instruction(cpu_env);
+ gen_helper_raise_slot_illegal_instruction(tcg_env);
} else {
gen_save_cpu_state(ctx, true);
- gen_helper_raise_illegal_instruction(cpu_env);
+ gen_helper_raise_illegal_instruction(tcg_env);
}
ctx->base.is_jmp = DISAS_NORETURN;
return;
@@ -1777,9 +1777,9 @@
do_fpu_disabled:
gen_save_cpu_state(ctx, true);
if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
- gen_helper_raise_slot_fpu_disable(cpu_env);
+ gen_helper_raise_slot_fpu_disable(tcg_env);
} else {
- gen_helper_raise_fpu_disable(cpu_env);
+ gen_helper_raise_fpu_disable(tcg_env);
}
ctx->base.is_jmp = DISAS_NORETURN;
return;
@@ -2153,7 +2153,7 @@
cpu_exec_step_atomic holding the exclusive lock. */
ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
gen_save_cpu_state(ctx, false);
- gen_helper_exclusive(cpu_env);
+ gen_helper_exclusive(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
/* We're not executing an instruction, but we must report one for the
@@ -2179,7 +2179,7 @@
static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUSH4State *env = cs->env_ptr;
+ CPUSH4State *env = cpu_env(cs);
uint32_t tbflags;
int bound;
@@ -2236,7 +2236,7 @@
static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
- CPUSH4State *env = cs->env_ptr;
+ CPUSH4State *env = cpu_env(cs);
DisasContext *ctx = container_of(dcbase, DisasContext, base);
#ifdef CONFIG_USER_ONLY
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 130ab8f..8ba96ae 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -793,8 +793,6 @@
SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(obj);
CPUSPARCState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
-
if (scc->cpu_def) {
env->def = *scc->cpu_def;
}
@@ -930,6 +928,7 @@
.name = TYPE_SPARC_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(SPARCCPU),
+ .instance_align = __alignof(SPARCCPU),
.instance_init = sparc_cpu_initfn,
.abstract = true,
.class_size = sizeof(SPARCCPUClass),
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 9804457..b3a98f1 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -561,7 +561,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUSPARCState env;
};
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 3bf0ab8..f92ff80 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -187,25 +187,25 @@
static void gen_op_load_fpr_QT0(unsigned int src)
{
- tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
offsetof(CPU_QuadU, ll.upper));
- tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_load_fpr_QT1(unsigned int src)
{
- tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
+ tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
offsetof(CPU_QuadU, ll.upper));
- tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
+ tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
offsetof(CPU_QuadU, ll.lower));
}
static void gen_op_store_QT0_fpr(unsigned int dst)
{
- tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
offsetof(CPU_QuadU, ll.upper));
- tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
offsetof(CPU_QuadU, ll.lower));
}
@@ -443,7 +443,7 @@
default:
/* We need external help to produce the carry. */
carry_32 = tcg_temp_new_i32();
- gen_helper_compute_C_icc(carry_32, cpu_env);
+ gen_helper_compute_C_icc(carry_32, tcg_env);
break;
}
@@ -516,7 +516,7 @@
default:
/* We need external help to produce the carry. */
carry_32 = tcg_temp_new_i32();
- gen_helper_compute_C_icc(carry_32, cpu_env);
+ gen_helper_compute_C_icc(carry_32, tcg_env);
break;
}
@@ -967,7 +967,7 @@
{
if (dc->cc_op != CC_OP_FLAGS) {
dc->cc_op = CC_OP_FLAGS;
- gen_helper_compute_psr(cpu_env);
+ gen_helper_compute_psr(tcg_env);
}
}
@@ -980,13 +980,13 @@
static void gen_exception(DisasContext *dc, int which)
{
save_state(dc);
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(which));
+ gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
dc->base.is_jmp = DISAS_NORETURN;
}
static void gen_check_align(TCGv addr, int mask)
{
- gen_helper_check_align(cpu_env, addr, tcg_constant_i32(mask));
+ gen_helper_check_align(tcg_env, addr, tcg_constant_i32(mask));
}
static void gen_mov_pc_npc(DisasContext *dc)
@@ -1120,7 +1120,7 @@
default:
do_dynamic:
- gen_helper_compute_psr(cpu_env);
+ gen_helper_compute_psr(tcg_env);
dc->cc_op = CC_OP_FLAGS;
/* FALLTHRU */
@@ -1425,16 +1425,16 @@
{
switch (fccno) {
case 0:
- gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 1:
- gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 2:
- gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 3:
- gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
}
}
@@ -1443,16 +1443,16 @@
{
switch (fccno) {
case 0:
- gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 1:
- gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 2:
- gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 3:
- gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
}
}
@@ -1461,16 +1461,16 @@
{
switch (fccno) {
case 0:
- gen_helper_fcmpq(cpu_fsr, cpu_env);
+ gen_helper_fcmpq(cpu_fsr, tcg_env);
break;
case 1:
- gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
+ gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
break;
case 2:
- gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
+ gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
break;
case 3:
- gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
+ gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
break;
}
}
@@ -1479,16 +1479,16 @@
{
switch (fccno) {
case 0:
- gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 1:
- gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 2:
- gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 3:
- gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
}
}
@@ -1497,16 +1497,16 @@
{
switch (fccno) {
case 0:
- gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 1:
- gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 2:
- gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
case 3:
- gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
break;
}
}
@@ -1515,16 +1515,16 @@
{
switch (fccno) {
case 0:
- gen_helper_fcmpeq(cpu_fsr, cpu_env);
+ gen_helper_fcmpeq(cpu_fsr, tcg_env);
break;
case 1:
- gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
+ gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
break;
case 2:
- gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
+ gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
break;
case 3:
- gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
+ gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
break;
}
}
@@ -1533,32 +1533,32 @@
static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
- gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
- gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
static void gen_op_fcmpq(int fccno)
{
- gen_helper_fcmpq(cpu_fsr, cpu_env);
+ gen_helper_fcmpq(cpu_fsr, tcg_env);
}
static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
- gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
- gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
static void gen_op_fcmpeq(int fccno)
{
- gen_helper_fcmpeq(cpu_fsr, cpu_env);
+ gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif
@@ -1593,8 +1593,8 @@
src = gen_load_fpr_F(dc, rs);
dst = gen_dest_fpr_F(dc);
- gen(dst, cpu_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_F(dc, rd, dst);
}
@@ -1621,8 +1621,8 @@
src2 = gen_load_fpr_F(dc, rs2);
dst = gen_dest_fpr_F(dc);
- gen(dst, cpu_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_F(dc, rd, dst);
}
@@ -1651,8 +1651,8 @@
src = gen_load_fpr_D(dc, rs);
dst = gen_dest_fpr_D(dc, rd);
- gen(dst, cpu_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_D(dc, rd, dst);
}
@@ -1681,8 +1681,8 @@
src2 = gen_load_fpr_D(dc, rs2);
dst = gen_dest_fpr_D(dc, rd);
- gen(dst, cpu_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_D(dc, rd, dst);
}
@@ -1737,8 +1737,8 @@
{
gen_op_load_fpr_QT1(QFPREG(rs));
- gen(cpu_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(dc, QFPREG(rd));
@@ -1750,7 +1750,7 @@
{
gen_op_load_fpr_QT1(QFPREG(rs));
- gen(cpu_env);
+ gen(tcg_env);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(dc, QFPREG(rd));
@@ -1763,8 +1763,8 @@
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
- gen(cpu_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(dc, QFPREG(rd));
@@ -1780,8 +1780,8 @@
src2 = gen_load_fpr_F(dc, rs2);
dst = gen_dest_fpr_D(dc, rd);
- gen(dst, cpu_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_D(dc, rd, dst);
}
@@ -1794,8 +1794,8 @@
src1 = gen_load_fpr_D(dc, rs1);
src2 = gen_load_fpr_D(dc, rs2);
- gen(cpu_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(dc, QFPREG(rd));
@@ -1811,8 +1811,8 @@
src = gen_load_fpr_F(dc, rs);
dst = gen_dest_fpr_D(dc, rd);
- gen(dst, cpu_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_D(dc, rd, dst);
}
@@ -1827,7 +1827,7 @@
src = gen_load_fpr_F(dc, rs);
dst = gen_dest_fpr_D(dc, rd);
- gen(dst, cpu_env, src);
+ gen(dst, tcg_env, src);
gen_store_fpr_D(dc, rd, dst);
}
@@ -1841,8 +1841,8 @@
src = gen_load_fpr_D(dc, rs);
dst = gen_dest_fpr_F(dc);
- gen(dst, cpu_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_F(dc, rd, dst);
}
@@ -1855,8 +1855,8 @@
gen_op_load_fpr_QT1(QFPREG(rs));
dst = gen_dest_fpr_F(dc);
- gen(dst, cpu_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_F(dc, rd, dst);
}
@@ -1869,8 +1869,8 @@
gen_op_load_fpr_QT1(QFPREG(rs));
dst = gen_dest_fpr_D(dc, rd);
- gen(dst, cpu_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+ gen(dst, tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
gen_store_fpr_D(dc, rd, dst);
}
@@ -1882,7 +1882,7 @@
src = gen_load_fpr_F(dc, rs);
- gen(cpu_env, src);
+ gen(tcg_env, src);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(dc, QFPREG(rd));
@@ -1895,7 +1895,7 @@
src = gen_load_fpr_D(dc, rs);
- gen(cpu_env, src);
+ gen(tcg_env, src);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(dc, QFPREG(rd));
@@ -2170,11 +2170,11 @@
save_state(dc);
#ifdef TARGET_SPARC64
- gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
{
TCGv_i64 t64 = tcg_temp_new_i64();
- gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
tcg_gen_trunc_i64_tl(dst, t64);
}
#endif
@@ -2243,12 +2243,12 @@
save_state(dc);
#ifdef TARGET_SPARC64
- gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
+ gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
{
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_extu_tl_i64(t64, src);
- gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
}
#endif
@@ -2313,7 +2313,7 @@
/* ??? In theory, this should be raise DAE_invalid_asi.
But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
} else {
TCGv_i32 r_asi = tcg_constant_i32(da.asi);
TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
@@ -2321,10 +2321,10 @@
save_state(dc);
t64 = tcg_temp_new_i64();
- gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
s64 = tcg_constant_i64(0xff);
- gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
+ gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
tcg_gen_trunc_i64_tl(dst, t64);
@@ -2423,19 +2423,19 @@
switch (size) {
case 4:
d64 = tcg_temp_new_i64();
- gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
d32 = gen_dest_fpr_F(dc);
tcg_gen_extrl_i64_i32(d32, d64);
gen_store_fpr_F(dc, rd, d32);
break;
case 8:
- gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
break;
case 16:
d64 = tcg_temp_new_i64();
- gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
tcg_gen_addi_tl(addr, addr, 8);
- gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop);
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
break;
default:
@@ -2575,7 +2575,7 @@
TCGv_i64 tmp = tcg_temp_new_i64();
save_state(dc);
- gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
/* See above. */
if ((da.memop & MO_BSWAP) == MO_TE) {
@@ -2641,7 +2641,7 @@
}
save_state(dc);
- gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
}
break;
}
@@ -2694,7 +2694,7 @@
TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
save_state(dc);
- gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
}
break;
}
@@ -2744,7 +2744,7 @@
TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
save_state(dc);
- gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
}
break;
}
@@ -2820,19 +2820,19 @@
}
#ifndef CONFIG_USER_ONLY
-static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
+static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env)
{
TCGv_i32 r_tl = tcg_temp_new_i32();
/* load env->tl into r_tl */
- tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
+ tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
/* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
/* calculate offset to current trap state from env->ts, reuse r_tl */
tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
- tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
+ tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
/* tsptr = env->ts[env->tl & MAXTL_MASK] */
{
@@ -3159,7 +3159,7 @@
tcg_gen_addi_i32(trap, trap, TT_TRAP);
}
- gen_helper_raise_exception(cpu_env, trap);
+ gen_helper_raise_exception(tcg_env, trap);
if (cond == 8) {
/* An unconditional trap ends the TB. */
@@ -3197,7 +3197,7 @@
#ifdef TARGET_SPARC64
case 0x2: /* V9 rdccr */
update_psr(dc);
- gen_helper_rdccr(cpu_dst, cpu_env);
+ gen_helper_rdccr(cpu_dst, tcg_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x3: /* V9 rdasi */
@@ -3211,12 +3211,12 @@
r_tickptr = tcg_temp_new_ptr();
r_const = tcg_constant_i32(dc->mem_idx);
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, tick));
if (translator_io_start(&dc->base)) {
dc->base.is_jmp = DISAS_EXIT;
}
- gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+ gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr,
r_const);
gen_store_gpr(dc, rd, cpu_dst);
}
@@ -3245,7 +3245,7 @@
gen_store_gpr(dc, rd, cpu_gsr);
break;
case 0x16: /* Softint */
- tcg_gen_ld32s_tl(cpu_dst, cpu_env,
+ tcg_gen_ld32s_tl(cpu_dst, tcg_env,
offsetof(CPUSPARCState, softint));
gen_store_gpr(dc, rd, cpu_dst);
break;
@@ -3259,12 +3259,12 @@
r_tickptr = tcg_temp_new_ptr();
r_const = tcg_constant_i32(dc->mem_idx);
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, stick));
if (translator_io_start(&dc->base)) {
dc->base.is_jmp = DISAS_EXIT;
}
- gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+ gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr,
r_const);
gen_store_gpr(dc, rd, cpu_dst);
}
@@ -3299,7 +3299,7 @@
goto priv_insn;
}
update_psr(dc);
- gen_helper_rdpsr(cpu_dst, cpu_env);
+ gen_helper_rdpsr(cpu_dst, tcg_env);
#else
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
@@ -3307,7 +3307,7 @@
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0: // hpstate
- tcg_gen_ld_i64(cpu_dst, cpu_env,
+ tcg_gen_ld_i64(cpu_dst, tcg_env,
offsetof(CPUSPARCState, hpstate));
break;
case 1: // htstate
@@ -3344,7 +3344,7 @@
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
}
@@ -3354,7 +3354,7 @@
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
}
@@ -3364,7 +3364,7 @@
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tstate));
}
@@ -3373,7 +3373,7 @@
{
TCGv_ptr r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tt));
}
@@ -3385,12 +3385,12 @@
r_tickptr = tcg_temp_new_ptr();
r_const = tcg_constant_i32(dc->mem_idx);
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, tick));
if (translator_io_start(&dc->base)) {
dc->base.is_jmp = DISAS_EXIT;
}
- gen_helper_tick_get_count(cpu_tmp0, cpu_env,
+ gen_helper_tick_get_count(cpu_tmp0, tcg_env,
r_tickptr, r_const);
}
break;
@@ -3398,43 +3398,43 @@
tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
break;
case 6: // pstate
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, pstate));
break;
case 7: // tl
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, tl));
break;
case 8: // pil
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, psrpil));
break;
case 9: // cwp
- gen_helper_rdcwp(cpu_tmp0, cpu_env);
+ gen_helper_rdcwp(cpu_tmp0, tcg_env);
break;
case 10: // cansave
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, cansave));
break;
case 11: // canrestore
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, canrestore));
break;
case 12: // cleanwin
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, cleanwin));
break;
case 13: // otherwin
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, otherwin));
break;
case 14: // wstate
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, wstate));
break;
case 16: // UA2005 gl
CHECK_IU_FEATURE(dc, GL);
- tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, gl));
break;
case 26: // UA2005 strand status
@@ -3459,7 +3459,7 @@
#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
} else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
- gen_helper_flushw(cpu_env);
+ gen_helper_flushw(tcg_env);
#else
if (!supervisor(dc))
goto priv_insn;
@@ -4002,28 +4002,28 @@
break;
#ifdef TARGET_SPARC64
case 0xd: /* V9 udivx */
- gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+ gen_helper_udivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
break;
#endif
case 0xe: /* udiv */
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
- gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
+ gen_helper_udiv_cc(cpu_dst, tcg_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
- gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
+ gen_helper_udiv(cpu_dst, tcg_env, cpu_src1,
cpu_src2);
}
break;
case 0xf: /* sdiv */
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
- gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
+ gen_helper_sdiv_cc(cpu_dst, tcg_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
- gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
+ gen_helper_sdiv(cpu_dst, tcg_env, cpu_src1,
cpu_src2);
}
break;
@@ -4048,13 +4048,13 @@
dc->cc_op = CC_OP_TSUB;
break;
case 0x22: /* taddcctv */
- gen_helper_taddcctv(cpu_dst, cpu_env,
+ gen_helper_taddcctv(cpu_dst, tcg_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TADDTV;
break;
case 0x23: /* tsubcctv */
- gen_helper_tsubcctv(cpu_dst, cpu_env,
+ gen_helper_tsubcctv(cpu_dst, tcg_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TSUBTV;
@@ -4122,20 +4122,20 @@
CPU_FEATURE_POWERDOWN)) {
/* LEON3 power-down */
save_state(dc);
- gen_helper_power_down(cpu_env);
+ gen_helper_power_down(tcg_env);
}
break;
#else
case 0x2: /* V9 wrccr */
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_wrccr(cpu_env, cpu_tmp0);
+ gen_helper_wrccr(tcg_env, cpu_tmp0);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
break;
case 0x3: /* V9 wrasi */
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ tcg_gen_st32_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, asi));
/*
* End TB to notice changed ASI.
@@ -4173,19 +4173,19 @@
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_set_softint(cpu_env, cpu_tmp0);
+ gen_helper_set_softint(tcg_env, cpu_tmp0);
break;
case 0x15: /* Softint clear */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_clear_softint(cpu_env, cpu_tmp0);
+ gen_helper_clear_softint(tcg_env, cpu_tmp0);
break;
case 0x16: /* Softint write */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_write_softint(cpu_env, cpu_tmp0);
+ gen_helper_write_softint(tcg_env, cpu_tmp0);
break;
case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
@@ -4198,7 +4198,7 @@
tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, tick));
translator_io_start(&dc->base);
gen_helper_tick_set_limit(r_tickptr,
@@ -4218,7 +4218,7 @@
tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, stick));
translator_io_start(&dc->base);
gen_helper_tick_set_count(r_tickptr,
@@ -4238,7 +4238,7 @@
tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, stick));
translator_io_start(&dc->base);
gen_helper_tick_set_limit(r_tickptr,
@@ -4266,10 +4266,10 @@
#ifdef TARGET_SPARC64
switch (rd) {
case 0:
- gen_helper_saved(cpu_env);
+ gen_helper_saved(tcg_env);
break;
case 1:
- gen_helper_restored(cpu_env);
+ gen_helper_restored(tcg_env);
break;
case 2: /* UA2005 allclean */
case 3: /* UA2005 otherw */
@@ -4282,7 +4282,7 @@
#else
cpu_tmp0 = tcg_temp_new();
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_wrpsr(cpu_env, cpu_tmp0);
+ gen_helper_wrpsr(tcg_env, cpu_tmp0);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
save_state(dc);
@@ -4305,7 +4305,7 @@
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
}
@@ -4315,7 +4315,7 @@
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
}
@@ -4325,7 +4325,7 @@
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state,
tstate));
@@ -4336,7 +4336,7 @@
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ gen_load_trap_state_at_tl(r_tsptr, tcg_env);
tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tt));
}
@@ -4346,7 +4346,7 @@
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, tick));
translator_io_start(&dc->base);
gen_helper_tick_set_count(r_tickptr,
@@ -4363,12 +4363,12 @@
if (translator_io_start(&dc->base)) {
dc->base.is_jmp = DISAS_EXIT;
}
- gen_helper_wrpstate(cpu_env, cpu_tmp0);
+ gen_helper_wrpstate(tcg_env, cpu_tmp0);
dc->npc = DYNAMIC_PC;
break;
case 7: // tl
save_state(dc);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ tcg_gen_st32_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState, tl));
dc->npc = DYNAMIC_PC;
break;
@@ -4376,39 +4376,39 @@
if (translator_io_start(&dc->base)) {
dc->base.is_jmp = DISAS_EXIT;
}
- gen_helper_wrpil(cpu_env, cpu_tmp0);
+ gen_helper_wrpil(tcg_env, cpu_tmp0);
break;
case 9: // cwp
- gen_helper_wrcwp(cpu_env, cpu_tmp0);
+ gen_helper_wrcwp(tcg_env, cpu_tmp0);
break;
case 10: // cansave
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ tcg_gen_st32_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState,
cansave));
break;
case 11: // canrestore
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ tcg_gen_st32_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState,
canrestore));
break;
case 12: // cleanwin
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ tcg_gen_st32_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState,
cleanwin));
break;
case 13: // otherwin
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ tcg_gen_st32_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState,
otherwin));
break;
case 14: // wstate
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ tcg_gen_st32_tl(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState,
wstate));
break;
case 16: // UA2005 gl
CHECK_IU_FEATURE(dc, GL);
- gen_helper_wrgl(cpu_env, cpu_tmp0);
+ gen_helper_wrgl(tcg_env, cpu_tmp0);
break;
case 26: // UA2005 strand status
CHECK_IU_FEATURE(dc, HYPV);
@@ -4442,7 +4442,7 @@
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
switch (rd) {
case 0: // hpstate
- tcg_gen_st_i64(cpu_tmp0, cpu_env,
+ tcg_gen_st_i64(cpu_tmp0, tcg_env,
offsetof(CPUSPARCState,
hpstate));
save_state(dc);
@@ -4465,7 +4465,7 @@
tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ tcg_gen_ld_ptr(r_tickptr, tcg_env,
offsetof(CPUSPARCState, hstick));
translator_io_start(&dc->base);
gen_helper_tick_set_limit(r_tickptr,
@@ -4518,7 +4518,7 @@
break;
}
case 0x2d: /* V9 sdivx */
- gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+ gen_helper_sdivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2e: /* V9 popc */
@@ -5019,7 +5019,7 @@
tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
}
}
- gen_helper_restore(cpu_env);
+ gen_helper_restore(tcg_env);
gen_mov_pc_npc(dc);
gen_check_align(cpu_tmp0, 3);
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
@@ -5064,7 +5064,7 @@
gen_check_align(cpu_tmp0, 3);
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
dc->npc = DYNAMIC_PC;
- gen_helper_rett(cpu_env);
+ gen_helper_rett(tcg_env);
}
goto jmp_insn;
#endif
@@ -5074,11 +5074,11 @@
/* nop */
break;
case 0x3c: /* save */
- gen_helper_save(cpu_env);
+ gen_helper_save(tcg_env);
gen_store_gpr(dc, rd, cpu_tmp0);
break;
case 0x3d: /* restore */
- gen_helper_restore(cpu_env);
+ gen_helper_restore(tcg_env);
gen_store_gpr(dc, rd, cpu_tmp0);
break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
@@ -5091,7 +5091,7 @@
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
translator_io_start(&dc->base);
- gen_helper_done(cpu_env);
+ gen_helper_done(tcg_env);
goto jmp_insn;
case 1:
if (!supervisor(dc))
@@ -5099,7 +5099,7 @@
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
translator_io_start(&dc->base);
- gen_helper_retry(cpu_env);
+ gen_helper_retry(tcg_env);
goto jmp_insn;
default:
goto illegal_insn;
@@ -5302,14 +5302,14 @@
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t64, cpu_addr,
dc->mem_idx, MO_TEUQ | MO_ALIGN);
- gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
+ gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
break;
}
#endif
cpu_dst_32 = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
dc->mem_idx, MO_TEUL | MO_ALIGN);
- gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
+ gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
break;
case 0x22: /* ldqf, load quad fpreg */
CHECK_FPU_FEATURE(dc, FLOAT128);
@@ -5568,7 +5568,7 @@
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUSPARCState *env = cs->env_ptr;
+ CPUSPARCState *env = cpu_env(cs);
int bound;
dc->pc = dc->base.pc_first;
@@ -5625,7 +5625,7 @@
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUSPARCState *env = cs->env_ptr;
+ CPUSPARCState *env = cpu_env(cs);
unsigned int insn;
insn = translator_ldl(env, &dc->base, dc->pc);
@@ -5770,21 +5770,21 @@
unsigned int i;
- cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
+ cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
offsetof(CPUSPARCState, regwptr),
"regwptr");
for (i = 0; i < ARRAY_SIZE(r32); ++i) {
- *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
+ *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
}
for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
- *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
+ *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
}
cpu_regs[0] = NULL;
for (i = 1; i < 8; ++i) {
- cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ cpu_regs[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUSPARCState, gregs[i]),
gregnames[i]);
}
@@ -5796,7 +5796,7 @@
}
for (i = 0; i < TARGET_DPREGS; i++) {
- cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUSPARCState, fpr[i]),
fregnames[i]);
}
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 133a9ac..d147762 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -124,14 +124,6 @@
tcc->parent_realize(dev, errp);
}
-
-static void tricore_cpu_initfn(Object *obj)
-{
- TriCoreCPU *cpu = TRICORE_CPU(obj);
-
- cpu_set_cpustate_pointers(cpu);
-}
-
static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model)
{
ObjectClass *oc;
@@ -230,7 +222,7 @@
.name = TYPE_TRICORE_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(TriCoreCPU),
- .instance_init = tricore_cpu_initfn,
+ .instance_align = __alignof(TriCoreCPU),
.abstract = true,
.class_size = sizeof(TriCoreCPUClass),
.class_init = tricore_cpu_class_init,
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index 3708405..a357b57 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -30,150 +30,25 @@
/* GPR Register */
uint32_t gpr_a[16];
uint32_t gpr_d[16];
- /* CSFR Register */
- uint32_t PCXI;
/* Frequently accessed PSW_USB bits are stored separately for efficiency.
This contains all the other bits. Use psw_{read,write} to access
the whole PSW. */
uint32_t PSW;
-
- /* PSW flag cache for faster execution
- */
+ /* PSW flag cache for faster execution */
uint32_t PSW_USB_C;
uint32_t PSW_USB_V; /* Only if bit 31 set, then flag is set */
uint32_t PSW_USB_SV; /* Only if bit 31 set, then flag is set */
uint32_t PSW_USB_AV; /* Only if bit 31 set, then flag is set. */
uint32_t PSW_USB_SAV; /* Only if bit 31 set, then flag is set. */
- uint32_t PC;
- uint32_t SYSCON;
- uint32_t CPU_ID;
- uint32_t CORE_ID;
- uint32_t BIV;
- uint32_t BTV;
- uint32_t ISP;
- uint32_t ICR;
- uint32_t FCX;
- uint32_t LCX;
- uint32_t COMPAT;
+#define R(ADDR, NAME, FEATURE) uint32_t NAME;
+#define A(ADDR, NAME, FEATURE) uint32_t NAME;
+#define E(ADDR, NAME, FEATURE) uint32_t NAME;
+#include "csfr.h.inc"
+#undef R
+#undef A
+#undef E
- /* Mem Protection Register */
- uint32_t DPR0_0L;
- uint32_t DPR0_0U;
- uint32_t DPR0_1L;
- uint32_t DPR0_1U;
- uint32_t DPR0_2L;
- uint32_t DPR0_2U;
- uint32_t DPR0_3L;
- uint32_t DPR0_3U;
-
- uint32_t DPR1_0L;
- uint32_t DPR1_0U;
- uint32_t DPR1_1L;
- uint32_t DPR1_1U;
- uint32_t DPR1_2L;
- uint32_t DPR1_2U;
- uint32_t DPR1_3L;
- uint32_t DPR1_3U;
-
- uint32_t DPR2_0L;
- uint32_t DPR2_0U;
- uint32_t DPR2_1L;
- uint32_t DPR2_1U;
- uint32_t DPR2_2L;
- uint32_t DPR2_2U;
- uint32_t DPR2_3L;
- uint32_t DPR2_3U;
-
- uint32_t DPR3_0L;
- uint32_t DPR3_0U;
- uint32_t DPR3_1L;
- uint32_t DPR3_1U;
- uint32_t DPR3_2L;
- uint32_t DPR3_2U;
- uint32_t DPR3_3L;
- uint32_t DPR3_3U;
-
- uint32_t CPR0_0L;
- uint32_t CPR0_0U;
- uint32_t CPR0_1L;
- uint32_t CPR0_1U;
- uint32_t CPR0_2L;
- uint32_t CPR0_2U;
- uint32_t CPR0_3L;
- uint32_t CPR0_3U;
-
- uint32_t CPR1_0L;
- uint32_t CPR1_0U;
- uint32_t CPR1_1L;
- uint32_t CPR1_1U;
- uint32_t CPR1_2L;
- uint32_t CPR1_2U;
- uint32_t CPR1_3L;
- uint32_t CPR1_3U;
-
- uint32_t CPR2_0L;
- uint32_t CPR2_0U;
- uint32_t CPR2_1L;
- uint32_t CPR2_1U;
- uint32_t CPR2_2L;
- uint32_t CPR2_2U;
- uint32_t CPR2_3L;
- uint32_t CPR2_3U;
-
- uint32_t CPR3_0L;
- uint32_t CPR3_0U;
- uint32_t CPR3_1L;
- uint32_t CPR3_1U;
- uint32_t CPR3_2L;
- uint32_t CPR3_2U;
- uint32_t CPR3_3L;
- uint32_t CPR3_3U;
-
- uint32_t DPM0;
- uint32_t DPM1;
- uint32_t DPM2;
- uint32_t DPM3;
-
- uint32_t CPM0;
- uint32_t CPM1;
- uint32_t CPM2;
- uint32_t CPM3;
-
- /* Memory Management Registers */
- uint32_t MMU_CON;
- uint32_t MMU_ASI;
- uint32_t MMU_TVA;
- uint32_t MMU_TPA;
- uint32_t MMU_TPX;
- uint32_t MMU_TFA;
- /* {1.3.1 only */
- uint32_t BMACON;
- uint32_t SMACON;
- uint32_t DIEAR;
- uint32_t DIETR;
- uint32_t CCDIER;
- uint32_t MIECON;
- uint32_t PIEAR;
- uint32_t PIETR;
- uint32_t CCPIER;
- /*} */
- /* Debug Registers */
- uint32_t DBGSR;
- uint32_t EXEVT;
- uint32_t CREVT;
- uint32_t SWEVT;
- uint32_t TR0EVT;
- uint32_t TR1EVT;
- uint32_t DMS;
- uint32_t DCX;
- uint32_t DBGTCR;
- uint32_t CCTRL;
- uint32_t CCNT;
- uint32_t ICNT;
- uint32_t M1CNT;
- uint32_t M2CNT;
- uint32_t M3CNT;
/* Floating Point Registers */
float_status fp_status;
@@ -192,7 +67,6 @@
CPUState parent_obj;
/*< public >*/
- CPUNegativeOffsetState neg;
CPUTriCoreState env;
};
diff --git a/target/tricore/fpu_helper.c b/target/tricore/fpu_helper.c
index cb7ee7d..5d38aea 100644
--- a/target/tricore/fpu_helper.c
+++ b/target/tricore/fpu_helper.c
@@ -373,6 +373,80 @@
return (uint32_t)result;
}
+uint32_t helper_hptof(CPUTriCoreState *env, uint32_t arg)
+{
+ float16 f_arg = make_float16(arg);
+ uint32_t result = 0;
+ int32_t flags = 0;
+
+ /*
+ * if we have any NaN we need to move the top 2 and lower 8 input mantissa
+ * bits to the top 2 and lower 8 output mantissa bits respectively.
+ * Softfloat on the other hand uses the top 10 mantissa bits.
+ */
+ if (float16_is_any_nan(f_arg)) {
+ if (float16_is_signaling_nan(f_arg, &env->fp_status)) {
+ flags |= float_flag_invalid;
+ }
+ result = 0;
+ result = float32_set_sign(result, f_arg >> 15);
+ result = deposit32(result, 23, 8, 0xff);
+ result = deposit32(result, 21, 2, extract32(f_arg, 8, 2));
+ result = deposit32(result, 0, 8, extract32(f_arg, 0, 8));
+ } else {
+ set_flush_inputs_to_zero(0, &env->fp_status);
+ result = float16_to_float32(f_arg, true, &env->fp_status);
+ set_flush_inputs_to_zero(1, &env->fp_status);
+ flags = f_get_excp_flags(env);
+ }
+
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ return result;
+}
+
+uint32_t helper_ftohp(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_arg = make_float32(arg);
+ uint32_t result = 0;
+ int32_t flags = 0;
+
+ /*
+ * if we have any NaN we need to move the top 2 and lower 8 input mantissa
+ * bits to the top 2 and lower 8 output mantissa bits respectively.
+ * Softfloat on the other hand uses the top 10 mantissa bits.
+ */
+ if (float32_is_any_nan(f_arg)) {
+ if (float32_is_signaling_nan(f_arg, &env->fp_status)) {
+ flags |= float_flag_invalid;
+ }
+ result = float16_set_sign(result, arg >> 31);
+ result = deposit32(result, 10, 5, 0x1f);
+ result = deposit32(result, 8, 2, extract32(arg, 21, 2));
+ result = deposit32(result, 0, 8, extract32(arg, 0, 8));
+ if (extract32(result, 0, 10) == 0) {
+ result |= (1 << 8);
+ }
+ } else {
+ set_flush_to_zero(0, &env->fp_status);
+ result = float32_to_float16(f_arg, true, &env->fp_status);
+ set_flush_to_zero(1, &env->fp_status);
+ flags = f_get_excp_flags(env);
+ }
+
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ return result;
+}
+
uint32_t helper_itof(CPUTriCoreState *env, uint32_t arg)
{
float32 f_result;
@@ -429,6 +503,38 @@
return result;
}
+uint32_t helper_ftou(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_arg = make_float32(arg);
+ uint32_t result;
+ int32_t flags = 0;
+
+ result = float32_to_uint32(f_arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags & float_flag_invalid) {
+ flags &= ~float_flag_inexact;
+ if (float32_is_any_nan(f_arg)) {
+ result = 0;
+ }
+ /*
+ * we need to check arg < 0.0 before rounding as TriCore needs to raise
+ * float_flag_invalid as well. For instance, when we have a negative
+ * exponent and sign, softfloat would only raise float_flag_inexact.
+ */
+ } else if (float32_lt_quiet(f_arg, 0, &env->fp_status)) {
+ flags = float_flag_invalid;
+ result = 0;
+ }
+
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return result;
+}
+
uint32_t helper_ftouz(CPUTriCoreState *env, uint32_t arg)
{
float32 f_arg = make_float32(arg);
@@ -443,6 +549,11 @@
if (float32_is_any_nan(f_arg)) {
result = 0;
}
+ /*
+ * we need to check arg < 0.0 before rounding as TriCore needs to raise
+ * float_flag_invalid as well. For instance, when we have a negative
+ * exponent and sign, softfloat would only raise float_flag_inexact.
+ */
} else if (float32_lt_quiet(f_arg, 0, &env->fp_status)) {
flags = float_flag_invalid;
result = 0;
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
index 6d076ac..7e5da3c 100644
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -120,16 +120,31 @@
void fpu_set_state(CPUTriCoreState *env)
{
- set_float_rounding_mode(env->PSW & MASK_PSW_FPU_RM, &env->fp_status);
+ switch (extract32(env->PSW, 24, 2)) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_up, &env->fp_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_down, &env->fp_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ break;
+ }
+
set_flush_inputs_to_zero(1, &env->fp_status);
set_flush_to_zero(1, &env->fp_status);
+ set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status);
set_default_nan_mode(1, &env->fp_status);
}
uint32_t psw_read(CPUTriCoreState *env)
{
/* clear all USB bits */
- env->PSW &= 0x6ffffff;
+ env->PSW &= 0x7ffffff;
/* now set them from the cache */
env->PSW |= ((env->PSW_USB_C != 0) << 31);
env->PSW |= ((env->PSW_USB_V & (1 << 31)) >> 1);
diff --git a/target/tricore/helper.h b/target/tricore/helper.h
index 31d71ea..1d97d07 100644
--- a/target/tricore/helper.h
+++ b/target/tricore/helper.h
@@ -111,9 +111,12 @@
DEF_HELPER_3(fcmp, i32, env, i32, i32)
DEF_HELPER_2(qseed, i32, env, i32)
DEF_HELPER_2(ftoi, i32, env, i32)
+DEF_HELPER_2(ftohp, i32, env, i32)
+DEF_HELPER_2(hptof, i32, env, i32)
DEF_HELPER_2(itof, i32, env, i32)
DEF_HELPER_2(utof, i32, env, i32)
DEF_HELPER_2(ftoiz, i32, env, i32)
+DEF_HELPER_2(ftou, i32, env, i32)
DEF_HELPER_2(ftouz, i32, env, i32)
DEF_HELPER_2(updfl, void, env, i32)
/* dvinit */
@@ -134,6 +137,7 @@
DEF_HELPER_FLAGS_2(crc32b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_2(crc32_be, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_2(crc32_le, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_3(crcn, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
DEF_HELPER_FLAGS_2(shuffle, TCG_CALL_NO_RWG_SE, i32, i32, i32)
/* CSA */
DEF_HELPER_2(call, void, env, i32)
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index 89be1ed..ba9c444 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -2308,6 +2308,69 @@
return crc32(arg1, buf, 4);
}
+static uint32_t crc_div(uint32_t crc_in, uint32_t data, uint32_t gen,
+ uint32_t n, uint32_t m)
+{
+ uint32_t i;
+
+ data = data << n;
+ for (i = 0; i < m; i++) {
+ if (crc_in & (1u << (n - 1))) {
+ crc_in <<= 1;
+ if (data & (1u << (m - 1))) {
+ crc_in++;
+ }
+ crc_in ^= gen;
+ } else {
+ crc_in <<= 1;
+ if (data & (1u << (m - 1))) {
+ crc_in++;
+ }
+ }
+ data <<= 1;
+ }
+
+ return crc_in;
+}
+
+uint32_t helper_crcn(uint32_t arg0, uint32_t arg1, uint32_t arg2)
+{
+ uint32_t crc_out, crc_in;
+ uint32_t n = extract32(arg0, 12, 4) + 1;
+ uint32_t gen = extract32(arg0, 16, n);
+ uint32_t inv = extract32(arg0, 9, 1);
+ uint32_t le = extract32(arg0, 8, 1);
+ uint32_t m = extract32(arg0, 0, 3) + 1;
+ uint32_t data = extract32(arg1, 0, m);
+ uint32_t seed = extract32(arg2, 0, n);
+
+ if (le == 1) {
+ if (m == 0) {
+ data = 0;
+ } else {
+ data = revbit32(data) >> (32 - m);
+ }
+ }
+
+ if (inv == 1) {
+ seed = ~seed;
+ }
+
+ if (m > n) {
+ crc_in = (data >> (m - n)) ^ seed;
+ } else {
+ crc_in = (data << (n - m)) ^ seed;
+ }
+
+ crc_out = crc_div(crc_in, data, gen, n, m);
+
+ if (inv) {
+ crc_out = ~crc_out;
+ }
+
+ return extract32(crc_out, 0, n);
+}
+
uint32_t helper_shuffle(uint32_t arg0, uint32_t arg1)
{
uint32_t resb;
@@ -2395,7 +2458,7 @@
return count == 0;
}
-static void save_context_upper(CPUTriCoreState *env, int ea)
+static void save_context_upper(CPUTriCoreState *env, target_ulong ea)
{
cpu_stl_data(env, ea, env->PCXI);
cpu_stl_data(env, ea+4, psw_read(env));
@@ -2415,7 +2478,7 @@
cpu_stl_data(env, ea+60, env->gpr_d[15]);
}
-static void save_context_lower(CPUTriCoreState *env, int ea)
+static void save_context_lower(CPUTriCoreState *env, target_ulong ea)
{
cpu_stl_data(env, ea, env->PCXI);
cpu_stl_data(env, ea+4, env->gpr_a[11]);
@@ -2435,7 +2498,7 @@
cpu_stl_data(env, ea+60, env->gpr_d[7]);
}
-static void restore_context_upper(CPUTriCoreState *env, int ea,
+static void restore_context_upper(CPUTriCoreState *env, target_ulong ea,
target_ulong *new_PCXI, target_ulong *new_PSW)
{
*new_PCXI = cpu_ldl_data(env, ea);
@@ -2456,7 +2519,7 @@
env->gpr_d[15] = cpu_ldl_data(env, ea+60);
}
-static void restore_context_lower(CPUTriCoreState *env, int ea,
+static void restore_context_lower(CPUTriCoreState *env, target_ulong ea,
target_ulong *ra, target_ulong *pcxi)
{
*pcxi = cpu_ldl_data(env, ea);
@@ -2700,26 +2763,26 @@
}
}
-void helper_ldlcx(CPUTriCoreState *env, uint32_t ea)
+void helper_ldlcx(CPUTriCoreState *env, target_ulong ea)
{
uint32_t dummy;
/* insn doesn't load PCXI and RA */
restore_context_lower(env, ea, &dummy, &dummy);
}
-void helper_lducx(CPUTriCoreState *env, uint32_t ea)
+void helper_lducx(CPUTriCoreState *env, target_ulong ea)
{
uint32_t dummy;
/* insn doesn't load PCXI and PSW */
restore_context_upper(env, ea, &dummy, &dummy);
}
-void helper_stlcx(CPUTriCoreState *env, uint32_t ea)
+void helper_stlcx(CPUTriCoreState *env, target_ulong ea)
{
save_context_lower(env, ea);
}
-void helper_stucx(CPUTriCoreState *env, uint32_t ea)
+void helper_stucx(CPUTriCoreState *env, target_ulong ea)
{
save_context_upper(env, ea);
}
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 6ae5ccb..dd812ec 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -132,7 +132,7 @@
#define gen_helper_1arg(name, arg) do { \
TCGv_i32 helper_tmp = tcg_constant_i32(arg); \
- gen_helper_##name(cpu_env, helper_tmp); \
+ gen_helper_##name(tcg_env, helper_tmp); \
} while (0)
#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
@@ -191,7 +191,7 @@
#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
TCGv_i64 ret = tcg_temp_new_i64(); \
\
- gen_helper_##name(ret, cpu_env, arg1, arg2); \
+ gen_helper_##name(ret, tcg_env, arg1, arg2); \
tcg_gen_extr_i64_i32(rl, rh, ret); \
} while (0)
@@ -341,7 +341,7 @@
#define R(ADDRESS, REG, FEATURE) \
case ADDRESS: \
if (has_feature(ctx, FEATURE)) { \
- tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
+ tcg_gen_ld_tl(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \
} \
break;
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
@@ -350,7 +350,7 @@
{
/* since we're caching PSW make this a special case */
if (offset == 0xfe04) {
- gen_helper_psw_read(ret, cpu_env);
+ gen_helper_psw_read(ret, tcg_env);
} else {
switch (offset) {
#include "csfr.h.inc"
@@ -366,7 +366,7 @@
#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
case ADDRESS: \
if (has_feature(ctx, FEATURE)) { \
- tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
+ tcg_gen_st_tl(r1, tcg_env, offsetof(CPUTriCoreState, REG)); \
} \
break;
/* Endinit protected registers
@@ -380,7 +380,7 @@
if (ctx->priv == TRICORE_PRIV_SM) {
/* since we're caching PSW make this a special case */
if (offset == 0xfe04) {
- gen_helper_psw_write(cpu_env, r1);
+ gen_helper_psw_write(tcg_env, r1);
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
} else {
switch (offset) {
@@ -788,7 +788,7 @@
tcg_gen_shli_i64(temp64, temp64, 16);
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
- gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
+ gen_helper_add64_ssov(temp64, tcg_env, temp64_2, temp64);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -843,7 +843,7 @@
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
- gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
+ gen_helper_add64_ssov(temp64, tcg_env, temp64_2, temp64);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -867,7 +867,7 @@
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);
+ gen_helper_addr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
@@ -904,7 +904,7 @@
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
tcg_gen_shli_tl(temp, r1, 16);
- gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2);
+ gen_helper_addsur_h(ret, tcg_env, temp64, temp, temp2);
}
@@ -928,7 +928,7 @@
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
+ gen_helper_addr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
@@ -965,21 +965,21 @@
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
tcg_gen_shli_tl(temp, r1, 16);
- gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2);
+ gen_helper_addsur_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
static inline void
gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv t_n = tcg_constant_i32(n);
- gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, t_n);
+ gen_helper_maddr_q(ret, tcg_env, r1, r2, r3, t_n);
}
static inline void
gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv t_n = tcg_constant_i32(n);
- gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, t_n);
+ gen_helper_maddr_q_ssov(ret, tcg_env, r1, r2, r3, t_n);
}
static inline void
@@ -1115,7 +1115,7 @@
tcg_gen_shli_i64(t2, t2, 16);
tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
- gen_helper_add64_ssov(t1, cpu_env, t1, t2);
+ gen_helper_add64_ssov(t1, tcg_env, t1, t2);
tcg_gen_extr_i64_i32(rl, rh, t1);
}
@@ -1182,7 +1182,7 @@
tcg_gen_mul_i64(t2, t2, t3);
tcg_gen_sari_i64(t2, t2, up_shift - n);
- gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);
+ gen_helper_madd32_q_add_ssov(ret, tcg_env, t1, t2);
}
static inline void
@@ -1193,7 +1193,7 @@
TCGv t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
- gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
+ gen_helper_madd64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
@@ -1638,7 +1638,7 @@
break;
}
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
- gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+ gen_helper_sub64_ssov(temp64, tcg_env, temp64_2, temp64);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -1662,7 +1662,7 @@
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
+ gen_helper_subr_h(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
@@ -1696,7 +1696,7 @@
GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n);
break;
}
- gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
+ gen_helper_subr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high);
}
static inline void
@@ -1714,14 +1714,14 @@
gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv temp = tcg_constant_i32(n);
- gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
+ gen_helper_msubr_q(ret, tcg_env, r1, r2, r3, temp);
}
static inline void
gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
{
TCGv temp = tcg_constant_i32(n);
- gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+ gen_helper_msubr_q_ssov(ret, tcg_env, r1, r2, r3, temp);
}
static inline void
@@ -1848,7 +1848,7 @@
tcg_gen_shli_i64(t2, t2, 16);
tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
- gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
+ gen_helper_sub64_ssov(t1, tcg_env, t1, t2);
tcg_gen_extr_i64_i32(rl, rh, t1);
}
@@ -1920,7 +1920,7 @@
tcg_gen_sari_i64(t3, t2, up_shift - n);
tcg_gen_add_i64(t3, t3, t4);
- gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);
+ gen_helper_msub32_q_sub_ssov(ret, tcg_env, t1, t3);
}
static inline void
@@ -1931,7 +1931,7 @@
TCGv t_n = tcg_constant_i32(n);
tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
- gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, t_n);
+ gen_helper_msub64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n);
tcg_gen_extr_i64_i32(rl, rh, r1);
}
@@ -2018,7 +2018,7 @@
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
tcg_gen_shli_tl(temp, r1, 16);
- gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);
+ gen_helper_subadr_h(ret, tcg_env, temp64, temp, temp2);
}
static inline void
@@ -2084,7 +2084,7 @@
tcg_gen_shli_i64(temp64, temp64, 16);
tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
- gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+ gen_helper_sub64_ssov(temp64, tcg_env, temp64_2, temp64);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -2111,7 +2111,7 @@
}
tcg_gen_andi_tl(temp2, r1, 0xffff0000);
tcg_gen_shli_tl(temp, r1, 16);
- gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);
+ gen_helper_subadr_h_ssov(ret, tcg_env, temp64, temp, temp2);
}
static inline void gen_abs(TCGv ret, TCGv r1)
@@ -2164,7 +2164,7 @@
static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
+ gen_helper_absdif_ssov(ret, tcg_env, r1, temp);
}
static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
@@ -2238,26 +2238,26 @@
static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_mul_ssov(ret, cpu_env, r1, temp);
+ gen_helper_mul_ssov(ret, tcg_env, r1, temp);
}
static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_mul_suov(ret, cpu_env, r1, temp);
+ gen_helper_mul_suov(ret, tcg_env, r1, temp);
}
/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
+ gen_helper_madd32_ssov(ret, tcg_env, r1, r2, temp);
}
static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
+ gen_helper_madd32_suov(ret, tcg_env, r1, r2, temp);
}
static void
@@ -2371,7 +2371,7 @@
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
- gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
+ gen_helper_madd64_ssov(temp64, tcg_env, r1, temp64, r3);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -2389,7 +2389,7 @@
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
- gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
+ gen_helper_madd64_suov(temp64, tcg_env, r1, temp64, r3);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -2404,13 +2404,13 @@
static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
+ gen_helper_msub32_ssov(ret, tcg_env, r1, r2, temp);
}
static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
+ gen_helper_msub32_suov(ret, tcg_env, r1, r2, temp);
}
static inline void
@@ -2419,7 +2419,7 @@
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
- gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
+ gen_helper_msub64_ssov(temp64, tcg_env, r1, temp64, r3);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -2437,7 +2437,7 @@
{
TCGv_i64 temp64 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
- gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
+ gen_helper_msub64_suov(temp64, tcg_env, r1, temp64, r3);
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
}
@@ -2542,7 +2542,7 @@
static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
{
- gen_helper_sha_ssov(ret, cpu_env, r1, r2);
+ gen_helper_sha_ssov(ret, tcg_env, r1, r2);
}
static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
@@ -2595,29 +2595,29 @@
static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
{
- gen_helper_add_ssov(ret, cpu_env, r1, r2);
+ gen_helper_add_ssov(ret, tcg_env, r1, r2);
}
static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_add_ssov(ret, cpu_env, r1, temp);
+ gen_helper_add_ssov(ret, tcg_env, r1, temp);
}
static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
{
TCGv temp = tcg_constant_i32(con);
- gen_helper_add_suov(ret, cpu_env, r1, temp);
+ gen_helper_add_suov(ret, tcg_env, r1, temp);
}
static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
{
- gen_helper_sub_ssov(ret, cpu_env, r1, r2);
+ gen_helper_sub_ssov(ret, tcg_env, r1, r2);
}
static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
{
- gen_helper_sub_suov(ret, cpu_env, r1, r2);
+ gen_helper_sub_suov(ret, tcg_env, r1, r2);
}
static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
@@ -2767,9 +2767,9 @@
TCGv_i64 ret = tcg_temp_new_i64();
if (!has_feature(ctx, TRICORE_FEATURE_131)) {
- gen_helper_dvinit_b_13(ret, cpu_env, r1, r2);
+ gen_helper_dvinit_b_13(ret, tcg_env, r1, r2);
} else {
- gen_helper_dvinit_b_131(ret, cpu_env, r1, r2);
+ gen_helper_dvinit_b_131(ret, tcg_env, r1, r2);
}
tcg_gen_extr_i64_i32(rl, rh, ret);
}
@@ -2780,9 +2780,9 @@
TCGv_i64 ret = tcg_temp_new_i64();
if (!has_feature(ctx, TRICORE_FEATURE_131)) {
- gen_helper_dvinit_h_13(ret, cpu_env, r1, r2);
+ gen_helper_dvinit_h_13(ret, tcg_env, r1, r2);
} else {
- gen_helper_dvinit_h_131(ret, cpu_env, r1, r2);
+ gen_helper_dvinit_h_131(ret, tcg_env, r1, r2);
}
tcg_gen_extr_i64_i32(rl, rh, ret);
}
@@ -2841,7 +2841,7 @@
TCGv_i32 tintemp = tcg_constant_i32(tin);
gen_save_pc(ctx->base.pc_next);
- gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp);
+ gen_helper_raise_exception_sync(tcg_env, classtemp, tintemp);
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -2996,7 +2996,7 @@
break;
case OPC2_32_SYS_RET:
case OPC2_16_SR_RET:
- gen_helper_ret(cpu_env);
+ gen_helper_ret(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
break;
/* B-format */
@@ -3493,7 +3493,7 @@
gen_compute_branch(ctx, op2, 0, 0, 0, 0);
break;
case OPC2_16_SR_RFE:
- gen_helper_rfe(cpu_env);
+ gen_helper_rfe(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
break;
case OPC2_16_SR_DEBUG:
@@ -4741,7 +4741,7 @@
switch (op2) {
case OPC2_32_BO_LDLCX_SHORTOFF:
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_helper_ldlcx(cpu_env, temp);
+ gen_helper_ldlcx(tcg_env, temp);
break;
case OPC2_32_BO_LDMST_SHORTOFF:
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
@@ -4757,18 +4757,18 @@
break;
case OPC2_32_BO_LDUCX_SHORTOFF:
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_helper_lducx(cpu_env, temp);
+ gen_helper_lducx(tcg_env, temp);
break;
case OPC2_32_BO_LEA_SHORTOFF:
tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
break;
case OPC2_32_BO_STLCX_SHORTOFF:
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_helper_stlcx(cpu_env, temp);
+ gen_helper_stlcx(tcg_env, temp);
break;
case OPC2_32_BO_STUCX_SHORTOFF:
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
- gen_helper_stucx(cpu_env, temp);
+ gen_helper_stucx(tcg_env, temp);
break;
case OPC2_32_BO_SWAP_W_SHORTOFF:
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
@@ -4962,8 +4962,6 @@
const9 = MASK_OP_RC_CONST9(ctx->opcode);
op2 = MASK_OP_RC_OP2(ctx->opcode);
- temp = tcg_temp_new();
-
switch (op2) {
case OPC2_32_RC_AND:
tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
@@ -4972,10 +4970,12 @@
tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
break;
case OPC2_32_RC_NAND:
+ temp = tcg_temp_new();
tcg_gen_movi_tl(temp, const9);
tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
case OPC2_32_RC_NOR:
+ temp = tcg_temp_new();
tcg_gen_movi_tl(temp, const9);
tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
break;
@@ -5013,7 +5013,7 @@
break;
case OPC2_32_RC_SHUFFLE:
if (has_feature(ctx, TRICORE_FEATURE_162)) {
- TCGv temp = tcg_constant_i32(const9);
+ temp = tcg_constant_i32(const9);
gen_helper_shuffle(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
} else {
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -5310,8 +5310,11 @@
}
break;
case OPC2_32_RCPW_INSERT:
+ /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
+ if (width == 0) {
+ tcg_gen_mov_tl(cpu_gpr_d[r2], cpu_gpr_d[r1]);
/* if pos + width > 32 undefined result */
- if (pos + width <= 32) {
+ } else if (pos + width <= 32) {
temp = tcg_constant_i32(const4);
tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
}
@@ -5590,44 +5593,44 @@
gen_abs(cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABS_B:
- gen_helper_abs_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ gen_helper_abs_b(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABS_H:
- gen_helper_abs_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ gen_helper_abs_h(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABSDIF:
gen_absdif(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABSDIF_B:
- gen_helper_absdif_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_absdif_b(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABSDIF_H:
- gen_helper_absdif_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_absdif_h(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABSDIFS:
- gen_helper_absdif_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_absdif_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABSDIFS_H:
- gen_helper_absdif_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_absdif_h_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABSS:
- gen_helper_abs_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ gen_helper_abs_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ABSS_H:
- gen_helper_abs_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ gen_helper_abs_h_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADD:
gen_add_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADD_B:
- gen_helper_add_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_add_b(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADD_H:
- gen_helper_add_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_add_h(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADDC:
gen_addc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
@@ -5636,15 +5639,15 @@
gen_adds(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADDS_H:
- gen_helper_add_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_add_h_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADDS_HU:
- gen_helper_add_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_add_h_suov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADDS_U:
- gen_helper_add_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_add_suov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_ADDX:
@@ -5862,10 +5865,10 @@
gen_sub_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SUB_B:
- gen_helper_sub_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_sub_b(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SUB_H:
- gen_helper_sub_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_sub_h(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SUBC:
gen_subc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
@@ -5877,11 +5880,11 @@
gen_subsu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SUBS_H:
- gen_helper_sub_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_sub_h_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SUBS_HU:
- gen_helper_sub_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_sub_h_suov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SUBX:
@@ -5971,7 +5974,7 @@
gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SHA:
- gen_helper_sha(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_sha(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_SHA_H:
gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
@@ -6255,34 +6258,55 @@
}
break;
case OPC2_32_RR_MUL_F:
- gen_helper_fmul(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_fmul(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_DIV_F:
- gen_helper_fdiv(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_fdiv(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_FTOHP:
+ if (has_feature(ctx, TRICORE_FEATURE_162)) {
+ gen_helper_ftohp(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_HPTOF:
+ if (has_feature(ctx, TRICORE_FEATURE_162)) {
+ gen_helper_hptof(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
break;
case OPC2_32_RR_CMP_F:
- gen_helper_fcmp(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ gen_helper_fcmp(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_FTOI:
- gen_helper_ftoi(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ gen_helper_ftoi(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
break;
case OPC2_32_RR_ITOF:
- gen_helper_itof(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ gen_helper_itof(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_FTOU:
+ gen_helper_ftou(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
break;
case OPC2_32_RR_FTOUZ:
- gen_helper_ftouz(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ if (has_feature(ctx, TRICORE_FEATURE_131)) {
+ gen_helper_ftouz(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
break;
case OPC2_32_RR_UPDFL:
- gen_helper_updfl(cpu_env, cpu_gpr_d[r1]);
+ gen_helper_updfl(tcg_env, cpu_gpr_d[r1]);
break;
case OPC2_32_RR_UTOF:
- gen_helper_utof(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ gen_helper_utof(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
break;
case OPC2_32_RR_FTOIZ:
- gen_helper_ftoiz(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ gen_helper_ftoiz(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
break;
case OPC2_32_RR_QSEED_F:
- gen_helper_qseed(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ gen_helper_qseed(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1]);
break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
@@ -6483,7 +6507,7 @@
cpu_gpr_d[r2]);
break;
case OPC2_32_RR2_MULS_32:
- gen_helper_mul_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_mul_ssov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
case OPC2_32_RR2_MUL_U_64:
@@ -6492,7 +6516,7 @@
cpu_gpr_d[r2]);
break;
case OPC2_32_RR2_MULS_U_32:
- gen_helper_mul_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ gen_helper_mul_suov(cpu_gpr_d[r3], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2]);
break;
default:
@@ -6554,7 +6578,10 @@
break;
case OPC2_32_RRPW_INSERT:
- if (pos + width <= 32) {
+ /* tcg_gen_deposit_tl() does not handle the case of width = 0 */
+ if (width == 0) {
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ } else if (pos + width <= 32) {
tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos, width);
}
@@ -6669,18 +6696,26 @@
gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
break;
+ case OPC2_32_RRR_CRCN:
+ if (has_feature(ctx, TRICORE_FEATURE_162)) {
+ gen_helper_crcn(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ cpu_gpr_d[r3]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
case OPC2_32_RRR_ADD_F:
- gen_helper_fadd(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
+ gen_helper_fadd(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
break;
case OPC2_32_RRR_SUB_F:
- gen_helper_fsub(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
+ gen_helper_fsub(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
break;
case OPC2_32_RRR_MADD_F:
- gen_helper_fmadd(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ gen_helper_fmadd(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r3]);
break;
case OPC2_32_RRR_MSUB_F:
- gen_helper_fmsub(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ gen_helper_fmsub(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r2], cpu_gpr_d[r3]);
break;
default:
@@ -6711,7 +6746,7 @@
cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_32:
- gen_helper_madd32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ gen_helper_madd32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_64:
@@ -6727,7 +6762,7 @@
cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_U_32:
- gen_helper_madd32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ gen_helper_madd32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MADDS_U_64:
@@ -6764,7 +6799,7 @@
cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_32:
- gen_helper_msub32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ gen_helper_msub32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_64:
@@ -6780,7 +6815,7 @@
cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_U_32:
- gen_helper_msub32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ gen_helper_msub32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1],
cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
case OPC2_32_RRR2_MSUBS_U_64:
@@ -7933,7 +7968,7 @@
gen_fret(ctx);
break;
case OPC2_32_SYS_RFE:
- gen_helper_rfe(cpu_env);
+ gen_helper_rfe(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
break;
case OPC2_32_SYS_RFM:
@@ -7941,10 +7976,10 @@
tmp = tcg_temp_new();
l1 = gen_new_label();
- tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
+ tcg_gen_ld32u_tl(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR));
tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
- gen_helper_rfm(cpu_env);
+ gen_helper_rfm(tcg_env);
gen_set_label(l1);
ctx->base.is_jmp = DISAS_EXIT;
} else {
@@ -7952,10 +7987,10 @@
}
break;
case OPC2_32_SYS_RSLCX:
- gen_helper_rslcx(cpu_env);
+ gen_helper_rslcx(tcg_env);
break;
case OPC2_32_SYS_SVLCX:
- gen_helper_svlcx(cpu_env);
+ gen_helper_svlcx(tcg_env);
break;
case OPC2_32_SYS_RESTORE:
if (has_feature(ctx, TRICORE_FEATURE_16)) {
@@ -8192,12 +8227,12 @@
temp2 = tcg_temp_new(); /* width*/
temp3 = tcg_temp_new(); /* pos */
- CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r2);
- tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
- tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_andi_tl(temp2, cpu_gpr_d[r2 + 1], 0x1f);
+ tcg_gen_andi_tl(temp3, cpu_gpr_d[r2], 0x1f);
- gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
+ gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, temp2, temp3);
break;
/* RCRW Format */
case OPCM_32_RCRW_MASK_INSERT:
@@ -8331,7 +8366,7 @@
CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUTriCoreState *env = cs->env_ptr;
+ CPUTriCoreState *env = cpu_env(cs);
ctx->mem_idx = cpu_mmu_index(env, false);
uint32_t tb_flags = (uint32_t)ctx->base.tb->flags;
@@ -8367,7 +8402,7 @@
* 4 bytes from the page boundary, so we cross the page if the first
* 16 bits indicate that this is a 32 bit insn.
*/
- uint16_t insn = cpu_lduw_code(env, ctx->base.pc_next);
+ uint16_t insn = translator_lduw(env, &ctx->base, ctx->base.pc_next);
return !tricore_insn_is_16bit(insn);
}
@@ -8376,18 +8411,19 @@
static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUTriCoreState *env = cpu->env_ptr;
+ CPUTriCoreState *env = cpu_env(cpu);
uint16_t insn_lo;
bool is_16bit;
- insn_lo = cpu_lduw_code(env, ctx->base.pc_next);
+ insn_lo = translator_lduw(env, &ctx->base, ctx->base.pc_next);
is_16bit = tricore_insn_is_16bit(insn_lo);
if (is_16bit) {
ctx->opcode = insn_lo;
ctx->pc_succ_insn = ctx->base.pc_next + 2;
decode_16Bit_opc(ctx);
} else {
- uint32_t insn_hi = cpu_lduw_code(env, ctx->base.pc_next + 2);
+ uint32_t insn_hi = translator_lduw(env, &ctx->base,
+ ctx->base.pc_next + 2);
ctx->opcode = insn_hi << 16 | insn_lo;
ctx->pc_succ_insn = ctx->base.pc_next + 4;
decode_32Bit_opc(ctx);
@@ -8470,13 +8506,13 @@
static void tricore_tcg_init_csfr(void)
{
- cpu_PCXI = tcg_global_mem_new(cpu_env,
+ cpu_PCXI = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PCXI), "PCXI");
- cpu_PSW = tcg_global_mem_new(cpu_env,
+ cpu_PSW = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PSW), "PSW");
- cpu_PC = tcg_global_mem_new(cpu_env,
+ cpu_PC = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PC), "PC");
- cpu_ICR = tcg_global_mem_new(cpu_env,
+ cpu_ICR = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, ICR), "ICR");
}
@@ -8486,30 +8522,30 @@
/* reg init */
for (i = 0 ; i < 16 ; i++) {
- cpu_gpr_a[i] = tcg_global_mem_new(cpu_env,
+ cpu_gpr_a[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, gpr_a[i]),
regnames_a[i]);
}
for (i = 0 ; i < 16 ; i++) {
- cpu_gpr_d[i] = tcg_global_mem_new(cpu_env,
+ cpu_gpr_d[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, gpr_d[i]),
regnames_d[i]);
}
tricore_tcg_init_csfr();
/* init PSW flag cache */
- cpu_PSW_C = tcg_global_mem_new(cpu_env,
+ cpu_PSW_C = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PSW_USB_C),
"PSW_C");
- cpu_PSW_V = tcg_global_mem_new(cpu_env,
+ cpu_PSW_V = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PSW_USB_V),
"PSW_V");
- cpu_PSW_SV = tcg_global_mem_new(cpu_env,
+ cpu_PSW_SV = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PSW_USB_SV),
"PSW_SV");
- cpu_PSW_AV = tcg_global_mem_new(cpu_env,
+ cpu_PSW_AV = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PSW_USB_AV),
"PSW_AV");
- cpu_PSW_SAV = tcg_global_mem_new(cpu_env,
+ cpu_PSW_SAV = tcg_global_mem_new(tcg_env,
offsetof(CPUTriCoreState, PSW_USB_SAV),
"PSW_SAV");
}
diff --git a/target/tricore/tricore-opcodes.h b/target/tricore/tricore-opcodes.h
index bc62b73..60d2402 100644
--- a/target/tricore/tricore-opcodes.h
+++ b/target/tricore/tricore-opcodes.h
@@ -1152,6 +1152,8 @@
OPC2_32_RR_ITOF = 0x14,
OPC2_32_RR_CMP_F = 0x00,
OPC2_32_RR_FTOIZ = 0x13,
+ OPC2_32_RR_FTOHP = 0x25, /* 1.6.2 only */
+ OPC2_32_RR_HPTOF = 0x24, /* 1.6.2 only */
OPC2_32_RR_FTOQ31 = 0x11,
OPC2_32_RR_FTOQ31Z = 0x18,
OPC2_32_RR_FTOU = 0x12,
@@ -1247,6 +1249,7 @@
OPC2_32_RRR_SUB_F = 0x03,
OPC2_32_RRR_MADD_F = 0x06,
OPC2_32_RRR_MSUB_F = 0x07,
+ OPC2_32_RRR_CRCN = 0x01, /* 1.6.2 up */
};
/*
* RRR1 Format
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index acaf8c9..ea1dae7 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -185,7 +185,6 @@
XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
CPUXtensaState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
env->config = xcc->config;
#ifndef CONFIG_USER_ONLY
@@ -273,6 +272,7 @@
.name = TYPE_XTENSA_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(XtensaCPU),
+ .instance_align = __alignof(XtensaCPU),
.instance_init = xtensa_cpu_initfn,
.abstract = true,
.class_size = sizeof(XtensaCPUClass),
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 87fe992..c6bbef1 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -560,9 +560,8 @@
CPUState parent_obj;
/*< public >*/
- Clock *clock;
- CPUNegativeOffsetState neg;
CPUXtensaState env;
+ Clock *clock;
};
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index b7386ff..54bee7d 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -154,49 +154,49 @@
};
int i;
- cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ cpu_pc = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState, pc), "pc");
for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_R[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState, regs[i]),
regnames[i]);
}
for (i = 0; i < 16; i++) {
- cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_FR[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState,
fregs[i].f32[FP_F32_LOW]),
fregnames[i]);
}
for (i = 0; i < 16; i++) {
- cpu_FRD[i] = tcg_global_mem_new_i64(cpu_env,
+ cpu_FRD[i] = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUXtensaState,
fregs[i].f64),
fregnames[i]);
}
for (i = 0; i < 4; i++) {
- cpu_MR[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_MR[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState,
sregs[MR + i]),
mregnames[i]);
}
for (i = 0; i < 16; i++) {
- cpu_BR[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_BR[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState,
sregs[BR]),
bregnames[i]);
if (i % 4 == 0) {
- cpu_BR4[i / 4] = tcg_global_mem_new_i32(cpu_env,
+ cpu_BR4[i / 4] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState,
sregs[BR]),
bregnames[i]);
}
if (i % 8 == 0) {
- cpu_BR8[i / 8] = tcg_global_mem_new_i32(cpu_env,
+ cpu_BR8[i / 8] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState,
sregs[BR]),
bregnames[i]);
@@ -205,7 +205,7 @@
for (i = 0; i < 256; ++i) {
if (sr_name[i]) {
- cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_SR[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState,
sregs[i]),
sr_name[i]);
@@ -214,7 +214,7 @@
for (i = 0; i < 256; ++i) {
if (ur_name[i]) {
- cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_UR[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState,
uregs[i]),
ur_name[i]);
@@ -222,15 +222,15 @@
}
cpu_windowbase_next =
- tcg_global_mem_new_i32(cpu_env,
+ tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState, windowbase_next),
"windowbase_next");
cpu_exclusive_addr =
- tcg_global_mem_new_i32(cpu_env,
+ tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState, exclusive_addr),
"exclusive_addr");
cpu_exclusive_val =
- tcg_global_mem_new_i32(cpu_env,
+ tcg_global_mem_new_i32(tcg_env,
offsetof(CPUXtensaState, exclusive_val),
"exclusive_val");
}
@@ -311,13 +311,13 @@
static void gen_exception(DisasContext *dc, int excp)
{
- gen_helper_exception(cpu_env, tcg_constant_i32(excp));
+ gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
TCGv_i32 pc = tcg_constant_i32(dc->pc);
- gen_helper_exception_cause(cpu_env, pc, tcg_constant_i32(cause));
+ gen_helper_exception_cause(tcg_env, pc, tcg_constant_i32(cause));
if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
cause == SYSCALL_CAUSE) {
dc->base.is_jmp = DISAS_NORETURN;
@@ -327,7 +327,7 @@
static void gen_debug_exception(DisasContext *dc, uint32_t cause)
{
TCGv_i32 pc = tcg_constant_i32(dc->pc);
- gen_helper_debug_exception(cpu_env, pc, tcg_constant_i32(cause));
+ gen_helper_debug_exception(tcg_env, pc, tcg_constant_i32(cause));
if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
dc->base.is_jmp = DISAS_NORETURN;
}
@@ -536,7 +536,7 @@
TCGv_i32 pc = tcg_constant_i32(dc->pc);
TCGv_i32 w = tcg_constant_i32(r / 4);
- gen_helper_window_check(cpu_env, pc, w);
+ gen_helper_window_check(tcg_env, pc, w);
dc->base.is_jmp = DISAS_NORETURN;
return false;
}
@@ -576,11 +576,11 @@
#ifndef CONFIG_USER_ONLY
if (op_flags & XTENSA_OP_CHECK_INTERRUPTS) {
translator_io_start(&dc->base);
- gen_helper_check_interrupts(cpu_env);
+ gen_helper_check_interrupts(tcg_env);
}
#endif
if (op_flags & XTENSA_OP_SYNC_REGISTER_WINDOW) {
- gen_helper_sync_windowbase(cpu_env);
+ gen_helper_sync_windowbase(tcg_env);
}
if (op_flags & XTENSA_OP_EXIT_TB_M1) {
slot = -1;
@@ -1042,13 +1042,13 @@
if (op_flags & XTENSA_OP_UNDERFLOW) {
TCGv_i32 pc = tcg_constant_i32(dc->pc);
- gen_helper_test_underflow_retw(cpu_env, pc);
+ gen_helper_test_underflow_retw(tcg_env, pc);
}
if (op_flags & XTENSA_OP_ALLOCA) {
TCGv_i32 pc = tcg_constant_i32(dc->pc);
- gen_helper_movsp(cpu_env, pc);
+ gen_helper_movsp(tcg_env, pc);
}
if (coprocessor && !gen_check_cpenable(dc, coprocessor)) {
@@ -1140,7 +1140,7 @@
CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUXtensaState *env = cpu->env_ptr;
+ CPUXtensaState *env = cpu_env(cpu);
uint32_t tb_flags = dc->base.tb->flags;
dc->config = env->config;
@@ -1180,7 +1180,7 @@
static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- CPUXtensaState *env = cpu->env_ptr;
+ CPUXtensaState *env = cpu_env(cpu);
target_ulong page_start;
/* These two conditions only apply to the first insn in the TB,
@@ -1589,7 +1589,7 @@
TCGv_i32 pc = tcg_constant_i32(dc->pc);
TCGv_i32 s = tcg_constant_i32(arg[0].imm);
TCGv_i32 imm = tcg_constant_i32(arg[1].imm);
- gen_helper_entry(cpu_env, pc, s, imm);
+ gen_helper_entry(tcg_env, pc, s, imm);
}
static void translate_extui(DisasContext *dc, const OpcodeArg arg[],
@@ -1620,7 +1620,7 @@
tcg_gen_movi_i32(cpu_pc, dc->pc);
tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
- gen_helper_itlb_hit_test(cpu_env, addr);
+ gen_helper_itlb_hit_test(tcg_env, addr);
#endif
}
@@ -1630,7 +1630,7 @@
#ifndef CONFIG_USER_ONLY
TCGv_i32 dtlb = tcg_constant_i32(par[0]);
- gen_helper_itlb(cpu_env, arg[0].in, dtlb);
+ gen_helper_itlb(tcg_env, arg[0].in, dtlb);
#endif
}
@@ -1667,7 +1667,7 @@
if (!option_enabled(dc, XTENSA_OPTION_MPU)) {
TCGv_i32 pc = tcg_constant_i32(dc->pc);
- gen_helper_check_exclusive(cpu_env, pc, addr,
+ gen_helper_check_exclusive(tcg_env, pc, addr,
tcg_constant_i32(is_write));
}
}
@@ -1959,7 +1959,7 @@
TCGv_i32 dtlb = tcg_constant_i32(par[0]);
tcg_gen_movi_i32(cpu_pc, dc->pc);
- gen_helper_ptlb(arg[0].out, cpu_env, arg[1].in, dtlb);
+ gen_helper_ptlb(arg[0].out, tcg_env, arg[1].in, dtlb);
#endif
}
@@ -1968,7 +1968,7 @@
{
#ifndef CONFIG_USER_ONLY
tcg_gen_movi_i32(cpu_pc, dc->pc);
- gen_helper_pptlb(arg[0].out, cpu_env, arg[1].in);
+ gen_helper_pptlb(arg[0].out, tcg_env, arg[1].in);
#endif
}
@@ -2020,7 +2020,7 @@
static void translate_rer(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_rer(arg[0].out, cpu_env, arg[1].in);
+ gen_helper_rer(arg[0].out, tcg_env, arg[1].in);
}
static void translate_ret(DisasContext *dc, const OpcodeArg arg[],
@@ -2039,7 +2039,7 @@
} else {
TCGv_i32 pc = tcg_constant_i32(dc->pc);
- gen_helper_test_ill_retw(cpu_env, pc);
+ gen_helper_test_ill_retw(tcg_env, pc);
return 0;
}
}
@@ -2053,7 +2053,7 @@
cpu_SR[WINDOW_START], tmp);
tcg_gen_movi_i32(tmp, dc->pc);
tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30);
- gen_helper_retw(cpu_env, cpu_R[0]);
+ gen_helper_retw(tcg_env, cpu_R[0]);
gen_jump(dc, tmp);
}
@@ -2093,7 +2093,7 @@
cpu_SR[WINDOW_START], tmp);
}
- gen_helper_restore_owb(cpu_env);
+ gen_helper_restore_owb(tcg_env);
gen_jump(dc, cpu_SR[EPC1]);
}
@@ -2126,7 +2126,7 @@
{
#ifndef CONFIG_USER_ONLY
translator_io_start(&dc->base);
- gen_helper_update_ccount(cpu_env);
+ gen_helper_update_ccount(tcg_env);
tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
#endif
}
@@ -2154,7 +2154,7 @@
};
TCGv_i32 dtlb = tcg_constant_i32(par[0]);
- helper[par[1]](arg[0].out, cpu_env, arg[1].in, dtlb);
+ helper[par[1]](arg[0].out, tcg_env, arg[1].in, dtlb);
#endif
}
@@ -2162,7 +2162,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_rptlb0(arg[0].out, cpu_env, arg[1].in);
+ gen_helper_rptlb0(arg[0].out, tcg_env, arg[1].in);
#endif
}
@@ -2170,7 +2170,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_rptlb1(arg[0].out, cpu_env, arg[1].in);
+ gen_helper_rptlb1(arg[0].out, tcg_env, arg[1].in);
#endif
}
@@ -2196,7 +2196,7 @@
{
TCGv_i32 pc = tcg_constant_i32(dc->pc);
- gen_helper_check_atomctl(cpu_env, pc, addr);
+ gen_helper_check_atomctl(tcg_env, pc, addr);
}
#endif
@@ -2297,7 +2297,7 @@
{
#ifndef CONFIG_USER_ONLY
if (semihosting_enabled(dc->cring != 0)) {
- gen_helper_simcall(cpu_env);
+ gen_helper_simcall(tcg_env);
}
#endif
}
@@ -2442,7 +2442,7 @@
TCGv_i32 pc = tcg_constant_i32(dc->base.pc_next);
translator_io_start(&dc->base);
- gen_helper_waiti(cpu_env, pc, tcg_constant_i32(arg[0].imm));
+ gen_helper_waiti(tcg_env, pc, tcg_constant_i32(arg[0].imm));
#endif
}
@@ -2452,7 +2452,7 @@
#ifndef CONFIG_USER_ONLY
TCGv_i32 dtlb = tcg_constant_i32(par[0]);
- gen_helper_wtlb(cpu_env, arg[0].in, arg[1].in, dtlb);
+ gen_helper_wtlb(tcg_env, arg[0].in, arg[1].in, dtlb);
#endif
}
@@ -2460,14 +2460,14 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_wptlb(cpu_env, arg[0].in, arg[1].in);
+ gen_helper_wptlb(tcg_env, arg[0].in, arg[1].in);
#endif
}
static void translate_wer(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_wer(cpu_env, arg[0].in, arg[1].in);
+ gen_helper_wer(tcg_env, arg[0].in, arg[1].in);
}
static void translate_wrmsk_expstate(DisasContext *dc, const OpcodeArg arg[],
@@ -2508,7 +2508,7 @@
assert(id < dc->config->nccompare);
translator_io_start(&dc->base);
tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
- gen_helper_update_ccompare(cpu_env, tcg_constant_i32(id));
+ gen_helper_update_ccompare(tcg_env, tcg_constant_i32(id));
#endif
}
@@ -2517,7 +2517,7 @@
{
#ifndef CONFIG_USER_ONLY
translator_io_start(&dc->base);
- gen_helper_wsr_ccount(cpu_env, arg[0].in);
+ gen_helper_wsr_ccount(tcg_env, arg[0].in);
#endif
}
@@ -2528,7 +2528,7 @@
unsigned id = par[0] - DBREAKA;
assert(id < dc->config->ndbreak);
- gen_helper_wsr_dbreaka(cpu_env, tcg_constant_i32(id), arg[0].in);
+ gen_helper_wsr_dbreaka(tcg_env, tcg_constant_i32(id), arg[0].in);
#endif
}
@@ -2539,7 +2539,7 @@
unsigned id = par[0] - DBREAKC;
assert(id < dc->config->ndbreak);
- gen_helper_wsr_dbreakc(cpu_env, tcg_constant_i32(id), arg[0].in);
+ gen_helper_wsr_dbreakc(tcg_env, tcg_constant_i32(id), arg[0].in);
#endif
}
@@ -2550,7 +2550,7 @@
unsigned id = par[0] - IBREAKA;
assert(id < dc->config->nibreak);
- gen_helper_wsr_ibreaka(cpu_env, tcg_constant_i32(id), arg[0].in);
+ gen_helper_wsr_ibreaka(tcg_env, tcg_constant_i32(id), arg[0].in);
#endif
}
@@ -2558,7 +2558,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_wsr_ibreakenable(cpu_env, arg[0].in);
+ gen_helper_wsr_ibreakenable(tcg_env, arg[0].in);
#endif
}
@@ -2578,7 +2578,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_intclear(cpu_env, arg[0].in);
+ gen_helper_intclear(tcg_env, arg[0].in);
#endif
}
@@ -2586,7 +2586,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_intset(cpu_env, arg[0].in);
+ gen_helper_intset(tcg_env, arg[0].in);
#endif
}
@@ -2594,7 +2594,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_wsr_memctl(cpu_env, arg[0].in);
+ gen_helper_wsr_memctl(tcg_env, arg[0].in);
#endif
}
@@ -2602,7 +2602,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_wsr_mpuenb(cpu_env, arg[0].in);
+ gen_helper_wsr_mpuenb(tcg_env, arg[0].in);
#endif
}
@@ -2625,7 +2625,7 @@
const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
- gen_helper_wsr_rasid(cpu_env, arg[0].in);
+ gen_helper_wsr_rasid(tcg_env, arg[0].in);
#endif
}
@@ -2704,9 +2704,9 @@
TCGv_i32 tmp = tcg_temp_new_i32();
translator_io_start(&dc->base);
- gen_helper_update_ccount(cpu_env);
+ gen_helper_update_ccount(tcg_env);
tcg_gen_mov_i32(tmp, cpu_SR[par[0]]);
- gen_helper_wsr_ccount(cpu_env, arg[0].in);
+ gen_helper_wsr_ccount(tcg_env, arg[0].in);
tcg_gen_mov_i32(arg[0].out, tmp);
#endif
@@ -6295,7 +6295,7 @@
static void translate_fpu2k_add_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_fpu2k_add_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_add_s(arg[0].out, tcg_env,
arg[1].in, arg[2].in);
}
@@ -6330,7 +6330,7 @@
tcg_gen_ori_i32(set_br, arg[0].in, 1 << arg[0].imm);
tcg_gen_andi_i32(clr_br, arg[0].in, ~(1 << arg[0].imm));
- helper[par[0]](res, cpu_env, arg[1].in, arg[2].in);
+ helper[par[0]](res, tcg_env, arg[1].in, arg[2].in);
tcg_gen_movcond_i32(TCG_COND_NE,
arg[0].out, res, zero,
set_br, clr_br);
@@ -6359,7 +6359,7 @@
tcg_gen_andi_i32(clr_br, arg[0].in, ~(1 << arg[0].imm));
get_f32_i2(arg, arg32, 1, 2);
- helper[par[0]](res, cpu_env, arg32[1].in, arg32[2].in);
+ helper[par[0]](res, tcg_env, arg32[1].in, arg32[2].in);
tcg_gen_movcond_i32(TCG_COND_NE,
arg[0].out, res, zero,
set_br, clr_br);
@@ -6412,9 +6412,9 @@
TCGv_i32 scale = tcg_constant_i32(-arg[2].imm);
if (par[0]) {
- gen_helper_uitof_d(arg[0].out, cpu_env, arg[1].in, scale);
+ gen_helper_uitof_d(arg[0].out, tcg_env, arg[1].in, scale);
} else {
- gen_helper_itof_d(arg[0].out, cpu_env, arg[1].in, scale);
+ gen_helper_itof_d(arg[0].out, tcg_env, arg[1].in, scale);
}
}
@@ -6426,9 +6426,9 @@
get_f32_o1(arg, arg32, 0);
if (par[0]) {
- gen_helper_uitof_s(arg32[0].out, cpu_env, arg[1].in, scale);
+ gen_helper_uitof_s(arg32[0].out, tcg_env, arg[1].in, scale);
} else {
- gen_helper_itof_s(arg32[0].out, cpu_env, arg[1].in, scale);
+ gen_helper_itof_s(arg32[0].out, tcg_env, arg[1].in, scale);
}
put_f32_o1(arg, arg32, 0);
}
@@ -6440,10 +6440,10 @@
TCGv_i32 scale = tcg_constant_i32(arg[2].imm);
if (par[1]) {
- gen_helper_ftoui_d(arg[0].out, cpu_env, arg[1].in,
+ gen_helper_ftoui_d(arg[0].out, tcg_env, arg[1].in,
rounding_mode, scale);
} else {
- gen_helper_ftoi_d(arg[0].out, cpu_env, arg[1].in,
+ gen_helper_ftoi_d(arg[0].out, tcg_env, arg[1].in,
rounding_mode, scale);
}
}
@@ -6457,10 +6457,10 @@
get_f32_i1(arg, arg32, 1);
if (par[1]) {
- gen_helper_ftoui_s(arg[0].out, cpu_env, arg32[1].in,
+ gen_helper_ftoui_s(arg[0].out, tcg_env, arg32[1].in,
rounding_mode, scale);
} else {
- gen_helper_ftoi_s(arg[0].out, cpu_env, arg32[1].in,
+ gen_helper_ftoi_s(arg[0].out, tcg_env, arg32[1].in,
rounding_mode, scale);
}
put_f32_i1(arg, arg32, 1);
@@ -6505,7 +6505,7 @@
static void translate_fpu2k_madd_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_fpu2k_madd_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_madd_s(arg[0].out, tcg_env,
arg[0].in, arg[1].in, arg[2].in);
}
@@ -6584,14 +6584,14 @@
static void translate_fpu2k_mul_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_fpu2k_mul_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_mul_s(arg[0].out, tcg_env,
arg[1].in, arg[2].in);
}
static void translate_fpu2k_msub_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_fpu2k_msub_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_msub_s(arg[0].out, tcg_env,
arg[0].in, arg[1].in, arg[2].in);
}
@@ -6630,7 +6630,7 @@
static void translate_fpu2k_sub_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_fpu2k_sub_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_sub_s(arg[0].out, tcg_env,
arg[1].in, arg[2].in);
}
@@ -6653,7 +6653,7 @@
static void translate_wur_fpu2k_fcr(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_wur_fpu2k_fcr(cpu_env, arg[0].in);
+ gen_helper_wur_fpu2k_fcr(tcg_env, arg[0].in);
}
static void translate_wur_fpu2k_fsr(DisasContext *dc, const OpcodeArg arg[],
@@ -6882,20 +6882,20 @@
static void translate_add_d(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_add_d(arg[0].out, cpu_env, arg[1].in, arg[2].in);
+ gen_helper_add_d(arg[0].out, tcg_env, arg[1].in, arg[2].in);
}
static void translate_add_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
- gen_helper_fpu2k_add_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_add_s(arg[0].out, tcg_env,
arg[1].in, arg[2].in);
} else {
OpcodeArg arg32[3];
get_f32_o1_i2(arg, arg32, 0, 1, 2);
- gen_helper_add_s(arg32[0].out, cpu_env, arg32[1].in, arg32[2].in);
+ gen_helper_add_s(arg32[0].out, tcg_env, arg32[1].in, arg32[2].in);
put_f32_o1_i2(arg, arg32, 0, 1, 2);
}
}
@@ -6906,7 +6906,7 @@
TCGv_i32 v = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(v, arg[1].in);
- gen_helper_cvtd_s(arg[0].out, cpu_env, v);
+ gen_helper_cvtd_s(arg[0].out, tcg_env, v);
}
static void translate_cvts_d(DisasContext *dc, const OpcodeArg arg[],
@@ -6914,7 +6914,7 @@
{
TCGv_i32 v = tcg_temp_new_i32();
- gen_helper_cvts_d(v, cpu_env, arg[1].in);
+ gen_helper_cvts_d(v, tcg_env, arg[1].in);
tcg_gen_extu_i32_i64(arg[0].out, v);
}
@@ -7039,7 +7039,7 @@
static void translate_madd_d(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_madd_d(arg[0].out, cpu_env,
+ gen_helper_madd_d(arg[0].out, tcg_env,
arg[0].in, arg[1].in, arg[2].in);
}
@@ -7047,13 +7047,13 @@
const uint32_t par[])
{
if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
- gen_helper_fpu2k_madd_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_madd_s(arg[0].out, tcg_env,
arg[0].in, arg[1].in, arg[2].in);
} else {
OpcodeArg arg32[3];
get_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
- gen_helper_madd_s(arg32[0].out, cpu_env,
+ gen_helper_madd_s(arg32[0].out, tcg_env,
arg32[0].in, arg32[1].in, arg32[2].in);
put_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
}
@@ -7062,20 +7062,20 @@
static void translate_mul_d(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_mul_d(arg[0].out, cpu_env, arg[1].in, arg[2].in);
+ gen_helper_mul_d(arg[0].out, tcg_env, arg[1].in, arg[2].in);
}
static void translate_mul_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
- gen_helper_fpu2k_mul_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_mul_s(arg[0].out, tcg_env,
arg[1].in, arg[2].in);
} else {
OpcodeArg arg32[3];
get_f32_o1_i2(arg, arg32, 0, 1, 2);
- gen_helper_mul_s(arg32[0].out, cpu_env, arg32[1].in, arg32[2].in);
+ gen_helper_mul_s(arg32[0].out, tcg_env, arg32[1].in, arg32[2].in);
put_f32_o1_i2(arg, arg32, 0, 1, 2);
}
}
@@ -7083,7 +7083,7 @@
static void translate_msub_d(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_msub_d(arg[0].out, cpu_env,
+ gen_helper_msub_d(arg[0].out, tcg_env,
arg[0].in, arg[1].in, arg[2].in);
}
@@ -7091,13 +7091,13 @@
const uint32_t par[])
{
if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
- gen_helper_fpu2k_msub_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_msub_s(arg[0].out, tcg_env,
arg[0].in, arg[1].in, arg[2].in);
} else {
OpcodeArg arg32[3];
get_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
- gen_helper_msub_s(arg32[0].out, cpu_env,
+ gen_helper_msub_s(arg32[0].out, tcg_env,
arg32[0].in, arg32[1].in, arg32[2].in);
put_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
}
@@ -7106,20 +7106,20 @@
static void translate_sub_d(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_sub_d(arg[0].out, cpu_env, arg[1].in, arg[2].in);
+ gen_helper_sub_d(arg[0].out, tcg_env, arg[1].in, arg[2].in);
}
static void translate_sub_s(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
- gen_helper_fpu2k_sub_s(arg[0].out, cpu_env,
+ gen_helper_fpu2k_sub_s(arg[0].out, tcg_env,
arg[1].in, arg[2].in);
} else {
OpcodeArg arg32[3];
get_f32_o1_i2(arg, arg32, 0, 1, 2);
- gen_helper_sub_s(arg32[0].out, cpu_env, arg32[1].in, arg32[2].in);
+ gen_helper_sub_s(arg32[0].out, tcg_env, arg32[1].in, arg32[2].in);
put_f32_o1_i2(arg, arg32, 0, 1, 2);
}
}
@@ -7127,7 +7127,7 @@
static void translate_mkdadj_d(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_mkdadj_d(arg[0].out, cpu_env, arg[0].in, arg[1].in);
+ gen_helper_mkdadj_d(arg[0].out, tcg_env, arg[0].in, arg[1].in);
}
static void translate_mkdadj_s(DisasContext *dc, const OpcodeArg arg[],
@@ -7136,14 +7136,14 @@
OpcodeArg arg32[2];
get_f32_o1_i2(arg, arg32, 0, 0, 1);
- gen_helper_mkdadj_s(arg32[0].out, cpu_env, arg32[0].in, arg32[1].in);
+ gen_helper_mkdadj_s(arg32[0].out, tcg_env, arg32[0].in, arg32[1].in);
put_f32_o1_i2(arg, arg32, 0, 0, 1);
}
static void translate_mksadj_d(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_mksadj_d(arg[0].out, cpu_env, arg[1].in);
+ gen_helper_mksadj_d(arg[0].out, tcg_env, arg[1].in);
}
static void translate_mksadj_s(DisasContext *dc, const OpcodeArg arg[],
@@ -7152,26 +7152,26 @@
OpcodeArg arg32[2];
get_f32_o1_i1(arg, arg32, 0, 1);
- gen_helper_mksadj_s(arg32[0].out, cpu_env, arg32[1].in);
+ gen_helper_mksadj_s(arg32[0].out, tcg_env, arg32[1].in);
put_f32_o1_i1(arg, arg32, 0, 1);
}
static void translate_wur_fpu_fcr(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_wur_fpu_fcr(cpu_env, arg[0].in);
+ gen_helper_wur_fpu_fcr(tcg_env, arg[0].in);
}
static void translate_rur_fpu_fsr(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_rur_fpu_fsr(arg[0].out, cpu_env);
+ gen_helper_rur_fpu_fsr(arg[0].out, tcg_env);
}
static void translate_wur_fpu_fsr(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- gen_helper_wur_fpu_fsr(cpu_env, arg[0].in);
+ gen_helper_wur_fpu_fsr(tcg_env, arg[0].in);
}
static const XtensaOpcodeOps fpu_ops[] = {
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 06ea3c7..69f2daf 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1679,7 +1679,7 @@
mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
? TCG_TYPE_I64 : TCG_TYPE_I32);
- /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
+ /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index b1d5636..a2f6010 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1420,7 +1420,7 @@
ldst->addrlo_reg = addrlo;
ldst->addrhi_reg = addrhi;
- /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
+ /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index b701df5..8f70910 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1852,43 +1852,45 @@
tcg_out_opc_vnor_v(s, a0, a1, a1);
break;
case INDEX_op_cmp_vec:
- TCGCond cond = args[3];
- if (const_args[2]) {
- /*
- * cmp_vec dest, src, value
- * Try vseqi/vslei/vslti
- */
- int64_t value = sextract64(a2, 0, 8 << vece);
- if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \
- cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
- tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \
- a0, a1, value));
- break;
- } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
- (0x00 <= value && value <= 0x1f)) {
- tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \
- a0, a1, value));
- break;
+ {
+ TCGCond cond = args[3];
+ if (const_args[2]) {
+ /*
+ * cmp_vec dest, src, value
+ * Try vseqi/vslei/vslti
+ */
+ int64_t value = sextract64(a2, 0, 8 << vece);
+ if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \
+ cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
+ tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \
+ a0, a1, value));
+ break;
+ } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
+ (0x00 <= value && value <= 0x1f)) {
+ tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \
+ a0, a1, value));
+ break;
+ }
+
+ /*
+ * Fallback to:
+ * dupi_vec temp, a2
+ * cmp_vec a0, a1, temp, cond
+ */
+ tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
+ a2 = temp_vec;
}
- /*
- * Fallback to:
- * dupi_vec temp, a2
- * cmp_vec a0, a1, temp, cond
- */
- tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
- a2 = temp_vec;
- }
-
- insn = cmp_vec_insn[cond][vece];
- if (insn == 0) {
- TCGArg t;
- t = a1, a1 = a2, a2 = t;
- cond = tcg_swap_cond(cond);
insn = cmp_vec_insn[cond][vece];
- tcg_debug_assert(insn != 0);
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_vec_insn[cond][vece];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
}
- tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
break;
case INDEX_op_add_vec:
tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
diff --git a/tcg/meson.build b/tcg/meson.build
index 0014dca..4be4a61 100644
--- a/tcg/meson.build
+++ b/tcg/meson.build
@@ -28,7 +28,7 @@
tcg_ss.sources() + genh,
name_suffix: 'fa',
c_args: '-DCONFIG_USER_ONLY',
- build_by_default: have_user)
+ build_by_default: false)
tcg_user = declare_dependency(link_with: libtcg_user,
dependencies: tcg_ss.dependencies())
@@ -38,7 +38,7 @@
tcg_ss.sources() + genh,
name_suffix: 'fa',
c_args: '-DCONFIG_SOFTMMU',
- build_by_default: have_system)
+ build_by_default: false)
tcg_softmmu = declare_dependency(link_with: libtcg_softmmu,
dependencies: tcg_ss.dependencies())
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 41b1ae1..feb2d36 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -120,8 +120,8 @@
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
fn(a0, a1, desc);
@@ -141,8 +141,8 @@
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
fn(a0, a1, c, desc);
@@ -162,9 +162,9 @@
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
- tcg_gen_addi_ptr(a2, cpu_env, bofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a2, tcg_env, bofs);
fn(a0, a1, a2, desc);
@@ -186,10 +186,10 @@
a2 = tcg_temp_ebb_new_ptr();
a3 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
- tcg_gen_addi_ptr(a2, cpu_env, bofs);
- tcg_gen_addi_ptr(a3, cpu_env, cofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a2, tcg_env, bofs);
+ tcg_gen_addi_ptr(a3, tcg_env, cofs);
fn(a0, a1, a2, a3, desc);
@@ -213,11 +213,11 @@
a3 = tcg_temp_ebb_new_ptr();
a4 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
- tcg_gen_addi_ptr(a2, cpu_env, bofs);
- tcg_gen_addi_ptr(a3, cpu_env, cofs);
- tcg_gen_addi_ptr(a4, cpu_env, xofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a2, tcg_env, bofs);
+ tcg_gen_addi_ptr(a3, tcg_env, cofs);
+ tcg_gen_addi_ptr(a4, tcg_env, xofs);
fn(a0, a1, a2, a3, a4, desc);
@@ -240,8 +240,8 @@
a0 = tcg_temp_ebb_new_ptr();
a1 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
fn(a0, a1, ptr, desc);
@@ -262,9 +262,9 @@
a1 = tcg_temp_ebb_new_ptr();
a2 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
- tcg_gen_addi_ptr(a2, cpu_env, bofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a2, tcg_env, bofs);
fn(a0, a1, a2, ptr, desc);
@@ -288,10 +288,10 @@
a2 = tcg_temp_ebb_new_ptr();
a3 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
- tcg_gen_addi_ptr(a2, cpu_env, bofs);
- tcg_gen_addi_ptr(a3, cpu_env, cofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a2, tcg_env, bofs);
+ tcg_gen_addi_ptr(a3, tcg_env, cofs);
fn(a0, a1, a2, a3, ptr, desc);
@@ -317,11 +317,11 @@
a3 = tcg_temp_ebb_new_ptr();
a4 = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
- tcg_gen_addi_ptr(a2, cpu_env, bofs);
- tcg_gen_addi_ptr(a3, cpu_env, cofs);
- tcg_gen_addi_ptr(a4, cpu_env, eofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
+ tcg_gen_addi_ptr(a2, tcg_env, bofs);
+ tcg_gen_addi_ptr(a3, tcg_env, cofs);
+ tcg_gen_addi_ptr(a4, tcg_env, eofs);
fn(a0, a1, a2, a3, a4, ptr, desc);
@@ -482,7 +482,7 @@
* are misaligned wrt the maximum vector size, so do that first.
*/
if (dofs & 8) {
- tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
+ tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
i += 8;
}
@@ -494,17 +494,17 @@
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
for (; i + 32 <= oprsz; i += 32) {
- tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
+ tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V256);
}
/* fallthru */
case TCG_TYPE_V128:
for (; i + 16 <= oprsz; i += 16) {
- tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
+ tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V128);
}
break;
case TCG_TYPE_V64:
for (; i < oprsz; i += 8) {
- tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
+ tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64);
}
break;
default:
@@ -605,14 +605,14 @@
/* Implement inline if we picked an implementation size above. */
if (t_32) {
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_st_i32(t_32, cpu_env, dofs + i);
+ tcg_gen_st_i32(t_32, tcg_env, dofs + i);
}
tcg_temp_free_i32(t_32);
goto done;
}
if (t_64) {
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_st_i64(t_64, cpu_env, dofs + i);
+ tcg_gen_st_i64(t_64, tcg_env, dofs + i);
}
tcg_temp_free_i64(t_64);
goto done;
@@ -621,7 +621,7 @@
/* Otherwise implement out of line. */
t_ptr = tcg_temp_ebb_new_ptr();
- tcg_gen_addi_ptr(t_ptr, cpu_env, dofs);
+ tcg_gen_addi_ptr(t_ptr, tcg_env, dofs);
/*
* This may be expand_clr for the tail of an operation, e.g.
@@ -709,12 +709,12 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i32(t0, tcg_env, aofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t1, cpu_env, dofs + i);
+ tcg_gen_ld_i32(t1, tcg_env, dofs + i);
}
fni(t1, t0);
- tcg_gen_st_i32(t1, cpu_env, dofs + i);
+ tcg_gen_st_i32(t1, tcg_env, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
@@ -729,12 +729,12 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i32(t0, tcg_env, aofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t1, cpu_env, dofs + i);
+ tcg_gen_ld_i32(t1, tcg_env, dofs + i);
}
fni(t1, t0, c);
- tcg_gen_st_i32(t1, cpu_env, dofs + i);
+ tcg_gen_st_i32(t1, tcg_env, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
@@ -749,13 +749,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i32(t0, tcg_env, aofs + i);
if (scalar_first) {
fni(t1, c, t0);
} else {
fni(t1, t0, c);
}
- tcg_gen_st_i32(t1, cpu_env, dofs + i);
+ tcg_gen_st_i32(t1, tcg_env, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
@@ -772,13 +772,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, cpu_env, aofs + i);
- tcg_gen_ld_i32(t1, cpu_env, bofs + i);
+ tcg_gen_ld_i32(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i32(t1, tcg_env, bofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t2, cpu_env, dofs + i);
+ tcg_gen_ld_i32(t2, tcg_env, dofs + i);
}
fni(t2, t0, t1);
- tcg_gen_st_i32(t2, cpu_env, dofs + i);
+ tcg_gen_st_i32(t2, tcg_env, dofs + i);
}
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t1);
@@ -795,13 +795,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, cpu_env, aofs + i);
- tcg_gen_ld_i32(t1, cpu_env, bofs + i);
+ tcg_gen_ld_i32(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i32(t1, tcg_env, bofs + i);
if (load_dest) {
- tcg_gen_ld_i32(t2, cpu_env, dofs + i);
+ tcg_gen_ld_i32(t2, tcg_env, dofs + i);
}
fni(t2, t0, t1, c);
- tcg_gen_st_i32(t2, cpu_env, dofs + i);
+ tcg_gen_st_i32(t2, tcg_env, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
@@ -820,13 +820,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t1, cpu_env, aofs + i);
- tcg_gen_ld_i32(t2, cpu_env, bofs + i);
- tcg_gen_ld_i32(t3, cpu_env, cofs + i);
+ tcg_gen_ld_i32(t1, tcg_env, aofs + i);
+ tcg_gen_ld_i32(t2, tcg_env, bofs + i);
+ tcg_gen_ld_i32(t3, tcg_env, cofs + i);
fni(t0, t1, t2, t3);
- tcg_gen_st_i32(t0, cpu_env, dofs + i);
+ tcg_gen_st_i32(t0, tcg_env, dofs + i);
if (write_aofs) {
- tcg_gen_st_i32(t1, cpu_env, aofs + i);
+ tcg_gen_st_i32(t1, tcg_env, aofs + i);
}
}
tcg_temp_free_i32(t3);
@@ -847,11 +847,11 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t1, cpu_env, aofs + i);
- tcg_gen_ld_i32(t2, cpu_env, bofs + i);
- tcg_gen_ld_i32(t3, cpu_env, cofs + i);
+ tcg_gen_ld_i32(t1, tcg_env, aofs + i);
+ tcg_gen_ld_i32(t2, tcg_env, bofs + i);
+ tcg_gen_ld_i32(t3, tcg_env, cofs + i);
fni(t0, t1, t2, t3, c);
- tcg_gen_st_i32(t0, cpu_env, dofs + i);
+ tcg_gen_st_i32(t0, tcg_env, dofs + i);
}
tcg_temp_free_i32(t3);
tcg_temp_free_i32(t2);
@@ -868,12 +868,12 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i64(t0, tcg_env, aofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t1, cpu_env, dofs + i);
+ tcg_gen_ld_i64(t1, tcg_env, dofs + i);
}
fni(t1, t0);
- tcg_gen_st_i64(t1, cpu_env, dofs + i);
+ tcg_gen_st_i64(t1, tcg_env, dofs + i);
}
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -888,12 +888,12 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i64(t0, tcg_env, aofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t1, cpu_env, dofs + i);
+ tcg_gen_ld_i64(t1, tcg_env, dofs + i);
}
fni(t1, t0, c);
- tcg_gen_st_i64(t1, cpu_env, dofs + i);
+ tcg_gen_st_i64(t1, tcg_env, dofs + i);
}
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -908,13 +908,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i64(t0, tcg_env, aofs + i);
if (scalar_first) {
fni(t1, c, t0);
} else {
fni(t1, t0, c);
}
- tcg_gen_st_i64(t1, cpu_env, dofs + i);
+ tcg_gen_st_i64(t1, tcg_env, dofs + i);
}
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -931,13 +931,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, aofs + i);
- tcg_gen_ld_i64(t1, cpu_env, bofs + i);
+ tcg_gen_ld_i64(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i64(t1, tcg_env, bofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t2, cpu_env, dofs + i);
+ tcg_gen_ld_i64(t2, tcg_env, dofs + i);
}
fni(t2, t0, t1);
- tcg_gen_st_i64(t2, cpu_env, dofs + i);
+ tcg_gen_st_i64(t2, tcg_env, dofs + i);
}
tcg_temp_free_i64(t2);
tcg_temp_free_i64(t1);
@@ -954,13 +954,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, aofs + i);
- tcg_gen_ld_i64(t1, cpu_env, bofs + i);
+ tcg_gen_ld_i64(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i64(t1, tcg_env, bofs + i);
if (load_dest) {
- tcg_gen_ld_i64(t2, cpu_env, dofs + i);
+ tcg_gen_ld_i64(t2, tcg_env, dofs + i);
}
fni(t2, t0, t1, c);
- tcg_gen_st_i64(t2, cpu_env, dofs + i);
+ tcg_gen_st_i64(t2, tcg_env, dofs + i);
}
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
@@ -979,13 +979,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t1, cpu_env, aofs + i);
- tcg_gen_ld_i64(t2, cpu_env, bofs + i);
- tcg_gen_ld_i64(t3, cpu_env, cofs + i);
+ tcg_gen_ld_i64(t1, tcg_env, aofs + i);
+ tcg_gen_ld_i64(t2, tcg_env, bofs + i);
+ tcg_gen_ld_i64(t3, tcg_env, cofs + i);
fni(t0, t1, t2, t3);
- tcg_gen_st_i64(t0, cpu_env, dofs + i);
+ tcg_gen_st_i64(t0, tcg_env, dofs + i);
if (write_aofs) {
- tcg_gen_st_i64(t1, cpu_env, aofs + i);
+ tcg_gen_st_i64(t1, tcg_env, aofs + i);
}
}
tcg_temp_free_i64(t3);
@@ -1006,11 +1006,11 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t1, cpu_env, aofs + i);
- tcg_gen_ld_i64(t2, cpu_env, bofs + i);
- tcg_gen_ld_i64(t3, cpu_env, cofs + i);
+ tcg_gen_ld_i64(t1, tcg_env, aofs + i);
+ tcg_gen_ld_i64(t2, tcg_env, bofs + i);
+ tcg_gen_ld_i64(t3, tcg_env, cofs + i);
fni(t0, t1, t2, t3, c);
- tcg_gen_st_i64(t0, cpu_env, dofs + i);
+ tcg_gen_st_i64(t0, tcg_env, dofs + i);
}
tcg_temp_free_i64(t3);
tcg_temp_free_i64(t2);
@@ -1029,12 +1029,12 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t0, cpu_env, aofs + i);
+ tcg_gen_ld_vec(t0, tcg_env, aofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t1, cpu_env, dofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, dofs + i);
}
fni(vece, t1, t0);
- tcg_gen_st_vec(t1, cpu_env, dofs + i);
+ tcg_gen_st_vec(t1, tcg_env, dofs + i);
}
tcg_temp_free_vec(t0);
tcg_temp_free_vec(t1);
@@ -1052,12 +1052,12 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t0, cpu_env, aofs + i);
+ tcg_gen_ld_vec(t0, tcg_env, aofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t1, cpu_env, dofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, dofs + i);
}
fni(vece, t1, t0, c);
- tcg_gen_st_vec(t1, cpu_env, dofs + i);
+ tcg_gen_st_vec(t1, tcg_env, dofs + i);
}
tcg_temp_free_vec(t0);
tcg_temp_free_vec(t1);
@@ -1073,13 +1073,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t0, cpu_env, aofs + i);
+ tcg_gen_ld_vec(t0, tcg_env, aofs + i);
if (scalar_first) {
fni(vece, t1, c, t0);
} else {
fni(vece, t1, t0, c);
}
- tcg_gen_st_vec(t1, cpu_env, dofs + i);
+ tcg_gen_st_vec(t1, tcg_env, dofs + i);
}
tcg_temp_free_vec(t0);
tcg_temp_free_vec(t1);
@@ -1097,13 +1097,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t0, cpu_env, aofs + i);
- tcg_gen_ld_vec(t1, cpu_env, bofs + i);
+ tcg_gen_ld_vec(t0, tcg_env, aofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, bofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t2, cpu_env, dofs + i);
+ tcg_gen_ld_vec(t2, tcg_env, dofs + i);
}
fni(vece, t2, t0, t1);
- tcg_gen_st_vec(t2, cpu_env, dofs + i);
+ tcg_gen_st_vec(t2, tcg_env, dofs + i);
}
tcg_temp_free_vec(t2);
tcg_temp_free_vec(t1);
@@ -1126,13 +1126,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t0, cpu_env, aofs + i);
- tcg_gen_ld_vec(t1, cpu_env, bofs + i);
+ tcg_gen_ld_vec(t0, tcg_env, aofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, bofs + i);
if (load_dest) {
- tcg_gen_ld_vec(t2, cpu_env, dofs + i);
+ tcg_gen_ld_vec(t2, tcg_env, dofs + i);
}
fni(vece, t2, t0, t1, c);
- tcg_gen_st_vec(t2, cpu_env, dofs + i);
+ tcg_gen_st_vec(t2, tcg_env, dofs + i);
}
tcg_temp_free_vec(t0);
tcg_temp_free_vec(t1);
@@ -1153,13 +1153,13 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t1, cpu_env, aofs + i);
- tcg_gen_ld_vec(t2, cpu_env, bofs + i);
- tcg_gen_ld_vec(t3, cpu_env, cofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, aofs + i);
+ tcg_gen_ld_vec(t2, tcg_env, bofs + i);
+ tcg_gen_ld_vec(t3, tcg_env, cofs + i);
fni(vece, t0, t1, t2, t3);
- tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ tcg_gen_st_vec(t0, tcg_env, dofs + i);
if (write_aofs) {
- tcg_gen_st_vec(t1, cpu_env, aofs + i);
+ tcg_gen_st_vec(t1, tcg_env, aofs + i);
}
}
tcg_temp_free_vec(t3);
@@ -1185,11 +1185,11 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t1, cpu_env, aofs + i);
- tcg_gen_ld_vec(t2, cpu_env, bofs + i);
- tcg_gen_ld_vec(t3, cpu_env, cofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, aofs + i);
+ tcg_gen_ld_vec(t2, tcg_env, bofs + i);
+ tcg_gen_ld_vec(t3, tcg_env, cofs + i);
fni(vece, t0, t1, t2, t3, c);
- tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ tcg_gen_st_vec(t0, tcg_env, dofs + i);
}
tcg_temp_free_vec(t3);
tcg_temp_free_vec(t2);
@@ -1730,27 +1730,27 @@
TCGType type = choose_vector_type(NULL, vece, oprsz, 0);
if (type != 0) {
TCGv_vec t_vec = tcg_temp_new_vec(type);
- tcg_gen_dup_mem_vec(vece, t_vec, cpu_env, aofs);
+ tcg_gen_dup_mem_vec(vece, t_vec, tcg_env, aofs);
do_dup_store(type, dofs, oprsz, maxsz, t_vec);
tcg_temp_free_vec(t_vec);
} else if (vece <= MO_32) {
TCGv_i32 in = tcg_temp_ebb_new_i32();
switch (vece) {
case MO_8:
- tcg_gen_ld8u_i32(in, cpu_env, aofs);
+ tcg_gen_ld8u_i32(in, tcg_env, aofs);
break;
case MO_16:
- tcg_gen_ld16u_i32(in, cpu_env, aofs);
+ tcg_gen_ld16u_i32(in, tcg_env, aofs);
break;
default:
- tcg_gen_ld_i32(in, cpu_env, aofs);
+ tcg_gen_ld_i32(in, tcg_env, aofs);
break;
}
do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
tcg_temp_free_i32(in);
} else {
TCGv_i64 in = tcg_temp_ebb_new_i64();
- tcg_gen_ld_i64(in, cpu_env, aofs);
+ tcg_gen_ld_i64(in, tcg_env, aofs);
do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
tcg_temp_free_i64(in);
}
@@ -1762,20 +1762,20 @@
if (TCG_TARGET_HAS_v128) {
TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);
- tcg_gen_ld_vec(in, cpu_env, aofs);
+ tcg_gen_ld_vec(in, tcg_env, aofs);
for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
- tcg_gen_st_vec(in, cpu_env, dofs + i);
+ tcg_gen_st_vec(in, tcg_env, dofs + i);
}
tcg_temp_free_vec(in);
} else {
TCGv_i64 in0 = tcg_temp_ebb_new_i64();
TCGv_i64 in1 = tcg_temp_ebb_new_i64();
- tcg_gen_ld_i64(in0, cpu_env, aofs);
- tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
+ tcg_gen_ld_i64(in0, tcg_env, aofs);
+ tcg_gen_ld_i64(in1, tcg_env, aofs + 8);
for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
- tcg_gen_st_i64(in0, cpu_env, dofs + i);
- tcg_gen_st_i64(in1, cpu_env, dofs + i + 8);
+ tcg_gen_st_i64(in0, tcg_env, dofs + i);
+ tcg_gen_st_i64(in1, tcg_env, dofs + i + 8);
}
tcg_temp_free_i64(in0);
tcg_temp_free_i64(in1);
@@ -1792,20 +1792,20 @@
if (TCG_TARGET_HAS_v256) {
TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V256);
- tcg_gen_ld_vec(in, cpu_env, aofs);
+ tcg_gen_ld_vec(in, tcg_env, aofs);
for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
- tcg_gen_st_vec(in, cpu_env, dofs + i);
+ tcg_gen_st_vec(in, tcg_env, dofs + i);
}
tcg_temp_free_vec(in);
} else if (TCG_TARGET_HAS_v128) {
TCGv_vec in0 = tcg_temp_new_vec(TCG_TYPE_V128);
TCGv_vec in1 = tcg_temp_new_vec(TCG_TYPE_V128);
- tcg_gen_ld_vec(in0, cpu_env, aofs);
- tcg_gen_ld_vec(in1, cpu_env, aofs + 16);
+ tcg_gen_ld_vec(in0, tcg_env, aofs);
+ tcg_gen_ld_vec(in1, tcg_env, aofs + 16);
for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
- tcg_gen_st_vec(in0, cpu_env, dofs + i);
- tcg_gen_st_vec(in1, cpu_env, dofs + i + 16);
+ tcg_gen_st_vec(in0, tcg_env, dofs + i);
+ tcg_gen_st_vec(in1, tcg_env, dofs + i + 16);
}
tcg_temp_free_vec(in0);
tcg_temp_free_vec(in1);
@@ -1815,11 +1815,11 @@
for (j = 0; j < 4; ++j) {
in[j] = tcg_temp_ebb_new_i64();
- tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8);
+ tcg_gen_ld_i64(in[j], tcg_env, aofs + j * 8);
}
for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
for (j = 0; j < 4; ++j) {
- tcg_gen_st_i64(in[j], cpu_env, dofs + i + j * 8);
+ tcg_gen_st_i64(in[j], tcg_env, dofs + i + j * 8);
}
}
for (j = 0; j < 4; ++j) {
@@ -3140,9 +3140,9 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t0, cpu_env, aofs + i);
+ tcg_gen_ld_vec(t0, tcg_env, aofs + i);
fni(vece, t0, t0, shift);
- tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ tcg_gen_st_vec(t0, tcg_env, dofs + i);
}
tcg_temp_free_vec(t0);
}
@@ -3248,8 +3248,8 @@
tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
- tcg_gen_addi_ptr(a0, cpu_env, dofs);
- tcg_gen_addi_ptr(a1, cpu_env, aofs);
+ tcg_gen_addi_ptr(a0, tcg_env, dofs);
+ tcg_gen_addi_ptr(a1, tcg_env, aofs);
g->fno[vece](a0, a1, desc);
@@ -3690,10 +3690,10 @@
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
- tcg_gen_ld_i32(t0, cpu_env, aofs + i);
- tcg_gen_ld_i32(t1, cpu_env, bofs + i);
+ tcg_gen_ld_i32(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i32(t1, tcg_env, bofs + i);
tcg_gen_negsetcond_i32(cond, t0, t0, t1);
- tcg_gen_st_i32(t0, cpu_env, dofs + i);
+ tcg_gen_st_i32(t0, tcg_env, dofs + i);
}
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t0);
@@ -3707,10 +3707,10 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, aofs + i);
- tcg_gen_ld_i64(t1, cpu_env, bofs + i);
+ tcg_gen_ld_i64(t0, tcg_env, aofs + i);
+ tcg_gen_ld_i64(t1, tcg_env, bofs + i);
tcg_gen_negsetcond_i64(cond, t0, t0, t1);
- tcg_gen_st_i64(t0, cpu_env, dofs + i);
+ tcg_gen_st_i64(t0, tcg_env, dofs + i);
}
tcg_temp_free_i64(t1);
tcg_temp_free_i64(t0);
@@ -3725,10 +3725,10 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t0, cpu_env, aofs + i);
- tcg_gen_ld_vec(t1, cpu_env, bofs + i);
+ tcg_gen_ld_vec(t0, tcg_env, aofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, bofs + i);
tcg_gen_cmp_vec(cond, vece, t0, t0, t1);
- tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ tcg_gen_st_vec(t0, tcg_env, dofs + i);
}
tcg_temp_free_vec(t1);
tcg_temp_free_vec(t0);
@@ -3855,9 +3855,9 @@
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
- tcg_gen_ld_vec(t1, cpu_env, aofs + i);
+ tcg_gen_ld_vec(t1, tcg_env, aofs + i);
tcg_gen_cmp_vec(cond, vece, t0, t1, c);
- tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ tcg_gen_st_vec(t0, tcg_env, dofs + i);
}
}
@@ -3950,9 +3950,9 @@
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i64(t0, tcg_env, aofs + i);
tcg_gen_negsetcond_i64(cond, t0, t0, c);
- tcg_gen_st_i64(t0, cpu_env, dofs + i);
+ tcg_gen_st_i64(t0, tcg_env, dofs + i);
}
tcg_temp_free_i64(t0);
} else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
@@ -3962,9 +3962,9 @@
tcg_gen_extrl_i64_i32(t1, c);
for (i = 0; i < oprsz; i += 8) {
- tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+ tcg_gen_ld_i32(t0, tcg_env, aofs + i);
tcg_gen_negsetcond_i32(cond, t0, t0, t1);
- tcg_gen_st_i32(t0, cpu_env, dofs + i);
+ tcg_gen_st_i32(t0, tcg_env, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index d54c305..df4f22c 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -589,7 +589,7 @@
tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
addr = tcgv_i64_temp(ext_addr);
}
- gen_helper_ld_i128(val, cpu_env, temp_tcgv_i64(addr),
+ gen_helper_ld_i128(val, tcg_env, temp_tcgv_i64(addr),
tcg_constant_i32(orig_oi));
}
@@ -698,7 +698,7 @@
tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
addr = tcgv_i64_temp(ext_addr);
}
- gen_helper_st_i128(cpu_env, temp_tcgv_i64(addr), val,
+ gen_helper_st_i128(tcg_env, temp_tcgv_i64(addr), val,
tcg_constant_i32(orig_oi));
}
@@ -847,7 +847,7 @@
oi = make_memop_idx(memop & ~MO_SIGN, idx);
a64 = maybe_extend_addr64(addr);
- gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+ gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
maybe_free_addr64(a64);
if (memop & MO_SIGN) {
@@ -927,12 +927,12 @@
if (gen) {
MemOpIdx oi = make_memop_idx(memop, idx);
TCGv_i64 a64 = maybe_extend_addr64(addr);
- gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+ gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
maybe_free_addr64(a64);
return;
}
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
/*
* Produce a result for a well-formed opcode stream. This satisfies
@@ -990,7 +990,7 @@
MemOpIdx oi = make_memop_idx(memop, idx);
TCGv_i64 a64 = maybe_extend_addr64(addr);
- gen_helper_nonatomic_cmpxchgo(retv, cpu_env, a64, cmpv, newv,
+ gen_helper_nonatomic_cmpxchgo(retv, tcg_env, a64, cmpv, newv,
tcg_constant_i32(oi));
maybe_free_addr64(a64);
} else {
@@ -1049,12 +1049,12 @@
if (gen) {
MemOpIdx oi = make_memop_idx(memop, idx);
TCGv_i64 a64 = maybe_extend_addr64(addr);
- gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+ gen(retv, tcg_env, a64, cmpv, newv, tcg_constant_i32(oi));
maybe_free_addr64(a64);
return;
}
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
/*
* Produce a result for a well-formed opcode stream. This satisfies
@@ -1108,7 +1108,7 @@
oi = make_memop_idx(memop & ~MO_SIGN, idx);
a64 = maybe_extend_addr64(addr);
- gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
+ gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
maybe_free_addr64(a64);
if (memop & MO_SIGN) {
@@ -1146,12 +1146,12 @@
if (gen) {
MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
TCGv_i64 a64 = maybe_extend_addr64(addr);
- gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
+ gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
maybe_free_addr64(a64);
return;
}
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
/* Produce a result, so that we have a well-formed opcode stream
with respect to uses of the result in the (dead) code following. */
tcg_gen_movi_i64(ret, 0);
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 02a8cad..393dbcd 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -2939,7 +2939,7 @@
plugin_gen_disable_mem_helpers();
ptr = tcg_temp_ebb_new_ptr();
- gen_helper_lookup_tb_ptr(ptr, cpu_env);
+ gen_helper_lookup_tb_ptr(ptr, tcg_env);
tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
}
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 604fa9b..f664cf1 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -36,6 +36,7 @@
#include "qemu/timer.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
+#include "tcg/startup.h"
#include "tcg/tcg-op-common.h"
#if UINTPTR_MAX == UINT32_MAX
@@ -231,7 +232,7 @@
TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
-TCGv_env cpu_env = 0;
+TCGv_env tcg_env;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;
@@ -406,7 +407,8 @@
#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
static int tlb_mask_table_ofs(TCGContext *s, int which)
{
- return s->tlb_fast_offset + which * sizeof(CPUTLBDescFast);
+ return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
+ sizeof(CPUNegativeOffsetState));
}
#endif
@@ -734,6 +736,13 @@
#include "tcg-target.c.inc"
+#ifndef CONFIG_TCG_INTERPRETER
+/* Validate CPUTLBDescFast placement. */
+QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
+ sizeof(CPUNegativeOffsetState))
+ < MIN_TLB_MASK_TABLE_OFS);
+#endif
+
static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
@@ -1353,7 +1362,7 @@
tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
- cpu_env = temp_tcgv_ptr(ts);
+ tcg_env = temp_tcgv_ptr(ts);
}
void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
@@ -1387,8 +1396,9 @@
return tb;
}
-void tcg_prologue_init(TCGContext *s)
+void tcg_prologue_init(void)
{
+ TCGContext *s = tcg_ctx;
size_t prologue_size;
s->code_ptr = s->code_gen_ptr;
@@ -1497,11 +1507,6 @@
tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
s->addr_type == TCG_TYPE_I64);
-#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
- tcg_debug_assert(s->tlb_fast_offset < 0);
- tcg_debug_assert(s->tlb_fast_offset >= MIN_TLB_MASK_TABLE_OFS);
-#endif
-
tcg_debug_assert(s->insn_start_words > 0);
}
@@ -2549,21 +2554,21 @@
{
const char *s_al, *s_op, *s_at;
MemOpIdx oi = op->args[k++];
- MemOp op = get_memop(oi);
+ MemOp mop = get_memop(oi);
unsigned ix = get_mmuidx(oi);
- s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
- s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
- s_at = atom_name[(op & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
- op &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
+ s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
+ s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
+ s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
+ mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
/* If all fields are accounted for, print symbolically. */
- if (!op && s_al && s_op && s_at) {
+ if (!mop && s_al && s_op && s_at) {
col += ne_fprintf(f, ",%s%s%s,%u",
s_at, s_al, s_op, ix);
} else {
- op = get_memop(oi);
- col += ne_fprintf(f, ",$0x%x,%u", op, ix);
+ mop = get_memop(oi);
+ col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
}
i = 1;
}
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 3898742..dab1989 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -73,7 +73,7 @@
build-tcg: $(BUILD_TCG_TARGET_RULES)
.PHONY: check-tcg
-.ninja-goals.check-tcg = all
+.ninja-goals.check-tcg = all test-plugins
check-tcg: $(RUN_TCG_TARGET_RULES)
.PHONY: clean-tcg
diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py
index 01ee149..6eab515 100644
--- a/tests/avocado/boot_linux_console.py
+++ b/tests/avocado/boot_linux_console.py
@@ -116,7 +116,6 @@
console_pattern = 'Kernel command line: %s' % kernel_command_line
self.wait_for_console_pattern(console_pattern)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta(self):
"""
:avocado: tags=arch:mips
@@ -139,7 +138,6 @@
console_pattern = 'Kernel command line: %s' % kernel_command_line
self.wait_for_console_pattern(console_pattern)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips64el_malta(self):
"""
This test requires the ar tool to extract "data.tar.gz" from
@@ -193,7 +191,6 @@
console_pattern = 'Kernel command line: %s' % kernel_command_line
self.wait_for_console_pattern(console_pattern)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta_cpio(self):
"""
:avocado: tags=arch:mips
@@ -235,7 +232,6 @@
# Wait for VM to shut down gracefully
self.vm.wait()
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
@skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
def test_mips64el_malta_5KEc_cpio(self):
"""
@@ -296,7 +292,6 @@
console_pattern = 'Kernel command line: %s' % kernel_command_line
self.wait_for_console_pattern(console_pattern)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta32el_nanomips_4k(self):
"""
:avocado: tags=arch:mipsel
@@ -310,7 +305,6 @@
kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6'
self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta32el_nanomips_16k_up(self):
"""
:avocado: tags=arch:mipsel
@@ -324,7 +318,6 @@
kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc'
self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta32el_nanomips_64k_dbg(self):
"""
:avocado: tags=arch:mipsel
diff --git a/tests/avocado/machine_mips_malta.py b/tests/avocado/machine_mips_malta.py
index 3620266..9223345 100644
--- a/tests/avocado/machine_mips_malta.py
+++ b/tests/avocado/machine_mips_malta.py
@@ -11,7 +11,6 @@
import gzip
import logging
-from avocado import skip
from avocado import skipIf
from avocado import skipUnless
from avocado.utils import archive
@@ -94,7 +93,6 @@
cv2.imwrite(debug_png, screendump_bgr)
self.assertGreaterEqual(tuxlogo_count, cpu_cores_count)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta_i6400_framebuffer_logo_1core(self):
"""
:avocado: tags=arch:mips64el
@@ -103,7 +101,6 @@
"""
self.do_test_i6400_framebuffer_logo(1)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_mips_malta_i6400_framebuffer_logo_7cores(self):
"""
@@ -114,7 +111,6 @@
"""
self.do_test_i6400_framebuffer_logo(7)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_mips_malta_i6400_framebuffer_logo_8cores(self):
"""
@@ -146,7 +142,6 @@
wait_for_console_pattern(self, prompt)
self.vm.shutdown()
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mipsel_malta_yamon(self):
"""
:avocado: tags=arch:mipsel
@@ -155,7 +150,6 @@
"""
self.do_test_yamon()
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips64el_malta_yamon(self):
"""
:avocado: tags=arch:mips64el
diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py
index f7ccfd2..a186105 100644
--- a/tests/avocado/replay_kernel.py
+++ b/tests/avocado/replay_kernel.py
@@ -98,7 +98,6 @@
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta(self):
"""
:avocado: tags=arch:mips
@@ -117,7 +116,6 @@
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips64el_malta(self):
"""
This test requires the ar tool to extract "data.tar.gz" from
@@ -433,7 +431,6 @@
# making it very slow.
timeout = 180
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta_cpio(self):
"""
:avocado: tags=arch:mips
@@ -463,7 +460,6 @@
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5,
args=('-initrd', initrd_path))
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
@skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
def test_mips64el_malta_5KEc_cpio(self):
"""
@@ -506,7 +502,6 @@
console_pattern = 'Kernel command line: %s' % kernel_command_line
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta32el_nanomips_4k(self):
"""
:avocado: tags=arch:mipsel
@@ -521,7 +516,6 @@
kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
self.do_test_mips_malta32el_nanomips(kernel_path_xz)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta32el_nanomips_16k_up(self):
"""
:avocado: tags=arch:mipsel
@@ -536,7 +530,6 @@
kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
self.do_test_mips_malta32el_nanomips(kernel_path_xz)
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips_malta32el_nanomips_64k_dbg(self):
"""
:avocado: tags=arch:mipsel
diff --git a/tests/avocado/tuxrun_baselines.py b/tests/avocado/tuxrun_baselines.py
index 610b7e2..e12250e 100644
--- a/tests/avocado/tuxrun_baselines.py
+++ b/tests/avocado/tuxrun_baselines.py
@@ -352,7 +352,6 @@
self.common_tuxrun(csums=sums, drive="virtio-blk-pci")
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips32(self):
"""
:avocado: tags=arch:mips
@@ -371,7 +370,6 @@
self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips32el(self):
"""
:avocado: tags=arch:mipsel
@@ -389,7 +387,6 @@
self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips64(self):
"""
:avocado: tags=arch:mips64
@@ -407,7 +404,6 @@
self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0")
- @skip('https://gitlab.com/qemu-project/qemu/-/issues/1884')
def test_mips64el(self):
"""
:avocado: tags=arch:mips64el
diff --git a/tests/meson.build b/tests/meson.build
index debaa45..9996a29 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -80,10 +80,7 @@
subdir('fp')
endif
-if get_option('plugins')
- subdir('plugin')
-endif
-
+subdir('plugin')
subdir('unit')
subdir('qapi-schema')
subdir('qtest')
diff --git a/tests/migration/i386/a-b-bootblock.S b/tests/migration/i386/a-b-bootblock.S
index 3d464c7..6bb9999 100644
--- a/tests/migration/i386/a-b-bootblock.S
+++ b/tests/migration/i386/a-b-bootblock.S
@@ -34,19 +34,31 @@
mov $16,%eax
mov %eax,%ds
+# Start from 1MB
+.set TEST_MEM_START, (1024*1024)
+.set TEST_MEM_END, (100*1024*1024)
+
mov $65,%ax
mov $0x3f8,%dx
outb %al,%dx
# bl keeps a counter so we limit the output speed
mov $0, %bl
+
+pre_zero:
+ mov $TEST_MEM_START,%eax
+do_zero:
+ movb $0, (%eax)
+ add $4096,%eax
+ cmp $TEST_MEM_END,%eax
+ jl do_zero
+
mainloop:
- # Start from 1MB
- mov $(1024*1024),%eax
+ mov $TEST_MEM_START,%eax
innerloop:
incb (%eax)
add $4096,%eax
- cmp $(100*1024*1024),%eax
+ cmp $TEST_MEM_END,%eax
jl innerloop
inc %bl
diff --git a/tests/migration/i386/a-b-bootblock.h b/tests/migration/i386/a-b-bootblock.h
index b7b0fce..5b52391 100644
--- a/tests/migration/i386/a-b-bootblock.h
+++ b/tests/migration/i386/a-b-bootblock.h
@@ -4,18 +4,18 @@
* the header and the assembler differences in your patch submission.
*/
unsigned char x86_bootsect[] = {
- 0xfa, 0x0f, 0x01, 0x16, 0x78, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00,
+ 0xfa, 0x0f, 0x01, 0x16, 0x8c, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00,
0x0f, 0x22, 0xc0, 0x66, 0xea, 0x20, 0x7c, 0x00, 0x00, 0x08, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x92, 0x0c, 0x02,
0xe6, 0x92, 0xb8, 0x10, 0x00, 0x00, 0x00, 0x8e, 0xd8, 0x66, 0xb8, 0x41,
0x00, 0x66, 0xba, 0xf8, 0x03, 0xee, 0xb3, 0x00, 0xb8, 0x00, 0x00, 0x10,
- 0x00, 0xfe, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x3d, 0x00, 0x00, 0x40,
- 0x06, 0x7c, 0xf2, 0xfe, 0xc3, 0x80, 0xe3, 0x3f, 0x75, 0xe6, 0x66, 0xb8,
- 0x42, 0x00, 0x66, 0xba, 0xf8, 0x03, 0xee, 0xeb, 0xdb, 0x8d, 0x76, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
- 0x00, 0x9a, 0xcf, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x92, 0xcf, 0x00,
- 0x27, 0x00, 0x60, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xc6, 0x00, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x3d, 0x00, 0x00,
+ 0x40, 0x06, 0x7c, 0xf1, 0xb8, 0x00, 0x00, 0x10, 0x00, 0xfe, 0x00, 0x05,
+ 0x00, 0x10, 0x00, 0x00, 0x3d, 0x00, 0x00, 0x40, 0x06, 0x7c, 0xf2, 0xfe,
+ 0xc3, 0x80, 0xe3, 0x3f, 0x75, 0xe6, 0x66, 0xb8, 0x42, 0x00, 0x66, 0xba,
+ 0xf8, 0x03, 0xee, 0xeb, 0xdb, 0x8d, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x9a, 0xcf, 0x00,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x92, 0xcf, 0x00, 0x27, 0x00, 0x74, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/tests/migration/s390x/a-b-bios.c b/tests/migration/s390x/a-b-bios.c
index a0327cd..ff99a3e 100644
--- a/tests/migration/s390x/a-b-bios.c
+++ b/tests/migration/s390x/a-b-bios.c
@@ -27,6 +27,14 @@
sclp_setup();
sclp_print("A");
+ /*
+ * Make sure all of the pages have consistent contents before incrementing
+ * the first byte below.
+ */
+ for (addr = START_ADDRESS; addr < END_ADDRESS; addr += 4096) {
+ *(volatile char *)addr = 0;
+ }
+
while (1) {
for (addr = START_ADDRESS; addr < END_ADDRESS; addr += 4096) {
*(volatile char *)addr += 1; /* Change pages */
diff --git a/tests/migration/s390x/a-b-bios.h b/tests/migration/s390x/a-b-bios.h
index e722dc7..96103da 100644
--- a/tests/migration/s390x/a-b-bios.h
+++ b/tests/migration/s390x/a-b-bios.h
@@ -6,10 +6,10 @@
unsigned char s390x_elf[] = {
0x7f, 0x45, 0x4c, 0x46, 0x02, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xa8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x38, 0x00, 0x07, 0x00, 0x40,
- 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x0d, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x88, 0x00, 0x00, 0x00, 0x00,
@@ -21,140 +21,154 @@
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x0c,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x0c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xac, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x10, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x10,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x98, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x17, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x18, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x07, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x10,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0,
+ 0x00, 0x00, 0x07, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x64, 0x74, 0xe5, 0x51,
0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10, 0x64, 0x74, 0xe5, 0x52, 0x00, 0x00, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x10, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x10,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x17, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x6c, 0x64, 0x36, 0x34, 0x2e, 0x73, 0x6f,
0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0xef, 0xf0, 0x70,
- 0x00, 0x24, 0xa7, 0xfb, 0xff, 0x60, 0xc0, 0xe5, 0x00, 0x00, 0x01, 0x1f,
- 0xc0, 0x20, 0x00, 0x00, 0x02, 0x64, 0xc0, 0xe5, 0x00, 0x00, 0x01, 0x35,
- 0xa5, 0x1e, 0x00, 0x10, 0xa7, 0x29, 0x63, 0x00, 0xe3, 0x30, 0x10, 0x00,
- 0x00, 0x90, 0xa7, 0x3a, 0x00, 0x01, 0x42, 0x30, 0x10, 0x00, 0xa7, 0x1b,
- 0x10, 0x00, 0xa7, 0x27, 0xff, 0xf7, 0xc0, 0x20, 0x00, 0x00, 0x02, 0x50,
- 0xa7, 0xf4, 0xff, 0xeb, 0x07, 0x07, 0x07, 0x07, 0xc0, 0xf0, 0x00, 0x00,
- 0x56, 0xc4, 0xc0, 0x20, 0x00, 0x00, 0x0a, 0xbd, 0xc0, 0x30, 0x00, 0x00,
- 0x56, 0xbe, 0xb9, 0x0b, 0x00, 0x32, 0xb9, 0x02, 0x00, 0x33, 0xa7, 0x84,
- 0x00, 0x19, 0xa7, 0x3b, 0xff, 0xff, 0xeb, 0x43, 0x00, 0x08, 0x00, 0x0c,
- 0xb9, 0x02, 0x00, 0x44, 0xb9, 0x04, 0x00, 0x12, 0xa7, 0x84, 0x00, 0x09,
- 0xd7, 0xff, 0x10, 0x00, 0x10, 0x00, 0x41, 0x10, 0x11, 0x00, 0xa7, 0x47,
- 0xff, 0xfb, 0xc0, 0x20, 0x00, 0x00, 0x00, 0x07, 0x44, 0x30, 0x20, 0x00,
- 0xa7, 0xf4, 0xff, 0xb6, 0xd7, 0x00, 0x10, 0x00, 0x10, 0x00, 0xc0, 0x10,
- 0x00, 0x00, 0x00, 0x29, 0xb2, 0xb2, 0x10, 0x00, 0xeb, 0x00, 0xf0, 0x00,
- 0x00, 0x25, 0x96, 0x02, 0xf0, 0x06, 0xeb, 0x00, 0xf0, 0x00, 0x00, 0x2f,
- 0xc0, 0x10, 0x00, 0x00, 0x00, 0x11, 0xe3, 0x10, 0x01, 0xb8, 0x00, 0x24,
- 0xc0, 0x10, 0x00, 0x00, 0x00, 0x26, 0xd2, 0x07, 0x01, 0xb0, 0x10, 0x00,
- 0xc0, 0x10, 0x00, 0x00, 0x00, 0x18, 0xb2, 0xb2, 0x10, 0x00, 0xeb, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0xa8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xf0, 0xeb, 0xef, 0xf0, 0x70,
+ 0x00, 0x24, 0xa7, 0xfb, 0xff, 0x60, 0xc0, 0xe5, 0x00, 0x00, 0x01, 0x5f,
+ 0xc0, 0x20, 0x00, 0x00, 0x02, 0xa8, 0xc0, 0xe5, 0x00, 0x00, 0x01, 0x75,
+ 0xa5, 0x2e, 0x00, 0x10, 0xa7, 0x19, 0x63, 0x00, 0x92, 0x00, 0x20, 0x00,
+ 0xa7, 0x2b, 0x10, 0x00, 0xa7, 0x17, 0xff, 0xfc, 0xa5, 0x1e, 0x00, 0x10,
+ 0xa7, 0x29, 0x63, 0x00, 0xe3, 0x30, 0x10, 0x00, 0x00, 0x90, 0xa7, 0x3a,
+ 0x00, 0x01, 0x42, 0x30, 0x10, 0x00, 0xa7, 0x1b, 0x10, 0x00, 0xa7, 0x27,
+ 0xff, 0xf7, 0xc0, 0x20, 0x00, 0x00, 0x02, 0x8a, 0xc0, 0xe5, 0x00, 0x00,
+ 0x01, 0x56, 0xa7, 0xf4, 0xff, 0xeb, 0x07, 0x07, 0xc0, 0xf0, 0x00, 0x00,
+ 0x4e, 0x5c, 0xc0, 0x20, 0x00, 0x00, 0x00, 0x7d, 0xe3, 0x20, 0x20, 0x00,
+ 0x00, 0x04, 0xc0, 0x30, 0x00, 0x00, 0x96, 0xa3, 0xb9, 0x0b, 0x00, 0x32,
+ 0xb9, 0x02, 0x00, 0x33, 0xa7, 0x84, 0x00, 0x19, 0xa7, 0x3b, 0xff, 0xff,
+ 0xeb, 0x43, 0x00, 0x08, 0x00, 0x0c, 0xb9, 0x02, 0x00, 0x44, 0xb9, 0x04,
+ 0x00, 0x12, 0xa7, 0x84, 0x00, 0x09, 0xd7, 0xff, 0x10, 0x00, 0x10, 0x00,
+ 0x41, 0x10, 0x11, 0x00, 0xa7, 0x47, 0xff, 0xfb, 0xc0, 0x20, 0x00, 0x00,
+ 0x00, 0x0d, 0x44, 0x30, 0x20, 0x00, 0xc0, 0x20, 0x00, 0x00, 0x00, 0x5b,
+ 0xd2, 0x0f, 0x01, 0xd0, 0x20, 0x00, 0xa7, 0xf4, 0xff, 0xa1, 0xd7, 0x00,
+ 0x10, 0x00, 0x10, 0x00, 0xc0, 0x10, 0x00, 0x00, 0x00, 0x50, 0xb2, 0xb2,
+ 0x10, 0x00, 0xa7, 0xf4, 0x00, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0x00, 0x25,
+ 0x96, 0x02, 0xf0, 0x06, 0xeb, 0x00, 0xf0, 0x00, 0x00, 0x2f, 0xc0, 0x10,
+ 0x00, 0x00, 0x00, 0x2a, 0xe3, 0x10, 0x01, 0xb8, 0x00, 0x24, 0xc0, 0x10,
+ 0x00, 0x00, 0x00, 0x4b, 0xd2, 0x07, 0x01, 0xb0, 0x10, 0x00, 0xc0, 0x10,
+ 0x00, 0x00, 0x00, 0x3d, 0xb2, 0xb2, 0x10, 0x00, 0xeb, 0x66, 0xf0, 0x00,
+ 0x00, 0x25, 0x96, 0xff, 0xf0, 0x04, 0xeb, 0x66, 0xf0, 0x00, 0x00, 0x2f,
+ 0xc0, 0x10, 0x00, 0x00, 0x00, 0x1a, 0xe3, 0x10, 0x01, 0xf8, 0x00, 0x24,
+ 0xc0, 0x10, 0x00, 0x00, 0x00, 0x36, 0xd2, 0x07, 0x01, 0xf0, 0x10, 0x00,
+ 0xc0, 0x10, 0x00, 0x00, 0x00, 0x24, 0xb2, 0xb2, 0x10, 0x00, 0xeb, 0x00,
0xf0, 0x00, 0x00, 0x25, 0x94, 0xfd, 0xf0, 0x06, 0xeb, 0x00, 0xf0, 0x00,
- 0x00, 0x2f, 0x07, 0xfe, 0x07, 0x07, 0x07, 0x07, 0x00, 0x02, 0x00, 0x01,
+ 0x00, 0x2f, 0x07, 0xfe, 0xeb, 0x66, 0xf0, 0x00, 0x00, 0x25, 0x94, 0x00,
+ 0xf0, 0x04, 0xeb, 0x66, 0xf0, 0x00, 0x00, 0x2f, 0x07, 0xfe, 0x07, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xf0, 0x00, 0x02, 0x00, 0x01,
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x03, 0x02, 0x00, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x80, 0x00, 0x00, 0x00,
- 0xeb, 0xbf, 0xf0, 0x58, 0x00, 0x24, 0xc0, 0x10, 0x00, 0x00, 0x0e, 0x59,
- 0xa7, 0xfb, 0xff, 0x60, 0xb2, 0x20, 0x00, 0x21, 0xb2, 0x22, 0x00, 0xb0,
- 0x88, 0xb0, 0x00, 0x1c, 0xc0, 0xe5, 0xff, 0xff, 0xff, 0xba, 0xa7, 0xbe,
- 0x00, 0x03, 0xa7, 0x84, 0x00, 0x13, 0xa7, 0xbe, 0x00, 0x02, 0xa7, 0x28,
- 0x00, 0x00, 0xa7, 0x74, 0x00, 0x04, 0xa7, 0x28, 0xff, 0xfe, 0xe3, 0x40,
- 0xf1, 0x10, 0x00, 0x04, 0xb9, 0x14, 0x00, 0x22, 0xeb, 0xbf, 0xf0, 0xf8,
- 0x00, 0x04, 0x07, 0xf4, 0xa7, 0x28, 0xff, 0xff, 0xa7, 0xf4, 0xff, 0xf5,
- 0x07, 0x07, 0x07, 0x07, 0xeb, 0xbf, 0xf0, 0x58, 0x00, 0x24, 0xc0, 0xd0,
- 0x00, 0x00, 0x01, 0x21, 0xa7, 0xfb, 0xff, 0x60, 0xa7, 0xb9, 0x00, 0x00,
- 0xa7, 0x19, 0x00, 0x00, 0xc0, 0x40, 0x00, 0x00, 0x0e, 0x24, 0xa7, 0x3b,
- 0x00, 0x01, 0xa7, 0x37, 0x00, 0x23, 0xc0, 0x20, 0x00, 0x00, 0x0e, 0x1d,
- 0x18, 0x31, 0xa7, 0x1a, 0x00, 0x06, 0x40, 0x10, 0x20, 0x08, 0xa7, 0x3a,
- 0x00, 0x0e, 0xa7, 0x18, 0x1a, 0x00, 0x40, 0x30, 0x20, 0x00, 0x92, 0x00,
- 0x20, 0x02, 0x40, 0x10, 0x20, 0x0a, 0xe3, 0x20, 0xd0, 0x00, 0x00, 0x04,
- 0xc0, 0xe5, 0xff, 0xff, 0xff, 0xac, 0xe3, 0x40, 0xf1, 0x10, 0x00, 0x04,
- 0xb9, 0x04, 0x00, 0x2b, 0xeb, 0xbf, 0xf0, 0xf8, 0x00, 0x04, 0x07, 0xf4,
- 0xb9, 0x04, 0x00, 0x51, 0xa7, 0x5b, 0x00, 0x01, 0xa7, 0x09, 0x0f, 0xf7,
- 0xb9, 0x21, 0x00, 0x50, 0xa7, 0x24, 0xff, 0xd7, 0x41, 0xeb, 0x20, 0x00,
- 0x95, 0x0a, 0xe0, 0x00, 0xa7, 0x74, 0x00, 0x08, 0x41, 0x11, 0x40, 0x0e,
- 0x92, 0x0d, 0x10, 0x00, 0xb9, 0x04, 0x00, 0x15, 0x43, 0x5b, 0x20, 0x00,
- 0x42, 0x51, 0x40, 0x0e, 0xa7, 0xbb, 0x00, 0x01, 0x41, 0x10, 0x10, 0x01,
- 0xa7, 0xf4, 0xff, 0xbf, 0xc0, 0x50, 0x00, 0x00, 0x00, 0xd4, 0xc0, 0x10,
- 0x00, 0x00, 0x0d, 0xd9, 0xa7, 0x48, 0x00, 0x1c, 0x40, 0x40, 0x10, 0x00,
- 0x50, 0x20, 0x10, 0x0c, 0xa7, 0x48, 0x00, 0x04, 0xe3, 0x20, 0x50, 0x00,
- 0x00, 0x04, 0x40, 0x40, 0x10, 0x0a, 0x50, 0x30, 0x10, 0x10, 0xc0, 0xf4,
- 0xff, 0xff, 0xff, 0x6b, 0xa7, 0x39, 0x00, 0x40, 0xa7, 0x29, 0x00, 0x00,
- 0xc0, 0xf4, 0xff, 0xff, 0xff, 0xe4, 0x07, 0x07, 0xb9, 0x04, 0x00, 0x13,
- 0xa7, 0x2a, 0xff, 0xff, 0xb9, 0x04, 0x00, 0x34, 0xa7, 0x48, 0x00, 0x01,
- 0x15, 0x24, 0xa7, 0x24, 0x00, 0x07, 0xb9, 0x04, 0x00, 0x21, 0xc0, 0xf4,
- 0xff, 0xff, 0xff, 0x7f, 0xa7, 0x29, 0xff, 0xff, 0x07, 0xfe, 0x07, 0x07,
- 0xa7, 0x39, 0x00, 0x00, 0x41, 0x13, 0x20, 0x00, 0x95, 0x00, 0x10, 0x00,
- 0xa7, 0x74, 0x00, 0x05, 0xc0, 0xf4, 0xff, 0xff, 0xff, 0x70, 0xa7, 0x3b,
- 0x00, 0x01, 0xa7, 0xf4, 0xff, 0xf5, 0x07, 0x07, 0xeb, 0xbf, 0xf0, 0x58,
- 0x00, 0x24, 0xc0, 0xd0, 0x00, 0x00, 0x00, 0x91, 0xa7, 0xfb, 0xff, 0x60,
- 0xb9, 0x04, 0x00, 0xb2, 0xa7, 0x19, 0x00, 0x20, 0xc0, 0x20, 0x00, 0x00,
- 0x0d, 0x8c, 0x92, 0x00, 0x20, 0x00, 0xa7, 0x2b, 0x00, 0x01, 0xa7, 0x17,
- 0xff, 0xfc, 0xc0, 0x10, 0x00, 0x00, 0x0d, 0x83, 0xa7, 0x28, 0x00, 0x20,
- 0x40, 0x20, 0x10, 0x00, 0xe3, 0x20, 0xd0, 0x00, 0x00, 0x04, 0xc0, 0xe5,
- 0xff, 0xff, 0xff, 0x1d, 0x12, 0x22, 0xa7, 0x74, 0x00, 0x17, 0xa7, 0x19,
- 0x00, 0x00, 0xc0, 0x40, 0x00, 0x00, 0x00, 0x75, 0xc0, 0x50, 0x00, 0x00,
- 0x0d, 0x7a, 0xa7, 0x29, 0x00, 0x08, 0xe3, 0x31, 0x50, 0x00, 0x00, 0x90,
- 0x43, 0x33, 0x40, 0x00, 0x42, 0x31, 0xb0, 0x00, 0xa7, 0x1b, 0x00, 0x01,
- 0xa7, 0x27, 0xff, 0xf7, 0xe3, 0x40, 0xf1, 0x10, 0x00, 0x04, 0xeb, 0xbf,
- 0xf0, 0xf8, 0x00, 0x04, 0x07, 0xf4, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0xeb, 0xaf, 0xf0, 0x50, 0x00, 0x24, 0xc0, 0xd0, 0x00, 0x00, 0x00, 0x51,
- 0xa7, 0xfb, 0xff, 0x60, 0xa7, 0x19, 0x0f, 0xf8, 0xb9, 0x21, 0x00, 0x31,
- 0xb9, 0x04, 0x00, 0xa2, 0xa7, 0xc4, 0x00, 0x2d, 0xa7, 0xb9, 0x0f, 0xf8,
- 0xc0, 0x10, 0x00, 0x00, 0x0d, 0x42, 0xa7, 0x28, 0x10, 0x00, 0x40, 0x20,
- 0x10, 0x00, 0x92, 0x00, 0x10, 0x02, 0xe3, 0x20, 0xd0, 0x00, 0x00, 0x04,
- 0xc0, 0xe5, 0xff, 0xff, 0xfe, 0xda, 0xa7, 0xbb, 0x00, 0x01, 0xa7, 0x19,
- 0x00, 0x00, 0xc0, 0x20, 0x00, 0x00, 0x0d, 0x2f, 0xa7, 0xb7, 0x00, 0x17,
- 0xc0, 0x10, 0x00, 0x00, 0x0d, 0x2a, 0xe3, 0x40, 0xf1, 0x10, 0x00, 0x04,
- 0xe3, 0x20, 0x10, 0x08, 0x00, 0x91, 0xa7, 0x2a, 0xff, 0xf9, 0xb9, 0x14,
- 0x00, 0x22, 0xeb, 0xaf, 0xf0, 0xf0, 0x00, 0x04, 0x07, 0xf4, 0xb9, 0x04,
- 0x00, 0xb3, 0xa7, 0xf4, 0xff, 0xd5, 0x43, 0x31, 0x20, 0x0f, 0x42, 0x31,
- 0xa0, 0x00, 0xa7, 0x1b, 0x00, 0x01, 0xa7, 0xf4, 0xff, 0xe3, 0x07, 0x07,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x78, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x00, 0x00, 0x00, 0x01, 0x80, 0x00, 0x00, 0x00, 0xeb, 0xbf, 0xf0, 0x58,
+ 0x00, 0x24, 0xc0, 0x10, 0x00, 0x00, 0x4e, 0x0d, 0xa7, 0xfb, 0xff, 0x60,
+ 0xb2, 0x20, 0x00, 0x21, 0xb2, 0x22, 0x00, 0xb0, 0x88, 0xb0, 0x00, 0x1c,
+ 0xc0, 0xe5, 0xff, 0xff, 0xff, 0x91, 0xa7, 0xbe, 0x00, 0x03, 0xa7, 0x84,
+ 0x00, 0x13, 0xa7, 0xbe, 0x00, 0x02, 0xa7, 0x28, 0x00, 0x00, 0xa7, 0x74,
+ 0x00, 0x04, 0xa7, 0x28, 0xff, 0xfe, 0xe3, 0x40, 0xf1, 0x10, 0x00, 0x04,
+ 0xb9, 0x14, 0x00, 0x22, 0xeb, 0xbf, 0xf0, 0xf8, 0x00, 0x04, 0x07, 0xf4,
+ 0xa7, 0x28, 0xff, 0xff, 0xa7, 0xf4, 0xff, 0xf5, 0x07, 0x07, 0x07, 0x07,
+ 0xeb, 0xbf, 0xf0, 0x58, 0x00, 0x24, 0xc0, 0xd0, 0x00, 0x00, 0x01, 0x25,
+ 0xa7, 0xfb, 0xff, 0x60, 0xa7, 0xb9, 0x00, 0x00, 0xa7, 0x19, 0x00, 0x00,
+ 0xc0, 0x40, 0x00, 0x00, 0x4d, 0xd8, 0xa7, 0x3b, 0x00, 0x01, 0xa7, 0x37,
+ 0x00, 0x23, 0xc0, 0x20, 0x00, 0x00, 0x4d, 0xd1, 0x18, 0x31, 0xa7, 0x1a,
+ 0x00, 0x06, 0x40, 0x10, 0x20, 0x08, 0xa7, 0x3a, 0x00, 0x0e, 0xa7, 0x18,
+ 0x1a, 0x00, 0x40, 0x30, 0x20, 0x00, 0x92, 0x00, 0x20, 0x02, 0x40, 0x10,
+ 0x20, 0x0a, 0xe3, 0x20, 0xd0, 0x00, 0x00, 0x04, 0xc0, 0xe5, 0xff, 0xff,
+ 0xff, 0xac, 0xe3, 0x40, 0xf1, 0x10, 0x00, 0x04, 0xb9, 0x04, 0x00, 0x2b,
+ 0xeb, 0xbf, 0xf0, 0xf8, 0x00, 0x04, 0x07, 0xf4, 0xb9, 0x04, 0x00, 0x51,
+ 0xa7, 0x5b, 0x00, 0x01, 0xa7, 0x09, 0x0f, 0xf7, 0xb9, 0x21, 0x00, 0x50,
+ 0xa7, 0x24, 0xff, 0xd7, 0x41, 0xeb, 0x20, 0x00, 0x95, 0x0a, 0xe0, 0x00,
+ 0xa7, 0x74, 0x00, 0x08, 0x41, 0x11, 0x40, 0x0e, 0x92, 0x0d, 0x10, 0x00,
+ 0xb9, 0x04, 0x00, 0x15, 0x43, 0x5b, 0x20, 0x00, 0x42, 0x51, 0x40, 0x0e,
+ 0xa7, 0xbb, 0x00, 0x01, 0x41, 0x10, 0x10, 0x01, 0xa7, 0xf4, 0xff, 0xbf,
+ 0xc0, 0x50, 0x00, 0x00, 0x00, 0xd8, 0xc0, 0x10, 0x00, 0x00, 0x4d, 0x8d,
+ 0xa7, 0x48, 0x00, 0x1c, 0x40, 0x40, 0x10, 0x00, 0x50, 0x20, 0x10, 0x0c,
+ 0xa7, 0x48, 0x00, 0x04, 0xe3, 0x20, 0x50, 0x00, 0x00, 0x04, 0x40, 0x40,
+ 0x10, 0x0a, 0x50, 0x30, 0x10, 0x10, 0xc0, 0xf4, 0xff, 0xff, 0xff, 0x6b,
+ 0xa7, 0x39, 0x00, 0x40, 0xa7, 0x29, 0x00, 0x00, 0xc0, 0xf4, 0xff, 0xff,
+ 0xff, 0xe4, 0x07, 0x07, 0xb9, 0x04, 0x00, 0x13, 0xa7, 0x2a, 0xff, 0xff,
+ 0xb9, 0x04, 0x00, 0x34, 0xa7, 0x48, 0x00, 0x01, 0x15, 0x24, 0xa7, 0x24,
+ 0x00, 0x07, 0xb9, 0x04, 0x00, 0x21, 0xc0, 0xf4, 0xff, 0xff, 0xff, 0x7f,
+ 0xa7, 0x29, 0xff, 0xff, 0x07, 0xfe, 0x07, 0x07, 0xa7, 0x39, 0x00, 0x00,
+ 0x41, 0x13, 0x20, 0x00, 0x95, 0x00, 0x10, 0x00, 0xa7, 0x74, 0x00, 0x05,
+ 0xc0, 0xf4, 0xff, 0xff, 0xff, 0x70, 0xa7, 0x3b, 0x00, 0x01, 0xa7, 0xf4,
+ 0xff, 0xf5, 0x07, 0x07, 0xeb, 0xbf, 0xf0, 0x58, 0x00, 0x24, 0xc0, 0xd0,
+ 0x00, 0x00, 0x00, 0x95, 0xa7, 0xfb, 0xff, 0x60, 0xb9, 0x04, 0x00, 0xb2,
+ 0xa7, 0x19, 0x00, 0x20, 0xc0, 0x20, 0x00, 0x00, 0x4d, 0x40, 0x92, 0x00,
+ 0x20, 0x00, 0xa7, 0x2b, 0x00, 0x01, 0xa7, 0x17, 0xff, 0xfc, 0xc0, 0x10,
+ 0x00, 0x00, 0x4d, 0x37, 0xa7, 0x28, 0x10, 0x00, 0x40, 0x20, 0x10, 0x00,
+ 0xe3, 0x20, 0xd0, 0x00, 0x00, 0x04, 0xc0, 0xe5, 0xff, 0xff, 0xff, 0x1d,
+ 0x12, 0x22, 0xa7, 0x74, 0x00, 0x19, 0xa7, 0x19, 0x00, 0x00, 0xc0, 0x40,
+ 0x00, 0x00, 0x00, 0x79, 0xa7, 0x39, 0x00, 0x08, 0xc0, 0x20, 0x00, 0x00,
+ 0x4d, 0x2c, 0x41, 0x21, 0x20, 0x00, 0xe3, 0x20, 0x20, 0x00, 0x00, 0x90,
+ 0x43, 0x22, 0x40, 0x00, 0x42, 0x21, 0xb0, 0x00, 0xa7, 0x1b, 0x00, 0x01,
+ 0xa7, 0x37, 0xff, 0xf2, 0xe3, 0x40, 0xf1, 0x10, 0x00, 0x04, 0xeb, 0xbf,
+ 0xf0, 0xf8, 0x00, 0x04, 0x07, 0xf4, 0x07, 0x07, 0xeb, 0xaf, 0xf0, 0x50,
+ 0x00, 0x24, 0xc0, 0xd0, 0x00, 0x00, 0x00, 0x55, 0xa7, 0xfb, 0xff, 0x60,
+ 0xa7, 0x19, 0x0f, 0xf8, 0xb9, 0x21, 0x00, 0x31, 0xb9, 0x04, 0x00, 0xa2,
+ 0xa7, 0xc4, 0x00, 0x2a, 0xa7, 0xb9, 0x0f, 0xf8, 0xc0, 0x10, 0x00, 0x00,
+ 0x4c, 0xf6, 0xa7, 0x28, 0x10, 0x00, 0x40, 0x20, 0x10, 0x00, 0x92, 0x00,
+ 0x10, 0x02, 0xe3, 0x20, 0xd0, 0x00, 0x00, 0x04, 0xc0, 0xe5, 0xff, 0xff,
+ 0xfe, 0xda, 0xa7, 0xbb, 0x00, 0x01, 0xa7, 0x19, 0x00, 0x00, 0xa7, 0xb7,
+ 0x00, 0x17, 0xc0, 0x10, 0x00, 0x00, 0x4c, 0xe1, 0xe3, 0x40, 0xf1, 0x10,
+ 0x00, 0x04, 0xe3, 0x20, 0x10, 0x08, 0x00, 0x91, 0xa7, 0x2a, 0xff, 0xf9,
+ 0xb9, 0x14, 0x00, 0x22, 0xeb, 0xaf, 0xf0, 0xf0, 0x00, 0x04, 0x07, 0xf4,
+ 0xb9, 0x04, 0x00, 0xb3, 0xa7, 0xf4, 0xff, 0xd8, 0xc0, 0x20, 0x00, 0x00,
+ 0x4c, 0xcc, 0x41, 0x31, 0xa0, 0x00, 0x41, 0x21, 0x20, 0x00, 0xa7, 0x1b,
+ 0x00, 0x01, 0xd2, 0x00, 0x30, 0x00, 0x20, 0x0f, 0xa7, 0xf4, 0xff, 0xdd,
+ 0x07, 0x07, 0x07, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x20, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x3c, 0x28, 0x2b, 0x7c, 0x26, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x2e, 0x2d, 0x2f, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x60, 0x3a, 0x23,
- 0x40, 0x27, 0x3d, 0x22, 0x2e, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- 0x68, 0x69, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x6a, 0x6b, 0x6c,
- 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x20, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x3c, 0x28, 0x2b, 0x7c, 0x26, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x2e,
+ 0x2d, 0x2f, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2c,
+ 0x25, 0x5f, 0x3e, 0x3f, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x60, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22, 0x2e, 0x61, 0x62, 0x63,
+ 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x41, 0x42, 0x43,
- 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
- 0x59, 0x5a, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x30, 0x31, 0x32, 0x33,
- 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x41, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2e, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
+ 0x51, 0x52, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x53, 0x54,
+ 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x41, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x6f, 0xff, 0xfe, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xd8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
@@ -163,7 +177,15 @@
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x6f, 0xff, 0xff, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xff, 0xff, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x6f, 0xff, 0xff, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -171,83 +193,87 @@
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x47, 0x43, 0x43, 0x3a, 0x20, 0x28, 0x47, 0x4e, 0x55, 0x29, 0x20, 0x38,
- 0x2e, 0x32, 0x2e, 0x31, 0x20, 0x32, 0x30, 0x31, 0x38, 0x30, 0x39, 0x30,
- 0x35, 0x20, 0x28, 0x52, 0x65, 0x64, 0x20, 0x48, 0x61, 0x74, 0x20, 0x38,
- 0x2e, 0x32, 0x2e, 0x31, 0x2d, 0x33, 0x29, 0x00, 0x00, 0x2e, 0x73, 0x68,
- 0x73, 0x74, 0x72, 0x74, 0x61, 0x62, 0x00, 0x2e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x00, 0x2e, 0x67, 0x6e, 0x75, 0x2e, 0x68, 0x61, 0x73, 0x68,
- 0x00, 0x2e, 0x64, 0x79, 0x6e, 0x73, 0x79, 0x6d, 0x00, 0x2e, 0x64, 0x79,
- 0x6e, 0x73, 0x74, 0x72, 0x00, 0x2e, 0x74, 0x65, 0x78, 0x74, 0x00, 0x2e,
- 0x72, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x00, 0x2e, 0x64, 0x79, 0x6e, 0x61,
- 0x6d, 0x69, 0x63, 0x00, 0x2e, 0x67, 0x6f, 0x74, 0x00, 0x2e, 0x62, 0x73,
- 0x73, 0x00, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x00,
+ 0x00, 0x00, 0x17, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x43, 0x43, 0x3a,
+ 0x20, 0x28, 0x55, 0x62, 0x75, 0x6e, 0x74, 0x75, 0x20, 0x31, 0x31, 0x2e,
+ 0x34, 0x2e, 0x30, 0x2d, 0x31, 0x75, 0x62, 0x75, 0x6e, 0x74, 0x75, 0x31,
+ 0x7e, 0x32, 0x32, 0x2e, 0x30, 0x34, 0x29, 0x20, 0x31, 0x31, 0x2e, 0x34,
+ 0x2e, 0x30, 0x00, 0x00, 0x2e, 0x73, 0x68, 0x73, 0x74, 0x72, 0x74, 0x61,
+ 0x62, 0x00, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x00, 0x2e, 0x67,
+ 0x6e, 0x75, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x00, 0x2e, 0x64, 0x79, 0x6e,
+ 0x73, 0x79, 0x6d, 0x00, 0x2e, 0x64, 0x79, 0x6e, 0x73, 0x74, 0x72, 0x00,
+ 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x2e, 0x64, 0x79, 0x6e, 0x00, 0x2e, 0x74,
+ 0x65, 0x78, 0x74, 0x00, 0x2e, 0x72, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x00,
+ 0x2e, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x00, 0x2e, 0x67, 0x6f,
+ 0x74, 0x00, 0x2e, 0x62, 0x73, 0x73, 0x00, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x65, 0x6e, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x01, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x13, 0x6f, 0xff, 0xff, 0xf6, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xd8,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xd8, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x0b,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x01, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x25,
- 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x2d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x30,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x30, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x03, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x05, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xe8,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x24, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b,
- 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x07, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0,
- 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
- 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x08,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xf8,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x07, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x24, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x6f, 0xff, 0xff, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d,
+ 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
+ 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x37,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x48, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x3d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x88, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x17, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x4e,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xd8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x53, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x09, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
diff --git a/tests/plugin/meson.build b/tests/plugin/meson.build
index 2bbfc4b..322cafc 100644
--- a/tests/plugin/meson.build
+++ b/tests/plugin/meson.build
@@ -1,7 +1,13 @@
t = []
-foreach i : ['bb', 'empty', 'insn', 'mem', 'syscall']
- t += shared_module(i, files(i + '.c'),
- include_directories: '../../include/qemu',
- dependencies: glib)
-endforeach
-alias_target('test-plugins', t)
+if get_option('plugins')
+ foreach i : ['bb', 'empty', 'insn', 'mem', 'syscall']
+ t += shared_module(i, files(i + '.c'),
+ include_directories: '../../include/qemu',
+ dependencies: glib)
+ endforeach
+endif
+if t.length() > 0
+ alias_target('test-plugins', t)
+else
+ run_target('test-plugins', command: find_program('true'))
+endif
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
index d145f08..95c1257 100644
--- a/tests/qemu-iotests/common.rc
+++ b/tests/qemu-iotests/common.rc
@@ -979,10 +979,15 @@
#
_require_large_file()
{
- if ! truncate --size="$1" "$TEST_IMG"; then
+ if [ -z "$TEST_IMG_FILE" ]; then
+ FILENAME="$TEST_IMG"
+ else
+ FILENAME="$TEST_IMG_FILE"
+ fi
+ if ! truncate --size="$1" "$FILENAME"; then
_notrun "file system on $TEST_DIR does not support large enough files"
fi
- rm "$TEST_IMG"
+ rm "$FILENAME"
}
# Check that a set of devices is available in the QEMU binary
diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build
index 44761e1..53847cb 100644
--- a/tests/qemu-iotests/meson.build
+++ b/tests/qemu-iotests/meson.build
@@ -1,4 +1,4 @@
-if not have_tools or targetos == 'windows' or get_option('gprof')
+if not have_tools or targetos == 'windows'
subdir_done()
endif
diff --git a/tests/qemu-iotests/tests/nbd-multiconn b/tests/qemu-iotests/tests/nbd-multiconn
index b121f2e..478a1ea 100755
--- a/tests/qemu-iotests/tests/nbd-multiconn
+++ b/tests/qemu-iotests/tests/nbd-multiconn
@@ -142,4 +142,4 @@
iotests.main(supported_fmts=['qcow2'])
except ImportError:
- iotests.notrun('libnbd not installed')
+ iotests.notrun('Python bindings to libnbd are not installed')
diff --git a/tests/qtest/m48t59-test.c b/tests/qtest/m48t59-test.c
index 9487faf..b9cd209 100644
--- a/tests/qtest/m48t59-test.c
+++ b/tests/qtest/m48t59-test.c
@@ -192,19 +192,22 @@
}
if (!(tm_cmp(&start, datep) <= 0 && tm_cmp(datep, &end) <= 0)) {
- long t, s;
+ long date_s, start_s;
+ unsigned long diff;
start.tm_isdst = datep->tm_isdst;
- t = (long)mktime(datep);
- s = (long)mktime(&start);
- if (t < s) {
- g_test_message("RTC is %ld second(s) behind wall-clock", (s - t));
+ date_s = (long)mktime(datep);
+ start_s = (long)mktime(&start);
+ if (date_s < start_s) {
+ diff = start_s - date_s;
+ g_test_message("RTC is %ld second(s) behind wall-clock", diff);
} else {
- g_test_message("RTC is %ld second(s) ahead of wall-clock", (t - s));
+ diff = date_s - start_s;
+ g_test_message("RTC is %ld second(s) ahead of wall-clock", diff);
}
- g_assert_cmpint(ABS(t - s), <=, wiggle);
+ g_assert_cmpint(diff, <=, wiggle);
}
qtest_quit(qts);
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index 1b43df5..46f1c27 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -116,6 +116,7 @@
#endif
static char *tmpfs;
+static char *bootpath;
/* The boot file modifies memory area in [start_address, end_address)
* repeatedly. It outputs a 'B' at a fixed rate while it's still running.
@@ -124,14 +125,47 @@
#include "tests/migration/aarch64/a-b-kernel.h"
#include "tests/migration/s390x/a-b-bios.h"
-static void init_bootfile(const char *bootpath, void *content, size_t len)
+static void bootfile_create(char *dir)
{
+ const char *arch = qtest_get_arch();
+ unsigned char *content;
+ size_t len;
+
+ bootpath = g_strdup_printf("%s/bootsect", dir);
+ if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
+ /* the assembled x86 boot sector should be exactly one sector large */
+ g_assert(sizeof(x86_bootsect) == 512);
+ content = x86_bootsect;
+ len = sizeof(x86_bootsect);
+ } else if (g_str_equal(arch, "s390x")) {
+ content = s390x_elf;
+ len = sizeof(s390x_elf);
+ } else if (strcmp(arch, "ppc64") == 0) {
+ /*
+ * sane architectures can be programmed at the boot prompt
+ */
+ return;
+ } else if (strcmp(arch, "aarch64") == 0) {
+ content = aarch64_kernel;
+ len = sizeof(aarch64_kernel);
+ g_assert(sizeof(aarch64_kernel) <= ARM_TEST_MAX_KERNEL_SIZE);
+ } else {
+ g_assert_not_reached();
+ }
+
FILE *bootfile = fopen(bootpath, "wb");
g_assert_cmpint(fwrite(content, len, 1, bootfile), ==, 1);
fclose(bootfile);
}
+static void bootfile_delete(void)
+{
+ unlink(bootpath);
+ g_free(bootpath);
+ bootpath = NULL;
+}
+
/*
* Wait for some output in the serial output file,
* we get an 'A' followed by an endless string of 'B's
@@ -707,9 +741,9 @@
g_autofree gchar *cmd_source = NULL;
g_autofree gchar *cmd_target = NULL;
const gchar *ignore_stderr;
- g_autofree char *bootpath = NULL;
g_autofree char *shmem_opts = NULL;
g_autofree char *shmem_path = NULL;
+ const char *kvm_opts = NULL;
const char *arch = qtest_get_arch();
const char *memory_size;
@@ -722,17 +756,12 @@
got_src_stop = false;
got_dst_resume = false;
- bootpath = g_strdup_printf("%s/bootsect", tmpfs);
if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
- /* the assembled x86 boot sector should be exactly one sector large */
- assert(sizeof(x86_bootsect) == 512);
- init_bootfile(bootpath, x86_bootsect, sizeof(x86_bootsect));
memory_size = "150M";
arch_opts = g_strdup_printf("-drive file=%s,format=raw", bootpath);
start_address = X86_TEST_MEM_START;
end_address = X86_TEST_MEM_END;
} else if (g_str_equal(arch, "s390x")) {
- init_bootfile(bootpath, s390x_elf, sizeof(s390x_elf));
memory_size = "128M";
arch_opts = g_strdup_printf("-bios %s", bootpath);
start_address = S390_TEST_MEM_START;
@@ -747,14 +776,11 @@
"until'", end_address, start_address);
arch_opts = g_strdup("-nodefaults -machine vsmt=8");
} else if (strcmp(arch, "aarch64") == 0) {
- init_bootfile(bootpath, aarch64_kernel, sizeof(aarch64_kernel));
memory_size = "150M";
arch_opts = g_strdup_printf("-machine virt,gic-version=max -cpu max "
"-kernel %s", bootpath);
start_address = ARM_TEST_MEM_START;
end_address = ARM_TEST_MEM_END;
-
- g_assert(sizeof(aarch64_kernel) <= ARM_TEST_MAX_KERNEL_SIZE);
} else {
g_assert_not_reached();
}
@@ -780,9 +806,10 @@
"-object memory-backend-file,id=mem0,size=%s"
",mem-path=%s,share=on -numa node,memdev=mem0",
memory_size, shmem_path);
- } else {
- shmem_path = NULL;
- shmem_opts = g_strdup("");
+ }
+
+ if (args->use_dirty_ring) {
+ kvm_opts = ",dirty-ring-size=4096";
}
cmd_source = g_strdup_printf("-accel kvm%s -accel tcg "
@@ -790,12 +817,11 @@
"-m %s "
"-serial file:%s/src_serial "
"%s %s %s %s %s",
- args->use_dirty_ring ?
- ",dirty-ring-size=4096" : "",
+ kvm_opts ? kvm_opts : "",
memory_size, tmpfs,
arch_opts ? arch_opts : "",
arch_source ? arch_source : "",
- shmem_opts,
+ shmem_opts ? shmem_opts : "",
args->opts_source ? args->opts_source : "",
ignore_stderr);
if (!args->only_target) {
@@ -811,12 +837,11 @@
"-serial file:%s/dest_serial "
"-incoming %s "
"%s %s %s %s %s",
- args->use_dirty_ring ?
- ",dirty-ring-size=4096" : "",
+ kvm_opts ? kvm_opts : "",
memory_size, tmpfs, uri,
arch_opts ? arch_opts : "",
arch_target ? arch_target : "",
- shmem_opts,
+ shmem_opts ? shmem_opts : "",
args->opts_target ? args->opts_target : "",
ignore_stderr);
*to = qtest_init(cmd_target);
@@ -863,7 +888,6 @@
qtest_quit(to);
- cleanup("bootsect");
cleanup("migsocket");
cleanup("src_serial");
cleanup("dest_serial");
@@ -2624,15 +2648,7 @@
static QTestState *dirtylimit_start_vm(void)
{
QTestState *vm = NULL;
- g_autofree gchar *cmd = NULL;
- const char *arch = qtest_get_arch();
- g_autofree char *bootpath = NULL;
-
- assert((strcmp(arch, "x86_64") == 0));
- bootpath = g_strdup_printf("%s/bootsect", tmpfs);
- assert(sizeof(x86_bootsect) == 512);
- init_bootfile(bootpath, x86_bootsect, sizeof(x86_bootsect));
-
+ g_autofree gchar *
cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 "
"-name dirtylimit-test,debug-threads=on "
"-m 150M -smp 1 "
@@ -2647,7 +2663,6 @@
static void dirtylimit_stop_vm(QTestState *vm)
{
qtest_quit(vm);
- cleanup("bootsect");
cleanup("vm_serial");
}
@@ -2809,6 +2824,7 @@
g_get_tmp_dir(), err->message);
}
g_assert(tmpfs);
+ bootfile_create(tmpfs);
module_call_init(MODULE_INIT_QOM);
@@ -2956,6 +2972,7 @@
g_assert_cmpint(ret, ==, 0);
+ bootfile_delete();
ret = rmdir(tmpfs);
if (ret != 0) {
g_test_message("unable to rmdir: path (%s): %s",
diff --git a/tests/qtest/test-x86-cpuid-compat.c b/tests/qtest/test-x86-cpuid-compat.c
index b39c905..6a39454 100644
--- a/tests/qtest/test-x86-cpuid-compat.c
+++ b/tests/qtest/test-x86-cpuid-compat.c
@@ -313,18 +313,10 @@
"xlevel2", 0);
}
/*
- * QEMU 1.4.0 had auto-level enabled for CPUID[7], already,
+ * QEMU 2.3.0 had auto-level enabled for CPUID[7], already,
* and the compat code that sets default level shouldn't
* disable the auto-level=7 code:
*/
- if (qtest_has_machine("pc-i440fx-1.4")) {
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off",
- "-machine pc-i440fx-1.4 -cpu Nehalem",
- "level", 2);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on",
- "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on",
- "level", 7);
- }
if (qtest_has_machine("pc-i440fx-2.3")) {
add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
"-machine pc-i440fx-2.3 -cpu Penryn",
diff --git a/tests/tcg/tricore/Makefile.softmmu-target b/tests/tcg/tricore/Makefile.softmmu-target
index 2ec0bd3..258aeb4 100644
--- a/tests/tcg/tricore/Makefile.softmmu-target
+++ b/tests/tcg/tricore/Makefile.softmmu-target
@@ -9,11 +9,15 @@
TESTS += test_abs.asm.tst
TESTS += test_bmerge.asm.tst
TESTS += test_clz.asm.tst
+TESTS += test_crcn.asm.tst
TESTS += test_dextr.asm.tst
TESTS += test_dvstep.asm.tst
TESTS += test_fadd.asm.tst
TESTS += test_fmul.asm.tst
+TESTS += test_ftohp.asm.tst
TESTS += test_ftoi.asm.tst
+TESTS += test_ftou.asm.tst
+TESTS += test_hptof.asm.tst
TESTS += test_imask.asm.tst
TESTS += test_insert.asm.tst
TESTS += test_ld_bu.asm.tst
@@ -25,7 +29,7 @@
TESTS += test_boot_to_main.c.tst
TESTS += test_context_save_areas.c.tst
-QEMU_OPTS += -M tricore_testboard -cpu tc27x -nographic -kernel
+QEMU_OPTS += -M tricore_testboard -cpu tc37x -nographic -kernel
%.pS: $(ASM_TESTS_PATH)/%.S
$(CC) -E -o $@ $<
diff --git a/tests/tcg/tricore/asm/macros.h b/tests/tcg/tricore/asm/macros.h
index b5087b5..e831f73 100644
--- a/tests/tcg/tricore/asm/macros.h
+++ b/tests/tcg/tricore/asm/macros.h
@@ -12,31 +12,31 @@
#define TESTDEV_ADDR 0xf0000000
/* Register definitions */
#define DREG_RS1 %d0
-#define DREG_RS2 %d1
-#define DREG_RS3 %d2
-#define DREG_CALC_RESULT %d3
-#define DREG_CALC_PSW %d4
-#define DREG_CORRECT_PSW %d5
-#define DREG_TEMP_LI %d10
-#define DREG_TEMP %d11
-#define DREG_TEST_NUM %d14
-#define DREG_CORRECT_RESULT %d15
-#define DREG_CORRECT_RESULT_2 %d13
+#define DREG_RS2 %d2
+#define DREG_RS3 %d4
+#define DREG_CALC_RESULT %d5
+#define DREG_CALC_PSW %d6
+#define DREG_CORRECT_PSW %d7
+#define DREG_TEMP_LI %d13
+#define DREG_TEMP %d14
+#define DREG_TEST_NUM %d8
+#define DREG_CORRECT_RESULT %d9
+#define DREG_CORRECT_RESULT_2 %d10
#define AREG_ADDR %a0
#define AREG_CORRECT_RESULT %a3
#define DREG_DEV_ADDR %a15
-#define EREG_RS1 %e6
-#define EREG_RS1_LO %d6
-#define EREG_RS1_HI %d7
-#define EREG_RS2 %e8
-#define EREG_RS2_LO %d8
-#define EREG_RS2_HI %d9
-#define EREG_CALC_RESULT %e8
-#define EREG_CALC_RESULT_HI %d9
-#define EREG_CALC_RESULT_LO %d8
+#define EREG_RS1 %e0
+#define EREG_RS1_LO %d0
+#define EREG_RS1_HI %d1
+#define EREG_RS2 %e2
+#define EREG_RS2_LO %d2
+#define EREG_RS2_HI %d3
+#define EREG_CALC_RESULT %e6
+#define EREG_CALC_RESULT_LO %d6
+#define EREG_CALC_RESULT_HI %d7
#define EREG_CORRECT_RESULT_LO %d0
#define EREG_CORRECT_RESULT_HI %d1
@@ -46,7 +46,8 @@
code; \
LI(DREG_CORRECT_RESULT, correct) \
mov DREG_TEST_NUM, num; \
- jne testreg, DREG_CORRECT_RESULT, fail \
+ jne testreg, DREG_CORRECT_RESULT, fail; \
+ mov testreg, 0
#define TEST_CASE_E(num, correct_lo, correct_hi, code...) \
test_ ## num: \
@@ -161,6 +162,30 @@
insn DREG_CALC_RESULT, DREG_RS1, imm1, DREG_RS2, imm2; \
)
+#define TEST_D_DDII(insn, num, result, rs1, rs2, imm1, imm2) \
+ TEST_CASE(num, DREG_CALC_RESULT, result, \
+ LI(DREG_RS1, rs1); \
+ LI(DREG_RS2, rs2); \
+ rstv; \
+ insn DREG_CALC_RESULT, DREG_RS1, DREG_RS2, imm1, imm2; \
+ )
+
+#define TEST_D_DIE(insn, num, result, rs1, imm1, rs2_lo, rs2_hi)\
+ TEST_CASE(num, DREG_CALC_RESULT, result, \
+ LI(DREG_RS1, rs1); \
+ LI(EREG_RS2_LO, rs2_lo); \
+ LI(EREG_RS2_HI, rs2_hi); \
+ rstv; \
+ insn DREG_CALC_RESULT, DREG_RS1, imm1, EREG_RS2; \
+ )
+
+#define TEST_D_DIII(insn, num, result, rs1, imm1, imm2, imm3)\
+ TEST_CASE(num, DREG_CALC_RESULT, result, \
+ LI(DREG_RS1, rs1); \
+ rstv; \
+ insn DREG_CALC_RESULT, DREG_RS1, imm1, imm2, imm3; \
+ )
+
#define TEST_E_ED(insn, num, res_hi, res_lo, rs1_hi, rs1_lo, rs2) \
TEST_CASE_E(num, res_lo, res_hi, \
LI(EREG_RS1_LO, rs1_lo); \
diff --git a/tests/tcg/tricore/asm/test_crcn.S b/tests/tcg/tricore/asm/test_crcn.S
new file mode 100644
index 0000000..51a2272
--- /dev/null
+++ b/tests/tcg/tricore/asm/test_crcn.S
@@ -0,0 +1,9 @@
+#include "macros.h"
+.text
+.global _start
+_start:
+# insn num result rs1 rs2 rs3
+# | | | | | |
+ TEST_D_DDD(crcn, 1, 0x00002bed, 0x0, 0xa10ddeed, 0x0)
+
+ TEST_PASSFAIL
diff --git a/tests/tcg/tricore/asm/test_ftohp.S b/tests/tcg/tricore/asm/test_ftohp.S
new file mode 100644
index 0000000..9e23141
--- /dev/null
+++ b/tests/tcg/tricore/asm/test_ftohp.S
@@ -0,0 +1,14 @@
+#include "macros.h"
+.text
+.global _start
+_start:
+ TEST_D_D(ftohp, 1, 0xffff, 0xffffffff)
+ TEST_D_D(ftohp, 2, 0xfc00, 0xff800000)
+ TEST_D_D(ftohp, 3, 0x7c00, 0x7f800000)
+ TEST_D_D(ftohp, 4, 0x0, 0x0)
+ TEST_D_D(ftohp, 5, 0x5, 0x34a43580)
+
+ #TEST_D_D_PSW(ftohp, 6, 0x400, 0x8c000b80, 0x387fee74)
+
+ TEST_PASSFAIL
+
diff --git a/tests/tcg/tricore/asm/test_ftou.S b/tests/tcg/tricore/asm/test_ftou.S
new file mode 100644
index 0000000..10f106a
--- /dev/null
+++ b/tests/tcg/tricore/asm/test_ftou.S
@@ -0,0 +1,12 @@
+#include "macros.h"
+.text
+.global _start
+_start:
+ TEST_D_D(ftou, 1, 0x00000000, 0x1733f6c2)
+ TEST_D_D(ftou, 2, 0x00000000, 0x2c9d9cdc)
+ TEST_D_D(ftou, 3, 0xffffffff, 0x56eb7395)
+ TEST_D_D(ftou, 4, 0x79900800, 0x4ef32010)
+ TEST_D_D(ftou, 5, 0x0353f510, 0x4c54fd44)
+
+ TEST_PASSFAIL
+
diff --git a/tests/tcg/tricore/asm/test_hptof.S b/tests/tcg/tricore/asm/test_hptof.S
new file mode 100644
index 0000000..8adc5e5
--- /dev/null
+++ b/tests/tcg/tricore/asm/test_hptof.S
@@ -0,0 +1,12 @@
+#include "macros.h"
+.text
+.global _start
+_start:
+ TEST_D_D(hptof, 1, 0xba190000, 0xcc0e90c8)
+ TEST_D_D(hptof, 2, 0x3eaea000, 0x8be23575)
+ TEST_D_D(hptof, 3, 0xc33b8000, 0xcc48d9dc)
+ TEST_D_D(hptof, 4, 0x43e2a000, 0xaef95f15)
+ TEST_D_D(hptof, 5, 0x3d55e000, 0x04932aaf)
+
+ TEST_PASSFAIL
+
diff --git a/tests/tcg/tricore/asm/test_insert.S b/tests/tcg/tricore/asm/test_insert.S
index d5fd223..223d7ce 100644
--- a/tests/tcg/tricore/asm/test_insert.S
+++ b/tests/tcg/tricore/asm/test_insert.S
@@ -6,4 +6,18 @@
# | | | | | | |
TEST_D_DIDI(insert, 1, 0x7fffffff, 0xffffffff, 0xa, 0x10, 0x8)
+# insn num result rs1 imm1 imm2 imm3
+# | | | | | | |
+ TEST_D_DIII(insert, 2, 0xd38fe370, 0xd38fe370, 0x4, 0x4 , 0x0)
+ TEST_D_DIII(insert, 3, 0xd38fe374, 0xd38fe370, 0x4, 0x0 , 0x4)
+
+# insn num result rs1 rs2 pos width
+# | | | | | | |
+ TEST_D_DDII(insert, 4, 0x03c1e53c, 0x03c1e53c, 0x45821385, 0x7 ,0x0)
+
+# insn num result rs1 imm1 rs2_h rs2_l
+# | | | | | | |
+ TEST_D_DIE(insert, 5, 0xe30c308d, 0xe30c308d ,0x3 , 0x00000000 ,0x00000000)
+ TEST_D_DIE(insert, 6, 0x669b0120, 0x669b2820 ,0x2 , 0x5530a1c7 ,0x3a2b0f67)
+
TEST_PASSFAIL
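
The new TEST_D_DIII, TEST_D_DDII and TEST_D_DIE macros added to macros.h follow the same shape as the existing ones: load the source operands into fixed registers, reset the PSW with rstv, run the instruction under test, and let TEST_CASE branch to fail when the result register differs from the expected value. Purely as an illustration of that pattern (the real tests are TriCore assembly; every name in the C sketch below is invented, and insert_bits() only mimics the insert semantics the new test cases exercise):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative C analogue of the asm TEST_CASE macro: run `code`,
     * then compare the result against the expected value. */
    #define TEST_CASE(num, expected, code)                          \
        do {                                                        \
            unsigned int result;                                    \
            code;                                                   \
            if (result != (expected)) {                             \
                fprintf(stderr, "test %d failed\n", (num));         \
                exit(1);                                            \
            }                                                       \
        } while (0)

    /* Analogue of the insert behaviour checked above: place `width`
     * bits of `val` into `rs1` starting at bit `pos`. */
    static unsigned int insert_bits(unsigned int rs1, unsigned int val,
                                    unsigned int pos, unsigned int width)
    {
        unsigned int mask = (width >= 32 ? ~0u : (1u << width) - 1u) << pos;

        return (rs1 & ~mask) | ((val << pos) & mask);
    }

    /* Analogue of TEST_D_DIII: one register source plus three immediates. */
    #define TEST_INSERT_DIII(num, expected, rs1, val, pos, width)   \
        TEST_CASE(num, expected,                                    \
                  result = insert_bits((rs1), (val), (pos), (width)))

    int main(void)
    {
        /* Mirrors TEST_D_DIII(insert, 3, 0xd38fe374, 0xd38fe370, 0x4, 0x0, 0x4) */
        TEST_INSERT_DIII(3, 0xd38fe374, 0xd38fe370u, 0x4, 0x0, 0x4);
        puts("all tests passed");
        return 0;
    }
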
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
index cb587e3..ac35d65 100644
--- a/tests/unit/test-throttle.c
+++ b/tests/unit/test-throttle.c
@@ -625,7 +625,7 @@
throttle_config_init(&cfg);
for (i = 0; i < 3; i++) {
- BucketType index = to_test[is_ops][i];
+ index = to_test[is_ops][i];
cfg.buckets[index].avg = avg;
}
diff --git a/ui/cocoa.m b/ui/cocoa.m
index df6d13b..145f42d 100644
--- a/ui/cocoa.m
+++ b/ui/cocoa.m
@@ -2001,7 +2001,7 @@
COCOA_DEBUG("qemu_cocoa: cocoa_refresh\n");
graphic_hw_update(NULL);
- if (qemu_input_is_absolute()) {
+ if (qemu_input_is_absolute(dcl->con)) {
dispatch_async(dispatch_get_main_queue(), ^{
if (![cocoaView isAbsoluteEnabled]) {
if ([cocoaView isMouseGrabbed]) {
diff --git a/ui/console.c b/ui/console.c
index 4a4f19e..8ee66d1 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -1434,25 +1434,23 @@
return con->gl_block;
}
-bool qemu_console_is_multihead(DeviceState *dev)
+static bool qemu_graphic_console_is_multihead(QemuGraphicConsole *c)
{
QemuConsole *con;
- Object *obj;
- uint32_t f = 0xffffffff;
- uint32_t h;
QTAILQ_FOREACH(con, &consoles, next) {
- obj = object_property_get_link(OBJECT(con),
- "device", &error_abort);
- if (DEVICE(obj) != dev) {
+ QemuGraphicConsole *candidate;
+
+ if (!QEMU_IS_GRAPHIC_CONSOLE(con)) {
continue;
}
- h = object_property_get_uint(OBJECT(con),
- "head", &error_abort);
- if (f == 0xffffffff) {
- f = h;
- } else if (h != f) {
+ candidate = QEMU_GRAPHIC_CONSOLE(con);
+ if (candidate->device != c->device) {
+ continue;
+ }
+
+ if (candidate->head != c->head) {
return true;
}
}
@@ -1468,7 +1466,7 @@
bool multihead;
dev = DEVICE(c->device);
- multihead = qemu_console_is_multihead(dev);
+ multihead = qemu_graphic_console_is_multihead(c);
if (multihead) {
return g_strdup_printf("%s.%d", dev->id ?
dev->id :
diff --git a/ui/dbus-console.c b/ui/dbus-console.c
index 36f7349..49da9cc 100644
--- a/ui/dbus-console.c
+++ b/ui/dbus-console.c
@@ -386,7 +386,7 @@
{
trace_dbus_mouse_rel_motion(dx, dy);
- if (qemu_input_is_absolute()) {
+ if (qemu_input_is_absolute(ddc->dcl.con)) {
g_dbus_method_invocation_return_error(
invocation, DBUS_DISPLAY_ERROR,
DBUS_DISPLAY_ERROR_INVALID,
@@ -453,7 +453,7 @@
trace_dbus_mouse_set_pos(x, y);
- if (!qemu_input_is_absolute()) {
+ if (!qemu_input_is_absolute(ddc->dcl.con)) {
g_dbus_method_invocation_return_error(
invocation, DBUS_DISPLAY_ERROR,
DBUS_DISPLAY_ERROR_INVALID,
@@ -514,7 +514,7 @@
dbus_mouse_update_is_absolute(DBusDisplayConsole *ddc)
{
g_object_set(ddc->iface_mouse,
- "is-absolute", qemu_input_is_absolute(),
+ "is-absolute", qemu_input_is_absolute(ddc->dcl.con),
NULL);
}
diff --git a/ui/dbus.c b/ui/dbus.c
index 32f1bbe..866467a 100644
--- a/ui/dbus.c
+++ b/ui/dbus.c
@@ -220,9 +220,8 @@
}
if (dd->audiodev && *dd->audiodev) {
- AudioState *audio_state = audio_state_by_name(dd->audiodev);
+ AudioState *audio_state = audio_state_by_name(dd->audiodev, errp);
if (!audio_state) {
- error_setg(errp, "Audiodev '%s' not found", dd->audiodev);
return;
}
if (!g_str_equal(audio_state->drv->name, "dbus")) {
diff --git a/ui/gtk.c b/ui/gtk.c
index e09f97a..935de12 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -204,7 +204,7 @@
}
window = gtk_widget_get_window(GTK_WIDGET(vc->gfx.drawing_area));
- if (s->full_screen || qemu_input_is_absolute() || s->ptr_owner == vc) {
+ if (s->full_screen || qemu_input_is_absolute(vc->gfx.dcl.con) || s->ptr_owner == vc) {
gdk_window_set_cursor(window, s->null_cursor);
} else {
gdk_window_set_cursor(window, NULL);
@@ -453,7 +453,7 @@
gint x_root, y_root;
if (!gtk_widget_get_realized(vc->gfx.drawing_area) ||
- qemu_input_is_absolute()) {
+ qemu_input_is_absolute(dcl->con)) {
return;
}
@@ -689,7 +689,7 @@
s = container_of(notify, GtkDisplayState, mouse_mode_notifier);
/* release the grab at switching to absolute mode */
- if (qemu_input_is_absolute() && s->ptr_owner) {
+ if (s->ptr_owner && qemu_input_is_absolute(s->ptr_owner->gfx.dcl.con)) {
if (!s->ptr_owner->window) {
gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item),
FALSE);
@@ -726,6 +726,10 @@
{
QemuUIInfo info;
+ if (!dpy_ui_info_supported(vc->gfx.dcl.con)) {
+ return;
+ }
+
info = *dpy_get_ui_info(vc->gfx.dcl.con);
info.refresh_rate = refresh_rate;
dpy_set_ui_info(vc->gfx.dcl.con, &info, true);
@@ -735,6 +739,10 @@
{
QemuUIInfo info;
+ if (!dpy_ui_info_supported(vc->gfx.dcl.con)) {
+ return;
+ }
+
info = *dpy_get_ui_info(vc->gfx.dcl.con);
info.width = width;
info.height = height;
@@ -903,7 +911,7 @@
x = (motion->x - mx) / vc->gfx.scale_x * ws;
y = (motion->y - my) / vc->gfx.scale_y * ws;
- if (qemu_input_is_absolute()) {
+ if (qemu_input_is_absolute(vc->gfx.dcl.con)) {
if (x < 0 || y < 0 ||
x >= surface_width(vc->gfx.ds) ||
y >= surface_height(vc->gfx.ds)) {
@@ -923,15 +931,15 @@
s->last_y = y;
s->last_set = TRUE;
- if (!qemu_input_is_absolute() && s->ptr_owner == vc) {
+ if (!qemu_input_is_absolute(vc->gfx.dcl.con) && s->ptr_owner == vc) {
GdkScreen *screen = gtk_widget_get_screen(vc->gfx.drawing_area);
GdkDisplay *dpy = gtk_widget_get_display(widget);
GdkWindow *win = gtk_widget_get_window(widget);
GdkMonitor *monitor = gdk_display_get_monitor_at_window(dpy, win);
GdkRectangle geometry;
- int x = (int)motion->x_root;
- int y = (int)motion->y_root;
+ int xr = (int)motion->x_root;
+ int yr = (int)motion->y_root;
gdk_monitor_get_geometry(monitor, &geometry);
@@ -942,13 +950,13 @@
* may still be only half way across the screen. Without
* this warp, the server pointer would thus appear to hit
* an invisible wall */
- if (x <= geometry.x || x - geometry.x >= geometry.width - 1 ||
- y <= geometry.y || y - geometry.y >= geometry.height - 1) {
+ if (xr <= geometry.x || xr - geometry.x >= geometry.width - 1 ||
+ yr <= geometry.y || yr - geometry.y >= geometry.height - 1) {
GdkDevice *dev = gdk_event_get_device((GdkEvent *)motion);
- x = geometry.x + geometry.width / 2;
- y = geometry.y + geometry.height / 2;
+ xr = geometry.x + geometry.width / 2;
+ yr = geometry.y + geometry.height / 2;
- gdk_device_warp(dev, screen, x, y);
+ gdk_device_warp(dev, screen, xr, yr);
s->last_set = FALSE;
return FALSE;
}
@@ -965,7 +973,7 @@
/* implicitly grab the input at the first click in the relative mode */
if (button->button == 1 && button->type == GDK_BUTTON_PRESS &&
- !qemu_input_is_absolute() && s->ptr_owner != vc) {
+ !qemu_input_is_absolute(vc->gfx.dcl.con) && s->ptr_owner != vc) {
if (!vc->window) {
gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item),
TRUE);
diff --git a/ui/input.c b/ui/input.c
index 1aad64b..cbe8573 100644
--- a/ui/input.c
+++ b/ui/input.c
@@ -56,7 +56,7 @@
s->id = id++;
QTAILQ_INSERT_TAIL(&handlers, s, node);
- qemu_input_check_mode_change();
+ notifier_list_notify(&mouse_mode_notifiers, NULL);
return s;
}
@@ -64,21 +64,21 @@
{
QTAILQ_REMOVE(&handlers, s, node);
QTAILQ_INSERT_HEAD(&handlers, s, node);
- qemu_input_check_mode_change();
+ notifier_list_notify(&mouse_mode_notifiers, NULL);
}
void qemu_input_handler_deactivate(QemuInputHandlerState *s)
{
QTAILQ_REMOVE(&handlers, s, node);
QTAILQ_INSERT_TAIL(&handlers, s, node);
- qemu_input_check_mode_change();
+ notifier_list_notify(&mouse_mode_notifiers, NULL);
}
void qemu_input_handler_unregister(QemuInputHandlerState *s)
{
QTAILQ_REMOVE(&handlers, s, node);
g_free(s);
- qemu_input_check_mode_change();
+ notifier_list_notify(&mouse_mode_notifiers, NULL);
}
void qemu_input_handler_bind(QemuInputHandlerState *s,
@@ -494,12 +494,12 @@
}
}
-bool qemu_input_is_absolute(void)
+bool qemu_input_is_absolute(QemuConsole *con)
{
QemuInputHandlerState *s;
s = qemu_input_find_handler(INPUT_EVENT_MASK_REL | INPUT_EVENT_MASK_ABS,
- NULL);
+ con);
return (s != NULL) && (s->handler->mask & INPUT_EVENT_MASK_ABS);
}
@@ -583,21 +583,6 @@
qemu_input_event_send(src, &evt);
}
-void qemu_input_check_mode_change(void)
-{
- static int current_is_absolute;
- int is_absolute;
-
- is_absolute = qemu_input_is_absolute();
-
- if (is_absolute != current_is_absolute) {
- trace_input_mouse_mode(is_absolute);
- notifier_list_notify(&mouse_mode_notifiers, NULL);
- }
-
- current_is_absolute = is_absolute;
-}
-
void qemu_add_mouse_mode_change_notifier(Notifier *notify)
{
notifier_list_add(&mouse_mode_notifiers, notify);
@@ -657,6 +642,6 @@
}
qemu_input_handler_activate(s);
- qemu_input_check_mode_change();
+ notifier_list_notify(&mouse_mode_notifiers, NULL);
return true;
}
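
The ui/input.c hunks change qemu_input_is_absolute() to take the QemuConsole whose input handlers should be considered, and they drop the cached-flag indirection of qemu_input_check_mode_change() in favour of notifying the mouse-mode notifier list on every handler change. The per-console lookup idea is roughly the following standalone sketch; the structure names are invented, and QEMU's real qemu_input_find_handler() applies its own ordering rules on top of this:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch only: each input handler may be bound to one console; a
     * query for console `con` prefers a handler bound to that console
     * and otherwise falls back to the first unbound handler. */
    struct console;                     /* opaque stand-in for QemuConsole */

    struct handler {
        struct console *bound_con;      /* NULL when not bound to a console */
        bool absolute;                  /* advertises absolute pointer events */
        struct handler *next;
    };

    static struct handler *handlers;    /* registration list, newest first */

    static bool input_is_absolute(struct console *con)
    {
        struct handler *h, *fallback = NULL;

        for (h = handlers; h; h = h->next) {
            if (con && h->bound_con == con) {
                return h->absolute;     /* exact per-console match wins */
            }
            if (!h->bound_con && !fallback) {
                fallback = h;           /* remember the first unbound handler */
            }
        }
        return fallback && fallback->absolute;
    }

    int main(void)
    {
        struct handler tablet = { .bound_con = NULL, .absolute = true };

        handlers = &tablet;
        /* With only an unbound absolute handler registered, a query for
         * "no particular console" (NULL) still reports absolute mode. */
        printf("absolute: %d\n", input_is_absolute(NULL));
        return 0;
    }
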
diff --git a/ui/qemu-pixman.c b/ui/qemu-pixman.c
index be00a96..b43ec38 100644
--- a/ui/qemu-pixman.c
+++ b/ui/qemu-pixman.c
@@ -96,7 +96,9 @@
} drm_format_pixman_map[] = {
{ DRM_FORMAT_RGB888, PIXMAN_LE_r8g8b8 },
{ DRM_FORMAT_ARGB8888, PIXMAN_LE_a8r8g8b8 },
- { DRM_FORMAT_XRGB8888, PIXMAN_LE_x8r8g8b8 }
+ { DRM_FORMAT_XRGB8888, PIXMAN_LE_x8r8g8b8 },
+ { DRM_FORMAT_XBGR8888, PIXMAN_LE_x8b8g8r8 },
+ { DRM_FORMAT_ABGR8888, PIXMAN_LE_a8b8g8r8 },
};
pixman_format_code_t qemu_drm_format_to_pixman(uint32_t drm_format)
diff --git a/ui/sdl2.c b/ui/sdl2.c
index 178cc05..fbfdb64 100644
--- a/ui/sdl2.c
+++ b/ui/sdl2.c
@@ -203,7 +203,7 @@
SDL_ShowCursor(SDL_DISABLE);
SDL_SetCursor(sdl_cursor_hidden);
- if (!qemu_input_is_absolute()) {
+ if (!qemu_input_is_absolute(scon->dcl.con)) {
SDL_SetRelativeMouseMode(SDL_TRUE);
}
}
@@ -214,12 +214,12 @@
return;
}
- if (!qemu_input_is_absolute()) {
+ if (!qemu_input_is_absolute(scon->dcl.con)) {
SDL_SetRelativeMouseMode(SDL_FALSE);
}
if (guest_cursor &&
- (gui_grab || qemu_input_is_absolute() || absolute_enabled)) {
+ (gui_grab || qemu_input_is_absolute(scon->dcl.con) || absolute_enabled)) {
SDL_SetCursor(guest_sprite);
} else {
SDL_SetCursor(sdl_cursor_normal);
@@ -245,7 +245,7 @@
}
if (guest_cursor) {
SDL_SetCursor(guest_sprite);
- if (!qemu_input_is_absolute() && !absolute_enabled) {
+ if (!qemu_input_is_absolute(scon->dcl.con) && !absolute_enabled) {
SDL_WarpMouseInWindow(scon->real_window, guest_x, guest_y);
}
} else {
@@ -280,7 +280,7 @@
static void sdl_mouse_mode_change(Notifier *notify, void *data)
{
- if (qemu_input_is_absolute()) {
+ if (qemu_input_is_absolute(sdl2_console[0].dcl.con)) {
if (!absolute_enabled) {
absolute_enabled = 1;
SDL_SetRelativeMouseMode(SDL_FALSE);
@@ -311,7 +311,7 @@
prev_state = state;
}
- if (qemu_input_is_absolute()) {
+ if (qemu_input_is_absolute(scon->dcl.con)) {
qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_X,
x, 0, surface_width(scon->surface));
qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_Y,
@@ -497,7 +497,7 @@
return;
}
- if (qemu_input_is_absolute() || absolute_enabled) {
+ if (qemu_input_is_absolute(scon->dcl.con) || absolute_enabled) {
int scr_w, scr_h;
SDL_GetWindowSize(scon->real_window, &scr_w, &scr_h);
max_x = scr_w - 1;
@@ -513,7 +513,7 @@
sdl_grab_start(scon);
}
}
- if (gui_grab || qemu_input_is_absolute() || absolute_enabled) {
+ if (gui_grab || qemu_input_is_absolute(scon->dcl.con) || absolute_enabled) {
sdl_send_mouse_event(scon, ev->motion.xrel, ev->motion.yrel,
ev->motion.x, ev->motion.y, ev->motion.state);
}
@@ -530,7 +530,7 @@
}
bev = &ev->button;
- if (!gui_grab && !qemu_input_is_absolute()) {
+ if (!gui_grab && !qemu_input_is_absolute(scon->dcl.con)) {
if (ev->type == SDL_MOUSEBUTTONUP && bev->button == SDL_BUTTON_LEFT) {
/* start grabbing all events */
sdl_grab_start(scon);
@@ -603,7 +603,7 @@
}
/* fall through */
case SDL_WINDOWEVENT_ENTER:
- if (!gui_grab && (qemu_input_is_absolute() || absolute_enabled)) {
+ if (!gui_grab && (qemu_input_is_absolute(scon->dcl.con) || absolute_enabled)) {
absolute_mouse_grab(scon);
}
/* If a new console window opened using a hotkey receives the
@@ -733,9 +733,9 @@
if (!guest_cursor) {
sdl_show_cursor(scon);
}
- if (gui_grab || qemu_input_is_absolute() || absolute_enabled) {
+ if (gui_grab || qemu_input_is_absolute(scon->dcl.con) || absolute_enabled) {
SDL_SetCursor(guest_sprite);
- if (!qemu_input_is_absolute() && !absolute_enabled) {
+ if (!qemu_input_is_absolute(scon->dcl.con) && !absolute_enabled) {
SDL_WarpMouseInWindow(scon->real_window, x, y);
}
}
@@ -773,7 +773,7 @@
return;
}
if (guest_cursor &&
- (gui_grab || qemu_input_is_absolute() || absolute_enabled)) {
+ (gui_grab || qemu_input_is_absolute(dcl->con) || absolute_enabled)) {
SDL_SetCursor(guest_sprite);
}
}
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 5cc47bd..6eb98a5 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -1081,15 +1081,16 @@
}
if (render_cursor) {
- int x, y;
+ int ptr_x, ptr_y;
+
qemu_mutex_lock(&ssd->lock);
- x = ssd->ptr_x;
- y = ssd->ptr_y;
+ ptr_x = ssd->ptr_x;
+ ptr_y = ssd->ptr_y;
qemu_mutex_unlock(&ssd->lock);
egl_texture_blit(ssd->gls, &ssd->blit_fb, &ssd->guest_fb,
!y_0_top);
egl_texture_blend(ssd->gls, &ssd->blit_fb, &ssd->cursor_fb,
- !y_0_top, x, y, 1.0, 1.0);
+ !y_0_top, ptr_x, ptr_y, 1.0, 1.0);
glFlush();
}
diff --git a/ui/spice-input.c b/ui/spice-input.c
index bbd5025..a5c5d78 100644
--- a/ui/spice-input.c
+++ b/ui/spice-input.c
@@ -224,7 +224,7 @@
static void mouse_mode_notifier(Notifier *notifier, void *data)
{
QemuSpicePointer *pointer = container_of(notifier, QemuSpicePointer, mouse_mode);
- bool is_absolute = qemu_input_is_absolute();
+ bool is_absolute = qemu_input_is_absolute(NULL);
if (pointer->absolute == is_absolute) {
return;
diff --git a/ui/trace-events b/ui/trace-events
index 76b19a2..16c35c9 100644
--- a/ui/trace-events
+++ b/ui/trace-events
@@ -92,7 +92,6 @@
input_event_abs(int conidx, const char *axis, int value) "con %d, axis %s, value 0x%x"
input_event_mtt(int conidx, const char *axis, int value) "con %d, axis %s, value 0x%x"
input_event_sync(void) ""
-input_mouse_mode(int absolute) "absolute %d"
# sdl2-input.c
sdl2_process_key(int sdl_scancode, int qcode, const char *action) "translated SDL scancode %d to QKeyCode %d (%s)"
diff --git a/ui/vnc-enc-zrle.c.inc b/ui/vnc-enc-zrle.c.inc
index a8ca37d..2ef7501 100644
--- a/ui/vnc-enc-zrle.c.inc
+++ b/ui/vnc-enc-zrle.c.inc
@@ -153,11 +153,12 @@
}
if (use_rle) {
- ZRLE_PIXEL *ptr = data;
- ZRLE_PIXEL *end = ptr + w * h;
ZRLE_PIXEL *run_start;
ZRLE_PIXEL pix;
+ ptr = data;
+ end = ptr + w * h;
+
while (ptr < end) {
int len;
int index = 0;
@@ -198,7 +199,7 @@
}
} else if (use_palette) { /* no RLE */
int bppp;
- ZRLE_PIXEL *ptr = data;
+ ptr = data;
/* packed pixels */
@@ -241,8 +242,6 @@
#endif
{
#ifdef ZRLE_COMPACT_PIXEL
- ZRLE_PIXEL *ptr;
-
for (ptr = data; ptr < data + w * h; ptr++) {
ZRLE_WRITE_PIXEL(vs, *ptr);
}
diff --git a/ui/vnc-palette.c b/ui/vnc-palette.c
index dc7c0ba..4e88c41 100644
--- a/ui/vnc-palette.c
+++ b/ui/vnc-palette.c
@@ -86,8 +86,6 @@
return 0;
}
if (!entry) {
- VncPaletteEntry *entry;
-
entry = &palette->pool[palette->size];
entry->color = color;
entry->idx = idx;
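
The vnc-palette.c fix above, like the test-throttle.c and vhost-user-server.c hunks elsewhere in this series, removes an inner declaration that shadowed the variable the code actually meant to update, so the assignment silently went to a throwaway local. A minimal reproduction of that bug class, with illustrative names only:

    #include <stdio.h>

    struct entry { int color; };

    static struct entry pool[1];

    static struct entry *lookup(int color)
    {
        (void)color;
        return NULL;                      /* pretend the colour is not cached yet */
    }

    int main(void)
    {
        int color = 42;
        struct entry *entry = lookup(color);

        if (!entry) {
    #ifdef BUGGY
            struct entry *entry;          /* BUG: shadows the outer `entry` */
    #endif
            entry = &pool[0];             /* with BUGGY, only the inner copy is set */
            entry->color = color;
        }

        /* With -DBUGGY this dereferences a NULL pointer; without it, the
         * assignment above reached the outer variable as intended. */
        printf("entry color: %d\n", entry->color);
        return 0;
    }

Compiling with -DBUGGY reproduces the failure (and -Wshadow flags it at build time); the fix in all three files is simply to drop the inner declaration.
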
diff --git a/ui/vnc.c b/ui/vnc.c
index 6fd8699..6056028 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -1584,15 +1584,15 @@
*/
static int vnc_client_read(VncState *vs)
{
- size_t ret;
+ size_t sz;
#ifdef CONFIG_VNC_SASL
if (vs->sasl.conn && vs->sasl.runSSF)
- ret = vnc_client_read_sasl(vs);
+ sz = vnc_client_read_sasl(vs);
else
#endif /* CONFIG_VNC_SASL */
- ret = vnc_client_read_plain(vs);
- if (!ret) {
+ sz = vnc_client_read_plain(vs);
+ if (!sz) {
if (vs->disconnecting) {
vnc_disconnect_finish(vs);
return -1;
@@ -1771,7 +1771,7 @@
static void check_pointer_type_change(Notifier *notifier, void *data)
{
VncState *vs = container_of(notifier, VncState, mouse_mode_notifier);
- int absolute = qemu_input_is_absolute();
+ int absolute = qemu_input_is_absolute(vs->vd->dcl.con);
if (vnc_has_feature(vs, VNC_FEATURE_POINTER_TYPE_CHANGE) && vs->absolute != absolute) {
vnc_lock_output(vs);
@@ -2195,7 +2195,10 @@
send_ext_key_event_ack(vs);
break;
case VNC_ENCODING_AUDIO:
- send_ext_audio_ack(vs);
+ if (vs->vd->audio_state) {
+ vs->features |= VNC_FEATURE_AUDIO_MASK;
+ send_ext_audio_ack(vs);
+ }
break;
case VNC_ENCODING_WMVi:
vs->features |= VNC_FEATURE_WMVI_MASK;
@@ -2205,7 +2208,7 @@
break;
case VNC_ENCODING_XVP:
if (vs->vd->power_control) {
- vs->features |= VNC_FEATURE_XVP;
+ vs->features |= VNC_FEATURE_XVP_MASK;
send_xvp_message(vs, VNC_XVP_CODE_INIT);
}
break;
@@ -2454,7 +2457,7 @@
vnc_client_cut_text(vs, read_u32(data, 4), data + 8);
break;
case VNC_MSG_CLIENT_XVP:
- if (!(vs->features & VNC_FEATURE_XVP)) {
+ if (!vnc_has_feature(vs, VNC_FEATURE_XVP)) {
error_report("vnc: xvp client message while disabled");
vnc_client_error(vs);
break;
@@ -2502,6 +2505,12 @@
read_u32(data, 4), read_u32(data, 8));
break;
case VNC_MSG_CLIENT_QEMU_AUDIO:
+ if (!vnc_has_feature(vs, VNC_FEATURE_AUDIO)) {
+ error_report("Audio message %d with audio disabled", read_u8(data, 2));
+ vnc_client_error(vs);
+ break;
+ }
+
if (len == 2)
return 4;
@@ -2551,7 +2560,7 @@
vs, vs->ioc, vs->as.fmt, vs->as.nchannels, vs->as.freq);
break;
default:
- VNC_DEBUG("Invalid audio message %d\n", read_u8(data, 4));
+ VNC_DEBUG("Invalid audio message %d\n", read_u8(data, 2));
vnc_client_error(vs);
break;
}
@@ -3118,8 +3127,8 @@
cmp_bytes = MIN(VNC_DIRTY_PIXELS_PER_BIT * VNC_SERVER_FB_BYTES,
server_stride);
if (vd->guest.format != VNC_SERVER_FB_FORMAT) {
- int width = pixman_image_get_width(vd->server);
- tmpbuf = qemu_pixman_linebuf_create(VNC_SERVER_FB_FORMAT, width);
+ int w = pixman_image_get_width(vd->server);
+ tmpbuf = qemu_pixman_linebuf_create(VNC_SERVER_FB_FORMAT, w);
} else {
int guest_bpp =
PIXMAN_FORMAT_BPP(pixman_image_get_format(vd->guest.fb));
@@ -4172,9 +4181,8 @@
audiodev = qemu_opt_get(opts, "audiodev");
if (audiodev) {
- vd->audio_state = audio_state_by_name(audiodev);
+ vd->audio_state = audio_state_by_name(audiodev, errp);
if (!vd->audio_state) {
- error_setg(errp, "Audiodev '%s' not found", audiodev);
goto fail;
}
}
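
Both the ui/dbus.c and ui/vnc.c hunks drop their local "Audiodev '%s' not found" message because audio_state_by_name() now takes an Error ** and reports the failure itself, leaving callers to check only for NULL. A standalone sketch of that error-propagation shape, simplified to a string pointer (QEMU's real Error API carries more than this):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch: the lookup helper formats the error in one place; callers
     * just test the return value. All names here are invented. */
    struct audio_state { const char *name; };

    static struct audio_state states[] = { { "audio0" }, { "audio1" } };

    static struct audio_state *state_by_name(const char *name, const char **errmsg)
    {
        for (size_t i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
            if (strcmp(states[i].name, name) == 0) {
                return &states[i];
            }
        }
        *errmsg = "audiodev not found";   /* single point that builds the message */
        return NULL;
    }

    int main(void)
    {
        const char *err = NULL;
        struct audio_state *s = state_by_name("audio2", &err);

        if (!s) {                         /* caller no longer duplicates the message */
            fprintf(stderr, "error: %s\n", err);
            return 1;
        }
        return 0;
    }
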
diff --git a/ui/vnc.h b/ui/vnc.h
index 757fa83..96d19dc 100644
--- a/ui/vnc.h
+++ b/ui/vnc.h
@@ -464,6 +464,7 @@
VNC_FEATURE_LED_STATE,
VNC_FEATURE_XVP,
VNC_FEATURE_CLIPBOARD_EXT,
+ VNC_FEATURE_AUDIO,
};
#define VNC_FEATURE_RESIZE_MASK (1 << VNC_FEATURE_RESIZE)
@@ -481,6 +482,7 @@
#define VNC_FEATURE_LED_STATE_MASK (1 << VNC_FEATURE_LED_STATE)
#define VNC_FEATURE_XVP_MASK (1 << VNC_FEATURE_XVP)
#define VNC_FEATURE_CLIPBOARD_EXT_MASK (1 << VNC_FEATURE_CLIPBOARD_EXT)
+#define VNC_FEATURE_AUDIO_MASK (1 << VNC_FEATURE_AUDIO)
/* Client -> Server message IDs */
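
VNC_FEATURE_AUDIO joins the existing feature enum, and the matching VNC_FEATURE_AUDIO_MASK lets vnc.c record that the client acknowledged the audio encoding before any VNC_MSG_CLIENT_QEMU_AUDIO message is accepted; it is the same enum-plus-mask idiom behind the VNC_FEATURE_XVP_MASK fix above. In isolation the idiom looks like this (illustrative names, not the QEMU definitions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative enum-plus-mask feature tracking (invented names). */
    enum {
        FEATURE_RESIZE,
        FEATURE_XVP,
        FEATURE_AUDIO,
    };

    #define FEATURE_MASK(f)  (1u << (f))

    struct client {
        uint32_t features;                  /* bitmap of negotiated features */
    };

    static bool has_feature(const struct client *c, int feature)
    {
        return c->features & FEATURE_MASK(feature);
    }

    int main(void)
    {
        struct client c = { 0 };

        /* Set the bit only once the client actually negotiated the feature... */
        c.features |= FEATURE_MASK(FEATURE_AUDIO);

        /* ...and gate incoming messages on it, rejecting them otherwise. */
        if (!has_feature(&c, FEATURE_AUDIO)) {
            fprintf(stderr, "audio message while audio is disabled\n");
            return 1;
        }
        printf("audio message accepted\n");
        return 0;
    }
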
diff --git a/util/coroutine-sigaltstack.c b/util/coroutine-sigaltstack.c
index e2690c5..037d641 100644
--- a/util/coroutine-sigaltstack.c
+++ b/util/coroutine-sigaltstack.c
@@ -22,9 +22,9 @@
*/
/* XXX Is there a nicer way to disable glibc's stack check for longjmp? */
-#ifdef _FORTIFY_SOURCE
#undef _FORTIFY_SOURCE
-#endif
+#define _FORTIFY_SOURCE 0
+
#include "qemu/osdep.h"
#include <pthread.h>
#include "qemu/coroutine_int.h"
diff --git a/util/coroutine-ucontext.c b/util/coroutine-ucontext.c
index ddc98fb..7b304c7 100644
--- a/util/coroutine-ucontext.c
+++ b/util/coroutine-ucontext.c
@@ -19,9 +19,9 @@
*/
/* XXX Is there a nicer way to disable glibc's stack check for longjmp? */
-#ifdef _FORTIFY_SOURCE
#undef _FORTIFY_SOURCE
-#endif
+#define _FORTIFY_SOURCE 0
+
#include "qemu/osdep.h"
#include <ucontext.h>
#include "qemu/coroutine_int.h"
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index 19a0ea7..55b0189 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -479,7 +479,7 @@
return ret;
}
-EXCEPTION_DISPOSITION
+QEMU_USED EXCEPTION_DISPOSITION
win32_close_exception_handler(struct _EXCEPTION_RECORD *exception_record,
void *registration, struct _CONTEXT *context,
void *dispatcher)
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index b4b6bf3..5ccc6d2 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -278,7 +278,7 @@
VuFdWatch *vu_fd_watch = find_vu_fd_watch(server, fd);
if (!vu_fd_watch) {
- VuFdWatch *vu_fd_watch = g_new0(VuFdWatch, 1);
+ vu_fd_watch = g_new0(VuFdWatch, 1);
QTAILQ_INSERT_TAIL(&server->vu_fd_watches, vu_fd_watch, next);