Merge tag 'pull-target-arm-20240111' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm queue:
* Emulate FEAT_NV, FEAT_NV2
 * Add cache controller for Freescale i.MX6
* Add minimal support for the B-L475E-IOT01A board
* Allow SoC models to configure M-profile CPUs with correct number
of NVIC priority bits
* Add missing QOM parent for v7-M SoCs
* Set CTR_EL0.{IDC,DIC} for the 'max' CPU
 * hw/intc/arm_gicv3_cpuif: handle LPIs in the list registers
# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmWfypMZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3sleD/4tQOMteba5BNMDns6R96E4
# kj5q0Iy9XyzQ486Q4cIZXI5N3BddCp2ks8WeS2W3w4IT/lms0U6UwXV4E98I4I/b
# KSfOoUd/cp8IvdvzfpWbmQcPMoauHZdCUN33pYYXOjfi1RkpzgNU5Qgh09Nl/xYU
# V3oaEvWhLtepT/fwJLYxoqVHDaEmyW+6zriF0+eGjZvkhgPyhllla9eti7AyHTfH
# T3A4Fyx/wudRE3NP6xsLfxldriJTxQeba+TqLSh3IXn/PMtK13/ARsY/hl72Q4ML
# Fgad8Zho4eXbuOQ9oiqb7gp4K3IKd9/8FbCzECoIAq7AnLAD4KwpLQR8GULRvYW3
# 0eQq2txTXQWNcmWpIyDRRME+qeNVwWSk+QJDs5WuhVqlVQ4hpqtgFf1EX+7ORdS1
# WG0fb8etvr8oCSkzCmP/o6xYGJ0EyTVMU5DmWviy3bxMrUMcmobjvCQr/n2gC713
# 1NDmEaYPbl+pX8EMu8byst7/No2PXRgIO0UVVb4KZybfhNy+BBs+LiMVlSRS5YH4
# 8NWtoYZlG9RcPnY+8Xrxz9VTi2cNAAcdbf5uK3snJxkFV2SmV3oBoMxWen3mee0f
# 2PNVEbt9zvPV8hViBVLsqRhVXd9wMq6motIRlkKge1u1TvwIxO21ibykI3tvYOGv
# BffIjhUdnYtX90JAtXtFDw==
# =yQwf
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 11 Jan 2024 11:01:39 GMT
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# gpg: aka "Peter Maydell <peter@archaic.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE
* tag 'pull-target-arm-20240111' of https://git.linaro.org/people/pmaydell/qemu-arm: (41 commits)
target/arm: Add FEAT_NV2 to max, neoverse-n2, neoverse-v1 CPUs
target/arm: Enhance CPU_LOG_INT to show SPSR on AArch64 exception-entry
target/arm: Report HCR_EL2.{NV,NV1,NV2} in cpu dumps
hw/intc/arm_gicv3_cpuif: Mark up VNCR offsets for GIC CPU registers
target/arm: Mark up VNCR offsets (offsets >= 0x200, except GIC)
target/arm: Mark up VNCR offsets (offsets 0x168..0x1f8)
target/arm: Mark up VNCR offsets (offsets 0x100..0x160)
target/arm: Mark up VNCR offsets (offsets 0x0..0xff)
target/arm: Report VNCR_EL2 based faults correctly
target/arm: Implement FEAT_NV2 redirection of sysregs to RAM
target/arm: Handle FEAT_NV2 redirection of SPSR_EL2, ELR_EL2, ESR_EL2, FAR_EL2
target/arm: Handle FEAT_NV2 changes to when SPSR_EL1.M reports EL2
target/arm: Implement VNCR_EL2 register
target/arm: Handle HCR_EL2 accesses for FEAT_NV2 bits
target/arm: Add FEAT_NV to max, neoverse-n2, neoverse-v1 CPUs
target/arm: Handle FEAT_NV page table attribute changes
target/arm: Treat LDTR* and STTR* as LDR/STR when NV, NV1 is 1, 1
target/arm: Don't honour PSTATE.PAN when HCR_EL2.{NV, NV1} == {1, 1}
target/arm: Always use arm_pan_enabled() when checking if PAN is enabled
target/arm: Trap registers when HCR_EL2.{NV, NV1} == {1, 1}
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/disas/riscv.c b/disas/riscv.c
index e9458e5..8a546d5 100644
--- a/disas/riscv.c
+++ b/disas/riscv.c
@@ -903,6 +903,9 @@
rv_op_vwsll_vv = 872,
rv_op_vwsll_vx = 873,
rv_op_vwsll_vi = 874,
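+    /* Zacas extension: atomic compare-and-swap */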
+ rv_op_amocas_w = 875,
+ rv_op_amocas_d = 876,
+ rv_op_amocas_q = 877,
} rv_op;
/* register names */
@@ -2090,6 +2093,9 @@
{ "vwsll.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 },
{ "vwsll.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 },
{ "vwsll.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, 0, 0, 0 },
+ { "amocas.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
+ { "amocas.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
+ { "amocas.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 },
};
/* CSR names */
@@ -2841,6 +2847,9 @@
case 34: op = rv_op_amoxor_w; break;
case 35: op = rv_op_amoxor_d; break;
case 36: op = rv_op_amoxor_q; break;
+ case 42: op = rv_op_amocas_w; break;
+ case 43: op = rv_op_amocas_d; break;
+ case 44: op = rv_op_amocas_q; break;
case 66: op = rv_op_amoor_w; break;
case 67: op = rv_op_amoor_d; break;
case 68: op = rv_op_amoor_q; break;
diff --git a/docs/system/riscv/sifive_u.rst b/docs/system/riscv/sifive_u.rst
index 7b16656..8f55ae8 100644
--- a/docs/system/riscv/sifive_u.rst
+++ b/docs/system/riscv/sifive_u.rst
@@ -210,7 +210,7 @@
Running U-Boot
--------------
-U-Boot mainline v2021.07 release is tested at the time of writing. To build a
+U-Boot mainline v2024.01 release is tested at the time of writing. To build a
U-Boot mainline bootloader that can be booted by the ``sifive_u`` machine, use
the sifive_unleashed_defconfig with similar commands as described above for
Linux:
@@ -325,15 +325,10 @@
$ export CROSS_COMPILE=riscv64-linux-
$ make sifive_unleashed_defconfig
- $ make menuconfig
-
-then manually select the following configuration:
-
- * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB
-
-and unselect the following configuration:
-
- * Library routines ---> Allow access to binman information in the device tree
+ $ ./scripts/config --enable OF_BOARD
+ $ ./scripts/config --disable BINMAN_FDT
+ $ ./scripts/config --disable SPL
+ $ make olddefconfig
This changes U-Boot to use the QEMU generated device tree blob, and bypass
running the U-Boot SPL stage.
@@ -352,17 +347,13 @@
$ export CROSS_COMPILE=riscv64-linux-
$ make sifive_unleashed_defconfig
- $ make menuconfig
-
-then manually update the following configuration in U-Boot:
-
- * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB
- * RISC-V architecture ---> Base ISA ---> RV32I
- * Boot options ---> Boot images ---> Text Base ---> 0x80400000
-
-and unselect the following configuration:
-
- * Library routines ---> Allow access to binman information in the device tree
+ $ ./scripts/config --disable ARCH_RV64I
+ $ ./scripts/config --enable ARCH_RV32I
+ $ ./scripts/config --set-val TEXT_BASE 0x80400000
+ $ ./scripts/config --enable OF_BOARD
+ $ ./scripts/config --disable BINMAN_FDT
+ $ ./scripts/config --disable SPL
+ $ make olddefconfig
Use the same command line options to boot the 32-bit U-Boot S-mode image:
diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst
index f5fa7b8..9a06f95 100644
--- a/docs/system/riscv/virt.rst
+++ b/docs/system/riscv/virt.rst
@@ -95,6 +95,11 @@
SiFive CLINT. When not specified, this option is assumed to be "off".
This option is restricted to the TCG accelerator.
+- acpi=[on|off|auto]
+
+ When this option is "on" (which is the default), ACPI tables are generated and
+ exposed as firmware tables etc/acpi/rsdp and etc/acpi/tables.
+
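+  For example, to boot the ``virt`` machine with ACPI tables disabled
+  (illustrative invocation):
+
+  .. code-block:: bash
+
+    $ qemu-system-riscv64 -M virt,acpi=off ...
+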
- aia=[none|aplic|aplic-imsic]
This option allows selecting interrupt controller defined by the AIA
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 5e7cf6c..a22a2f4 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -35,7 +35,7 @@
#include "target/arm/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
-#include "hw/nvram/fw_cfg.h"
+#include "hw/nvram/fw_cfg_acpi.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
@@ -58,6 +58,7 @@
#include "migration/vmstate.h"
#include "hw/acpi/ghes.h"
#include "hw/acpi/viot.h"
+#include "hw/virtio/virtio-acpi.h"
#define ARM_SPI_BASE 32
@@ -94,21 +95,6 @@
aml_append(scope, dev);
}
-static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
-{
- Aml *dev = aml_device("FWCF");
- aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
- /* device present, functioning, decoding, not shown in UI */
- aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
- aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
-
- Aml *crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
- fw_cfg_memmap->size, AML_READ_WRITE));
- aml_append(dev, aml_name_decl("_CRS", crs));
- aml_append(scope, dev);
-}
-
static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
{
Aml *dev, *crs;
@@ -133,32 +119,6 @@
aml_append(scope, dev);
}
-static void acpi_dsdt_add_virtio(Aml *scope,
- const MemMapEntry *virtio_mmio_memmap,
- uint32_t mmio_irq, int num)
-{
- hwaddr base = virtio_mmio_memmap->base;
- hwaddr size = virtio_mmio_memmap->size;
- int i;
-
- for (i = 0; i < num; i++) {
- uint32_t irq = mmio_irq + i;
- Aml *dev = aml_device("VR%02u", i);
- aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
- aml_append(dev, aml_name_decl("_UID", aml_int(i)));
- aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
-
- Aml *crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
- aml_append(crs,
- aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
- AML_EXCLUSIVE, &irq, 1));
- aml_append(dev, aml_name_decl("_CRS", crs));
- aml_append(scope, dev);
- base += size;
- }
-}
-
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
uint32_t irq, VirtMachineState *vms)
{
@@ -864,9 +824,10 @@
if (vmc->acpi_expose_flash) {
acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
}
- acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
- acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
- (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
+ fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]);
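+    /* start_index 0 preserves the existing VRxx node names and _UID numbering */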
+ virtio_acpi_dsdt_add(scope, memmap[VIRT_MMIO].base, memmap[VIRT_MMIO].size,
+ (irqmap[VIRT_MMIO] + ARM_SPI_BASE),
+ 0, NUM_VIRTIO_TRANSPORTS);
acpi_dsdt_add_pci(scope, memmap, irqmap[VIRT_PCIE] + ARM_SPI_BASE, vms);
if (vms->acpi_dev) {
build_ged_aml(scope, "\\_SB."GED_DEVICE,
diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c
index 2909a73..279da6b 100644
--- a/hw/i386/acpi-microvm.c
+++ b/hw/i386/acpi-microvm.c
@@ -37,6 +37,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pcie_host.h"
#include "hw/usb/xhci.h"
+#include "hw/virtio/virtio-acpi.h"
#include "hw/virtio/virtio-mmio.h"
#include "hw/input/i8042.h"
@@ -77,19 +78,7 @@
uint32_t irq = mms->virtio_irq_base + index;
hwaddr base = VIRTIO_MMIO_BASE + index * 512;
hwaddr size = 512;
-
- Aml *dev = aml_device("VR%02u", (unsigned)index);
- aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
- aml_append(dev, aml_name_decl("_UID", aml_int(index)));
- aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
-
- Aml *crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE));
- aml_append(crs,
- aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
- AML_EXCLUSIVE, &irq, 1));
- aml_append(dev, aml_name_decl("_CRS", crs));
- aml_append(scope, dev);
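+        /* one transport per call: num == 1, start_index == device index */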
+ virtio_acpi_dsdt_add(scope, base, size, irq, index, 1);
}
}
}
diff --git a/hw/nvram/fw_cfg-acpi.c b/hw/nvram/fw_cfg-acpi.c
new file mode 100644
index 0000000..4e48bae
--- /dev/null
+++ b/hw/nvram/fw_cfg-acpi.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Add fw_cfg device in DSDT
+ *
+ */
+
+#include "hw/nvram/fw_cfg_acpi.h"
+#include "hw/acpi/aml-build.h"
+
+void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap)
+{
+ Aml *dev = aml_device("FWCF");
+ aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
+ /* device present, functioning, decoding, not shown in UI */
+ aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
+ aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
+
+ Aml *crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
+ fw_cfg_memmap->size, AML_READ_WRITE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+}
diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build
index 75e415b..4996c72 100644
--- a/hw/nvram/meson.build
+++ b/hw/nvram/meson.build
@@ -17,3 +17,4 @@
system_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c'))
specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c'))
+specific_ss.add(when: 'CONFIG_ACPI', if_true: files('fw_cfg-acpi.c'))
diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c
index 1092dc3..f69413e 100644
--- a/hw/pci-host/gpex-acpi.c
+++ b/hw/pci-host/gpex-acpi.c
@@ -281,3 +281,16 @@
crs_range_set_free(&crs_range_set);
}
+
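+/*
+ * Look up the (assumed single) GPEX host bridge in the QOM tree and emit
+ * its DSDT entry; this quietly does nothing if no host is found or the
+ * QOM path is ambiguous.
+ */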
+void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq)
+{
+ bool ambig;
+ Object *obj = object_resolve_path_type("", TYPE_GPEX_HOST, &ambig);
+
+ if (!obj || ambig) {
+ return;
+ }
+
+ GPEX_HOST(obj)->gpex_cfg.irq = irq;
+ acpi_dsdt_add_gpex(scope, &GPEX_HOST(obj)->gpex_cfg);
+}
diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c
index e117e47..e9cf455 100644
--- a/hw/pci-host/gpex.c
+++ b/hw/pci-host/gpex.c
@@ -154,6 +154,18 @@
*/
DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost,
allow_unmapped_accesses, true),
+ DEFINE_PROP_UINT64(PCI_HOST_ECAM_BASE, GPEXHost, gpex_cfg.ecam.base, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_ECAM_SIZE, GPEXHost, gpex_cfg.ecam.size, 0),
+ DEFINE_PROP_UINT64(PCI_HOST_PIO_BASE, GPEXHost, gpex_cfg.pio.base, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_PIO_SIZE, GPEXHost, gpex_cfg.pio.size, 0),
+ DEFINE_PROP_UINT64(PCI_HOST_BELOW_4G_MMIO_BASE, GPEXHost,
+ gpex_cfg.mmio32.base, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MMIO_SIZE, GPEXHost,
+ gpex_cfg.mmio32.size, 0),
+ DEFINE_PROP_UINT64(PCI_HOST_ABOVE_4G_MMIO_BASE, GPEXHost,
+ gpex_cfg.mmio64.base, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost,
+ gpex_cfg.mmio64.size, 0),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
index b6a5eb4..a50717b 100644
--- a/hw/riscv/Kconfig
+++ b/hw/riscv/Kconfig
@@ -45,6 +45,7 @@
select FW_CFG_DMA
select PLATFORM_BUS
select ACPI
+ select ACPI_PCI
config SHAKTI_C
bool
diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c
index d3bfaf5..26c7e44 100644
--- a/hw/riscv/virt-acpi-build.c
+++ b/hw/riscv/virt-acpi-build.c
@@ -27,16 +27,21 @@
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
+#include "hw/acpi/pci.h"
#include "hw/acpi/utils.h"
+#include "hw/intc/riscv_aclint.h"
+#include "hw/nvram/fw_cfg_acpi.h"
+#include "hw/pci-host/gpex.h"
+#include "hw/riscv/virt.h"
+#include "hw/riscv/numa.h"
+#include "hw/virtio/virtio-acpi.h"
+#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/reset.h"
-#include "migration/vmstate.h"
-#include "hw/riscv/virt.h"
-#include "hw/riscv/numa.h"
-#include "hw/intc/riscv_aclint.h"
#define ACPI_BUILD_TABLE_SIZE 0x20000
+#define ACPI_BUILD_INTC_ID(socket, index) (((socket) << 24) | (index))
typedef struct AcpiBuildState {
/* Copy of table in RAM (for patching) */
@@ -58,17 +63,56 @@
static void riscv_acpi_madt_add_rintc(uint32_t uid,
const CPUArchIdList *arch_ids,
- GArray *entry)
+ GArray *entry,
+ RISCVVirtState *s)
{
+ uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1);
uint64_t hart_id = arch_ids->cpus[uid].arch_id;
+ uint32_t imsic_size, local_cpu_id, socket_id;
+ uint64_t imsic_socket_addr, imsic_addr;
+ MachineState *ms = MACHINE(s);
+ socket_id = arch_ids->cpus[uid].props.node_id;
+ local_cpu_id = (arch_ids->cpus[uid].arch_id -
+ riscv_socket_first_hartid(ms, socket_id)) %
+ riscv_socket_hart_count(ms, socket_id);
+ imsic_socket_addr = s->memmap[VIRT_IMSIC_S].base +
+ (socket_id * VIRT_IMSIC_GROUP_MAX_SIZE);
+ imsic_size = IMSIC_HART_SIZE(guest_index_bits);
+ imsic_addr = imsic_socket_addr + local_cpu_id * imsic_size;
build_append_int_noprefix(entry, 0x18, 1); /* Type */
- build_append_int_noprefix(entry, 20, 1); /* Length */
+ build_append_int_noprefix(entry, 36, 1); /* Length */
build_append_int_noprefix(entry, 1, 1); /* Version */
build_append_int_noprefix(entry, 0, 1); /* Reserved */
build_append_int_noprefix(entry, 0x1, 4); /* Flags */
build_append_int_noprefix(entry, hart_id, 8); /* Hart ID */
build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */
+ /* External Interrupt Controller ID */
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
+ build_append_int_noprefix(entry,
+ ACPI_BUILD_INTC_ID(
+ arch_ids->cpus[uid].props.node_id,
+ local_cpu_id),
+ 4);
+ } else if (s->aia_type == VIRT_AIA_TYPE_NONE) {
+ build_append_int_noprefix(entry,
+ ACPI_BUILD_INTC_ID(
+ arch_ids->cpus[uid].props.node_id,
+ 2 * local_cpu_id + 1),
+ 4);
+ } else {
+ build_append_int_noprefix(entry, 0, 4);
+ }
+
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
+ /* IMSIC Base address */
+ build_append_int_noprefix(entry, imsic_addr, 8);
+ /* IMSIC Size */
+ build_append_int_noprefix(entry, imsic_size, 4);
+ } else {
+ build_append_int_noprefix(entry, 0, 8);
+ build_append_int_noprefix(entry, 0, 4);
+ }
}
static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s)
@@ -87,7 +131,7 @@
aml_int(arch_ids->cpus[i].arch_id)));
/* build _MAT object */
- riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf);
+ riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf, s);
aml_append(dev, aml_name_decl("_MAT",
aml_buffer(madt_buf->len,
(uint8_t *)madt_buf->data)));
@@ -97,19 +141,36 @@
}
}
-static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
+static void
+acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap,
+ uint32_t uart_irq)
{
- Aml *dev = aml_device("FWCF");
- aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
-
- /* device present, functioning, decoding, not shown in UI */
- aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
- aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
+ Aml *dev = aml_device("COM0");
+ aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(0)));
Aml *crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base,
- fw_cfg_memmap->size, AML_READ_WRITE));
+ aml_append(crs, aml_memory32_fixed(uart_memmap->base,
+ uart_memmap->size, AML_READ_WRITE));
+ aml_append(crs,
+ aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
+ AML_EXCLUSIVE, &uart_irq, 1));
aml_append(dev, aml_name_decl("_CRS", crs));
+
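+    /*
+     * Advertise the UART input clock via a _DSD Device Properties package;
+     * the UUID below is the standard ACPI device-properties UUID.
+     */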
+ Aml *pkg = aml_package(2);
+ aml_append(pkg, aml_string("clock-frequency"));
+ aml_append(pkg, aml_int(3686400));
+
+ Aml *UUID = aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301");
+
+ Aml *pkg1 = aml_package(1);
+ aml_append(pkg1, pkg);
+
+ Aml *package = aml_package(2);
+ aml_append(package, UUID);
+ aml_append(package, pkg1);
+
+ aml_append(dev, aml_name_decl("_DSD", package));
aml_append(scope, dev);
}
@@ -121,6 +182,7 @@
* 5.2.36 RISC-V Hart Capabilities Table (RHCT)
* REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16
* https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view
+ * https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view
*/
static void build_rhct(GArray *table_data,
BIOSLinker *linker,
@@ -130,8 +192,10 @@
MachineState *ms = MACHINE(s);
const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
size_t len, aligned_len;
- uint32_t isa_offset, num_rhct_nodes;
- RISCVCPU *cpu;
+ uint32_t isa_offset, num_rhct_nodes, cmo_offset = 0;
+ RISCVCPU *cpu = &s->soc[0].harts[0];
+ uint32_t mmu_offset = 0;
+ uint8_t satp_mode_max;
char *isa;
AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id,
@@ -147,6 +211,13 @@
/* ISA + N hart info */
num_rhct_nodes = 1 + ms->smp.cpus;
+ if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) {
+ num_rhct_nodes++;
+ }
+
+ if (cpu->cfg.satp_mode.supported != 0) {
+ num_rhct_nodes++;
+ }
/* Number of RHCT nodes*/
build_append_int_noprefix(table_data, num_rhct_nodes, 4);
@@ -158,7 +229,6 @@
isa_offset = table_data->len - table.table_offset;
build_append_int_noprefix(table_data, 0, 2); /* Type 0 */
- cpu = &s->soc[0].harts[0];
isa = riscv_isa_string(cpu);
len = 8 + strlen(isa) + 1;
aligned_len = (len % 2) ? (len + 1) : len;
@@ -174,14 +244,87 @@
build_append_int_noprefix(table_data, 0x0, 1); /* Optional Padding */
}
+ /* CMO node */
+ if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) {
+ cmo_offset = table_data->len - table.table_offset;
+ build_append_int_noprefix(table_data, 1, 2); /* Type */
+ build_append_int_noprefix(table_data, 10, 2); /* Length */
+ build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
+ build_append_int_noprefix(table_data, 0, 1); /* Reserved */
+
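+        /*
+         * The RHCT encodes each cache-block size as its power-of-two
+         * exponent, hence the __builtin_ctz() on the byte sizes below.
+         */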
+ /* CBOM block size */
+ if (cpu->cfg.cbom_blocksize) {
+ build_append_int_noprefix(table_data,
+ __builtin_ctz(cpu->cfg.cbom_blocksize),
+ 1);
+ } else {
+ build_append_int_noprefix(table_data, 0, 1);
+ }
+
+ /* CBOP block size */
+ build_append_int_noprefix(table_data, 0, 1);
+
+ /* CBOZ block size */
+ if (cpu->cfg.cboz_blocksize) {
+ build_append_int_noprefix(table_data,
+ __builtin_ctz(cpu->cfg.cboz_blocksize),
+ 1);
+ } else {
+ build_append_int_noprefix(table_data, 0, 1);
+ }
+ }
+
+ /* MMU node structure */
+ if (cpu->cfg.satp_mode.supported != 0) {
+ satp_mode_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
+ mmu_offset = table_data->len - table.table_offset;
+ build_append_int_noprefix(table_data, 2, 2); /* Type */
+ build_append_int_noprefix(table_data, 8, 2); /* Length */
+ build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
+ build_append_int_noprefix(table_data, 0, 1); /* Reserved */
+ /* MMU Type */
+ if (satp_mode_max == VM_1_10_SV57) {
+ build_append_int_noprefix(table_data, 2, 1); /* Sv57 */
+ } else if (satp_mode_max == VM_1_10_SV48) {
+ build_append_int_noprefix(table_data, 1, 1); /* Sv48 */
+ } else if (satp_mode_max == VM_1_10_SV39) {
+ build_append_int_noprefix(table_data, 0, 1); /* Sv39 */
+ } else {
+            g_assert_not_reached();
+ }
+ }
+
/* Hart Info Node */
for (int i = 0; i < arch_ids->len; i++) {
+ len = 16;
+ int num_offsets = 1;
build_append_int_noprefix(table_data, 0xFFFF, 2); /* Type */
- build_append_int_noprefix(table_data, 16, 2); /* Length */
- build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
- build_append_int_noprefix(table_data, 1, 2); /* Number of offsets */
- build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */
- build_append_int_noprefix(table_data, isa_offset, 4); /* Offsets[0] */
+
+ /* Length */
+ if (cmo_offset) {
+ len += 4;
+ num_offsets++;
+ }
+
+ if (mmu_offset) {
+ len += 4;
+ num_offsets++;
+ }
+
+ build_append_int_noprefix(table_data, len, 2);
+ build_append_int_noprefix(table_data, 0x1, 2); /* Revision */
+ /* Number of offsets */
+ build_append_int_noprefix(table_data, num_offsets, 2);
+ build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */
+ /* Offsets */
+ build_append_int_noprefix(table_data, isa_offset, 4);
+ if (cmo_offset) {
+ build_append_int_noprefix(table_data, cmo_offset, 4);
+ }
+
+ if (mmu_offset) {
+ build_append_int_noprefix(table_data, mmu_offset, 4);
+ }
}
acpi_table_end(linker, &table);
@@ -209,6 +352,8 @@
RISCVVirtState *s)
{
Aml *scope, *dsdt;
+ MachineState *ms = MACHINE(s);
+ uint8_t socket_count;
const MemMapEntry *memmap = s->memmap;
AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = s->oem_id,
.oem_table_id = s->oem_table_id };
@@ -226,7 +371,30 @@
scope = aml_scope("\\_SB");
acpi_dsdt_add_cpus(scope, s);
- acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
+ fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]);
+
+ socket_count = riscv_socket_count(ms);
+
+ acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ);
+
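+    /*
+     * The IRQ bases track which socket's irqchip each device hangs off:
+     * every socket's irqchip spans VIRT_IRQCHIP_NUM_SOURCES GSIs, so
+     * virtio (socket 1) and PCIe (socket 2, when enough sockets exist)
+     * shift their base IRQ by one or two blocks of sources.
+     */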
+ if (socket_count == 1) {
+ virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
+ memmap[VIRT_VIRTIO].size,
+ VIRTIO_IRQ, 0, VIRTIO_COUNT);
+ acpi_dsdt_add_gpex_host(scope, PCIE_IRQ);
+ } else if (socket_count == 2) {
+ virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
+ memmap[VIRT_VIRTIO].size,
+ VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0,
+ VIRTIO_COUNT);
+ acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES);
+ } else {
+ virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base,
+ memmap[VIRT_VIRTIO].size,
+ VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0,
+ VIRTIO_COUNT);
+ acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES * 2);
+ }
aml_append(dsdt, scope);
@@ -242,6 +410,7 @@
* 5.2.12 Multiple APIC Description Table (MADT)
* REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15
* https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view
+ * https://drive.google.com/file/d/1oMGPyOD58JaPgMl1pKasT-VKsIKia7zR/view
*/
static void build_madt(GArray *table_data,
BIOSLinker *linker,
@@ -250,6 +419,21 @@
MachineClass *mc = MACHINE_GET_CLASS(s);
MachineState *ms = MACHINE(s);
const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms);
+ uint8_t group_index_bits = imsic_num_bits(riscv_socket_count(ms));
+ uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1);
+ uint16_t imsic_max_hart_per_socket = 0;
+ uint8_t hart_index_bits;
+ uint64_t aplic_addr;
+ uint32_t gsi_base;
+ uint8_t socket;
+
+ for (socket = 0; socket < riscv_socket_count(ms); socket++) {
+ if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
+ imsic_max_hart_per_socket = s->soc[socket].num_harts;
+ }
+ }
+
+ hart_index_bits = imsic_num_bits(imsic_max_hart_per_socket);
AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id,
.oem_table_id = s->oem_table_id };
@@ -261,7 +445,84 @@
/* RISC-V Local INTC structures per HART */
for (int i = 0; i < arch_ids->len; i++) {
- riscv_acpi_madt_add_rintc(i, arch_ids, table_data);
+ riscv_acpi_madt_add_rintc(i, arch_ids, table_data, s);
+ }
+
+ /* IMSIC */
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
+ /* IMSIC */
+ build_append_int_noprefix(table_data, 0x19, 1); /* Type */
+ build_append_int_noprefix(table_data, 16, 1); /* Length */
+ build_append_int_noprefix(table_data, 1, 1); /* Version */
+ build_append_int_noprefix(table_data, 0, 1); /* Reserved */
+ build_append_int_noprefix(table_data, 0, 4); /* Flags */
+ /* Number of supervisor mode Interrupt Identities */
+ build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2);
+ /* Number of guest mode Interrupt Identities */
+ build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2);
+ /* Guest Index Bits */
+ build_append_int_noprefix(table_data, guest_index_bits, 1);
+ /* Hart Index Bits */
+ build_append_int_noprefix(table_data, hart_index_bits, 1);
+ /* Group Index Bits */
+ build_append_int_noprefix(table_data, group_index_bits, 1);
+ /* Group Index Shift */
+ build_append_int_noprefix(table_data, IMSIC_MMIO_GROUP_MIN_SHIFT, 1);
+ }
+
+ if (s->aia_type != VIRT_AIA_TYPE_NONE) {
+ /* APLICs */
+ for (socket = 0; socket < riscv_socket_count(ms); socket++) {
+ aplic_addr = s->memmap[VIRT_APLIC_S].base +
+ s->memmap[VIRT_APLIC_S].size * socket;
+ gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket;
+ build_append_int_noprefix(table_data, 0x1A, 1); /* Type */
+ build_append_int_noprefix(table_data, 36, 1); /* Length */
+ build_append_int_noprefix(table_data, 1, 1); /* Version */
+ build_append_int_noprefix(table_data, socket, 1); /* APLIC ID */
+ build_append_int_noprefix(table_data, 0, 4); /* Flags */
+ build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */
+ /* Number of IDCs */
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
+ build_append_int_noprefix(table_data,
+ s->soc[socket].num_harts,
+ 2);
+ } else {
+ build_append_int_noprefix(table_data, 0, 2);
+ }
+ /* Total External Interrupt Sources Supported */
+ build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_SOURCES, 2);
+ /* Global System Interrupt Base */
+ build_append_int_noprefix(table_data, gsi_base, 4);
+ /* APLIC Address */
+ build_append_int_noprefix(table_data, aplic_addr, 8);
+ /* APLIC size */
+ build_append_int_noprefix(table_data,
+ s->memmap[VIRT_APLIC_S].size, 4);
+ }
+ } else {
+ /* PLICs */
+ for (socket = 0; socket < riscv_socket_count(ms); socket++) {
+ aplic_addr = s->memmap[VIRT_PLIC].base +
+ s->memmap[VIRT_PLIC].size * socket;
+ gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket;
+ build_append_int_noprefix(table_data, 0x1B, 1); /* Type */
+ build_append_int_noprefix(table_data, 36, 1); /* Length */
+ build_append_int_noprefix(table_data, 1, 1); /* Version */
+ build_append_int_noprefix(table_data, socket, 1); /* PLIC ID */
+ build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */
+ /* Total External Interrupt Sources Supported */
+ build_append_int_noprefix(table_data,
+ VIRT_IRQCHIP_NUM_SOURCES - 1, 2);
+ build_append_int_noprefix(table_data, 0, 2); /* Max Priority */
+ build_append_int_noprefix(table_data, 0, 4); /* Flags */
+ /* PLIC Size */
+ build_append_int_noprefix(table_data, s->memmap[VIRT_PLIC].size, 4);
+ /* PLIC Address */
+ build_append_int_noprefix(table_data, aplic_addr, 8);
+ /* Global System Interrupt Vector Base */
+ build_append_int_noprefix(table_data, gsi_base, 4);
+ }
}
acpi_table_end(linker, &table);
@@ -294,6 +555,16 @@
acpi_add_table(table_offsets, tables_blob);
build_rhct(tables_blob, tables->linker, s);
+ acpi_add_table(table_offsets, tables_blob);
+ {
+ AcpiMcfgInfo mcfg = {
+            .base = s->memmap[VIRT_PCIE_ECAM].base,
+            .size = s->memmap[VIRT_PCIE_ECAM].size,
+ };
+ build_mcfg(tables_blob, tables->linker, &mcfg, s->oem_id,
+ s->oem_table_id);
+ }
+
/* XSDT is pointed to by RSDP */
xsdt = tables_blob->len;
build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id,
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index d2eac24..f9fd134 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -38,7 +38,6 @@
#include "kvm/kvm_riscv.h"
#include "hw/intc/riscv_aclint.h"
#include "hw/intc/riscv_aplic.h"
-#include "hw/intc/riscv_imsic.h"
#include "hw/intc/sifive_plic.h"
#include "hw/misc/sifive_test.h"
#include "hw/platform-bus.h"
@@ -54,28 +53,6 @@
#include "hw/acpi/aml-build.h"
#include "qapi/qapi-visit-common.h"
-/*
- * The virt machine physical address space used by some of the devices
- * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets,
- * number of CPUs, and number of IMSIC guest files.
- *
- * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
- * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
- * of virt machine physical address space.
- */
-
-#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
-#if VIRT_IMSIC_GROUP_MAX_SIZE < \
- IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
-#error "Can't accommodate single IMSIC group in address space"
-#endif
-
-#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
- VIRT_IMSIC_GROUP_MAX_SIZE)
-#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
-#error "Can't accommodate all IMSIC groups in address space"
-#endif
-
/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
static bool virt_use_kvm_aia(RISCVVirtState *s)
{
@@ -273,6 +250,11 @@
cpu_ptr->cfg.cboz_blocksize);
}
+ if (cpu_ptr->cfg.ext_zicbop) {
+ qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbop-block-size",
+ cpu_ptr->cfg.cbop_blocksize);
+ }
+
qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv");
qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay");
qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg",
@@ -460,24 +442,6 @@
"sifive,plic-1.0.0", "riscv,plic0"
};
- if (kvm_enabled()) {
- plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
- } else {
- plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
- }
-
- for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
- if (kvm_enabled()) {
- plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
- plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
- } else {
- plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
- plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
- plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
- plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
- }
- }
-
plic_phandles[socket] = (*phandle)++;
plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
@@ -490,8 +454,33 @@
(char **)&plic_compat,
ARRAY_SIZE(plic_compat));
qemu_fdt_setprop(ms->fdt, plic_name, "interrupt-controller", NULL, 0);
- qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
- plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
+
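+    /*
+     * Under KVM only the S-mode external interrupt is wired up per hart
+     * (two cells each); under TCG both M-mode and S-mode are wired up,
+     * hence four cells per hart.
+     */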
+ if (kvm_enabled()) {
+ plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
+
+ for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
+ plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
+ plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
+ }
+
+ qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
+ plic_cells,
+ s->soc[socket].num_harts * sizeof(uint32_t) * 2);
+ } else {
+ plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
+
+ for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
+ plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
+ plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
+ plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
+ plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
+ }
+
+ qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended",
+ plic_cells,
+ s->soc[socket].num_harts * sizeof(uint32_t) * 4);
+ }
+
qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg",
0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size);
qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev",
@@ -512,7 +501,7 @@
g_free(plic_cells);
}
-static uint32_t imsic_num_bits(uint32_t count)
+uint32_t imsic_num_bits(uint32_t count)
{
uint32_t ret = 0;
@@ -1077,21 +1066,45 @@
}
static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
- hwaddr ecam_base, hwaddr ecam_size,
- hwaddr mmio_base, hwaddr mmio_size,
- hwaddr high_mmio_base,
- hwaddr high_mmio_size,
- hwaddr pio_base,
- DeviceState *irqchip)
+ DeviceState *irqchip,
+ RISCVVirtState *s)
{
DeviceState *dev;
MemoryRegion *ecam_alias, *ecam_reg;
MemoryRegion *mmio_alias, *high_mmio_alias, *mmio_reg;
+ hwaddr ecam_base = s->memmap[VIRT_PCIE_ECAM].base;
+ hwaddr ecam_size = s->memmap[VIRT_PCIE_ECAM].size;
+ hwaddr mmio_base = s->memmap[VIRT_PCIE_MMIO].base;
+ hwaddr mmio_size = s->memmap[VIRT_PCIE_MMIO].size;
+ hwaddr high_mmio_base = virt_high_pcie_memmap.base;
+ hwaddr high_mmio_size = virt_high_pcie_memmap.size;
+ hwaddr pio_base = s->memmap[VIRT_PCIE_PIO].base;
+ hwaddr pio_size = s->memmap[VIRT_PCIE_PIO].size;
qemu_irq irq;
int i;
dev = qdev_new(TYPE_GPEX_HOST);
+ /* Set GPEX object properties for the virt machine */
+ object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_BASE,
+ ecam_base, NULL);
+ object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_SIZE,
+ ecam_size, NULL);
+ object_property_set_uint(OBJECT(GPEX_HOST(dev)),
+ PCI_HOST_BELOW_4G_MMIO_BASE,
+ mmio_base, NULL);
+ object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_BELOW_4G_MMIO_SIZE,
+ mmio_size, NULL);
+ object_property_set_uint(OBJECT(GPEX_HOST(dev)),
+ PCI_HOST_ABOVE_4G_MMIO_BASE,
+ high_mmio_base, NULL);
+ object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ABOVE_4G_MMIO_SIZE,
+ high_mmio_size, NULL);
+ object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_BASE,
+ pio_base, NULL);
+ object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_SIZE,
+ pio_size, NULL);
+
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
ecam_alias = g_new0(MemoryRegion, 1);
@@ -1122,6 +1135,7 @@
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
}
+ GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(GPEX_HOST(dev))->bus;
return dev;
}
@@ -1517,15 +1531,7 @@
qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
}
- gpex_pcie_init(system_memory,
- memmap[VIRT_PCIE_ECAM].base,
- memmap[VIRT_PCIE_ECAM].size,
- memmap[VIRT_PCIE_MMIO].base,
- memmap[VIRT_PCIE_MMIO].size,
- virt_high_pcie_memmap.base,
- virt_high_pcie_memmap.size,
- memmap[VIRT_PCIE_PIO].base,
- pcie_irqchip);
+ gpex_pcie_init(system_memory, pcie_irqchip, s);
create_platform_bus(s, mmio_irqchip);
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
index c8c1001..47baf00 100644
--- a/hw/virtio/meson.build
+++ b/hw/virtio/meson.build
@@ -77,3 +77,4 @@
system_ss.add(files('virtio-hmp-cmds.c'))
specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: specific_virtio_ss)
+system_ss.add(when: 'CONFIG_ACPI', if_true: files('virtio-acpi.c'))
diff --git a/hw/virtio/virtio-acpi.c b/hw/virtio/virtio-acpi.c
new file mode 100644
index 0000000..e18cb38
--- /dev/null
+++ b/hw/virtio/virtio-acpi.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * virtio ACPI Support
+ *
+ */
+
+#include "hw/virtio/virtio-acpi.h"
+#include "hw/acpi/aml-build.h"
+
+void virtio_acpi_dsdt_add(Aml *scope, const hwaddr base, const hwaddr size,
+ uint32_t mmio_irq, long int start_index, int num)
+{
+ hwaddr virtio_base = base;
+ uint32_t irq = mmio_irq;
+ long int i;
+
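+    /*
+     * Emit one LNRO0005 node per transport; the MMIO base and the IRQ
+     * advance in lockstep across [start_index, start_index + num).
+     */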
+ for (i = start_index; i < start_index + num; i++) {
+ Aml *dev = aml_device("VR%02u", (unsigned)i);
+ aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(i)));
+ aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
+
+ Aml *crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(virtio_base, size, AML_READ_WRITE));
+ aml_append(crs,
+ aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
+ AML_EXCLUSIVE, &irq, 1));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+ virtio_base += size;
+ irq++;
+ }
+}
diff --git a/include/hw/nvram/fw_cfg_acpi.h b/include/hw/nvram/fw_cfg_acpi.h
new file mode 100644
index 0000000..b6553d8
--- /dev/null
+++ b/include/hw/nvram/fw_cfg_acpi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ACPI support for fw_cfg
+ *
+ */
+
+#ifndef FW_CFG_ACPI_H
+#define FW_CFG_ACPI_H
+
+#include "qemu/osdep.h"
+#include "exec/hwaddr.h"
+
+void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap);
+
+#endif
diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h
index b0240bd..dce8835 100644
--- a/include/hw/pci-host/gpex.h
+++ b/include/hw/pci-host/gpex.h
@@ -40,6 +40,15 @@
/*< public >*/
};
+struct GPEXConfig {
+ MemMapEntry ecam;
+ MemMapEntry mmio32;
+ MemMapEntry mmio64;
+ MemMapEntry pio;
+ int irq;
+ PCIBus *bus;
+};
+
struct GPEXHost {
/*< private >*/
PCIExpressHost parent_obj;
@@ -55,19 +64,22 @@
int irq_num[GPEX_NUM_IRQS];
bool allow_unmapped_accesses;
-};
-struct GPEXConfig {
- MemMapEntry ecam;
- MemMapEntry mmio32;
- MemMapEntry mmio64;
- MemMapEntry pio;
- int irq;
- PCIBus *bus;
+ struct GPEXConfig gpex_cfg;
};
int gpex_set_irq_num(GPEXHost *s, int index, int gsi);
void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg);
+void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq);
+
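+/*
+ * Experimental ("x-") properties the machine sets before realize to
+ * describe the PCI address-space layout consumed when building the DSDT.
+ */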
+#define PCI_HOST_PIO_BASE "x-pio-base"
+#define PCI_HOST_PIO_SIZE "x-pio-size"
+#define PCI_HOST_ECAM_BASE "x-ecam-base"
+#define PCI_HOST_ECAM_SIZE "x-ecam-size"
+#define PCI_HOST_BELOW_4G_MMIO_BASE "x-below-4g-mmio-base"
+#define PCI_HOST_BELOW_4G_MMIO_SIZE "x-below-4g-mmio-size"
+#define PCI_HOST_ABOVE_4G_MMIO_BASE "x-above-4g-mmio-base"
+#define PCI_HOST_ABOVE_4G_MMIO_SIZE "x-above-4g-mmio-size"
#endif /* HW_GPEX_H */
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
index e5c474b..f89790f 100644
--- a/include/hw/riscv/virt.h
+++ b/include/hw/riscv/virt.h
@@ -23,6 +23,7 @@
#include "hw/riscv/riscv_hart.h"
#include "hw/sysbus.h"
#include "hw/block/flash.h"
+#include "hw/intc/riscv_imsic.h"
#define VIRT_CPUS_MAX_BITS 9
#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
@@ -60,6 +61,7 @@
char *oem_table_id;
OnOffAuto acpi;
const MemMapEntry *memmap;
+ struct GPEXHost *gpex_host;
};
enum {
@@ -127,4 +129,28 @@
bool virt_is_acpi_enabled(RISCVVirtState *s);
void virt_acpi_setup(RISCVVirtState *vms);
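+
+/*
+ * Smallest n such that (1 << n) >= count (contract assumed from the
+ * definition in virt.c, now shared with the ACPI code).
+ */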
+uint32_t imsic_num_bits(uint32_t count);
+
+/*
+ * The virt machine physical address space used by some of the devices,
+ * namely ACLINT, PLIC, APLIC, and IMSIC, depends on the number of sockets,
+ * the number of CPUs, and the number of IMSIC guest files.
+ *
+ * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
+ * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
+ * of the virt machine physical address space.
+ */
+
+#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
+#if VIRT_IMSIC_GROUP_MAX_SIZE < \
+ IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
+#error "Can't accomodate single IMSIC group in address space"
+#endif
+
+#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
+ VIRT_IMSIC_GROUP_MAX_SIZE)
+#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
+#error "Can't accomodate all IMSIC groups in address space"
+#endif
+
#endif
diff --git a/include/hw/virtio/virtio-acpi.h b/include/hw/virtio/virtio-acpi.h
new file mode 100644
index 0000000..844e102
--- /dev/null
+++ b/include/hw/virtio/virtio-acpi.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ACPI support for virtio
+ */
+
+#ifndef VIRTIO_ACPI_H
+#define VIRTIO_ACPI_H
+
+#include "qemu/osdep.h"
+#include "exec/hwaddr.h"
+
+void virtio_acpi_dsdt_add(Aml *scope, const hwaddr virtio_mmio_base,
+ const hwaddr virtio_mmio_size, uint32_t mmio_irq,
+ long int start_index, int num);
+
+#endif
diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h
index 16be02f..c6295c6 100644
--- a/include/qemu/fifo8.h
+++ b/include/qemu/fifo8.h
@@ -71,7 +71,7 @@
* fifo8_pop_buf:
* @fifo: FIFO to pop from
* @max: maximum number of bytes to pop
- * @num: actual number of returned bytes
+ * @numptr: pointer filled with number of bytes returned (can be NULL)
*
* Pop a number of elements from the FIFO up to a maximum of max. The buffer
* containing the popped data is returned. This buffer points directly into
@@ -82,16 +82,43 @@
* around in the ring buffer; in this case only a contiguous part of the data
* is returned.
*
- * The number of valid bytes returned is populated in *num; will always return
- * at least 1 byte. max must not be 0 or greater than the number of bytes in
- * the FIFO.
+ * The number of valid bytes returned is populated in *numptr; the function
+ * will always return at least 1 byte. max must not be 0 or greater than the
+ * number of bytes in the FIFO.
*
* Clients are responsible for checking the availability of requested data
* using fifo8_num_used().
*
* Returns: A pointer to popped data.
*/
-const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num);
+const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
+
+/**
+ * fifo8_peek_buf: read up to max bytes from the FIFO
+ * @fifo: FIFO to read from
+ * @max: maximum number of bytes to peek
+ * @numptr: pointer filled with number of bytes returned (can be NULL)
+ *
+ * Peek at a number of elements from the FIFO up to a maximum of max.
+ * The buffer containing the data peeked at is returned. This buffer points
+ * directly into the FIFO backing store. Since the data is invalidated once
+ * any of the fifo8_* APIs are called on the FIFO, it is the caller's
+ * responsibility to access it before doing further API calls.
+ *
+ * The function may return fewer bytes than requested when the data wraps
+ * around in the ring buffer; in this case only a contiguous part of the data
+ * is returned.
+ *
+ * The number of valid bytes returned is populated in *numptr; the function
+ * will always return at least 1 byte. max must not be 0 or greater than
+ * the number of bytes in the FIFO.
+ *
+ * Clients are responsible for checking the availability of requested data
+ * using fifo8_num_used().
+ *
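+ * A minimal usage sketch (process() is a hypothetical consumer): peek at
+ * the contiguous readable chunk, then pop the same amount to consume it:
+ *
+ *     uint32_t n, used = fifo8_num_used(fifo);
+ *     if (used) {
+ *         const uint8_t *buf = fifo8_peek_buf(fifo, used, &n);
+ *         process(buf, n);
+ *         fifo8_pop_buf(fifo, n, NULL);
+ *     }
+ *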
+ * Returns: A pointer to peekable data.
+ */
+const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr);
/**
* fifo8_reset:
diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h
index 72279f4..3afb701 100644
--- a/include/standard-headers/drm/drm_fourcc.h
+++ b/include/standard-headers/drm/drm_fourcc.h
@@ -322,6 +322,8 @@
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
index e5f558d..a391932 100644
--- a/include/standard-headers/linux/pci_regs.h
+++ b/include/standard-headers/linux/pci_regs.h
@@ -80,6 +80,7 @@
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
+#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
@@ -637,6 +638,7 @@
#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 0x20 /* Root Status */
+#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
@@ -930,12 +932,13 @@
/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
-#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
-#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */
+#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_WIDTH 0x1f00
#define PCI_PASID_CTRL 0x06 /* PASID control register */
-#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
-#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
-#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
+#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */
+#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */
+#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
@@ -975,6 +978,8 @@
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
+#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */
+#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
@@ -1042,9 +1047,16 @@
#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */
+#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
@@ -1088,6 +1100,8 @@
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
+#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */
+#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */
/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */
diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h
index 5ad07e1..fd54044 100644
--- a/include/standard-headers/linux/vhost_types.h
+++ b/include/standard-headers/linux/vhost_types.h
@@ -185,5 +185,12 @@
* DRIVER_OK
*/
#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID 0x7
+/* IOTLB mappings are not flushed across device reset */
+#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8
#endif
diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h
index 8a7d0dc..bfd1ca6 100644
--- a/include/standard-headers/linux/virtio_config.h
+++ b/include/standard-headers/linux/virtio_config.h
@@ -103,6 +103,11 @@
*/
#define VIRTIO_F_NOTIFICATION_DATA 38
+/* This feature indicates that the driver uses the data provided by the device
+ * as a virtqueue identifier in available buffer notifications.
+ */
+#define VIRTIO_F_NOTIF_CONFIG_DATA 39
+
/*
* This feature indicates that the driver can reset a queue individually.
*/
diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h
index be912cf..b7fdfd0 100644
--- a/include/standard-headers/linux/virtio_pci.h
+++ b/include/standard-headers/linux/virtio_pci.h
@@ -166,6 +166,17 @@
uint32_t queue_used_hi; /* read-write */
};
+/*
+ * Warning: do not use sizeof on this: use offsetofend for
+ * specific fields you need.
+ */
+struct virtio_pci_modern_common_cfg {
+ struct virtio_pci_common_cfg cfg;
+
+ uint16_t queue_notify_data; /* read-write */
+ uint16_t queue_reset; /* read-write */
+};
+
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
struct virtio_pci_cfg_cap {
struct virtio_pci_cap cap;
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
index 38e5957..c59ea55 100644
--- a/linux-headers/asm-arm64/kvm.h
+++ b/linux-headers/asm-arm64/kvm.h
@@ -491,6 +491,38 @@
#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
+/*
+ * Get feature ID registers userspace writable mask.
+ *
+ * From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model
+ * Feature Register 2"):
+ *
+ * "The Feature ID space is defined as the System register space in
+ * AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7},
+ * op2=={0-7}."
+ *
+ * This covers all currently known R/O registers that indicate
+ * anything useful feature wise, including the ID registers.
+ *
+ * If we ever need to introduce a new range, it will be described as
+ * such in the range field.
+ */
+#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \
+ ({ \
+ __u64 __op1 = (op1) & 3; \
+ __op1 -= (__op1 == 3); \
+ (__op1 << 6 | ((crm) & 7) << 3 | (op2)); \
+ })
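+
+/*
+ * Worked example (illustrative): ID_AA64MMFR2_EL1 (op0=3, op1=0, CRn=0,
+ * CRm=7, op2=2) maps to index (0 << 6) | (7 << 3) | 2 == 58.
+ */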
+
+#define KVM_ARM_FEATURE_ID_RANGE 0
+#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8)
+
+struct reg_mask_range {
+ __u64 addr; /* Pointer to mask array */
+ __u32 range; /* Requested range */
+ __u32 reserved[13];
+};
+
#endif
#endif /* __ARM_KVM_H__ */
diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h
index abe087c..756b013 100644
--- a/linux-headers/asm-generic/unistd.h
+++ b/linux-headers/asm-generic/unistd.h
@@ -71,7 +71,7 @@
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
#define __NR_lookup_dcookie 18
-__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
+__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall)
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
#define __NR_epoll_create1 20
@@ -816,15 +816,21 @@
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
-
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
-
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+#define __NR_map_shadow_stack 453
+__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
+#define __NR_futex_wake 454
+__SYSCALL(__NR_futex_wake, sys_futex_wake)
+#define __NR_futex_wait 455
+__SYSCALL(__NR_futex_wait, sys_futex_wait)
+#define __NR_futex_requeue 456
+__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
#undef __NR_syscalls
-#define __NR_syscalls 453
+#define __NR_syscalls 457
/*
* 32 bit systems traditionally used different
diff --git a/linux-headers/asm-loongarch/bitsperlong.h b/linux-headers/asm-loongarch/bitsperlong.h
new file mode 100644
index 0000000..6dc0bb0
--- /dev/null
+++ b/linux-headers/asm-loongarch/bitsperlong.h
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h
new file mode 100644
index 0000000..c6ad2ee
--- /dev/null
+++ b/linux-headers/asm-loongarch/kvm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __UAPI_ASM_LOONGARCH_KVM_H
+#define __UAPI_ASM_LOONGARCH_KVM_H
+
+#include <linux/types.h>
+
+/*
+ * KVM LoongArch specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+ /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+ __u64 gpr[32];
+ __u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+ __u32 fcsr;
+ __u64 fcc; /* 8x8 */
+ struct kvm_fpureg {
+ __u64 val64[4];
+ } fpr[32];
+};
+
+/*
+ * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers. The id field is broken down as follows:
+ *
+ * bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CSR registers.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / SIMD registers (see definitions below).
+ *
+ * Other register sets may be added in the future. Each set would
+ * have its own identifier in bits[31..16].
+ */
+
+#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL)
+#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL)
+#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
+#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
+#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
+#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
+#define KVM_CSR_IDX_MASK 0x7fff
+#define KVM_CPUCFG_IDX_MASK 0x7fff
+
+/*
+ * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
+ */
+
+#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2)
+
+#define LOONGARCH_REG_SHIFT 3
+#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
+#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
+#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_iocsr_entry {
+ __u32 addr;
+ __u32 pad;
+ __u64 data;
+};
+
+#define KVM_NR_IRQCHIPS 1
+#define KVM_IRQCHIP_NUM_PINS 64
+#define KVM_MAX_CORES 256
+
+#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
diff --git a/linux-headers/asm-loongarch/mman.h b/linux-headers/asm-loongarch/mman.h
new file mode 100644
index 0000000..8eebf89
--- /dev/null
+++ b/linux-headers/asm-loongarch/mman.h
@@ -0,0 +1 @@
+#include <asm-generic/mman.h>
diff --git a/linux-headers/asm-loongarch/unistd.h b/linux-headers/asm-loongarch/unistd.h
new file mode 100644
index 0000000..fcb6689
--- /dev/null
+++ b/linux-headers/asm-loongarch/unistd.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+#include <asm-generic/unistd.h>
diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h
index 46d8500..994b6f0 100644
--- a/linux-headers/asm-mips/unistd_n32.h
+++ b/linux-headers/asm-mips/unistd_n32.h
@@ -381,5 +381,9 @@
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
#define __NR_cachestat (__NR_Linux + 451)
#define __NR_fchmodat2 (__NR_Linux + 452)
+#define __NR_map_shadow_stack (__NR_Linux + 453)
+#define __NR_futex_wake (__NR_Linux + 454)
+#define __NR_futex_wait (__NR_Linux + 455)
+#define __NR_futex_requeue (__NR_Linux + 456)
#endif /* _ASM_UNISTD_N32_H */
diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h
index c2f7ac6..41dcf58 100644
--- a/linux-headers/asm-mips/unistd_n64.h
+++ b/linux-headers/asm-mips/unistd_n64.h
@@ -357,5 +357,9 @@
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
#define __NR_cachestat (__NR_Linux + 451)
#define __NR_fchmodat2 (__NR_Linux + 452)
+#define __NR_map_shadow_stack (__NR_Linux + 453)
+#define __NR_futex_wake (__NR_Linux + 454)
+#define __NR_futex_wait (__NR_Linux + 455)
+#define __NR_futex_requeue (__NR_Linux + 456)
#endif /* _ASM_UNISTD_N64_H */
diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h
index 757c68f..ae9d334 100644
--- a/linux-headers/asm-mips/unistd_o32.h
+++ b/linux-headers/asm-mips/unistd_o32.h
@@ -427,5 +427,9 @@
#define __NR_set_mempolicy_home_node (__NR_Linux + 450)
#define __NR_cachestat (__NR_Linux + 451)
#define __NR_fchmodat2 (__NR_Linux + 452)
+#define __NR_map_shadow_stack (__NR_Linux + 453)
+#define __NR_futex_wake (__NR_Linux + 454)
+#define __NR_futex_wait (__NR_Linux + 455)
+#define __NR_futex_requeue (__NR_Linux + 456)
#endif /* _ASM_UNISTD_O32_H */
diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h
index 8ef94bb..b9b23d6 100644
--- a/linux-headers/asm-powerpc/unistd_32.h
+++ b/linux-headers/asm-powerpc/unistd_32.h
@@ -434,6 +434,10 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_32_H */
diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h
index 0e7ee43..cbb4b3e 100644
--- a/linux-headers/asm-powerpc/unistd_64.h
+++ b/linux-headers/asm-powerpc/unistd_64.h
@@ -406,6 +406,10 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_64_H */
diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h
index 992c5e4..60d3b21 100644
--- a/linux-headers/asm-riscv/kvm.h
+++ b/linux-headers/asm-riscv/kvm.h
@@ -80,6 +80,7 @@
unsigned long sip;
unsigned long satp;
unsigned long scounteren;
+ unsigned long senvcfg;
};
/* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -93,6 +94,11 @@
unsigned long iprio2h;
};
+/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_smstateen_csr {
+ unsigned long sstateen0;
+};
+
/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
struct kvm_riscv_timer {
__u64 frequency;
@@ -131,6 +137,8 @@
KVM_RISCV_ISA_EXT_ZICSR,
KVM_RISCV_ISA_EXT_ZIFENCEI,
KVM_RISCV_ISA_EXT_ZIHPM,
+ KVM_RISCV_ISA_EXT_SMSTATEEN,
+ KVM_RISCV_ISA_EXT_ZICOND,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -148,6 +156,7 @@
KVM_RISCV_SBI_EXT_PMU,
KVM_RISCV_SBI_EXT_EXPERIMENTAL,
KVM_RISCV_SBI_EXT_VENDOR,
+ KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_MAX,
};
@@ -178,10 +187,13 @@
#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT)
#define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_CSR_REG(name) \
(offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
#define KVM_REG_RISCV_CSR_AIA_REG(name) \
(offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \
+ (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long))
/* Timer registers are mapped as type 4 */
#define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT)
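A hedged sketch of how the new SMSTATEEN subtype composes into a ONE_REG id on a 64-bit host (vcpu_fd is assumed to be an open KVM vCPU descriptor):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Read sstateen0 through the Smstateen CSR subtype added above. */
    static int read_sstateen0(int vcpu_fd, __u64 *val)
    {
        struct kvm_one_reg reg = {
            .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CSR |
                  KVM_REG_RISCV_CSR_SMSTATEEN |
                  KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0),
            .addr = (__u64)(unsigned long)val,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }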
diff --git a/linux-headers/asm-riscv/ptrace.h b/linux-headers/asm-riscv/ptrace.h
new file mode 100644
index 0000000..1e3166c
--- /dev/null
+++ b/linux-headers/asm-riscv/ptrace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PTRACE_H
+#define _ASM_RISCV_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define PTRACE_GETFDPIC 33
+
+#define PTRACE_GETFDPIC_EXEC 0
+#define PTRACE_GETFDPIC_INTERP 1
+
+/*
+ * User-mode register state for core dumps, ptrace, sigcontext
+ *
+ * This decouples struct pt_regs from the userspace ABI.
+ * struct user_regs_struct must form a prefix of struct pt_regs.
+ */
+struct user_regs_struct {
+ unsigned long pc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+};
+
+struct __riscv_f_ext_state {
+ __u32 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_d_ext_state {
+ __u64 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_q_ext_state {
+ __u64 f[64] __attribute__((aligned(16)));
+ __u32 fcsr;
+ /*
+ * Reserved for expansion of sigcontext structure. Currently zeroed
+ * upon signal, and must be zero upon sigreturn.
+ */
+ __u32 reserved[3];
+};
+
+struct __riscv_ctx_hdr {
+ __u32 magic;
+ __u32 size;
+};
+
+struct __riscv_extra_ext_header {
+ __u32 __padding[129] __attribute__((aligned(16)));
+ /*
+ * Reserved for expansion of sigcontext structure. Currently zeroed
+ * upon signal, and must be zero upon sigreturn.
+ */
+ __u32 reserved;
+ struct __riscv_ctx_hdr hdr;
+};
+
+union __riscv_fp_state {
+ struct __riscv_f_ext_state f;
+ struct __riscv_d_ext_state d;
+ struct __riscv_q_ext_state q;
+};
+
+struct __riscv_v_ext_state {
+ unsigned long vstart;
+ unsigned long vl;
+ unsigned long vtype;
+ unsigned long vcsr;
+ unsigned long vlenb;
+ void *datap;
+ /*
+ * In a signal handler, datap will be set to a correct user stack
+ * offset, and the vector registers will be copied to the address
+ * that datap points to.
+ */
+};
+
+struct __riscv_v_regset_state {
+ unsigned long vstart;
+ unsigned long vl;
+ unsigned long vtype;
+ unsigned long vcsr;
+ unsigned long vlenb;
+ char vreg[];
+};
+
+/*
+ * According to the spec, VLEN (the number of bits in a single vector
+ * register) must satisfy VLEN >= ELEN, must be a power of 2, and must
+ * be no greater than 2^16 bits = 65536 bits = 8192 bytes.
+ */
+#define RISCV_MAX_VLENB (8192)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PTRACE_H */
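An illustrative sketch (not part of the header) of sizing a buffer for the regset above: the trailing vreg[] flexible array holds all 32 vector registers of vlenb bytes each:

    #include <stdlib.h>
    #include <string.h>

    struct __riscv_v_regset_state *alloc_v_regset(unsigned long vlenb)
    {
        /* Fixed header plus 32 vector registers of vlenb bytes each. */
        size_t sz = sizeof(struct __riscv_v_regset_state) + 32 * vlenb;
        struct __riscv_v_regset_state *st = malloc(sz);

        if (st) {
            memset(st, 0, sz);
            st->vlenb = vlenb;
        }
        return st;
    }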
diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h
index 716fa36..c093e6d 100644
--- a/linux-headers/asm-s390/unistd_32.h
+++ b/linux-headers/asm-s390/unistd_32.h
@@ -425,5 +425,9 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
#endif /* _ASM_S390_UNISTD_32_H */
diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h
index b2a11b1..114c056 100644
--- a/linux-headers/asm-s390/unistd_64.h
+++ b/linux-headers/asm-s390/unistd_64.h
@@ -373,5 +373,9 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
#endif /* _ASM_S390_UNISTD_64_H */
diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h
index d749ad1..329649c 100644
--- a/linux-headers/asm-x86/unistd_32.h
+++ b/linux-headers/asm-x86/unistd_32.h
@@ -443,6 +443,10 @@
#define __NR_set_mempolicy_home_node 450
#define __NR_cachestat 451
#define __NR_fchmodat2 452
+#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_32_H */
diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h
index cea6728..4583606 100644
--- a/linux-headers/asm-x86/unistd_64.h
+++ b/linux-headers/asm-x86/unistd_64.h
@@ -366,6 +366,9 @@
#define __NR_cachestat 451
#define __NR_fchmodat2 452
#define __NR_map_shadow_stack 453
+#define __NR_futex_wake 454
+#define __NR_futex_wait 455
+#define __NR_futex_requeue 456
#endif /* _ASM_UNISTD_64_H */
diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h
index 5b2e79b..146d74d 100644
--- a/linux-headers/asm-x86/unistd_x32.h
+++ b/linux-headers/asm-x86/unistd_x32.h
@@ -318,6 +318,9 @@
#define __NR_set_mempolicy_home_node (__X32_SYSCALL_BIT + 450)
#define __NR_cachestat (__X32_SYSCALL_BIT + 451)
#define __NR_fchmodat2 (__X32_SYSCALL_BIT + 452)
+#define __NR_futex_wake (__X32_SYSCALL_BIT + 454)
+#define __NR_futex_wait (__X32_SYSCALL_BIT + 455)
+#define __NR_futex_requeue (__X32_SYSCALL_BIT + 456)
#define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512)
#define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513)
#define __NR_ioctl (__X32_SYSCALL_BIT + 514)
diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h
index 218bf7a..806d98d 100644
--- a/linux-headers/linux/iommufd.h
+++ b/linux-headers/linux/iommufd.h
@@ -47,6 +47,8 @@
IOMMUFD_CMD_VFIO_IOAS,
IOMMUFD_CMD_HWPT_ALLOC,
IOMMUFD_CMD_GET_HW_INFO,
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
};
/**
@@ -348,19 +350,85 @@
#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
/**
+ * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
+ * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
+ * the parent HWPT in a nesting configuration.
+ * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
+ * enforced on device attachment
+ */
+enum iommufd_hwpt_alloc_flags {
+ IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
+ * entry attributes
+ * @IOMMU_VTD_S1_SRE: Supervisor request
+ * @IOMMU_VTD_S1_EAFE: Extended access enable
+ * @IOMMU_VTD_S1_WPE: Write protect enable
+ */
+enum iommu_hwpt_vtd_s1_flags {
+ IOMMU_VTD_S1_SRE = 1 << 0,
+ IOMMU_VTD_S1_EAFE = 1 << 1,
+ IOMMU_VTD_S1_WPE = 1 << 2,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
+ * info (IOMMU_HWPT_DATA_VTD_S1)
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_flags
+ * @pgtbl_addr: The base address of the stage-1 page table.
+ * @addr_width: The address width of the stage-1 page table
+ * @__reserved: Must be 0
+ */
+struct iommu_hwpt_vtd_s1 {
+ __aligned_u64 flags;
+ __aligned_u64 pgtbl_addr;
+ __u32 addr_width;
+ __u32 __reserved;
+};
+
+/**
+ * enum iommu_hwpt_data_type - IOMMU HWPT Data Type
+ * @IOMMU_HWPT_DATA_NONE: no data
+ * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
+ */
+enum iommu_hwpt_data_type {
+ IOMMU_HWPT_DATA_NONE,
+ IOMMU_HWPT_DATA_VTD_S1,
+};
+
+/**
* struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
* @size: sizeof(struct iommu_hwpt_alloc)
- * @flags: Must be 0
+ * @flags: Combination of enum iommufd_hwpt_alloc_flags
* @dev_id: The device to allocate this HWPT for
- * @pt_id: The IOAS to connect this HWPT to
+ * @pt_id: The IOAS or HWPT to connect this HWPT to
* @out_hwpt_id: The ID of the new HWPT
* @__reserved: Must be 0
+ * @data_type: One of enum iommu_hwpt_data_type
+ * @data_len: Length of the type specific data
+ * @data_uptr: User pointer to the type specific data
*
* Explicitly allocate a hardware page table object. This is the same object
* type that is returned by iommufd_device_attach() and represents the
* underlying iommu driver's iommu_domain kernel object.
*
- * A HWPT will be created with the IOVA mappings from the given IOAS.
+ * A kernel-managed HWPT will be created with the mappings from the given
+ * IOAS via the @pt_id. The @data_type for this allocation must be set to
+ * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
+ * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
+ *
+ * A user-managed nested HWPT will be created from a given parent HWPT via
+ * @pt_id, in which the parent HWPT must be allocated previously via the
+ * same ioctl from a given IOAS (@pt_id). In this case, the @data_type
+ * must be set to a pre-defined type corresponding to an I/O page table
+ * type supported by the underlying IOMMU hardware.
+ *
+ * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
+ * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
+ * must be given.
*/
struct iommu_hwpt_alloc {
__u32 size;
@@ -369,13 +437,26 @@
__u32 pt_id;
__u32 out_hwpt_id;
__u32 __reserved;
+ __u32 data_type;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
};
#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
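A hedged userspace sketch of the kernel-managed path described above, assuming iommufd, dev_id and ioas_id were obtained earlier from /dev/iommu:

    #include <linux/iommufd.h>
    #include <sys/ioctl.h>

    static int hwpt_alloc_kernel_managed(int iommufd, __u32 dev_id,
                                         __u32 ioas_id, __u32 *out_hwpt_id)
    {
        struct iommu_hwpt_alloc cmd = {
            .size      = sizeof(cmd),
            .flags     = 0,                    /* no nesting, no dirty tracking */
            .dev_id    = dev_id,
            .pt_id     = ioas_id,              /* IOAS providing the mappings */
            .data_type = IOMMU_HWPT_DATA_NONE, /* kernel-managed: no type data */
        };
        int ret = ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd);

        if (!ret) {
            *out_hwpt_id = cmd.out_hwpt_id;
        }
        return ret;
    }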
/**
+ * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
+ * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
+ * on a nested_parent domain.
+ * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
+ */
+enum iommu_hw_info_vtd_flags {
+ IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
+};
+
+/**
* struct iommu_hw_info_vtd - Intel VT-d hardware information
*
- * @flags: Must be 0
+ * @flags: Combination of enum iommu_hw_info_vtd_flags
* @__reserved: Must be 0
*
* @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
@@ -405,6 +486,20 @@
};
/**
+ * enum iommufd_hw_capabilities
+ * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
+ * If available, it means the following APIs
+ * are supported:
+ *
+ * IOMMU_HWPT_GET_DIRTY_BITMAP
+ * IOMMU_HWPT_SET_DIRTY_TRACKING
+ *
+ */
+enum iommufd_hw_capabilities {
+ IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
+};
+
+/**
* struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
* @size: sizeof(struct iommu_hw_info)
* @flags: Must be 0
@@ -415,6 +510,8 @@
* the iommu type specific hardware information data
* @out_data_type: Output the iommu hardware info type as defined in the enum
* iommu_hw_info_type.
+ * @out_capabilities: Output the generic iommu capability info type as defined
+ * in the enum iommu_hw_capabilities.
* @__reserved: Must be 0
*
* Query an iommu type specific hardware information data from an iommu behind
@@ -439,6 +536,81 @@
__aligned_u64 data_uptr;
__u32 out_data_type;
__u32 __reserved;
+ __aligned_u64 out_capabilities;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
+
+/**
+ * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
+ * tracking
+ * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
+ */
+enum iommufd_hwpt_set_dirty_tracking_flags {
+ IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
+};
+
+/**
+ * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
+ * @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
+ * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @__reserved: Must be 0
+ *
+ * Toggle dirty tracking on an HW pagetable.
+ */
+struct iommu_hwpt_set_dirty_tracking {
+ __u32 size;
+ __u32 flags;
+ __u32 hwpt_id;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
+
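A minimal sketch of toggling the feature, assuming the HWPT was allocated with IOMMU_HWPT_ALLOC_DIRTY_TRACKING set:

    #include <stdbool.h>
    #include <linux/iommufd.h>
    #include <sys/ioctl.h>

    static int hwpt_set_dirty_tracking(int iommufd, __u32 hwpt_id, bool enable)
    {
        struct iommu_hwpt_set_dirty_tracking cmd = {
            .size    = sizeof(cmd),
            .flags   = enable ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
            .hwpt_id = hwpt_id,
        };

        return ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
    }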
+/**
+ * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
+ * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
+ * any dirty bit metadata. This flag
+ * can be passed when the next
+ * operation on the same IOVA range
+ * is expected to be an unmap.
+ *
+ */
+enum iommufd_hwpt_get_dirty_bitmap_flags {
+ IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
+};
+
+/**
+ * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
+ * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
+ * @__reserved: Must be 0
+ * @iova: base IOVA of the bitmap first bit
+ * @length: IOVA range size
+ * @page_size: page size granularity of each bit in the bitmap
+ * @data: bitmap where the dirty bits are set. Each bit in the bitmap
+ * represents one page_size unit of IOVA, relative to the base @iova.
+ *
+ * Checking a given IOVA is dirty:
+ *
+ * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
+ *
+ * Walk the IOMMU pagetables for a given IOVA range to return a bitmap
+ * with the dirty IOVAs. In doing so it will also by default clear any
+ * dirty bit metadata set in the IOPTE.
+ */
+struct iommu_hwpt_get_dirty_bitmap {
+ __u32 size;
+ __u32 hwpt_id;
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 data;
+};
+#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
+
#endif
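Tying the two ioctls together, a hedged sketch that fetches the bitmap for a range and tests one IOVA using the indexing from the comment above; the caller is assumed to size bitmap to length / page_size bits:

    #include <linux/iommufd.h>
    #include <sys/ioctl.h>

    static int iova_is_dirty(int iommufd, __u32 hwpt_id, __u64 iova,
                             __u64 length, __u64 page_size, __u64 *bitmap,
                             __u64 probe_iova)
    {
        struct iommu_hwpt_get_dirty_bitmap cmd = {
            .size      = sizeof(cmd),
            .hwpt_id   = hwpt_id,
            .iova      = iova,
            .length    = length,
            .page_size = page_size,
            .data      = (__u64)(unsigned long)bitmap,
        };
        __u64 bit;

        if (ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd)) {
            return -1;
        }
        /* Same formula as the header comment, taken relative to @iova. */
        bit = (probe_iova - iova) / page_size;
        return !!(bitmap[bit / 64] & (1ULL << (bit % 64)));
    }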
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index 0d74ee9..549fea3 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -264,6 +264,7 @@
#define KVM_EXIT_RISCV_SBI 35
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
+#define KVM_EXIT_LOONGARCH_IOCSR 38
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -336,6 +337,13 @@
__u32 len;
__u8 is_write;
} mmio;
+ /* KVM_EXIT_LOONGARCH_IOCSR */
+ struct {
+ __u64 phys_addr;
+ __u8 data[8];
+ __u32 len;
+ __u8 is_write;
+ } iocsr_io;
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
@@ -1188,6 +1196,7 @@
#define KVM_CAP_COUNTER_OFFSET 227
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
+#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1358,6 +1367,7 @@
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_MIPS 0x7000000000000000ULL
#define KVM_REG_RISCV 0x8000000000000000ULL
+#define KVM_REG_LOONGARCH 0x9000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
@@ -1558,6 +1568,7 @@
#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* Available with KVM_CAP_COUNTER_OFFSET */
#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
+#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
diff --git a/linux-headers/linux/psp-sev.h b/linux-headers/linux/psp-sev.h
index 12ccb70..bcb2133 100644
--- a/linux-headers/linux/psp-sev.h
+++ b/linux-headers/linux/psp-sev.h
@@ -68,6 +68,7 @@
SEV_RET_INVALID_PARAM,
SEV_RET_RESOURCE_LIMIT,
SEV_RET_SECURE_DATA_INVALID,
+ SEV_RET_INVALID_KEY = 0x27,
SEV_RET_MAX,
} sev_ret_code;
diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h
index 9bb0708..bf9749d 100644
--- a/linux-headers/linux/stddef.h
+++ b/linux-headers/linux/stddef.h
@@ -27,8 +27,13 @@
union { \
struct { MEMBERS } ATTRS; \
struct TAG { MEMBERS } ATTRS NAME; \
- }
+ } ATTRS
+#ifdef __cplusplus
+/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+#define __DECLARE_FLEX_ARRAY(T, member) \
+ T member[0]
+#else
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
*
@@ -49,3 +54,5 @@
#ifndef __counted_by
#define __counted_by(m)
#endif
+
+#endif /* _LINUX_STDDEF_H */
diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h
index 59978fb..953c75f 100644
--- a/linux-headers/linux/userfaultfd.h
+++ b/linux-headers/linux/userfaultfd.h
@@ -40,7 +40,8 @@
UFFD_FEATURE_EXACT_ADDRESS | \
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
UFFD_FEATURE_WP_UNPOPULATED | \
- UFFD_FEATURE_POISON)
+ UFFD_FEATURE_POISON | \
+ UFFD_FEATURE_WP_ASYNC)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -216,6 +217,11 @@
* (i.e. empty ptes). This will be the default behavior for shmem
* & hugetlbfs, so this flag only affects anonymous memory behavior
* when userfault write-protection mode is registered.
+ *
+ * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection
+ * asynchronous mode is supported, in which a write fault is
+ * automatically resolved and the write protection is removed.
+ * It implies UFFD_FEATURE_WP_UNPOPULATED.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -232,6 +238,7 @@
#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
#define UFFD_FEATURE_POISON (1<<14)
+#define UFFD_FEATURE_WP_ASYNC (1<<15)
__u64 features;
__u64 ioctls;
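A hedged sketch of negotiating the new feature at UFFDIO_API time, assuming a kernel that advertises UFFD_FEATURE_WP_ASYNC:

    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_uffd_wp_async(void)
    {
        int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        struct uffdio_api api = {
            .api      = UFFD_API,
            .features = UFFD_FEATURE_PAGEFAULT_FLAG_WP | UFFD_FEATURE_WP_ASYNC,
        };

        if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api)) {
            return -1;
        }
        return uffd; /* write-protect faults now resolve asynchronously */
    }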
diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index acf72b4..8e175ece3 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -277,8 +277,8 @@
#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
__u32 cap_offset; /* Offset within info struct of first cap */
- __u64 size; /* Region size (bytes) */
- __u64 offset; /* Region offset from start of device fd */
+ __aligned_u64 size; /* Region size (bytes) */
+ __aligned_u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
@@ -294,8 +294,8 @@
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
struct vfio_region_sparse_mmap_area {
- __u64 offset; /* Offset of mmap'able area within region */
- __u64 size; /* Size of mmap'able area */
+ __aligned_u64 offset; /* Offset of mmap'able area within region */
+ __aligned_u64 size; /* Size of mmap'able area */
};
struct vfio_region_info_cap_sparse_mmap {
@@ -450,9 +450,9 @@
VFIO_DEVICE_STATE_V1_RESUMING)
__u32 reserved;
- __u64 pending_bytes;
- __u64 data_offset;
- __u64 data_size;
+ __aligned_u64 pending_bytes;
+ __aligned_u64 data_offset;
+ __aligned_u64 data_size;
};
/*
@@ -476,7 +476,7 @@
struct vfio_region_info_cap_nvlink2_ssatgt {
struct vfio_info_cap_header header;
- __u64 tgt;
+ __aligned_u64 tgt;
};
/*
@@ -816,7 +816,7 @@
__u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
/* out */
__u32 drm_format; /* drm format of plane */
- __u64 drm_format_mod; /* tiled mode */
+ __aligned_u64 drm_format_mod; /* tiled mode */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
@@ -829,6 +829,7 @@
__u32 region_index; /* region index */
__u32 dmabuf_id; /* dma-buf id */
};
+ __u32 reserved;
};
#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
@@ -863,9 +864,10 @@
#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
- __u64 offset; /* device fd offset of write */
- __u64 data; /* data to be written */
+ __aligned_u64 offset; /* device fd offset of write */
+ __aligned_u64 data; /* data to be written */
__s32 fd; /* -1 for de-assignment */
+ __u32 reserved;
};
#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
@@ -1434,6 +1436,27 @@
#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
+/**
+ * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device
+ * based on the operation specified in op flag.
+ *
+ * The functionality is incorporated for devices that need bus master control,
+ * but whose in-band device interface lacks the support. Consequently, it is
+ * not applicable to PCI devices, as bus master control for PCI devices is
+ * managed in-band through the configuration space. At present, this feature
+ * is supported only for CDX devices.
+ * When the device's BUS MASTER setting is configured as CLEAR, all incoming
+ * DMA requests from the device are blocked. Configuring the BUS MASTER
+ * setting as SET (enable) grants the device the capability to perform DMA
+ * to the host memory.
+ */
+struct vfio_device_feature_bus_master {
+ __u32 op;
+#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */
+#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */
+};
+#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
@@ -1449,7 +1472,7 @@
__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
- __u64 iova_pgsizes; /* Bitmap of supported page sizes */
+ __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */
__u32 cap_offset; /* Offset within info struct of first cap */
__u32 pad;
};
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index f5c48b6..649560c 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -219,4 +219,12 @@
*/
#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+/* Get the group for the descriptor table including driver & device areas
+ * of a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of vhost_vring_state.
+ * The group ID of the descriptor table for this specific virtqueue
+ * is returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
+ struct vhost_vring_state)
#endif
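A hedged userspace sketch of the new ioctl, assuming vdpa_fd is an open vhost-vdpa device whose backend supports descriptor-group queries:

    #include <linux/vhost.h>
    #include <sys/ioctl.h>

    static int get_desc_group(int vdpa_fd, unsigned int vq_index)
    {
        struct vhost_vring_state state = { .index = vq_index };

        if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &state)) {
            return -1;
        }
        return state.num; /* group ID of this vq's descriptor table */
    }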
diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
index 9a2ba3f..60ca116 100644
--- a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
+++ b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
Binary files differ
diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
index 5d4e812..bae158d 100644
--- a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
+++ b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
Binary files differ
diff --git a/roms/opensbi b/roms/opensbi
index 057eb10..a2b255b 160000
--- a/roms/opensbi
+++ b/roms/opensbi
@@ -1 +1 @@
-Subproject commit 057eb10b6d523540012e6947d5c9f63e95244e94
+Subproject commit a2b255b88918715173942f2c5e1f97ac9e90c877
diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
index 34295c0..a0006ee 100755
--- a/scripts/update-linux-headers.sh
+++ b/scripts/update-linux-headers.sh
@@ -156,6 +156,9 @@
cp_portable "$tmpdir/bootparam.h" \
"$output/include/standard-headers/asm-$arch"
fi
+ if [ $arch = riscv ]; then
+ cp "$tmpdir/include/asm/ptrace.h" "$output/linux-headers/asm-riscv/"
+ fi
done
rm -rf "$output/linux-headers/linux"
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
index 91b3361..9219c2f 100644
--- a/target/riscv/cpu-qom.h
+++ b/target/riscv/cpu-qom.h
@@ -23,6 +23,8 @@
#define TYPE_RISCV_CPU "riscv-cpu"
#define TYPE_RISCV_DYNAMIC_CPU "riscv-dynamic-cpu"
+#define TYPE_RISCV_VENDOR_CPU "riscv-vendor-cpu"
+#define TYPE_RISCV_BARE_CPU "riscv-bare-cpu"
#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
@@ -32,6 +34,9 @@
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
+#define TYPE_RISCV_CPU_RV64I RISCV_CPU_TYPE_NAME("rv64i")
+#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
+#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index b07a76e..8cbfc7e 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -53,6 +53,11 @@
#define BYTE(x) (x)
#endif
+bool riscv_cpu_is_32bit(RISCVCPU *cpu)
+{
+ return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
+}
+
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
{#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
@@ -78,6 +83,7 @@
*/
const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
+ ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
@@ -87,6 +93,7 @@
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
+ ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
@@ -370,6 +377,17 @@
/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
+ /*
+ * Bare CPUs do not default to the max available.
+ * Users must set a valid satp_mode on the
+ * command line.
+ */
+ if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
+ warn_report("No satp mode set. Defaulting to 'bare'");
+ cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
+ return;
+ }
+
cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif
@@ -552,6 +570,28 @@
set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
+
+static void rv64i_bare_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ riscv_cpu_set_misa(env, MXL_RV64, RVI);
+
+ /* Remove the defaults from the parent class */
+ RISCV_CPU(obj)->cfg.ext_zicntr = false;
+ RISCV_CPU(obj)->cfg.ext_zihpm = false;
+
+ /* Set to QEMU's first supported priv version */
+ env->priv_ver = PRIV_VERSION_1_10_0;
+
+ /*
+ * Support all available satp_mode settings. The default
+ * value will be set to MBARE if the user doesn't set
+ * satp_mode manually (see set_satp_mode_default_map()).
+ */
+#ifndef CONFIG_USER_ONLY
+ set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
+#endif
+}
#else
static void rv32_base_cpu_init(Object *obj)
{
@@ -892,6 +932,14 @@
env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
/*
+ * Bits 10, 6, 2 and 12 of mideleg are read-only one (i.e. hardwired
+ * to 1) when the Hypervisor extension is enabled.
+ */
+ if (riscv_has_ext(env, RVH)) {
+ env->mideleg |= HS_MODE_INTERRUPTS;
+ }
+
+ /*
* Clear mseccfg and unlock all the PMP entries upon reset.
* This is allowed as per the priv and smepmp specifications
* and is needed to clear stale entries across reboots.
@@ -943,7 +991,7 @@
#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
- bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
+ bool rv32 = riscv_cpu_is_32bit(cpu);
uint8_t satp_mode_map_max, satp_mode_supported_max;
/* The CPU wants the OS to decide which satp mode to use */
@@ -1019,6 +1067,14 @@
{
Error *local_err = NULL;
+#ifndef CONFIG_USER_ONLY
+ riscv_cpu_satp_mode_finalize(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+#endif
+
/*
* KVM accel does not have a specialized finalize()
* callback because its extensions are validated
@@ -1031,14 +1087,6 @@
return;
}
}
-
-#ifndef CONFIG_USER_ONLY
- riscv_cpu_satp_mode_finalize(cpu, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return;
- }
-#endif
}
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
@@ -1297,6 +1345,7 @@
MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
+ MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
@@ -1340,6 +1389,7 @@
MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),
MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
+ MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),
MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),
@@ -1406,6 +1456,13 @@
DEFINE_PROP_END_OF_LIST(),
};
+const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
+ MULTI_EXT_CFG_BOOL("svade", svade, true),
+ MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),
+
+ DEFINE_PROP_END_OF_LIST(),
+};
+
/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
@@ -1474,11 +1531,79 @@
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
+ DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64),
DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
DEFINE_PROP_END_OF_LIST(),
};
+/*
+ * RVA22U64 defines some 'named features' or 'synthetic extensions'
+ * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
+ * and Zicclsm. We do not implement caching in QEMU so we'll consider
+ * all these named features as always enabled.
+ *
+ * There's no riscv,isa update for them (nor for zic64b, despite it
+ * having a cfg offset) at this moment.
+ */
+static RISCVCPUProfile RVA22U64 = {
+ .parent = NULL,
+ .name = "rva22u64",
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
+ .ext_offsets = {
+ CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
+ CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
+ CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
+ CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
+ CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
+ CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),
+
+ /* mandatory named features for this profile */
+ CPU_CFG_OFFSET(zic64b),
+
+ RISCV_PROFILE_EXT_LIST_END
+ }
+};
+
+/*
+ * As with RVA22U64, RVA22S64 also defines 'named features'.
+ *
+ * Cache related features that we consider enabled since we don't
+ * implement cache: Ssccptr
+ *
+ * Other named features that we already implement: Sstvecd, Sstvala,
+ * Sscounterenw
+ *
+ * Named features that we need to enable: svade
+ *
+ * The remaining features/extensions come from RVA22U64.
+ */
+static RISCVCPUProfile RVA22S64 = {
+ .parent = &RVA22U64,
+ .name = "rva22s64",
+ .misa_ext = RVS,
+ .priv_spec = PRIV_VERSION_1_12_0,
+ .satp_mode = VM_1_10_SV39,
+ .ext_offsets = {
+ /* rva22s64 exts */
+ CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
+ CPU_CFG_OFFSET(ext_svinval),
+
+ /* rva22s64 named features */
+ CPU_CFG_OFFSET(svade),
+
+ RISCV_PROFILE_EXT_LIST_END
+ }
+};
+
+RISCVCPUProfile *riscv_profiles[] = {
+ &RVA22U64,
+ &RVA22S64,
+ NULL,
+};
+
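For illustration, a hedged sketch (not the actual QEMU helper) of how an ext_offsets list is meant to be applied: each entry is a CPU_CFG_OFFSET() byte offset of a bool flag inside RISCVCPUConfig, terminated by RISCV_PROFILE_EXT_LIST_END:

    static void profile_enable_exts(RISCVCPU *cpu,
                                    const RISCVCPUProfile *profile)
    {
        for (int i = 0;
             profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
            /* Flip the extension flag stored at the recorded offset. */
            bool *ext = (bool *)((char *)&cpu->cfg + profile->ext_offsets[i]);
            *ext = true;
        }
    }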
static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
@@ -1499,6 +1624,22 @@
DEFINE_PROP_END_OF_LIST(),
};
+#if defined(TARGET_RISCV64)
+static void rva22u64_profile_cpu_init(Object *obj)
+{
+ rv64i_bare_cpu_init(obj);
+
+ RVA22U64.enabled = true;
+}
+
+static void rva22s64_profile_cpu_init(Object *obj)
+{
+ rv64i_bare_cpu_init(obj);
+
+ RVA22S64.enabled = true;
+}
+#endif
+
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
@@ -1570,9 +1711,9 @@
static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- bool value = RISCV_CPU(obj)->cfg.mvendorid;
+ uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
- visit_type_bool(v, name, &value, errp);
+ visit_type_uint32(v, name, &value, errp);
}
static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
@@ -1599,9 +1740,9 @@
static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- bool value = RISCV_CPU(obj)->cfg.mimpid;
+ uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
- visit_type_bool(v, name, &value, errp);
+ visit_type_uint64(v, name, &value, errp);
}
static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
@@ -1649,9 +1790,9 @@
static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- bool value = RISCV_CPU(obj)->cfg.marchid;
+ uint64_t value = RISCV_CPU(obj)->cfg.marchid;
- visit_type_bool(v, name, &value, errp);
+ visit_type_uint64(v, name, &value, errp);
}
static void riscv_cpu_class_init(ObjectClass *c, void *data)
@@ -1746,6 +1887,27 @@
.instance_init = initfn \
}
+#define DEFINE_VENDOR_CPU(type_name, initfn) \
+ { \
+ .name = type_name, \
+ .parent = TYPE_RISCV_VENDOR_CPU, \
+ .instance_init = initfn \
+ }
+
+#define DEFINE_BARE_CPU(type_name, initfn) \
+ { \
+ .name = type_name, \
+ .parent = TYPE_RISCV_BARE_CPU, \
+ .instance_init = initfn \
+ }
+
+#define DEFINE_PROFILE_CPU(type_name, initfn) \
+ { \
+ .name = type_name, \
+ .parent = TYPE_RISCV_BARE_CPU, \
+ .instance_init = initfn \
+ }
+
static const TypeInfo riscv_cpu_type_infos[] = {
{
.name = TYPE_RISCV_CPU,
@@ -1763,22 +1925,35 @@
.parent = TYPE_RISCV_CPU,
.abstract = true,
},
+ {
+ .name = TYPE_RISCV_VENDOR_CPU,
+ .parent = TYPE_RISCV_CPU,
+ .abstract = true,
+ },
+ {
+ .name = TYPE_RISCV_BARE_CPU,
+ .parent = TYPE_RISCV_CPU,
+ .abstract = true,
+ },
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init),
#if defined(TARGET_RISCV32)
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
- DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
+ DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
+ DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init),
#endif
};
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 2725528..5f3955c 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -76,6 +76,22 @@
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
+typedef struct riscv_cpu_profile {
+ struct riscv_cpu_profile *parent;
+ const char *name;
+ uint32_t misa_ext;
+ bool enabled;
+ bool user_set;
+ int priv_spec;
+ int satp_mode;
+ const int32_t ext_offsets[];
+} RISCVCPUProfile;
+
+#define RISCV_PROFILE_EXT_LIST_END -1
+#define RISCV_PROFILE_ATTR_UNUSED -1
+
+extern RISCVCPUProfile *riscv_profiles[];
+
/* Privileged specification version */
enum {
PRIV_VERSION_1_10_0 = 0,
@@ -679,6 +695,7 @@
uint64_t *cs_base, uint32_t *pflags);
void riscv_cpu_update_mask(CPURISCVState *env);
+bool riscv_cpu_is_32bit(RISCVCPU *cpu);
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
target_ulong *ret_value,
@@ -765,6 +782,7 @@
extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
+extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
extern Property riscv_cpu_options[];
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index f4605fb..780ae6e 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -65,6 +65,7 @@
bool ext_zicntr;
bool ext_zicsr;
bool ext_zicbom;
+ bool ext_zicbop;
bool ext_zicboz;
bool ext_zicond;
bool ext_zihintntl;
@@ -77,6 +78,7 @@
bool ext_svnapot;
bool ext_svpbmt;
bool ext_zdinx;
+ bool ext_zacas;
bool ext_zawrs;
bool ext_zfa;
bool ext_zfbfmin;
@@ -115,6 +117,8 @@
bool ext_smepmp;
bool rvv_ta_all_1s;
bool rvv_ma_all_1s;
+ bool svade;
+ bool zic64b;
uint32_t mvendorid;
uint64_t marchid;
@@ -142,6 +146,7 @@
uint16_t vlen;
uint16_t elen;
uint16_t cbom_blocksize;
+ uint16_t cbop_blocksize;
uint16_t cboz_blocksize;
bool mmu;
bool pmp;
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 99d1275..c7cc7eb 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1749,8 +1749,8 @@
* See if we need to adjust cause. Yes if it's a VS-mode interrupt;
* no if the hypervisor has delegated one of HS mode's interrupts.
*/
- if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
- cause == IRQ_VS_EXT) {
+ if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
+ cause == IRQ_VS_EXT)) {
cause = cause - 1;
}
write_gva = false;
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index c50a333..674ea07 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -195,8 +195,11 @@
if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
/* Offset for RV32 mhpmcounternh counters */
- base_csrno += 0x80;
+ csrno -= 0x80;
}
+
+ g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);
+
ctr_index = csrno - base_csrno;
if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
/* The PMU is not enabled or counter is out of range */
@@ -1328,11 +1331,14 @@
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
- MSTATUS_TW | MSTATUS_VS;
+ MSTATUS_TW;
if (riscv_has_ext(env, RVF)) {
mask |= MSTATUS_FS;
}
+ if (riscv_has_ext(env, RVV)) {
+ mask |= MSTATUS_VS;
+ }
if (xl != MXL_RV32 || env->debugger) {
if (riscv_has_ext(env, RVH)) {
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 33597fe..f22df04 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -1004,3 +1004,9 @@
vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1
vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1
vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1
+
+# *** RV32 Zacas Standard Extension ***
+amocas_w 00101 . . ..... ..... 010 ..... 0101111 @atom_st
+amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st
+# *** RV64 Zacas Standard Extension ***
+amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 78bd363..3871f0e 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3631,19 +3631,19 @@
}
/*
- * Whole Vector Register Move Instructions ignore vtype and vl setting.
- * Thus, we don't need to check vill bit. (Section 16.6)
+ * Whole Vector Register Move Instructions depend on the vtype register
+ * (vsew). Thus, we need to check the vill bit. (Section 16.6)
*/
#define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
{ \
if (require_rvv(s) && \
+ vext_check_isa_ill(s) && \
QEMU_IS_ALIGNED(a->rd, LEN) && \
QEMU_IS_ALIGNED(a->rs2, LEN)) { \
uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \
if (s->vstart_eq_zero) { \
- /* EEW = 8 */ \
- tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
+ tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \
vreg_ofs(s, a->rs2), maxsz, maxsz); \
mark_vs_dirty(s); \
} else { \
diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc
new file mode 100644
index 0000000..5d274d4
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvzacas.c.inc
@@ -0,0 +1,150 @@
+/*
+ * RISC-V translation routines for the RV64 Zacas Standard Extension.
+ *
+ * Copyright (c) 2020-2023 PLCT Lab
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZACAS(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zacas) { \
+ return false; \
+ } \
+} while (0)
+
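+/*
+ * amocas.w/.d/.q semantics: rd supplies the expected (compare) value and
+ * receives the original memory value, rs2 supplies the new (swap) value,
+ * and rs1 holds the address. tcg_gen_atomic_cmpxchg_tl maps onto this
+ * directly, with dest acting as both the compare input and the result.
+ */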
+static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop)
+{
+ TCGv dest = get_gpr(ctx, a->rd, EXT_NONE);
+ TCGv src1 = get_address(ctx, a->rs1, 0);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ decode_save_opc(ctx);
+ tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_amocas_w(DisasContext *ctx, arg_amocas_w *a)
+{
+ REQUIRE_ZACAS(ctx);
+ return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TESL);
+}
+
+static TCGv_i64 get_gpr_pair(DisasContext *ctx, int reg_num)
+{
+ TCGv_i64 t;
+
+ assert(get_ol(ctx) == MXL_RV32);
+
+ if (reg_num == 0) {
+ return tcg_constant_i64(0);
+ }
+
+ t = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]);
+ return t;
+}
+
+static void gen_set_gpr_pair(DisasContext *ctx, int reg_num, TCGv_i64 t)
+{
+ assert(get_ol(ctx) == MXL_RV32);
+
+ if (reg_num != 0) {
+#ifdef TARGET_RISCV32
+ tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t);
+#else
+ tcg_gen_ext32s_i64(cpu_gpr[reg_num], t);
+ tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32);
+#endif
+
+ if (get_xl_max(ctx) == MXL_RV128) {
+ tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63);
+ tcg_gen_sari_tl(cpu_gprh[reg_num + 1], cpu_gpr[reg_num + 1], 63);
+ }
+ }
+}
+
+static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop)
+{
+ /*
+ * Encodings with odd numbered registers specified in rs2 and rd are
+ * reserved.
+ */
+ if ((a->rs2 | a->rd) & 1) {
+ return false;
+ }
+
+ TCGv_i64 dest = get_gpr_pair(ctx, a->rd);
+ TCGv src1 = get_address(ctx, a->rs1, 0);
+ TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2);
+
+ decode_save_opc(ctx);
+ tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop);
+
+ gen_set_gpr_pair(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_amocas_d(DisasContext *ctx, arg_amocas_d *a)
+{
+ REQUIRE_ZACAS(ctx);
+ switch (get_ol(ctx)) {
+ case MXL_RV32:
+ return gen_cmpxchg64(ctx, a, MO_ALIGN | MO_TEUQ);
+ case MXL_RV64:
+ case MXL_RV128:
+ return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TEUQ);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a)
+{
+ REQUIRE_ZACAS(ctx);
+ REQUIRE_64BIT(ctx);
+
+ /*
+ * Encodings with odd numbered registers specified in rs2 and rd are
+ * reserved.
+ */
+ if ((a->rs2 | a->rd) & 1) {
+ return false;
+ }
+
+#ifdef TARGET_RISCV64
+ TCGv_i128 dest = tcg_temp_new_i128();
+ TCGv src1 = get_address(ctx, a->rs1, 0);
+ TCGv_i128 src2 = tcg_temp_new_i128();
+ TCGv_i64 src2l = get_gpr(ctx, a->rs2, EXT_NONE);
+ TCGv_i64 src2h = get_gpr(ctx, a->rs2 == 0 ? 0 : a->rs2 + 1, EXT_NONE);
+ TCGv_i64 destl = get_gpr(ctx, a->rd, EXT_NONE);
+ TCGv_i64 desth = get_gpr(ctx, a->rd == 0 ? 0 : a->rd + 1, EXT_NONE);
+
+ tcg_gen_concat_i64_i128(src2, src2l, src2h);
+ tcg_gen_concat_i64_i128(dest, destl, desth);
+ decode_save_opc(ctx);
+ tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx,
+ (MO_ALIGN | MO_TEUO));
+
+ tcg_gen_extr_i128_i64(destl, desth, dest);
+
+ if (a->rd != 0) {
+ gen_set_gpr(ctx, a->rd, destl);
+ gen_set_gpr(ctx, a->rd + 1, desth);
+ }
+#endif
+
+ return true;
+}
diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc
index 810d766..dbb6411 100644
--- a/target/riscv/insn_trans/trans_xthead.c.inc
+++ b/target/riscv/insn_trans/trans_xthead.c.inc
@@ -296,7 +296,7 @@
NOP_PRIVCHECK(th_dcache_cisw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_isw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_dcache_cpal1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
-NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
+NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU)
NOP_PRIVCHECK(th_icache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
NOP_PRIVCHECK(th_icache_ialls, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS)
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 117e33c..680a729 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -18,6 +18,7 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
+#include <sys/prctl.h>
#include <linux/kvm.h>
@@ -47,6 +48,9 @@
#include "sysemu/runstate.h"
#include "hw/riscv/numa.h"
+#define PR_RISCV_V_SET_CONTROL 69
+#define PR_RISCV_V_VSTATE_CTRL_ON 2
+
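These mirror the kernel's prctl values for switching the vector unit on for the current thread; a hedged sketch of the call (the actual call site in QEMU is not shown in this hunk):

    #include <sys/prctl.h>

    static int riscv_v_state_on(void)
    {
        /* 69 / 2 are PR_RISCV_V_SET_CONTROL / PR_RISCV_V_VSTATE_CTRL_ON. */
        return prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON,
                     0, 0, 0);
    }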
void riscv_kvm_aplic_request(void *opaque, int irq, int level)
{
kvm_set_irq(kvm_state, irq, !!level);
@@ -54,7 +58,7 @@
static bool cap_has_mp_state;
-static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
+static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type,
uint64_t idx)
{
uint64_t id = KVM_REG_RISCV | type | idx;
@@ -72,18 +76,38 @@
return id;
}
-#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
- KVM_REG_RISCV_CORE_REG(name))
+static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx)
+{
+ return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx;
+}
-#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
- KVM_REG_RISCV_CSR_REG(name))
+static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
+{
+ return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
+}
-#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
+#define RISCV_CORE_REG(env, name) \
+ kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
+ KVM_REG_RISCV_CORE_REG(name))
+
+#define RISCV_CSR_REG(env, name) \
+ kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \
+ KVM_REG_RISCV_CSR_REG(name))
+
+#define RISCV_CONFIG_REG(env, name) \
+ kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
+ KVM_REG_RISCV_CONFIG_REG(name))
+
+#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \
KVM_REG_RISCV_TIMER_REG(name))
-#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)
+#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx)
-#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
+#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)
+
+#define RISCV_VECTOR_CSR_REG(env, name) \
+ kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \
+ KVM_REG_RISCV_VECTOR_CSR_REG(name))
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
do { \
@@ -101,17 +125,17 @@
} \
} while (0)
-#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
+#define KVM_RISCV_GET_TIMER(cs, name, reg) \
do { \
- int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \
+ int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), ®); \
if (ret) { \
abort(); \
} \
} while (0)
-#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
+#define KVM_RISCV_SET_TIMER(cs, name, reg) \
do { \
- int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \
+ int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), ®); \
if (ret) { \
abort(); \
} \
@@ -138,6 +162,7 @@
KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
+ KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
};
static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
@@ -202,8 +227,8 @@
/* If we're here we're going to disable the MISA bit */
reg = 0;
- id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
- misa_cfg->kvm_reg_id);
+ id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ misa_cfg->kvm_reg_id);
ret = kvm_set_one_reg(cs, id, ®);
if (ret != 0) {
/*
@@ -364,8 +389,8 @@
continue;
}
- id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
- multi_ext_cfg->kvm_reg_id);
+ id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ multi_ext_cfg->kvm_reg_id);
reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
ret = kvm_set_one_reg(cs, id, ®);
if (ret != 0) {
@@ -398,7 +423,7 @@
}
if (value) {
- error_setg(errp, "extension %s is not available with KVM",
+ error_setg(errp, "'%s' is not available with KVM",
propname);
}
}
@@ -479,6 +504,11 @@
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);
+
+ /* We don't have the needed KVM support for profiles */
+ for (i = 0; riscv_profiles[i] != NULL; i++) {
+ riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name);
+ }
}
static int kvm_riscv_get_regs_core(CPUState *cs)
@@ -495,7 +525,7 @@
env->pc = reg;
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
ret = kvm_get_one_reg(cs, id, ®);
if (ret) {
return ret;
@@ -520,7 +550,7 @@
}
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
reg = env->gpr[i];
ret = kvm_set_one_reg(cs, id, ®);
if (ret) {
@@ -574,7 +604,7 @@
if (riscv_has_ext(env, RVD)) {
uint64_t reg;
for (i = 0; i < 32; i++) {
- ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), ®);
+ ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), ®);
if (ret) {
return ret;
}
@@ -586,7 +616,7 @@
if (riscv_has_ext(env, RVF)) {
uint32_t reg;
for (i = 0; i < 32; i++) {
- ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), ®);
+ ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), ®);
if (ret) {
return ret;
}
@@ -608,7 +638,7 @@
uint64_t reg;
for (i = 0; i < 32; i++) {
reg = env->fpr[i];
- ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), ®);
+ ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), ®);
if (ret) {
return ret;
}
@@ -620,7 +650,7 @@
uint32_t reg;
for (i = 0; i < 32; i++) {
reg = env->fpr[i];
- ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), ®);
+ ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), ®);
if (ret) {
return ret;
}
@@ -639,10 +669,10 @@
return;
}
- KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
- KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
- KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
- KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);
+ KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time);
+ KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare);
+ KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state);
+ KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency);
env->kvm_timer_dirty = true;
}
@@ -656,8 +686,8 @@
return;
}
- KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
- KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);
+ KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time);
+ KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare);
/*
* To set register of RISCV_TIMER_REG(state) will occur a error from KVM
@@ -666,7 +696,7 @@
* TODO If KVM changes, adapt here.
*/
if (env->kvm_timer_state) {
- KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
+ KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state);
}
/*
@@ -675,7 +705,7 @@
* during the migration.
*/
if (migration_is_running(migrate_get_current()->state)) {
- KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
+ KVM_RISCV_GET_TIMER(cs, frequency, reg);
if (reg != env->kvm_timer_frequency) {
error_report("Dst Hosts timer frequency != Src Hosts");
}
@@ -684,6 +714,65 @@
env->kvm_timer_dirty = false;
}
+static int kvm_riscv_get_regs_vector(CPUState *cs)
+{
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
+ target_ulong reg;
+ int ret = 0;
+
+ if (!riscv_has_ext(env, RVV)) {
+ return 0;
+ }
+
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®);
+ if (ret) {
+ return ret;
+ }
+ env->vstart = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®);
+ if (ret) {
+ return ret;
+ }
+ env->vl = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®);
+ if (ret) {
+ return ret;
+ }
+ env->vtype = reg;
+
+ return 0;
+}
+
+static int kvm_riscv_put_regs_vector(CPUState *cs)
+{
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
+ target_ulong reg;
+ int ret = 0;
+
+ if (!riscv_has_ext(env, RVV)) {
+ return 0;
+ }
+
+ reg = env->vstart;
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®);
+ if (ret) {
+ return ret;
+ }
+
+ reg = env->vl;
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®);
+ if (ret) {
+ return ret;
+ }
+
+ reg = env->vtype;
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®);
+
+ return ret;
+}
+
typedef struct KVMScratchCPU {
int kvmfd;
int vmfd;
@@ -746,24 +835,21 @@
struct kvm_one_reg reg;
int ret;
- reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(mvendorid));
+ reg.id = RISCV_CONFIG_REG(env, mvendorid);
reg.addr = (uint64_t)&cpu->cfg.mvendorid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve mvendorid from host, error %d", ret);
}
- reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(marchid));
+ reg.id = RISCV_CONFIG_REG(env, marchid);
reg.addr = (uint64_t)&cpu->cfg.marchid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve marchid from host, error %d", ret);
}
- reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(mimpid));
+ reg.id = RISCV_CONFIG_REG(env, mimpid);
reg.addr = (uint64_t)&cpu->cfg.mimpid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
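The repeated two-line kvm_riscv_reg_id() calls collapse into a RISCV_CONFIG_REG() convenience macro; judging by its call sites, it plausibly expands to (an assumption, not quoted from the patch):

#define RISCV_CONFIG_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
                           KVM_REG_RISCV_CONFIG_REG(name))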
@@ -778,8 +864,7 @@
struct kvm_one_reg reg;
int ret;
- reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(isa));
+ reg.id = RISCV_CONFIG_REG(env, isa);
reg.addr = (uint64_t)&env->misa_ext_mask;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -800,8 +885,8 @@
struct kvm_one_reg reg;
int ret;
- reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- cbomz_cfg->kvm_reg_id);
+ reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ cbomz_cfg->kvm_reg_id);
reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@@ -822,8 +907,8 @@
KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
struct kvm_one_reg reg;
- reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
- multi_ext_cfg->kvm_reg_id);
+ reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ multi_ext_cfg->kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@@ -914,8 +999,8 @@
for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
multi_ext_cfg = &kvm_multi_ext_cfgs[i];
- reg_id = kvm_riscv_reg_id(&cpu->env, KVM_REG_RISCV_ISA_EXT,
- multi_ext_cfg->kvm_reg_id);
+ reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT,
+ multi_ext_cfg->kvm_reg_id);
reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
if (!reg_search) {
@@ -983,6 +1068,11 @@
return ret;
}
+ ret = kvm_riscv_get_regs_vector(cs);
+ if (ret) {
+ return ret;
+ }
+
return ret;
}
@@ -1023,6 +1113,11 @@
return ret;
}
+ ret = kvm_riscv_put_regs_vector(cs);
+ if (ret) {
+ return ret;
+ }
+
if (KVM_PUT_RESET_STATE == level) {
RISCVCPU *cpu = RISCV_CPU(cs);
if (cs->cpu_index == 0) {
@@ -1082,8 +1177,7 @@
uint64_t id;
int ret;
- id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(mvendorid));
+ id = RISCV_CONFIG_REG(env, mvendorid);
/*
* cfg.mvendorid is a uint32 but a target_ulong will
* be written. Assign it to a target_ulong var to avoid
@@ -1095,15 +1189,13 @@
return ret;
}
- id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(marchid));
+ id = RISCV_CONFIG_REG(env, marchid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
if (ret != 0) {
return ret;
}
- id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(mimpid));
+ id = RISCV_CONFIG_REG(env, mimpid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
return ret;
@@ -1376,21 +1468,24 @@
exit(1);
}
- socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
- ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
- KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
- &socket_bits, true, NULL);
- if (ret < 0) {
- error_report("KVM AIA: failed to set group_bits");
- exit(1);
- }
- ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
- KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
- &group_shift, true, NULL);
- if (ret < 0) {
- error_report("KVM AIA: failed to set group_shift");
- exit(1);
+ if (socket_count > 1) {
+ socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
+ &socket_bits, true, NULL);
+ if (ret < 0) {
+ error_report("KVM AIA: failed to set group_bits");
+ exit(1);
+ }
+
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
+ &group_shift, true, NULL);
+ if (ret < 0) {
+ error_report("KVM AIA: failed to set group_shift");
+ exit(1);
+ }
}
guest_bits = guest_num == 0 ? 0 :
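The new guard programs group_bits/group_shift only for multi-socket guests, where interrupt-file grouping actually applies. For reference, the bit math (find_last_bit() returns the index of the highest set bit):

/* Illustration: socket_count == 4 (0b100) gives
 * find_last_bit() == 2, hence socket_bits == 3. */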
@@ -1479,11 +1574,36 @@
}
}
+/*
+ * We'll get here via the following path:
+ *
+ * riscv_cpu_realize()
+ * -> cpu_exec_realizefn()
+ * -> kvm_cpu_realize() (via accel_cpu_common_realize())
+ */
+static bool kvm_cpu_realize(CPUState *cs, Error **errp)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ int ret;
+
+ if (riscv_has_ext(&cpu->env, RVV)) {
+ ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
+ if (ret) {
+ error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s",
+ strerrorname_np(errno));
+ return false;
+ }
+ }
+
+ return true;
+}
+
static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
{
AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
acc->cpu_instance_init = kvm_cpu_instance_init;
+ acc->cpu_target_realize = kvm_cpu_realize;
}
static const TypeInfo kvm_cpu_accel_type_info = {
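kvm_cpu_realize() turns on the host's vector context for the QEMU thread before the vCPU ever touches V state. The same control is reachable from any process; a stand-alone sketch (assuming a kernel new enough to define the PR_RISCV_V_* constants):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
    /* Ask the kernel to enable RISC-V vector state for this thread. */
    if (prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON)) {
        perror("PR_RISCV_V_SET_CONTROL");
        return 1;
    }
    printf("vector state enabled\n");
    return 0;
}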
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 162e88a..2a76b61 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -126,7 +126,7 @@
/* If !mseccfg.MML then ignore writes with encoding RW=01 */
if ((val & PMP_WRITE) && !(val & PMP_READ) &&
!MSECCFG_MML_ISSET(env)) {
- val &= ~(PMP_WRITE | PMP_READ);
+ return false;
}
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule_addr(env, pmp_index);
@@ -150,8 +150,7 @@
}
}
-static void pmp_decode_napot(target_ulong a, target_ulong *sa,
- target_ulong *ea)
+static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
{
/*
* aaaa...aaa0 8-byte NAPOT range
@@ -173,8 +172,8 @@
uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
target_ulong prev_addr = 0u;
- target_ulong sa = 0u;
- target_ulong ea = 0u;
+ hwaddr sa = 0u;
+ hwaddr ea = 0u;
if (pmp_index >= 1u) {
prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
@@ -227,8 +226,7 @@
}
}
-static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
- target_ulong addr)
+static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
int result = 0;
@@ -305,14 +303,14 @@
* Return true if a pmp rule match or default match
* Return false if no match
*/
-bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs, target_ulong mode)
{
int i = 0;
int pmp_size = 0;
- target_ulong s = 0;
- target_ulong e = 0;
+ hwaddr s = 0;
+ hwaddr e = 0;
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
@@ -624,12 +622,12 @@
* To avoid this we return a size of 1 (which means no caching) if the PMP
* region only covers part of the TLB page.
*/
-target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
+target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
- target_ulong pmp_sa;
- target_ulong pmp_ea;
- target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
- target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
+ hwaddr pmp_sa;
+ hwaddr pmp_ea;
+ hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
+ hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
int i;
/*
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index 9af8614..f5c10ce 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -53,8 +53,8 @@
} pmp_entry_t;
typedef struct {
- target_ulong sa;
- target_ulong ea;
+ hwaddr sa;
+ hwaddr ea;
} pmp_addr_t;
typedef struct {
@@ -73,11 +73,11 @@
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
-bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs,
target_ulong mode);
-target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr);
+target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
uint32_t pmp_get_num_rules(CPURISCVState *env);
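Retyping these interfaces from target_ulong to hwaddr matters on RV32, where Sv32 physical addresses are up to 34 bits wide and would be truncated by a 32-bit target_ulong. For reference, a sketch of the NAPOT decode being retyped (an illustration of the standard pmpaddr encoding, not code from the patch; assumes a != ~0):

/* Decode a NAPOT pmpaddr into [sa, ea]: the run of trailing 1-bits
 * selects the region size, the bits above it the base (addr >> 2). */
static void napot_decode(uint64_t a, uint64_t *sa, uint64_t *ea)
{
    int t1 = __builtin_ctzll(~a);                 /* trailing ones */
    uint64_t base = (a & ~((1ULL << t1) - 1)) << 2;
    uint64_t range = (1ULL << (t1 + 3)) - 1;      /* 2^(t1+3) bytes */

    *sa = base;
    *ea = base + range;
}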
diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c
index c5551d2..c48b9cf 100644
--- a/target/riscv/riscv-qmp-cmds.c
+++ b/target/riscv/riscv-qmp-cmds.c
@@ -26,6 +26,7 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qobject-input-visitor.h"
@@ -98,6 +99,35 @@
}
}
+static void riscv_obj_add_named_feats_qdict(Object *obj, QDict *qdict_out)
+{
+ const RISCVCPUMultiExtConfig *named_cfg;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ QObject *value;
+ bool flag_val;
+
+ for (int i = 0; riscv_cpu_named_features[i].name != NULL; i++) {
+ named_cfg = &riscv_cpu_named_features[i];
+ flag_val = isa_ext_is_enabled(cpu, named_cfg->offset);
+ value = QOBJECT(qbool_from_bool(flag_val));
+
+ qdict_put_obj(qdict_out, named_cfg->name, value);
+ }
+}
+
+static void riscv_obj_add_profiles_qdict(Object *obj, QDict *qdict_out)
+{
+ RISCVCPUProfile *profile;
+ QObject *value;
+
+ for (int i = 0; riscv_profiles[i] != NULL; i++) {
+ profile = riscv_profiles[i];
+ value = QOBJECT(qbool_from_bool(profile->enabled));
+
+ qdict_put_obj(qdict_out, profile->name, value);
+ }
+}
+
static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props,
const QDict *qdict_in,
Error **errp)
@@ -128,11 +158,6 @@
goto err;
}
- riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err);
- if (local_err) {
- goto err;
- }
-
visit_end_struct(visitor, NULL);
err:
@@ -190,6 +215,13 @@
}
}
+ riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ object_unref(obj);
+ return NULL;
+ }
+
expansion_info = g_new0(CpuModelExpansionInfo, 1);
expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
expansion_info->model->name = g_strdup(model->name);
@@ -199,6 +231,8 @@
riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_extensions);
riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_experimental_exts);
riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_vendor_exts);
+ riscv_obj_add_named_feats_qdict(obj, qdict_out);
+ riscv_obj_add_profiles_qdict(obj, qdict_out);
/* Add our CPU boolean options too */
riscv_obj_add_qdict_prop(obj, qdict_out, "mmu");
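With the two helpers wired into the expansion path, query-cpu-model-expansion now reports named features and profiles next to the regular extensions. An illustrative exchange (exact props depend on the model; values shown are placeholders):

-> { "execute": "query-cpu-model-expansion",
     "arguments": { "type": "full", "model": { "name": "rv64" } } }
<- { "return": { "model": { "name": "rv64",
                 "props": { "zic64b": true, "rva22u64": false, ... } } } }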
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index 8a35683..14133ff 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -34,6 +34,7 @@
/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
+static GHashTable *misa_ext_user_opts;
static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
@@ -41,6 +42,52 @@
GUINT_TO_POINTER(ext_offset));
}
+static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
+{
+ return g_hash_table_contains(misa_ext_user_opts,
+ GUINT_TO_POINTER(misa_bit));
+}
+
+static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
+{
+ g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
+ (gpointer)value);
+}
+
+static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
+{
+ g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
+ (gpointer)value);
+}
+
+static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
+ bool enabled)
+{
+ CPURISCVState *env = &cpu->env;
+
+ if (enabled) {
+ env->misa_ext |= bit;
+ env->misa_ext_mask |= bit;
+ } else {
+ env->misa_ext &= ~bit;
+ env->misa_ext_mask &= ~bit;
+ }
+}
+
+static const char *cpu_priv_ver_to_str(int priv_ver)
+{
+ switch (priv_ver) {
+ case PRIV_VERSION_1_10_0:
+ return "v1.10.0";
+ case PRIV_VERSION_1_11_0:
+ return "v1.11.0";
+ case PRIV_VERSION_1_12_0:
+ return "v1.12.0";
+ }
+
+ g_assert_not_reached();
+}
+
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -114,6 +161,79 @@
g_assert_not_reached();
}
+static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
+{
+ const RISCVCPUMultiExtConfig *feat;
+ const RISCVIsaExtData *edata;
+
+ for (edata = isa_edata_arr; edata->name != NULL; edata++) {
+ if (edata->ext_enable_offset == ext_offset) {
+ return edata->name;
+ }
+ }
+
+ for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
+ if (feat->offset == ext_offset) {
+ return feat->name;
+ }
+ }
+
+ g_assert_not_reached();
+}
+
+static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
+{
+ const RISCVCPUMultiExtConfig *feat;
+
+ for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
+ if (feat->offset == ext_offset) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
+{
+ switch (feat_offset) {
+ case CPU_CFG_OFFSET(zic64b):
+ cpu->cfg.cbom_blocksize = 64;
+ cpu->cfg.cbop_blocksize = 64;
+ cpu->cfg.cboz_blocksize = 64;
+ break;
+ case CPU_CFG_OFFSET(svade):
+ cpu->cfg.ext_svadu = false;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
+ uint32_t ext_offset)
+{
+ int ext_priv_ver;
+
+ if (env->priv_ver == PRIV_VERSION_LATEST) {
+ return;
+ }
+
+ if (cpu_cfg_offset_is_named_feat(ext_offset)) {
+ return;
+ }
+
+ ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);
+
+ if (env->priv_ver < ext_priv_ver) {
+ /*
+ * Note: the 'priv_spec' command line option, if present,
+ * will take precedence over this priv_ver bump.
+ */
+ env->priv_ver = ext_priv_ver;
+ }
+}
+
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
bool value)
{
@@ -273,6 +393,55 @@
}
}
+static void riscv_cpu_update_named_features(RISCVCPU *cpu)
+{
+ cpu->cfg.zic64b = cpu->cfg.cbom_blocksize == 64 &&
+ cpu->cfg.cbop_blocksize == 64 &&
+ cpu->cfg.cboz_blocksize == 64;
+
+ cpu->cfg.svade = !cpu->cfg.ext_svadu;
+}
+
+static void riscv_cpu_validate_g(RISCVCPU *cpu)
+{
+ const char *warn_msg = "RVG mandates disabled extension %s";
+ uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
+ bool send_warn = cpu_misa_ext_is_user_set(RVG);
+
+ for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
+ uint32_t bit = g_misa_bits[i];
+
+ if (riscv_has_ext(&cpu->env, bit)) {
+ continue;
+ }
+
+ if (!cpu_misa_ext_is_user_set(bit)) {
+ riscv_cpu_write_misa_bit(cpu, bit, true);
+ continue;
+ }
+
+ if (send_warn) {
+ warn_report(warn_msg, riscv_get_misa_ext_name(bit));
+ }
+ }
+
+ if (!cpu->cfg.ext_zicsr) {
+ if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
+ cpu->cfg.ext_zicsr = true;
+ } else if (send_warn) {
+ warn_report(warn_msg, "zicsr");
+ }
+ }
+
+ if (!cpu->cfg.ext_zifencei) {
+ if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
+ cpu->cfg.ext_zifencei = true;
+ } else if (send_warn) {
+ warn_report(warn_msg, "zifencei");
+ }
+ }
+}
+
/*
* Check consistency between chosen extensions while setting
* cpu->cfg accordingly.
@@ -282,31 +451,8 @@
CPURISCVState *env = &cpu->env;
Error *local_err = NULL;
- /* Do some ISA extension error checking */
- if (riscv_has_ext(env, RVG) &&
- !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
- riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
- riscv_has_ext(env, RVD) &&
- cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) {
-
- if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) &&
- !cpu->cfg.ext_zicsr) {
- error_setg(errp, "RVG requires Zicsr but user set Zicsr to false");
- return;
- }
-
- if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) &&
- !cpu->cfg.ext_zifencei) {
- error_setg(errp, "RVG requires Zifencei but user set "
- "Zifencei to false");
- return;
- }
-
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true);
- cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true);
-
- env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
- env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
+ if (riscv_has_ext(env, RVG)) {
+ riscv_cpu_validate_g(cpu);
}
if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
@@ -343,6 +489,11 @@
return;
}
+ if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
+ error_setg(errp, "Zacas extension requires A extension");
+ return;
+ }
+
if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
error_setg(errp, "Zawrs extension requires A extension");
return;
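Zacas provides the atomic compare-and-swap AMOs (amocas.w/d/q), which only make sense on top of the A extension, hence the new dependency check. Illustrative semantics, per the ratified Zacas spec:

/* amocas.w rd, rs2, (rs1):
 *   temp = mem[rs1]; if (temp == rd) mem[rs1] = rs2; rd = temp;
 * i.e. a word-sized compare-and-swap performed atomically. */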
@@ -620,6 +771,106 @@
riscv_cpu_disable_priv_spec_isa_exts(cpu);
}
+#ifndef CONFIG_USER_ONLY
+static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
+ RISCVCPUProfile *profile,
+ bool send_warn)
+{
+ int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
+
+ if (profile->satp_mode > satp_max) {
+ if (send_warn) {
+ bool is_32bit = riscv_cpu_is_32bit(cpu);
+ const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
+ const char *cur_satp = satp_mode_str(satp_max, is_32bit);
+
+ warn_report("Profile %s requires satp mode %s, "
+ "but satp mode %s was set", profile->name,
+ req_satp, cur_satp);
+ }
+
+ return false;
+ }
+
+ return true;
+}
+#endif
+
+static void riscv_cpu_validate_profile(RISCVCPU *cpu,
+ RISCVCPUProfile *profile)
+{
+ CPURISCVState *env = &cpu->env;
+ const char *warn_msg = "Profile %s mandates disabled extension %s";
+ bool send_warn = profile->user_set && profile->enabled;
+ bool parent_enabled, profile_impl = true;
+ int i;
+
+#ifndef CONFIG_USER_ONLY
+ if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
+ profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
+ send_warn);
+ }
+#endif
+
+ if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
+ profile->priv_spec != env->priv_ver) {
+ profile_impl = false;
+
+ if (send_warn) {
+ warn_report("Profile %s requires priv spec %s, "
+ "but priv ver %s was set", profile->name,
+ cpu_priv_ver_to_str(profile->priv_spec),
+ cpu_priv_ver_to_str(env->priv_ver));
+ }
+ }
+
+ for (i = 0; misa_bits[i] != 0; i++) {
+ uint32_t bit = misa_bits[i];
+
+ if (!(profile->misa_ext & bit)) {
+ continue;
+ }
+
+ if (!riscv_has_ext(&cpu->env, bit)) {
+ profile_impl = false;
+
+ if (send_warn) {
+ warn_report(warn_msg, profile->name,
+ riscv_get_misa_ext_name(bit));
+ }
+ }
+ }
+
+ for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
+ int ext_offset = profile->ext_offsets[i];
+
+ if (!isa_ext_is_enabled(cpu, ext_offset)) {
+ profile_impl = false;
+
+ if (send_warn) {
+ warn_report(warn_msg, profile->name,
+ cpu_cfg_ext_get_name(ext_offset));
+ }
+ }
+ }
+
+ profile->enabled = profile_impl;
+
+ if (profile->parent != NULL) {
+ parent_enabled = object_property_get_bool(OBJECT(cpu),
+ profile->parent->name,
+ NULL);
+ profile->enabled = profile->enabled && parent_enabled;
+ }
+}
+
+static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
+{
+ for (int i = 0; riscv_profiles[i] != NULL; i++) {
+ riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
+ }
+}
+
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
CPURISCVState *env = &cpu->env;
@@ -637,6 +888,9 @@
return;
}
+ riscv_cpu_update_named_features(cpu);
+ riscv_cpu_validate_profiles(cpu);
+
if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
/*
* Enhanced PMP should only be available
@@ -663,6 +917,11 @@
return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
+static bool riscv_cpu_is_vendor(Object *cpu_obj)
+{
+ return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
+}
+
/*
* We'll get here via the following path:
*
@@ -731,13 +990,15 @@
target_ulong misa_bit = misa_ext_cfg->misa_bit;
RISCVCPU *cpu = RISCV_CPU(obj);
CPURISCVState *env = &cpu->env;
- bool generic_cpu = riscv_cpu_is_generic(obj);
+ bool vendor_cpu = riscv_cpu_is_vendor(obj);
bool prev_val, value;
if (!visit_type_bool(v, name, &value, errp)) {
return;
}
+ cpu_misa_ext_add_user_opt(misa_bit, value);
+
prev_val = env->misa_ext & misa_bit;
if (value == prev_val) {
@@ -745,19 +1006,23 @@
}
if (value) {
- if (!generic_cpu) {
+ if (vendor_cpu) {
g_autofree char *cpuname = riscv_cpu_get_name(cpu);
error_setg(errp, "'%s' CPU does not allow enabling extensions",
cpuname);
return;
}
- env->misa_ext |= misa_bit;
- env->misa_ext_mask |= misa_bit;
- } else {
- env->misa_ext &= ~misa_bit;
- env->misa_ext_mask &= ~misa_bit;
+ if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
+ /*
+ * Note: the 'priv_spec' command line option, if present,
+ * will take precedence over this priv_ver bump.
+ */
+ env->priv_ver = PRIV_VERSION_1_12_0;
+ }
}
+
+ riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}
static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
@@ -821,7 +1086,116 @@
NULL, (void *)misa_cfg);
object_property_set_description(cpu_obj, name, desc);
if (use_def_vals) {
- object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL);
+ riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
+ misa_cfg->enabled);
+ }
+ }
+}
+
+static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ RISCVCPUProfile *profile = opaque;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ bool value;
+ int i, ext_offset;
+
+ if (riscv_cpu_is_vendor(obj)) {
+ error_setg(errp, "Profile %s is not available for vendor CPUs",
+ profile->name);
+ return;
+ }
+
+ if (cpu->env.misa_mxl != MXL_RV64) {
+ error_setg(errp, "Profile %s only available for 64 bit CPUs",
+ profile->name);
+ return;
+ }
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ profile->user_set = true;
+ profile->enabled = value;
+
+ if (profile->parent != NULL) {
+ object_property_set_bool(obj, profile->parent->name,
+ profile->enabled, NULL);
+ }
+
+ if (profile->enabled) {
+ cpu->env.priv_ver = profile->priv_spec;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
+ const char *satp_prop = satp_mode_str(profile->satp_mode,
+ riscv_cpu_is_32bit(cpu));
+ object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
+ }
+#endif
+
+ for (i = 0; misa_bits[i] != 0; i++) {
+ uint32_t bit = misa_bits[i];
+
+ if (!(profile->misa_ext & bit)) {
+ continue;
+ }
+
+ if (bit == RVI && !profile->enabled) {
+ /*
+ * Disabling profiles will not disable the base
+ * ISA RV64I.
+ */
+ continue;
+ }
+
+ cpu_misa_ext_add_user_opt(bit, profile->enabled);
+ riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
+ }
+
+ for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
+ ext_offset = profile->ext_offsets[i];
+
+ if (profile->enabled) {
+ if (cpu_cfg_offset_is_named_feat(ext_offset)) {
+ riscv_cpu_enable_named_feat(cpu, ext_offset);
+ }
+
+ cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
+ }
+
+ cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
+ isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
+ }
+}
+
+static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ RISCVCPUProfile *profile = opaque;
+ bool value = profile->enabled;
+
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void riscv_cpu_add_profiles(Object *cpu_obj)
+{
+ for (int i = 0; riscv_profiles[i] != NULL; i++) {
+ const RISCVCPUProfile *profile = riscv_profiles[i];
+
+ object_property_add(cpu_obj, profile->name, "bool",
+ cpu_get_profile, cpu_set_profile,
+ NULL, (void *)profile);
+
+ /*
+ * CPUs might enable a profile right from the start.
+ * Enable its mandatory extensions right away in this
+ * case.
+ */
+ if (profile->enabled) {
+ object_property_set_bool(cpu_obj, profile->name, true, NULL);
}
}
}
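Once registered, a profile behaves like any other boolean QOM property. A hypothetical caller could flip it programmatically, which is what -cpu rv64,rva22u64=true does from the command line (assuming an rva22u64 entry in riscv_profiles[]):

/* Hypothetical: enable a profile on a cpu object via QOM. */
object_property_set_bool(OBJECT(cpu), "rva22u64", true, &error_fatal);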
@@ -850,7 +1224,7 @@
{
const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
RISCVCPU *cpu = RISCV_CPU(obj);
- bool generic_cpu = riscv_cpu_is_generic(obj);
+ bool vendor_cpu = riscv_cpu_is_vendor(obj);
bool prev_val, value;
if (!visit_type_bool(v, name, &value, errp)) {
@@ -864,9 +1238,7 @@
multi_ext_cfg->name, lower);
}
- g_hash_table_insert(multi_ext_user_opts,
- GUINT_TO_POINTER(multi_ext_cfg->offset),
- (gpointer)value);
+ cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);
prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);
@@ -874,13 +1246,17 @@
return;
}
- if (value && !generic_cpu) {
+ if (value && vendor_cpu) {
g_autofree char *cpuname = riscv_cpu_get_name(cpu);
error_setg(errp, "'%s' CPU does not allow enabling extensions",
cpuname);
return;
}
+ if (value) {
+ cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
+ }
+
isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}
@@ -949,6 +1325,8 @@
riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);
+ riscv_cpu_add_profiles(obj);
+
for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) {
qdev_property_add_static(DEVICE(obj), prop);
}
@@ -999,6 +1377,7 @@
RISCVCPU *cpu = RISCV_CPU(cs);
Object *obj = OBJECT(cpu);
+ misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
riscv_cpu_add_user_properties(obj);
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index f0be79b..071fbad 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -1089,6 +1089,7 @@
#include "insn_trans/trans_rvv.c.inc"
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_rvzicond.c.inc"
+#include "insn_trans/trans_rvzacas.c.inc"
#include "insn_trans/trans_rvzawrs.c.inc"
#include "insn_trans/trans_rvzicbo.c.inc"
#include "insn_trans/trans_rvzfa.c.inc"
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index a83f8aa..d268199 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -244,6 +244,7 @@
#define P_VEXL 0x80000 /* Set VEX.L = 1 */
#define P_EVEX 0x100000 /* Requires EVEX encoding */
+#define OPC_ARITH_EbIb (0x80)
#define OPC_ARITH_EvIz (0x81)
#define OPC_ARITH_EvIb (0x83)
#define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
@@ -1316,23 +1317,41 @@
c &= 7;
}
- /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
- partial flags update stalls on Pentium4 and are not recommended
- by current Intel optimization manuals. */
- if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
- int is_inc = (c == ARITH_ADD) ^ (val < 0);
- if (TCG_TARGET_REG_BITS == 64) {
- /* The single-byte increment encodings are re-tasked as the
- REX prefixes. Use the MODRM encoding. */
- tcg_out_modrm(s, OPC_GRP5 + rexw,
- (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
- } else {
- tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
+ switch (c) {
+ case ARITH_ADD:
+ case ARITH_SUB:
+ if (!cf) {
+ /*
+ * ??? While INC is 2 bytes shorter than ADDL $1, they also induce
+ * partial flags update stalls on Pentium4 and are not recommended
+ * by current Intel optimization manuals.
+ */
+ if (val == 1 || val == -1) {
+ int is_inc = (c == ARITH_ADD) ^ (val < 0);
+ if (TCG_TARGET_REG_BITS == 64) {
+ /*
+ * The single-byte increment encodings are re-tasked
+ * as the REX prefixes. Use the MODRM encoding.
+ */
+ tcg_out_modrm(s, OPC_GRP5 + rexw,
+ (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
+ } else {
+ tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
+ }
+ return;
+ }
+ if (val == 128) {
+ /*
+ * Facilitate using an 8-bit immediate. Carry is inverted
+ * by this transformation, so do it only if cf == 0.
+ */
+ c ^= ARITH_ADD ^ ARITH_SUB;
+ val = -128;
+ }
}
- return;
- }
+ break;
- if (c == ARITH_AND) {
+ case ARITH_AND:
if (TCG_TARGET_REG_BITS == 64) {
if (val == 0xffffffffu) {
tcg_out_ext32u(s, r0, r0);
@@ -1351,6 +1370,17 @@
tcg_out_ext16u(s, r0, r0);
return;
}
+ break;
+
+ case ARITH_OR:
+ case ARITH_XOR:
+ if (val >= 0x80 && val <= 0xff
+ && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
+ tcg_out_modrm(s, OPC_ARITH_EbIb + P_REXB_RM, c, r0);
+ tcg_out8(s, val);
+ return;
+ }
+ break;
}
if (val == (int8_t)val) {
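The byte-sized ALU form pays off exactly for constants 0x80..0xff: they are not representable as a sign-extended imm8, so without it the code falls through to the 4-byte-immediate 0x81 encoding. A hand-assembled illustration (not from the patch):

/* For val = 0x80 on %bl:
 *   xor $0x80, %bl    -> 80 f3 80            (3 bytes)
 *   xor $0x80, %ebx   -> 81 f3 80 00 00 00   (6 bytes)
 * In 32-bit mode only %al/%cl/%dl/%bl have low-byte encodings
 * (rm values 4..7 select %ah..%bh), hence the r0 < 4 guard;
 * x86-64's REX prefix lifts that restriction. */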
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
index bbd7b21..cb47b29 100644
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -35,7 +35,7 @@
C_O1_I4(r, r, ri, rZ, rZ)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, r)
-C_O2_I1(o, m, r)
+C_N1O1_I1(o, m, r)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, rI, rZM, r, r)
C_O2_I4(r, r, r, r, rI, rZM)
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 856c3b1..5481696 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -2595,6 +2595,7 @@
tcg_debug_assert(!need_bswap);
tcg_debug_assert(datalo & 1);
tcg_debug_assert(datahi == datalo - 1);
+ tcg_debug_assert(!is_ld || datahi != index);
insn = is_ld ? LQ : STQ;
tcg_out32(s, insn | TAI(datahi, index, 0));
} else {
@@ -4071,7 +4072,7 @@
case INDEX_op_qemu_ld_a32_i128:
case INDEX_op_qemu_ld_a64_i128:
- return C_O2_I1(o, m, r);
+ return C_N1O1_I1(o, m, r);
case INDEX_op_qemu_st_a32_i128:
case INDEX_op_qemu_st_a64_i128:
return C_O0_I3(o, m, r);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 896a36c..e2c38f6 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -653,6 +653,7 @@
#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),
#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2),
+#define C_N1O1_I1(O1, O2, I1) C_PFX3(c_n1o1_i1_, O1, O2, I1),
#define C_N2_I1(O1, O2, I1) C_PFX3(c_n2_i1_, O1, O2, I1),
#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1),
@@ -676,6 +677,7 @@
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
+#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
@@ -696,6 +698,7 @@
#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },
+#define C_N1O1_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, #O2, #I1 } },
#define C_N2_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },
#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
@@ -718,6 +721,7 @@
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
+#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
@@ -738,6 +742,7 @@
#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)
#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2)
+#define C_N1O1_I1(O1, O2, I1) C_PFX3(c_n1o1_i1_, O1, O2, I1)
#define C_N2_I1(O1, O2, I1) C_PFX3(c_n2_i1_, O1, O2, I1)
#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1)
@@ -2988,6 +2993,7 @@
.pair = 2,
.pair_index = o,
.regs = def->args_ct[o].regs << 1,
+ .newreg = def->args_ct[o].newreg,
};
def->args_ct[o].pair = 1;
def->args_ct[o].pair_index = i;
@@ -3004,6 +3010,7 @@
.pair = 1,
.pair_index = o,
.regs = def->args_ct[o].regs >> 1,
+ .newreg = def->args_ct[o].newreg,
};
def->args_ct[o].pair = 2;
def->args_ct[o].pair_index = i;
@@ -5036,17 +5043,21 @@
break;
case 1: /* first of pair */
- tcg_debug_assert(!arg_ct->newreg);
if (arg_ct->oalias) {
reg = new_args[arg_ct->alias_index];
- break;
+ } else if (arg_ct->newreg) {
+ reg = tcg_reg_alloc_pair(s, arg_ct->regs,
+ i_allocated_regs | o_allocated_regs,
+ output_pref(op, k),
+ ts->indirect_base);
+ } else {
+ reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
+ output_pref(op, k),
+ ts->indirect_base);
}
- reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
- output_pref(op, k), ts->indirect_base);
break;
case 2: /* second of pair */
- tcg_debug_assert(!arg_ct->newreg);
if (arg_ct->oalias) {
reg = new_args[arg_ct->alias_index];
} else {
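Reading the macro definitions above: C_N1O1_I1(O1, O2, I1) expands to { "&" O1, O2, I1 }, so only the first output demands a fresh register, unlike C_N2_I1 where both outputs do. A summarizing comment:

/* C_N1O1_I1: output 0 must not overlap any input (newreg), while
 * output 1 and input 0 keep the ordinary rules. This is what lets the
 * ppc lq destination avoid clobbering the address register, matching
 * the tcg_debug_assert added in the ppc hunk above. */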
diff --git a/util/cpuinfo-ppc.c b/util/cpuinfo-ppc.c
index 1ea3db0..b2d8893 100644
--- a/util/cpuinfo-ppc.c
+++ b/util/cpuinfo-ppc.c
@@ -6,10 +6,10 @@
#include "qemu/osdep.h"
#include "host/cpuinfo.h"
+#include <asm/cputable.h>
#ifdef CONFIG_GETAUXVAL
# include <sys/auxv.h>
#else
-# include <asm/cputable.h>
# include "elf.h"
#endif
@@ -40,7 +40,7 @@
info |= CPUINFO_V2_06;
}
- if (hwcap2 & PPC_FEATURE2_HAS_ISEL) {
+ if (hwcap2 & PPC_FEATURE2_ISEL) {
info |= CPUINFO_ISEL;
}
if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
@@ -53,7 +53,7 @@
* always have both anyway, since VSX came with Power7
* and crypto came with Power8.
*/
- if (hwcap2 & PPC_FEATURE2_HAS_VEC_CRYPTO) {
+ if (hwcap2 & PPC_FEATURE2_VEC_CRYPTO) {
info |= CPUINFO_CRYPTO;
}
}
diff --git a/util/fifo8.c b/util/fifo8.c
index de8fd0f..4e01b53 100644
--- a/util/fifo8.c
+++ b/util/fifo8.c
@@ -66,19 +66,37 @@
return ret;
}
-const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num)
+static const uint8_t *fifo8_peekpop_buf(Fifo8 *fifo, uint32_t max,
+ uint32_t *numptr, bool do_pop)
{
uint8_t *ret;
+ uint32_t num;
assert(max > 0 && max <= fifo->num);
- *num = MIN(fifo->capacity - fifo->head, max);
+ num = MIN(fifo->capacity - fifo->head, max);
ret = &fifo->data[fifo->head];
- fifo->head += *num;
- fifo->head %= fifo->capacity;
- fifo->num -= *num;
+
+ if (do_pop) {
+ fifo->head += num;
+ fifo->head %= fifo->capacity;
+ fifo->num -= num;
+ }
+ if (numptr) {
+ *numptr = num;
+ }
return ret;
}
+const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr)
+{
+ return fifo8_peekpop_buf(fifo, max, numptr, false);
+}
+
+const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr)
+{
+ return fifo8_peekpop_buf(fifo, max, numptr, true);
+}
+
void fifo8_reset(Fifo8 *fifo)
{
fifo->num = 0;
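The refactor gives peek the same bounded, wrap-aware contract as pop, minus the head-pointer update. A usage sketch with hypothetical device_can_accept()/device_write() stand-ins:

uint32_t n;
const uint8_t *buf = fifo8_peek_buf(fifo, 16, &n);  /* head unchanged */

if (device_can_accept(n)) {            /* hypothetical capacity check */
    buf = fifo8_pop_buf(fifo, n, &n);  /* commits: advances head */
    device_write(buf, n);              /* hypothetical sink */
}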