Merge remote-tracking branch 'remotes/armbru/tags/pull-misc-2021-07-27' into staging
Miscellaneous patches for 2021-07-27
# gpg: Signature made Tue 27 Jul 2021 16:19:35 BST
# gpg: using RSA key 354BC8B3D7EB2A6B68674E5F3870B400EB918653
# gpg: issuer "armbru@redhat.com"
# gpg: Good signature from "Markus Armbruster <armbru@redhat.com>" [full]
# gpg: aka "Markus Armbruster <armbru@pond.sub.org>" [full]
# Primary key fingerprint: 354B C8B3 D7EB 2A6B 6867 4E5F 3870 B400 EB91 8653
* remotes/armbru/tags/pull-misc-2021-07-27:
vl: Don't continue after -smp help.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 445f7fe..42ac45c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3439,7 +3439,7 @@
Incompatible changes
R: libvir-list@redhat.com
-F: docs/system/deprecated.rst
+F: docs/about/deprecated.rst
Build System
------------
diff --git a/VERSION b/VERSION
index 7d5e1d8..f51337e 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-6.0.90
+6.0.91
diff --git a/configure b/configure
index 69cef68..79e2ddc 100755
--- a/configure
+++ b/configure
@@ -5230,7 +5230,7 @@
if test -n "${deprecated_features}"; then
echo "Warning, deprecated features enabled."
- echo "Please see docs/system/deprecated.rst"
+ echo "Please see docs/about/deprecated.rst"
echo " features: ${deprecated_features}"
fi
diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
index c455442..11dce5c 100644
--- a/docs/system/arm/cpu-features.rst
+++ b/docs/system/arm/cpu-features.rst
@@ -376,3 +376,18 @@
lengths is to explicitly enable each desired length. Therefore only
example's (1), (4), and (6) exhibit recommended uses of the properties.
+SVE User-mode Default Vector Length Property
+--------------------------------------------
+
+For qemu-aarch64, the cpu property ``sve-default-vector-length=N`` is
+defined to mirror the Linux kernel parameter file
+``/proc/sys/abi/sve_default_vector_length``. The default length, ``N``,
+is in units of bytes and must be between 16 and 8192.
+If not specified, the default vector length is 64.
+
+If the default length is larger than the maximum vector length enabled,
+the actual vector length will be reduced. Note that the maximum vector
+length supported by QEMU is 256.
+
+If this property is set to ``-1`` then the default vector length
+is set to the maximum possible length.
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index 906c915..af3164c 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -692,7 +692,7 @@
default:
bad_cmd:
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: unknown command %02x\n", __func__, s->cmd);
+ "%s: unknown command 0x%02x\n", __func__, s->cmd);
break;
}
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index 3dac576..d1885ae 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -570,7 +570,7 @@
/* CD fields */
-#define CD_VALID(x) extract32((x)->word[0], 30, 1)
+#define CD_VALID(x) extract32((x)->word[0], 31, 1)
#define CD_ASID(x) extract32((x)->word[1], 16, 16)
#define CD_TTB(x, sel) \
({ \
diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c
index 6ae0116..b3dec44 100644
--- a/hw/gpio/aspeed_gpio.c
+++ b/hw/gpio/aspeed_gpio.c
@@ -207,7 +207,6 @@
#define GPIO_1_8V_MEM_SIZE 0x9D8
#define GPIO_1_8V_REG_ARRAY_SIZE ((GPIO_1_8V_MEM_SIZE - \
GPIO_1_8V_REG_OFFSET) >> 2)
-#define GPIO_MAX_MEM_SIZE MAX(GPIO_3_6V_MEM_SIZE, GPIO_1_8V_MEM_SIZE)
static int aspeed_evaluate_irq(GPIOSets *regs, int gpio_prev_high, int gpio)
{
@@ -849,7 +848,7 @@
}
memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_gpio_ops, s,
- TYPE_ASPEED_GPIO, GPIO_MAX_MEM_SIZE);
+ TYPE_ASPEED_GPIO, 0x800);
sysbus_init_mmio(sbd, &s->iomem);
}
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 94fe002..1e7ddcb 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -127,15 +127,14 @@
{
int irq;
- /* We can shortcut if the highest priority pending interrupt
- * happens to be external or if there is nothing pending.
+ /*
+ * We can shortcut if the highest priority pending interrupt
+ * happens to be external; if not we need to check the whole
+ * vectors[] array.
*/
if (s->vectpending > NVIC_FIRST_IRQ) {
return true;
}
- if (s->vectpending == 0) {
- return false;
- }
for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
if (s->vectors[irq].pending) {
@@ -805,6 +804,16 @@
nvic_irq_update(s);
}
+static bool vectpending_targets_secure(NVICState *s)
+{
+ /* Return true if s->vectpending targets Secure state */
+ if (s->vectpending_is_s_banked) {
+ return true;
+ }
+ return !exc_is_banked(s->vectpending) &&
+ exc_targets_secure(s, s->vectpending);
+}
+
void armv7m_nvic_get_pending_irq_info(void *opaque,
int *pirq, bool *ptargets_secure)
{
@@ -814,12 +823,7 @@
assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
- if (s->vectpending_is_s_banked) {
- targets_secure = true;
- } else {
- targets_secure = !exc_is_banked(pending) &&
- exc_targets_secure(s, pending);
- }
+ targets_secure = vectpending_targets_secure(s);
trace_nvic_get_pending_irq_info(pending, targets_secure);
@@ -1040,7 +1044,19 @@
/* VECTACTIVE */
val = cpu->env.v7m.exception;
/* VECTPENDING */
- val |= (s->vectpending & 0xff) << 12;
+ if (s->vectpending) {
+ /*
+ * From v8.1M VECTPENDING must read as 1 if accessed as
+ * NonSecure and the highest priority pending and enabled
+ * exception targets Secure.
+ */
+ int vp = s->vectpending;
+ if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) &&
+ vectpending_targets_secure(s)) {
+ vp = 1;
+ }
+ val |= (vp & 0x1ff) << 12;
+ }
/* ISRPENDING - set if any external IRQ is pending */
if (nvic_isrpending(s)) {
val |= (1 << 22);
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 629b0d3..43dfaea 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -439,10 +439,12 @@
static void nvme_irq_check(NvmeCtrl *n)
{
+ uint32_t intms = ldl_le_p(&n->bar.intms);
+
if (msix_enabled(&(n->parent_obj))) {
return;
}
- if (~n->bar.intms & n->irq_status) {
+ if (~intms & n->irq_status) {
pci_irq_assert(&n->parent_obj);
} else {
pci_irq_deassert(&n->parent_obj);
@@ -623,6 +625,10 @@
return NVME_INVALID_USE_OF_CMB | NVME_DNR;
}
+ if (sg->iov.niov + 1 > IOV_MAX) {
+ goto max_mappings_exceeded;
+ }
+
if (cmb) {
return nvme_map_addr_cmb(n, &sg->iov, addr, len);
} else {
@@ -634,9 +640,18 @@
return NVME_INVALID_USE_OF_CMB | NVME_DNR;
}
+ if (sg->qsg.nsg + 1 > IOV_MAX) {
+ goto max_mappings_exceeded;
+ }
+
qemu_sglist_add(&sg->qsg, addr, len);
return NVME_SUCCESS;
+
+max_mappings_exceeded:
+ NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings,
+ "number of mappings exceed 1024");
+ return NVME_INTERNAL_DEV_ERROR | NVME_DNR;
}
static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
@@ -1276,7 +1291,7 @@
if (ret) {
trace_pci_nvme_err_addr_write(addr);
trace_pci_nvme_err_cfs();
- n->bar.csts = NVME_CSTS_FAILED;
+ stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
break;
}
QTAILQ_REMOVE(&cq->req_list, req, entry);
@@ -4009,7 +4024,7 @@
trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
return NVME_INVALID_QID | NVME_DNR;
}
- if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) {
trace_pci_nvme_err_invalid_create_sq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
@@ -4195,7 +4210,7 @@
return NVME_INVALID_FIELD | NVME_DNR;
}
- switch (NVME_CC_CSS(n->bar.cc)) {
+ switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) {
case NVME_CC_CSS_NVM:
src_iocs = nvme_cse_iocs_nvm;
/* fall through */
@@ -4357,7 +4372,7 @@
trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
return NVME_INVALID_QID | NVME_DNR;
}
- if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) {
trace_pci_nvme_err_invalid_create_cq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
@@ -5150,17 +5165,19 @@
static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
{
+ uint32_t cc = ldl_le_p(&n->bar.cc);
+
ns->iocs = nvme_cse_iocs_none;
switch (ns->csi) {
case NVME_CSI_NVM:
- if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
+ if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) {
ns->iocs = nvme_cse_iocs_nvm;
}
break;
case NVME_CSI_ZONED:
- if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) {
+ if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) {
ns->iocs = nvme_cse_iocs_zoned;
- } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) {
+ } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) {
ns->iocs = nvme_cse_iocs_nvm;
}
break;
@@ -5497,7 +5514,7 @@
if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
trace_pci_nvme_err_addr_read(addr);
trace_pci_nvme_err_cfs();
- n->bar.csts = NVME_CSTS_FAILED;
+ stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
break;
}
nvme_inc_sq_head(sq);
@@ -5552,8 +5569,6 @@
n->aer_queued = 0;
n->outstanding_aers = 0;
n->qs_created = false;
-
- n->bar.cc = 0;
}
static void nvme_ctrl_shutdown(NvmeCtrl *n)
@@ -5592,7 +5607,12 @@
static int nvme_start_ctrl(NvmeCtrl *n)
{
- uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
+ uint64_t cap = ldq_le_p(&n->bar.cap);
+ uint32_t cc = ldl_le_p(&n->bar.cc);
+ uint32_t aqa = ldl_le_p(&n->bar.aqa);
+ uint64_t asq = ldq_le_p(&n->bar.asq);
+ uint64_t acq = ldq_le_p(&n->bar.acq);
+ uint32_t page_bits = NVME_CC_MPS(cc) + 12;
uint32_t page_size = 1 << page_bits;
if (unlikely(n->cq[0])) {
@@ -5603,73 +5623,72 @@
trace_pci_nvme_err_startfail_sq();
return -1;
}
- if (unlikely(!n->bar.asq)) {
+ if (unlikely(!asq)) {
trace_pci_nvme_err_startfail_nbarasq();
return -1;
}
- if (unlikely(!n->bar.acq)) {
+ if (unlikely(!acq)) {
trace_pci_nvme_err_startfail_nbaracq();
return -1;
}
- if (unlikely(n->bar.asq & (page_size - 1))) {
- trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
+ if (unlikely(asq & (page_size - 1))) {
+ trace_pci_nvme_err_startfail_asq_misaligned(asq);
return -1;
}
- if (unlikely(n->bar.acq & (page_size - 1))) {
- trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
+ if (unlikely(acq & (page_size - 1))) {
+ trace_pci_nvme_err_startfail_acq_misaligned(acq);
return -1;
}
- if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) {
- trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc));
+ if (unlikely(!(NVME_CAP_CSS(cap) & (1 << NVME_CC_CSS(cc))))) {
+ trace_pci_nvme_err_startfail_css(NVME_CC_CSS(cc));
return -1;
}
- if (unlikely(NVME_CC_MPS(n->bar.cc) <
- NVME_CAP_MPSMIN(n->bar.cap))) {
+ if (unlikely(NVME_CC_MPS(cc) < NVME_CAP_MPSMIN(cap))) {
trace_pci_nvme_err_startfail_page_too_small(
- NVME_CC_MPS(n->bar.cc),
- NVME_CAP_MPSMIN(n->bar.cap));
+ NVME_CC_MPS(cc),
+ NVME_CAP_MPSMIN(cap));
return -1;
}
- if (unlikely(NVME_CC_MPS(n->bar.cc) >
- NVME_CAP_MPSMAX(n->bar.cap))) {
+ if (unlikely(NVME_CC_MPS(cc) >
+ NVME_CAP_MPSMAX(cap))) {
trace_pci_nvme_err_startfail_page_too_large(
- NVME_CC_MPS(n->bar.cc),
- NVME_CAP_MPSMAX(n->bar.cap));
+ NVME_CC_MPS(cc),
+ NVME_CAP_MPSMAX(cap));
return -1;
}
- if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
+ if (unlikely(NVME_CC_IOCQES(cc) <
NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
trace_pci_nvme_err_startfail_cqent_too_small(
- NVME_CC_IOCQES(n->bar.cc),
- NVME_CTRL_CQES_MIN(n->bar.cap));
+ NVME_CC_IOCQES(cc),
+ NVME_CTRL_CQES_MIN(cap));
return -1;
}
- if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
+ if (unlikely(NVME_CC_IOCQES(cc) >
NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
trace_pci_nvme_err_startfail_cqent_too_large(
- NVME_CC_IOCQES(n->bar.cc),
- NVME_CTRL_CQES_MAX(n->bar.cap));
+ NVME_CC_IOCQES(cc),
+ NVME_CTRL_CQES_MAX(cap));
return -1;
}
- if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
+ if (unlikely(NVME_CC_IOSQES(cc) <
NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
trace_pci_nvme_err_startfail_sqent_too_small(
- NVME_CC_IOSQES(n->bar.cc),
- NVME_CTRL_SQES_MIN(n->bar.cap));
+ NVME_CC_IOSQES(cc),
+ NVME_CTRL_SQES_MIN(cap));
return -1;
}
- if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
+ if (unlikely(NVME_CC_IOSQES(cc) >
NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
trace_pci_nvme_err_startfail_sqent_too_large(
- NVME_CC_IOSQES(n->bar.cc),
- NVME_CTRL_SQES_MAX(n->bar.cap));
+ NVME_CC_IOSQES(cc),
+ NVME_CTRL_SQES_MAX(cap));
return -1;
}
- if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
+ if (unlikely(!NVME_AQA_ASQS(aqa))) {
trace_pci_nvme_err_startfail_asqent_sz_zero();
return -1;
}
- if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
+ if (unlikely(!NVME_AQA_ACQS(aqa))) {
trace_pci_nvme_err_startfail_acqent_sz_zero();
return -1;
}
@@ -5677,12 +5696,10 @@
n->page_bits = page_bits;
n->page_size = page_size;
n->max_prp_ents = n->page_size / sizeof(uint64_t);
- n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
- n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
- nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
- NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
- nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
- NVME_AQA_ASQS(n->bar.aqa) + 1);
+ n->cqe_size = 1 << NVME_CC_IOCQES(cc);
+ n->sqe_size = 1 << NVME_CC_IOSQES(cc);
+ nvme_init_cq(&n->admin_cq, n, acq, 0, 0, NVME_AQA_ACQS(aqa) + 1, 1);
+ nvme_init_sq(&n->admin_sq, n, asq, 0, 0, NVME_AQA_ASQS(aqa) + 1);
nvme_set_timestamp(n, 0ULL);
@@ -5695,22 +5712,33 @@
static void nvme_cmb_enable_regs(NvmeCtrl *n)
{
- NVME_CMBLOC_SET_CDPCILS(n->bar.cmbloc, 1);
- NVME_CMBLOC_SET_CDPMLS(n->bar.cmbloc, 1);
- NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
+ uint32_t cmbloc = ldl_le_p(&n->bar.cmbloc);
+ uint32_t cmbsz = ldl_le_p(&n->bar.cmbsz);
- NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
- NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
- NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
+ NVME_CMBLOC_SET_CDPCILS(cmbloc, 1);
+ NVME_CMBLOC_SET_CDPMLS(cmbloc, 1);
+ NVME_CMBLOC_SET_BIR(cmbloc, NVME_CMB_BIR);
+ stl_le_p(&n->bar.cmbloc, cmbloc);
+
+ NVME_CMBSZ_SET_SQS(cmbsz, 1);
+ NVME_CMBSZ_SET_CQS(cmbsz, 0);
+ NVME_CMBSZ_SET_LISTS(cmbsz, 1);
+ NVME_CMBSZ_SET_RDS(cmbsz, 1);
+ NVME_CMBSZ_SET_WDS(cmbsz, 1);
+ NVME_CMBSZ_SET_SZU(cmbsz, 2); /* MBs */
+ NVME_CMBSZ_SET_SZ(cmbsz, n->params.cmb_size_mb);
+ stl_le_p(&n->bar.cmbsz, cmbsz);
}
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
unsigned size)
{
+ uint64_t cap = ldq_le_p(&n->bar.cap);
+ uint32_t cc = ldl_le_p(&n->bar.cc);
+ uint32_t intms = ldl_le_p(&n->bar.intms);
+ uint32_t csts = ldl_le_p(&n->bar.csts);
+ uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts);
+
if (unlikely(offset & (sizeof(uint32_t) - 1))) {
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
"MMIO write not 32-bit aligned,"
@@ -5727,65 +5755,77 @@
}
switch (offset) {
- case 0xc: /* INTMS */
+ case NVME_REG_INTMS:
if (unlikely(msix_enabled(&(n->parent_obj)))) {
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
"undefined access to interrupt mask set"
" when MSI-X is enabled");
/* should be ignored, fall through for now */
}
- n->bar.intms |= data & 0xffffffff;
+ intms |= data;
+ stl_le_p(&n->bar.intms, intms);
n->bar.intmc = n->bar.intms;
- trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
+ trace_pci_nvme_mmio_intm_set(data & 0xffffffff, intms);
nvme_irq_check(n);
break;
- case 0x10: /* INTMC */
+ case NVME_REG_INTMC:
if (unlikely(msix_enabled(&(n->parent_obj)))) {
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
"undefined access to interrupt mask clr"
" when MSI-X is enabled");
/* should be ignored, fall through for now */
}
- n->bar.intms &= ~(data & 0xffffffff);
+ intms &= ~data;
+ stl_le_p(&n->bar.intms, intms);
n->bar.intmc = n->bar.intms;
- trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
+ trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, intms);
nvme_irq_check(n);
break;
- case 0x14: /* CC */
+ case NVME_REG_CC:
trace_pci_nvme_mmio_cfg(data & 0xffffffff);
+
/* Windows first sends data, then sends enable bit */
- if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
- !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
+ if (!NVME_CC_EN(data) && !NVME_CC_EN(cc) &&
+ !NVME_CC_SHN(data) && !NVME_CC_SHN(cc))
{
- n->bar.cc = data;
+ cc = data;
}
- if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
- n->bar.cc = data;
+ if (NVME_CC_EN(data) && !NVME_CC_EN(cc)) {
+ cc = data;
+
+ /* flush CC since nvme_start_ctrl() needs the value */
+ stl_le_p(&n->bar.cc, cc);
if (unlikely(nvme_start_ctrl(n))) {
trace_pci_nvme_err_startfail();
- n->bar.csts = NVME_CSTS_FAILED;
+ csts = NVME_CSTS_FAILED;
} else {
trace_pci_nvme_mmio_start_success();
- n->bar.csts = NVME_CSTS_READY;
+ csts = NVME_CSTS_READY;
}
- } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
+ } else if (!NVME_CC_EN(data) && NVME_CC_EN(cc)) {
trace_pci_nvme_mmio_stopped();
nvme_ctrl_reset(n);
- n->bar.csts &= ~NVME_CSTS_READY;
+ cc = 0;
+ csts &= ~NVME_CSTS_READY;
}
- if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
+
+ if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) {
trace_pci_nvme_mmio_shutdown_set();
nvme_ctrl_shutdown(n);
- n->bar.cc = data;
- n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
- } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
+ cc = data;
+ csts |= NVME_CSTS_SHST_COMPLETE;
+ } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) {
trace_pci_nvme_mmio_shutdown_cleared();
- n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
- n->bar.cc = data;
+ csts &= ~NVME_CSTS_SHST_COMPLETE;
+ cc = data;
}
+
+ stl_le_p(&n->bar.cc, cc);
+ stl_le_p(&n->bar.csts, csts);
+
break;
- case 0x1c: /* CSTS */
+ case NVME_REG_CSTS:
if (data & (1 << 4)) {
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
"attempted to W1C CSTS.NSSRO"
@@ -5796,7 +5836,7 @@
" of controller status");
}
break;
- case 0x20: /* NSSR */
+ case NVME_REG_NSSR:
if (data == 0x4e564d65) {
trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
} else {
@@ -5804,53 +5844,53 @@
return;
}
break;
- case 0x24: /* AQA */
- n->bar.aqa = data & 0xffffffff;
+ case NVME_REG_AQA:
+ stl_le_p(&n->bar.aqa, data);
trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
break;
- case 0x28: /* ASQ */
- n->bar.asq = size == 8 ? data :
- (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff);
+ case NVME_REG_ASQ:
+ stn_le_p(&n->bar.asq, size, data);
trace_pci_nvme_mmio_asqaddr(data);
break;
- case 0x2c: /* ASQ hi */
- n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32);
- trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
+ case NVME_REG_ASQ + 4:
+ stl_le_p((uint8_t *)&n->bar.asq + 4, data);
+ trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq));
break;
- case 0x30: /* ACQ */
+ case NVME_REG_ACQ:
trace_pci_nvme_mmio_acqaddr(data);
- n->bar.acq = size == 8 ? data :
- (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff);
+ stn_le_p(&n->bar.acq, size, data);
break;
- case 0x34: /* ACQ hi */
- n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32);
- trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
+ case NVME_REG_ACQ + 4:
+ stl_le_p((uint8_t *)&n->bar.acq + 4, data);
+ trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq));
break;
- case 0x38: /* CMBLOC */
+ case NVME_REG_CMBLOC:
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
"invalid write to reserved CMBLOC"
" when CMBSZ is zero, ignored");
return;
- case 0x3C: /* CMBSZ */
+ case NVME_REG_CMBSZ:
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
"invalid write to read only CMBSZ, ignored");
return;
- case 0x50: /* CMBMSC */
- if (!NVME_CAP_CMBS(n->bar.cap)) {
+ case NVME_REG_CMBMSC:
+ if (!NVME_CAP_CMBS(cap)) {
return;
}
- n->bar.cmbmsc = size == 8 ? data :
- (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff);
+ stn_le_p(&n->bar.cmbmsc, size, data);
n->cmb.cmse = false;
if (NVME_CMBMSC_CRE(data)) {
nvme_cmb_enable_regs(n);
if (NVME_CMBMSC_CMSE(data)) {
- hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT;
+ uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc);
+ hwaddr cba = NVME_CMBMSC_CBA(cmbmsc) << CMBMSC_CBA_SHIFT;
if (cba + int128_get64(n->cmb.mem.size) < cba) {
- NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1);
+ uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts);
+ NVME_CMBSTS_SET_CBAI(cmbsts, 1);
+ stl_le_p(&n->bar.cmbsts, cmbsts);
return;
}
@@ -5863,53 +5903,57 @@
}
return;
- case 0x54: /* CMBMSC hi */
- n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32);
+ case NVME_REG_CMBMSC + 4:
+ stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data);
return;
- case 0xe00: /* PMRCAP */
+ case NVME_REG_PMRCAP:
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
"invalid write to PMRCAP register, ignored");
return;
- case 0xe04: /* PMRCTL */
- if (!NVME_CAP_PMRS(n->bar.cap)) {
+ case NVME_REG_PMRCTL:
+ if (!NVME_CAP_PMRS(cap)) {
return;
}
- n->bar.pmrctl = data;
+ stl_le_p(&n->bar.pmrctl, data);
if (NVME_PMRCTL_EN(data)) {
memory_region_set_enabled(&n->pmr.dev->mr, true);
- n->bar.pmrsts = 0;
+ pmrsts = 0;
} else {
memory_region_set_enabled(&n->pmr.dev->mr, false);
- NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1);
+ NVME_PMRSTS_SET_NRDY(pmrsts, 1);
n->pmr.cmse = false;
}
+ stl_le_p(&n->bar.pmrsts, pmrsts);
return;
- case 0xe08: /* PMRSTS */
+ case NVME_REG_PMRSTS:
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
"invalid write to PMRSTS register, ignored");
return;
- case 0xe0C: /* PMREBS */
+ case NVME_REG_PMREBS:
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
"invalid write to PMREBS register, ignored");
return;
- case 0xe10: /* PMRSWTP */
+ case NVME_REG_PMRSWTP:
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
"invalid write to PMRSWTP register, ignored");
return;
- case 0xe14: /* PMRMSCL */
- if (!NVME_CAP_PMRS(n->bar.cap)) {
+ case NVME_REG_PMRMSCL:
+ if (!NVME_CAP_PMRS(cap)) {
return;
}
- n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff);
+ stl_le_p(&n->bar.pmrmscl, data);
n->pmr.cmse = false;
- if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) {
- hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT;
+ if (NVME_PMRMSCL_CMSE(data)) {
+ uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu);
+ hwaddr cba = pmrmscu << 32 |
+ (NVME_PMRMSCL_CBA(data) << PMRMSCL_CBA_SHIFT);
if (cba + int128_get64(n->pmr.dev->mr.size) < cba) {
- NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1);
+ NVME_PMRSTS_SET_CBAI(pmrsts, 1);
+ stl_le_p(&n->bar.pmrsts, pmrsts);
return;
}
@@ -5918,12 +5962,12 @@
}
return;
- case 0xe18: /* PMRMSCU */
- if (!NVME_CAP_PMRS(n->bar.cap)) {
+ case NVME_REG_PMRMSCU:
+ if (!NVME_CAP_PMRS(cap)) {
return;
}
- n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32);
+ stl_le_p(&n->bar.pmrmscu, data);
return;
default:
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
@@ -5938,7 +5982,6 @@
{
NvmeCtrl *n = (NvmeCtrl *)opaque;
uint8_t *ptr = (uint8_t *)&n->bar;
- uint64_t val = 0;
trace_pci_nvme_mmio_read(addr, size);
@@ -5954,24 +5997,25 @@
/* should RAZ, fall through for now */
}
- if (addr < sizeof(n->bar)) {
- /*
- * When PMRWBM bit 1 is set then read from
- * from PMRSTS should ensure prior writes
- * made it to persistent media
- */
- if (addr == 0xe08 &&
- (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
- memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
- }
- memcpy(&val, ptr + addr, size);
- } else {
+ if (addr > sizeof(n->bar) - size) {
NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
"MMIO read beyond last register,"
" offset=0x%"PRIx64", returning 0", addr);
+
+ return 0;
}
- return val;
+ /*
+ * When PMRWBM bit 1 is set then read from
+ * from PMRSTS should ensure prior writes
+ * made it to persistent media
+ */
+ if (addr == NVME_REG_PMRSTS &&
+ (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) {
+ memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
+ }
+
+ return ldn_le_p(ptr + addr, size);
}
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
@@ -6229,6 +6273,7 @@
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
uint64_t cmb_size = n->params.cmb_size_mb * MiB;
+ uint64_t cap = ldq_le_p(&n->bar.cap);
n->cmb.buf = g_malloc0(cmb_size);
memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
@@ -6238,7 +6283,8 @@
PCI_BASE_ADDRESS_MEM_TYPE_64 |
PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);
- NVME_CAP_SET_CMBS(n->bar.cap, 1);
+ NVME_CAP_SET_CMBS(cap, 1);
+ stq_le_p(&n->bar.cap, cap);
if (n->params.legacy_cmb) {
nvme_cmb_enable_regs(n);
@@ -6248,14 +6294,17 @@
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
- NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 1);
- NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 1);
- NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
- /* Turn on bit 1 support */
- NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
- NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 1);
+ uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap);
- pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
+ NVME_PMRCAP_SET_RDS(pmrcap, 1);
+ NVME_PMRCAP_SET_WDS(pmrcap, 1);
+ NVME_PMRCAP_SET_BIR(pmrcap, NVME_PMR_BIR);
+ /* Turn on bit 1 support */
+ NVME_PMRCAP_SET_PMRWBM(pmrcap, 0x02);
+ NVME_PMRCAP_SET_CMSS(pmrcap, 1);
+ stl_le_p(&n->bar.pmrcap, pmrcap);
+
+ pci_register_bar(pci_dev, NVME_PMR_BIR,
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64 |
PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr);
@@ -6345,6 +6394,7 @@
{
NvmeIdCtrl *id = &n->id_ctrl;
uint8_t *pci_conf = pci_dev->config;
+ uint64_t cap = ldq_le_p(&n->bar.cap);
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -6423,17 +6473,18 @@
id->cmic |= NVME_CMIC_MULTI_CTRL;
}
- NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
- NVME_CAP_SET_CQR(n->bar.cap, 1);
- NVME_CAP_SET_TO(n->bar.cap, 0xf);
- NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
- NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP);
- NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
- NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
- NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0);
- NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0);
+ NVME_CAP_SET_MQES(cap, 0x7ff);
+ NVME_CAP_SET_CQR(cap, 1);
+ NVME_CAP_SET_TO(cap, 0xf);
+ NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM);
+ NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP);
+ NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY);
+ NVME_CAP_SET_MPSMAX(cap, 4);
+ NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0);
+ NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0);
+ stq_le_p(&n->bar.cap, cap);
- n->bar.vs = NVME_SPEC_VER;
+ stl_le_p(&n->bar.vs, NVME_SPEC_VER);
n->bar.intmc = n->bar.intms = 0;
}
@@ -6498,7 +6549,7 @@
ns = &n->namespace;
ns->params.nsid = 1;
- if (nvme_ns_setup(n, ns, errp)) {
+ if (nvme_ns_setup(ns, errp)) {
return;
}
@@ -6514,13 +6565,15 @@
nvme_ctrl_reset(n);
- for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
- ns = nvme_ns(n, i);
- if (!ns) {
- continue;
+ if (n->subsys) {
+ for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+ ns = nvme_ns(n, i);
+ if (ns) {
+ ns->attached--;
+ }
}
- nvme_ns_cleanup(ns);
+ nvme_subsys_unregister_ctrl(n->subsys, n);
}
g_free(n->cq);
@@ -6582,7 +6635,7 @@
cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
| NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
- if (NVME_CAP_PMRS(n->bar.cap)) {
+ if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) {
cap |= NVME_SMART_PMR_UNRELIABLE;
}
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 4275c3d..b7cf149 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -346,8 +346,7 @@
assert(ns->nr_open_zones == 0);
}
-static int nvme_ns_check_constraints(NvmeCtrl *n, NvmeNamespace *ns,
- Error **errp)
+static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
{
if (!ns->blkconf.blk) {
error_setg(errp, "block backend not configured");
@@ -366,20 +365,6 @@
return -1;
}
- if (!n->subsys) {
- if (ns->params.detached) {
- error_setg(errp, "detached requires that the nvme device is "
- "linked to an nvme-subsys device");
- return -1;
- }
-
- if (ns->params.shared) {
- error_setg(errp, "shared requires that the nvme device is "
- "linked to an nvme-subsys device");
- return -1;
- }
- }
-
if (ns->params.zoned) {
if (ns->params.max_active_zones) {
if (ns->params.max_open_zones > ns->params.max_active_zones) {
@@ -411,9 +396,9 @@
return 0;
}
-int nvme_ns_setup(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
+int nvme_ns_setup(NvmeNamespace *ns, Error **errp)
{
- if (nvme_ns_check_constraints(n, ns, errp)) {
+ if (nvme_ns_check_constraints(ns, errp)) {
return -1;
}
@@ -456,6 +441,15 @@
}
}
+static void nvme_ns_unrealize(DeviceState *dev)
+{
+ NvmeNamespace *ns = NVME_NS(dev);
+
+ nvme_ns_drain(ns);
+ nvme_ns_shutdown(ns);
+ nvme_ns_cleanup(ns);
+}
+
static void nvme_ns_realize(DeviceState *dev, Error **errp)
{
NvmeNamespace *ns = NVME_NS(dev);
@@ -465,7 +459,29 @@
uint32_t nsid = ns->params.nsid;
int i;
- if (nvme_ns_setup(n, ns, errp)) {
+ if (!n->subsys) {
+ if (ns->params.detached) {
+ error_setg(errp, "detached requires that the nvme device is "
+ "linked to an nvme-subsys device");
+ return;
+ }
+
+ if (ns->params.shared) {
+ error_setg(errp, "shared requires that the nvme device is "
+ "linked to an nvme-subsys device");
+ return;
+ }
+ } else {
+ /*
+ * If this namespace belongs to a subsystem (through a link on the
+ * controller device), reparent the device.
+ */
+ if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
+ return;
+ }
+ }
+
+ if (nvme_ns_setup(ns, errp)) {
return;
}
@@ -553,6 +569,7 @@
dc->bus_type = TYPE_NVME_BUS;
dc->realize = nvme_ns_realize;
+ dc->unrealize = nvme_ns_unrealize;
device_class_set_props(dc, nvme_ns_props);
dc->desc = "Virtual NVMe namespace";
}
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 56f8ece..83ffaba 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -33,12 +33,20 @@
typedef struct NvmeCtrl NvmeCtrl;
typedef struct NvmeNamespace NvmeNamespace;
+#define TYPE_NVME_BUS "nvme-bus"
+OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS)
+
+typedef struct NvmeBus {
+ BusState parent_bus;
+} NvmeBus;
+
#define TYPE_NVME_SUBSYS "nvme-subsys"
#define NVME_SUBSYS(obj) \
OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
typedef struct NvmeSubsystem {
DeviceState parent_obj;
+ NvmeBus bus;
uint8_t subnqn[256];
NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS];
@@ -50,6 +58,7 @@
} NvmeSubsystem;
int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
+void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n);
static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
uint32_t cntlid)
@@ -246,7 +255,7 @@
}
void nvme_ns_init_format(NvmeNamespace *ns);
-int nvme_ns_setup(NvmeCtrl *n, NvmeNamespace *ns, Error **errp);
+int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
void nvme_ns_shutdown(NvmeNamespace *ns);
void nvme_ns_cleanup(NvmeNamespace *ns);
@@ -364,13 +373,6 @@
QTAILQ_HEAD(, NvmeRequest) req_list;
} NvmeCQueue;
-#define TYPE_NVME_BUS "nvme-bus"
-#define NVME_BUS(obj) OBJECT_CHECK(NvmeBus, (obj), TYPE_NVME_BUS)
-
-typedef struct NvmeBus {
- BusState parent_bus;
-} NvmeBus;
-
#define TYPE_NVME "nvme"
#define NVME(obj) \
OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)
diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c
index 192223d..93c3595 100644
--- a/hw/nvme/subsys.c
+++ b/hw/nvme/subsys.c
@@ -32,6 +32,11 @@
return cntlid;
}
+void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n)
+{
+ subsys->ctrls[n->cntlid] = NULL;
+}
+
static void nvme_subsys_setup(NvmeSubsystem *subsys)
{
const char *nqn = subsys->params.nqn ?
@@ -45,6 +50,9 @@
{
NvmeSubsystem *subsys = NVME_SUBSYS(dev);
+ qbus_create_inplace(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev,
+ dev->id);
+
nvme_subsys_setup(subsys);
}
@@ -61,6 +69,7 @@
dc->realize = nvme_subsys_realize;
dc->desc = "Virtual NVMe subsystem";
+ dc->hotpluggable = false;
device_class_set_props(dc, nvme_subsystem_props);
}
diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events
index f9a1f14..430eeb3 100644
--- a/hw/nvme/trace-events
+++ b/hw/nvme/trace-events
@@ -199,3 +199,4 @@
pci_nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring"
pci_nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring"
pci_nvme_ub_unknown_css_value(void) "unknown value in cc.css field"
+pci_nvme_ub_too_many_mappings(void) "too many prp/sgl mappings"
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 527105f..77aae01 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -9,7 +9,7 @@
uint32_t cc;
uint8_t rsvd24[4];
uint32_t csts;
- uint32_t nssrc;
+ uint32_t nssr;
uint32_t aqa;
uint64_t asq;
uint64_t acq;
@@ -26,10 +26,38 @@
uint32_t pmrsts;
uint32_t pmrebs;
uint32_t pmrswtp;
- uint64_t pmrmsc;
+ uint32_t pmrmscl;
+ uint32_t pmrmscu;
uint8_t css[484];
} NvmeBar;
+enum NvmeBarRegs {
+ NVME_REG_CAP = offsetof(NvmeBar, cap),
+ NVME_REG_VS = offsetof(NvmeBar, vs),
+ NVME_REG_INTMS = offsetof(NvmeBar, intms),
+ NVME_REG_INTMC = offsetof(NvmeBar, intmc),
+ NVME_REG_CC = offsetof(NvmeBar, cc),
+ NVME_REG_CSTS = offsetof(NvmeBar, csts),
+ NVME_REG_NSSR = offsetof(NvmeBar, nssr),
+ NVME_REG_AQA = offsetof(NvmeBar, aqa),
+ NVME_REG_ASQ = offsetof(NvmeBar, asq),
+ NVME_REG_ACQ = offsetof(NvmeBar, acq),
+ NVME_REG_CMBLOC = offsetof(NvmeBar, cmbloc),
+ NVME_REG_CMBSZ = offsetof(NvmeBar, cmbsz),
+ NVME_REG_BPINFO = offsetof(NvmeBar, bpinfo),
+ NVME_REG_BPRSEL = offsetof(NvmeBar, bprsel),
+ NVME_REG_BPMBL = offsetof(NvmeBar, bpmbl),
+ NVME_REG_CMBMSC = offsetof(NvmeBar, cmbmsc),
+ NVME_REG_CMBSTS = offsetof(NvmeBar, cmbsts),
+ NVME_REG_PMRCAP = offsetof(NvmeBar, pmrcap),
+ NVME_REG_PMRCTL = offsetof(NvmeBar, pmrctl),
+ NVME_REG_PMRSTS = offsetof(NvmeBar, pmrsts),
+ NVME_REG_PMREBS = offsetof(NvmeBar, pmrebs),
+ NVME_REG_PMRSWTP = offsetof(NvmeBar, pmrswtp),
+ NVME_REG_PMRMSCL = offsetof(NvmeBar, pmrmscl),
+ NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu),
+};
+
enum NvmeCapShift {
CAP_MQES_SHIFT = 0,
CAP_CQR_SHIFT = 16,
@@ -475,25 +503,25 @@
#define NVME_PMRSWTP_SET_PMRSWTV(pmrswtp, val) \
(pmrswtp |= (uint64_t)(val & PMRSWTP_PMRSWTV_MASK) << PMRSWTP_PMRSWTV_SHIFT)
-enum NvmePmrmscShift {
- PMRMSC_CMSE_SHIFT = 1,
- PMRMSC_CBA_SHIFT = 12,
+enum NvmePmrmsclShift {
+ PMRMSCL_CMSE_SHIFT = 1,
+ PMRMSCL_CBA_SHIFT = 12,
};
-enum NvmePmrmscMask {
- PMRMSC_CMSE_MASK = 0x1,
- PMRMSC_CBA_MASK = 0xfffffffffffff,
+enum NvmePmrmsclMask {
+ PMRMSCL_CMSE_MASK = 0x1,
+ PMRMSCL_CBA_MASK = 0xfffff,
};
-#define NVME_PMRMSC_CMSE(pmrmsc) \
- ((pmrmsc >> PMRMSC_CMSE_SHIFT) & PMRMSC_CMSE_MASK)
-#define NVME_PMRMSC_CBA(pmrmsc) \
- ((pmrmsc >> PMRMSC_CBA_SHIFT) & PMRMSC_CBA_MASK)
+#define NVME_PMRMSCL_CMSE(pmrmscl) \
+ ((pmrmscl >> PMRMSCL_CMSE_SHIFT) & PMRMSCL_CMSE_MASK)
+#define NVME_PMRMSCL_CBA(pmrmscl) \
+ ((pmrmscl >> PMRMSCL_CBA_SHIFT) & PMRMSCL_CBA_MASK)
-#define NVME_PMRMSC_SET_CMSE(pmrmsc, val) \
- (pmrmsc |= (uint64_t)(val & PMRMSC_CMSE_MASK) << PMRMSC_CMSE_SHIFT)
-#define NVME_PMRMSC_SET_CBA(pmrmsc, val) \
- (pmrmsc |= (uint64_t)(val & PMRMSC_CBA_MASK) << PMRMSC_CBA_SHIFT)
+#define NVME_PMRMSCL_SET_CMSE(pmrmscl, val) \
+ (pmrmscl |= (uint32_t)(val & PMRMSCL_CMSE_MASK) << PMRMSCL_CMSE_SHIFT)
+#define NVME_PMRMSCL_SET_CBA(pmrmscl, val) \
+ (pmrmscl |= (uint32_t)(val & PMRMSCL_CBA_MASK) << PMRMSCL_CBA_SHIFT)
enum NvmeSglDescriptorType {
NVME_SGL_DESCR_TYPE_DATA_BLOCK = 0x0,
diff --git a/qemu-options.hx b/qemu-options.hx
index 99ed5ec..83aa59a 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -98,28 +98,32 @@
Enables or disables ACPI Heterogeneous Memory Attribute Table
(HMAT) support. The default is off.
- ``memory-backend='id'``
+ ``memory-backend='id'``
An alternative to legacy ``-mem-path`` and ``mem-prealloc`` options.
Allows to use a memory backend as main RAM.
For example:
::
- -object memory-backend-file,id=pc.ram,size=512M,mem-path=/hugetlbfs,prealloc=on,share=on
- -machine memory-backend=pc.ram
- -m 512M
+
+ -object memory-backend-file,id=pc.ram,size=512M,mem-path=/hugetlbfs,prealloc=on,share=on
+ -machine memory-backend=pc.ram
+ -m 512M
Migration compatibility note:
- a) as backend id one shall use value of 'default-ram-id', advertised by
- machine type (available via ``query-machines`` QMP command), if migration
- to/from old QEMU (<5.0) is expected.
- b) for machine types 4.0 and older, user shall
- use ``x-use-canonical-path-for-ramblock-id=off`` backend option
- if migration to/from old QEMU (<5.0) is expected.
+
+ * as backend id one shall use value of 'default-ram-id', advertised by
+ machine type (available via ``query-machines`` QMP command), if migration
+ to/from old QEMU (<5.0) is expected.
+ * for machine types 4.0 and older, user shall
+ use ``x-use-canonical-path-for-ramblock-id=off`` backend option
+ if migration to/from old QEMU (<5.0) is expected.
+
For example:
::
- -object memory-backend-ram,id=pc.ram,size=512M,x-use-canonical-path-for-ramblock-id=off
- -machine memory-backend=pc.ram
- -m 512M
+
+ -object memory-backend-ram,id=pc.ram,size=512M,x-use-canonical-path-for-ramblock-id=off
+ -machine memory-backend=pc.ram
+ -m 512M
ERST
HXCOMM Deprecated by -machine
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 752b15b..2866dd7 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -201,7 +201,8 @@
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
/* with reasonable vector length */
if (cpu_isar_feature(aa64_sve, cpu)) {
- env->vfp.zcr_el[1] = MIN(cpu->sve_max_vq - 1, 3);
+ env->vfp.zcr_el[1] =
+ aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1);
}
/*
* Enable TBI0 but not TBI1.
@@ -1051,7 +1052,16 @@
QLIST_INIT(&cpu->pre_el_change_hooks);
QLIST_INIT(&cpu->el_change_hooks);
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+# ifdef TARGET_AARCH64
+ /*
+ * The linux kernel defaults to 512-bit vectors, when sve is supported.
+ * See documentation for /proc/sys/abi/sve_default_vector_length, and
+ * our corresponding sve-default-vector-length cpu property.
+ */
+ cpu->sve_default_vq = 4;
+# endif
+#else
/* Our inbound IRQ and FIQ lines */
if (kvm_enabled()) {
/* VIRQ and VFIQ are unused with KVM but we add them to maintain
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index be9a4dc..9f0a5f8 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1006,6 +1006,11 @@
/* Used to set the maximum vector length the cpu will support. */
uint32_t sve_max_vq;
+#ifdef CONFIG_USER_ONLY
+ /* Used to set the default vector length at process start. */
+ uint32_t sve_default_vq;
+#endif
+
/*
* In sve_vq_map each set bit is a supported vector length of
* (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index c7a1626..c690318 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -559,6 +559,59 @@
cpu->isar.id_aa64pfr0 = t;
}
+#ifdef CONFIG_USER_ONLY
+/* Mirror linux /proc/sys/abi/sve_default_vector_length. */
+static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ int32_t default_len, default_vq, remainder;
+
+ if (!visit_type_int32(v, name, &default_len, errp)) {
+ return;
+ }
+
+ /* Undocumented, but the kernel allows -1 to indicate "maximum". */
+ if (default_len == -1) {
+ cpu->sve_default_vq = ARM_MAX_VQ;
+ return;
+ }
+
+ default_vq = default_len / 16;
+ remainder = default_len % 16;
+
+ /*
+ * Note that the 512 max comes from include/uapi/asm/sve_context.h
+ * and is the maximum architectural width of ZCR_ELx.LEN.
+ */
+ if (remainder || default_vq < 1 || default_vq > 512) {
+ error_setg(errp, "cannot set sve-default-vector-length");
+ if (remainder) {
+ error_append_hint(errp, "Vector length not a multiple of 16\n");
+ } else if (default_vq < 1) {
+ error_append_hint(errp, "Vector length smaller than 16\n");
+ } else {
+ error_append_hint(errp, "Vector length larger than %d\n",
+ 512 * 16);
+ }
+ return;
+ }
+
+ cpu->sve_default_vq = default_vq;
+}
+
+static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ int32_t value = cpu->sve_default_vq * 16;
+
+ visit_type_int32(v, name, &value, errp);
+}
+#endif
+
void aarch64_add_sve_properties(Object *obj)
{
uint32_t vq;
@@ -571,6 +624,13 @@
object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
cpu_arm_set_sve_vq, NULL, NULL);
}
+
+#ifdef CONFIG_USER_ONLY
+ /* Mirror linux /proc/sys/abi/sve_default_vector_length. */
+ object_property_add(obj, "sve-default-vector-length", "int32",
+ cpu_arm_get_sve_default_vec_len,
+ cpu_arm_set_sve_default_vec_len, NULL, NULL);
+#endif
}
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index a8fff2a..826601b 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -84,6 +84,10 @@
if (n < 16) {
/* Core integer register. */
+ if (n == 13 && arm_feature(env, ARM_FEATURE_M)) {
+ /* M profile SP low bits are always 0 */
+ tmp &= ~3;
+ }
env->regs[n] = tmp;
return 4;
}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 0c07ca9..155d8bf 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6457,11 +6457,13 @@
return 0;
}
-static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
+uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
uint32_t end_len;
- end_len = start_len &= 0xf;
+ start_len = MIN(start_len, ARM_MAX_VQ - 1);
+ end_len = start_len;
+
if (!test_bit(start_len, cpu->sve_vq_map)) {
end_len = find_last_bit(cpu->sve_vq_map, start_len);
assert(end_len < start_len);
@@ -6487,7 +6489,7 @@
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
}
- return sve_zcr_get_valid_len(cpu, zcr_len);
+ return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
}
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 11a7201..cd2ea8a 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -177,6 +177,16 @@
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
+/**
+ * aarch64_sve_zcr_get_valid_len:
+ * @cpu: cpu context
+ * @start_len: maximum len to consider
+ *
+ * Return the maximum supported sve vector length <= @start_len.
+ * Note that both @start_len and the return value are in units
+ * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128.
+ */
+uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len);
enum arm_fprounding {
FPROUNDING_TIEEVEN,
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index 7a1e35a..20761c9 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -1554,6 +1554,7 @@
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: NSACR prevents clearing FPU registers\n");
v7m_exception_taken(cpu, excret, true, false);
+ return;
} else if (!cpacr_pass) {
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
exc_secure);
@@ -1561,6 +1562,7 @@
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: CPACR prevents clearing FPU registers\n");
v7m_exception_taken(cpu, excret, true, false);
+ return;
}
}
/* Clear s0..s15, FPSCR and VPR */
@@ -2246,6 +2248,7 @@
env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
break;
case EXCP_UNALIGNED:
+ /* Unaligned faults reported by M-profile aware code */
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
break;
@@ -2318,6 +2321,13 @@
}
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
break;
+ case 0x1: /* Alignment fault reported by generic code */
+ qemu_log_mask(CPU_LOG_INT,
+ "...really UsageFault with UFSR.UNALIGNED\n");
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ break;
default:
/*
* All other FSR values are either MPU faults or "can't happen
@@ -2563,13 +2573,13 @@
if (!env->v7m.secure) {
return;
}
- env->v7m.other_ss_msp = val;
+ env->v7m.other_ss_msp = val & ~3;
return;
case 0x89: /* PSP_NS */
if (!env->v7m.secure) {
return;
}
- env->v7m.other_ss_psp = val;
+ env->v7m.other_ss_psp = val & ~3;
return;
case 0x8a: /* MSPLIM_NS */
if (!env->v7m.secure) {
@@ -2638,6 +2648,8 @@
limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
+ val &= ~0x3;
+
if (val < limit) {
raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
}
@@ -2660,16 +2672,16 @@
break;
case 8: /* MSP */
if (v7m_using_psp(env)) {
- env->v7m.other_sp = val;
+ env->v7m.other_sp = val & ~3;
} else {
- env->regs[13] = val;
+ env->regs[13] = val & ~3;
}
break;
case 9: /* PSP */
if (v7m_using_psp(env)) {
- env->regs[13] = val;
+ env->regs[13] = val & ~3;
} else {
- env->v7m.other_sp = val;
+ env->v7m.other_sp = val & ~3;
}
break;
case 10: /* MSPLIM */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 351afa4..80c2826 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -291,6 +291,9 @@
*/
tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
s->base.is_jmp = DISAS_JUMP;
+ } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
+ /* For M-profile SP bits [1:0] are always zero */
+ tcg_gen_andi_i32(var, var, ~3);
}
tcg_gen_mov_i32(cpu_R[reg], var);
tcg_temp_free_i32(var);
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index edb97eb..34a7ce8 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -4110,7 +4110,7 @@
* none", but this is just for compatibility while libvirt isn't
* adapted to resolve CPU model versions before creating VMs.
* See "Runnability guarantee of CPU models" at
- * docs/system/deprecated.rst.
+ * docs/about/deprecated.rst.
*/
X86CPUVersion default_cpu_version = 1;
diff --git a/tests/qtest/nvme-test.c b/tests/qtest/nvme-test.c
index d32c953..f8bafb5 100644
--- a/tests/qtest/nvme-test.c
+++ b/tests/qtest/nvme-test.c
@@ -13,6 +13,7 @@
#include "libqos/libqtest.h"
#include "libqos/qgraph.h"
#include "libqos/pci.h"
+#include "include/block/nvme.h"
typedef struct QNvme QNvme;
@@ -66,12 +67,89 @@
g_assert_cmpint(qpci_io_readl(pdev, bar, cmb_bar_size - 1), !=, 0x44332211);
}
+static void nvmetest_reg_read_test(void *obj, void *data, QGuestAllocator *alloc)
+{
+ QNvme *nvme = obj;
+ QPCIDevice *pdev = &nvme->dev;
+ QPCIBar bar;
+ uint32_t cap_lo, cap_hi;
+ uint64_t cap;
+
+ qpci_device_enable(pdev);
+ bar = qpci_iomap(pdev, 0, NULL);
+
+ cap_lo = qpci_io_readl(pdev, bar, 0x0);
+ g_assert_cmpint(NVME_CAP_MQES(cap_lo), ==, 0x7ff);
+
+ cap_hi = qpci_io_readl(pdev, bar, 0x4);
+ g_assert_cmpint(NVME_CAP_MPSMAX((uint64_t)cap_hi << 32), ==, 0x4);
+
+ cap = qpci_io_readq(pdev, bar, 0x0);
+ g_assert_cmpint(NVME_CAP_MQES(cap), ==, 0x7ff);
+ g_assert_cmpint(NVME_CAP_MPSMAX(cap), ==, 0x4);
+
+ qpci_iounmap(pdev, bar);
+}
+
+static void nvmetest_pmr_reg_test(void *obj, void *data, QGuestAllocator *alloc)
+{
+ QNvme *nvme = obj;
+ QPCIDevice *pdev = &nvme->dev;
+ QPCIBar pmr_bar, nvme_bar;
+ uint32_t pmrcap, pmrsts;
+
+ qpci_device_enable(pdev);
+ pmr_bar = qpci_iomap(pdev, 4, NULL);
+
+    /* Without enabling PMRCTL, check bar enablement */
+ qpci_io_writel(pdev, pmr_bar, 0, 0xccbbaa99);
+ g_assert_cmpint(qpci_io_readb(pdev, pmr_bar, 0), !=, 0x99);
+ g_assert_cmpint(qpci_io_readw(pdev, pmr_bar, 0), !=, 0xaa99);
+
+ /* Map NVMe Bar Register to Enable the Mem Region */
+ nvme_bar = qpci_iomap(pdev, 0, NULL);
+
+ pmrcap = qpci_io_readl(pdev, nvme_bar, 0xe00);
+ g_assert_cmpint(NVME_PMRCAP_RDS(pmrcap), ==, 0x1);
+ g_assert_cmpint(NVME_PMRCAP_WDS(pmrcap), ==, 0x1);
+ g_assert_cmpint(NVME_PMRCAP_BIR(pmrcap), ==, 0x4);
+ g_assert_cmpint(NVME_PMRCAP_PMRWBM(pmrcap), ==, 0x2);
+ g_assert_cmpint(NVME_PMRCAP_CMSS(pmrcap), ==, 0x1);
+
+    /* Enable PMRCTL */
+ qpci_io_writel(pdev, nvme_bar, 0xe04, 0x1);
+
+ qpci_io_writel(pdev, pmr_bar, 0, 0x44332211);
+ g_assert_cmpint(qpci_io_readb(pdev, pmr_bar, 0), ==, 0x11);
+ g_assert_cmpint(qpci_io_readw(pdev, pmr_bar, 0), ==, 0x2211);
+ g_assert_cmpint(qpci_io_readl(pdev, pmr_bar, 0), ==, 0x44332211);
+
+ pmrsts = qpci_io_readl(pdev, nvme_bar, 0xe08);
+ g_assert_cmpint(NVME_PMRSTS_NRDY(pmrsts), ==, 0x0);
+
+    /* Disable PMRCTL */
+ qpci_io_writel(pdev, nvme_bar, 0xe04, 0x0);
+
+ qpci_io_writel(pdev, pmr_bar, 0, 0x88776655);
+ g_assert_cmpint(qpci_io_readb(pdev, pmr_bar, 0), !=, 0x55);
+ g_assert_cmpint(qpci_io_readw(pdev, pmr_bar, 0), !=, 0x6655);
+ g_assert_cmpint(qpci_io_readl(pdev, pmr_bar, 0), !=, 0x88776655);
+
+ pmrsts = qpci_io_readl(pdev, nvme_bar, 0xe08);
+ g_assert_cmpint(NVME_PMRSTS_NRDY(pmrsts), ==, 0x1);
+
+ qpci_iounmap(pdev, nvme_bar);
+ qpci_iounmap(pdev, pmr_bar);
+}
+
static void nvme_register_nodes(void)
{
QOSGraphEdgeOptions opts = {
.extra_device_opts = "addr=04.0,drive=drv0,serial=foo",
.before_cmd_line = "-drive id=drv0,if=none,file=null-co://,"
- "file.read-zeroes=on,format=raw",
+ "file.read-zeroes=on,format=raw "
+ "-object memory-backend-ram,id=pmr0,"
+ "share=on,size=8",
};
add_qpci_address(&opts, &(QPCIAddress) { .devfn = QPCI_DEVFN(4, 0) });
@@ -83,6 +161,13 @@
qos_add_test("oob-cmb-access", "nvme", nvmetest_oob_cmb_test, &(QOSGraphTestOptions) {
.edge.extra_device_opts = "cmb_size_mb=2"
});
+
+ qos_add_test("pmr-test-access", "nvme", nvmetest_pmr_reg_test,
+ &(QOSGraphTestOptions) {
+ .edge.extra_device_opts = "pmrdev=pmr0"
+ });
+
+ qos_add_test("reg-read", "nvme", nvmetest_reg_read_test, NULL);
}
libqos_init(nvme_register_nodes);