vfio/common: Separate vfio-pci ranges
QEMU computes the DMA logging ranges for two predefined ranges: 32-bit
and 64-bit. In the OVMF case, when the dynamic MMIO window is enabled,
QEMU includes in the 64-bit range both the RAM regions at the lower
part of the address space and the vfio-pci device RAM regions at the
top of it. This range contains a large gap, and its size can exceed
the dirty tracking HW limits of some devices (MLX5 has a 2^42 limit).

To avoid such large ranges, introduce a new PCI range covering the
vfio-pci device RAM regions, but only when the addresses are above
4GB, to avoid breaking potential SeaBIOS guests.

[ clg: - wrote commit log
       - fixed overlapping 32-bit and PCI ranges when using SeaBIOS ]

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
Fixes: 5255bbf4ec16 ("vfio/common: Add device dirty page tracking start/stop")
Signed-off-by: Cédric Le Goater <clg@redhat.com>
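
To illustrate the range selection described above (and implemented in
vfio_dirty_tracking_update() below) outside of QEMU, here is a minimal,
self-contained C sketch. It is not part of the patch: the DirtyRanges and
dirty_range_update() names and the example addresses are hypothetical; only
the selection logic mirrors the change (a section owned by a vfio-pci device
whose IOVA starts above 4GB goes into the dedicated PCI range, everything
else falls into the 32-bit or 64-bit range as before).

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for VFIODirtyRanges: three [min, max] windows. */
typedef struct {
    uint64_t min32, max32;       /* sections ending at or below 4GB      */
    uint64_t min64, max64;       /* RAM and other sections above 4GB     */
    uint64_t minpci64, maxpci64; /* vfio-pci regions relocated above 4GB */
} DirtyRanges;

/* Fold one section [iova, end] into the matching window, mirroring the
 * bucket selection introduced by this patch. */
static void dirty_range_update(DirtyRanges *r, uint64_t iova, uint64_t end,
                               bool is_vfio_pci)
{
    uint64_t *min, *max;

    if (is_vfio_pci && iova >= UINT32_MAX) {
        min = &r->minpci64;
        max = &r->maxpci64;
    } else if (end <= UINT32_MAX) {
        min = &r->min32;
        max = &r->max32;
    } else {
        min = &r->min64;
        max = &r->max64;
    }

    if (*min > iova) {
        *min = iova;
    }
    if (*max < end) {
        *max = end;
    }
}

int main(void)
{
    DirtyRanges r = {
        .min32 = UINT32_MAX, .min64 = UINT64_MAX, .minpci64 = UINT64_MAX,
    };

    /* Hypothetical layout: low RAM, high RAM, and a 64-bit BAR that the
     * firmware relocated near the top of the address space. */
    dirty_range_update(&r, 0x0, 0x7fffffffULL, false);
    dirty_range_update(&r, 0x100000000ULL, 0x17fffffffULL, false);
    dirty_range_update(&r, 0x380000000000ULL, 0x380000ffffffULL, true);

    printf("32:    [0x%" PRIx64 " - 0x%" PRIx64 "]\n", r.min32, r.max32);
    printf("64:    [0x%" PRIx64 " - 0x%" PRIx64 "]\n", r.min64, r.max64);
    printf("pci64: [0x%" PRIx64 " - 0x%" PRIx64 "]\n", r.minpci64, r.maxpci64);
    return 0;
}

Without the separate pci64 window, the last two sections above would merge
into a single 64-bit range of roughly 2^46 bytes, well beyond the 2^42 limit
mentioned in the commit log; with it, the 64-bit range stays at 2GB and the
PCI range at 16MB.
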
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 237101d..1346492 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -27,6 +27,7 @@
#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
+#include "hw/vfio/pci.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
@@ -1400,6 +1401,8 @@
     hwaddr max32;
     hwaddr min64;
     hwaddr max64;
+    hwaddr minpci64;
+    hwaddr maxpci64;
 } VFIODirtyRanges;
 
 typedef struct VFIODirtyRangesListener {
@@ -1408,6 +1411,31 @@
     MemoryListener listener;
 } VFIODirtyRangesListener;
 
+static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
+                                     VFIOContainer *container)
+{
+    VFIOPCIDevice *pcidev;
+    VFIODevice *vbasedev;
+    VFIOGroup *group;
+    Object *owner;
+
+    owner = memory_region_owner(section->mr);
+
+    QLIST_FOREACH(group, &container->group_list, container_next) {
+        QLIST_FOREACH(vbasedev, &group->device_list, next) {
+            if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
+                continue;
+            }
+            pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+            if (OBJECT(pcidev) == owner) {
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
+
 static void vfio_dirty_tracking_update(MemoryListener *listener,
                                        MemoryRegionSection *section)
 {
@@ -1424,19 +1452,32 @@
     }
 
     /*
-     * The address space passed to the dirty tracker is reduced to two ranges:
-     * one for 32-bit DMA ranges, and another one for 64-bit DMA ranges.
+     * The address space passed to the dirty tracker is reduced to three ranges:
+     * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the
+     * PCI 64-bit hole.
+     *
      * The underlying reports of dirty will query a sub-interval of each of
      * these ranges.
      *
-     * The purpose of the dual range handling is to handle known cases of big
-     * holes in the address space, like the x86 AMD 1T hole. The alternative
-     * would be an IOVATree but that has a much bigger runtime overhead and
-     * unnecessary complexity.
+     * The purpose of the three range handling is to handle known cases of big
+     * holes in the address space, like the x86 AMD 1T hole, and firmware (like
+     * OVMF) which may relocate the pci-hole64 to the end of the address space.
+     * The latter would otherwise generate large ranges for tracking, stressing
+     * the limits of supported hardware. The pci-hole32 will always be below 4G
+     * (overlapping or not) so it doesn't need special handling and is part of
+     * the 32-bit range.
+     *
+     * The alternative would be an IOVATree but that has a much bigger runtime
+     * overhead and unnecessary complexity.
      */
-    min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
-    max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
-
+    if (vfio_section_is_vfio_pci(section, dirty->container) &&
+        iova >= UINT32_MAX) {
+        min = &range->minpci64;
+        max = &range->maxpci64;
+    } else {
+        min = (end <= UINT32_MAX) ? &range->min32 : &range->min64;
+        max = (end <= UINT32_MAX) ? &range->max32 : &range->max64;
+    }
     if (*min > iova) {
         *min = iova;
     }
@@ -1461,6 +1502,7 @@
     memset(&dirty, 0, sizeof(dirty));
     dirty.ranges.min32 = UINT32_MAX;
     dirty.ranges.min64 = UINT64_MAX;
+    dirty.ranges.minpci64 = UINT64_MAX;
     dirty.listener = vfio_dirty_tracking_listener;
     dirty.container = container;
 
@@ -1531,7 +1573,8 @@
      * DMA logging uAPI guarantees to support at least a number of ranges that
      * fits into a single host kernel base page.
      */
-    control->num_ranges = !!tracking->max32 + !!tracking->max64;
+    control->num_ranges = !!tracking->max32 + !!tracking->max64 +
+                          !!tracking->maxpci64;
     ranges = g_try_new0(struct vfio_device_feature_dma_logging_range,
                         control->num_ranges);
     if (!ranges) {
@@ -1550,11 +1593,17 @@
     if (tracking->max64) {
         ranges->iova = tracking->min64;
         ranges->length = (tracking->max64 - tracking->min64) + 1;
+        ranges++;
+    }
+    if (tracking->maxpci64) {
+        ranges->iova = tracking->minpci64;
+        ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1;
     }
 
     trace_vfio_device_dirty_tracking_start(control->num_ranges,
                                            tracking->min32, tracking->max32,
-                                           tracking->min64, tracking->max64);
+                                           tracking->min64, tracking->max64,
+                                           tracking->minpci64, tracking->maxpci64);
 
     return feature;
 }
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 329736a..81ec7c7 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -104,7 +104,7 @@
vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA"
vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64
vfio_device_dirty_tracking_update(uint64_t start, uint64_t end, uint64_t min, uint64_t max) "section 0x%"PRIx64" - 0x%"PRIx64" -> update [0x%"PRIx64" - 0x%"PRIx64"]"
-vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32, uint64_t min64, uint64_t max64) "nr_ranges %d 32:[0x%"PRIx64" - 0x%"PRIx64"], 64:[0x%"PRIx64" - 0x%"PRIx64"]"
+vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32, uint64_t min64, uint64_t max64, uint64_t minpci, uint64_t maxpci) "nr_ranges %d 32:[0x%"PRIx64" - 0x%"PRIx64"], 64:[0x%"PRIx64" - 0x%"PRIx64"], pci64:[0x%"PRIx64" - 0x%"PRIx64"]"
vfio_disconnect_container(int fd) "close container->fd=%d"
vfio_put_group(int fd) "close group->fd=%d"
vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u"
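
The hw/vfio/common.c hunk that builds the logging ranges array can be
illustrated with a similar standalone sketch (again with hypothetical names,
not QEMU code; LogRange stands in for
struct vfio_device_feature_dma_logging_range): only the windows that were
actually populated are packed, back to back, into the array handed to the
kernel, and num_ranges is the count of non-empty windows.

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical stand-in for struct vfio_device_feature_dma_logging_range. */
typedef struct {
    uint64_t iova;
    uint64_t length;
} LogRange;

/* Same three windows as in the previous sketch. */
typedef struct {
    uint64_t min32, max32;
    uint64_t min64, max64;
    uint64_t minpci64, maxpci64;
} DirtyRanges;

/* Pack only the non-empty windows into a dense array, the way the patch
 * fills the DMA logging control structure: num_ranges is
 * !!max32 + !!max64 + !!maxpci64 and the entries are written back to back. */
static unsigned pack_ranges(const DirtyRanges *t, LogRange out[3])
{
    LogRange *r = out;

    if (t->max32) {
        r->iova = t->min32;
        r->length = (t->max32 - t->min32) + 1;
        r++;
    }
    if (t->max64) {
        r->iova = t->min64;
        r->length = (t->max64 - t->min64) + 1;
        r++;
    }
    if (t->maxpci64) {
        r->iova = t->minpci64;
        r->length = (t->maxpci64 - t->minpci64) + 1;
        r++;
    }
    return (unsigned)(r - out);
}

int main(void)
{
    /* Hypothetical result of the tracking pass: low RAM, high RAM,
     * and one relocated 64-bit BAR window. */
    DirtyRanges t = {
        .min32 = 0x0,                  .max32 = 0x7fffffffULL,
        .min64 = 0x100000000ULL,       .max64 = 0x17fffffffULL,
        .minpci64 = 0x380000000000ULL, .maxpci64 = 0x380000ffffffULL,
    };
    LogRange out[3];
    unsigned n = pack_ranges(&t, out);

    for (unsigned i = 0; i < n; i++) {
        printf("range %u: iova=0x%" PRIx64 " length=0x%" PRIx64 "\n",
               i, out[i].iova, out[i].length);
    }
    return 0;
}

With all three windows populated, the kernel is handed three ranges; a
SeaBIOS-style guest with no relocated 64-bit BARs typically leaves maxpci64
at zero and still gets at most the original two.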