vfio: Collect container iova range info

Collect iova range information if the VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE
capability is supported.
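
For reference, the capability payload reported by VFIO_IOMMU_GET_INFO
looks roughly like this (field names as in the linux/vfio.h UAPI header,
shown only as a reminder of what the new helper parses):

    struct vfio_iova_range {
        __u64 start;
        __u64 end;                              /* inclusive upper bound */
    };

    struct vfio_iommu_type1_info_cap_iova_range {
        struct vfio_info_cap_header header;     /* id/version/next chain */
        __u32 nr_iovas;                         /* number of usable ranges */
        __u32 reserved;
        struct vfio_iova_range iova_ranges[];
    };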

This allows the information to be propagated through the IOMMU MR
set_iova_ranges() callback so that virtual IOMMUs become aware of
those aperture constraints. This is only done if the information is
available and the number of iova ranges is greater than 0.
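
The wrapper used for that propagation comes from the companion memory
core change; a minimal sketch of its expected shape, assuming the
IOMMUMemoryRegionClass hook is named iommu_set_iova_ranges (illustrative
only, not the authoritative implementation):

    int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu_mr,
                                            GList *iova_ranges, Error **errp)
    {
        IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

        /* Forward the list of usable IOVA windows to the virtual IOMMU
         * if it implements the hook; otherwise there is nothing to do. */
        if (imrc->iommu_set_iova_ranges) {
            return imrc->iommu_set_iova_ranges(iommu_mr, iova_ranges, errp);
        }
        return 0;
    }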

A new vfio_get_info_iova_range() helper is introduced, matching the
coding style of the existing vfio_get_info_dma_avail(); its boolean
return value is not used, though. The code of both helpers is now
aligned.
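
For context, the helper builds on QEMU's Range utilities from
include/qemu/range.h; a tiny usage sketch with made-up window bounds:

    GList *iova_ranges = NULL;
    Range *r = g_new(Range, 1);

    /* Record one usable window; range_list_insert() keeps the list
     * sorted and merges adjacent ranges. */
    range_set_bounds(r, 0x0, 0xfedfffff);
    iova_ranges = range_list_insert(iova_ranges, r);

    /* Released the same way vfio_free_container() does. */
    g_list_free_full(iova_ranges, g_free);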

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Tested-by: Yanghang Liu <yanghliu@redhat.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
Commit:    e4a8ae09c5 (parent: 51478a8ef5)
Author:    Eric Auger, 2023-10-19 15:45:09 +02:00
Committer: Cédric Le Goater
3 changed files with 49 additions and 3 deletions

@@ -693,6 +693,15 @@ static void vfio_listener_region_add(MemoryListener *listener,
             goto fail;
         }
 
+        if (container->iova_ranges) {
+            ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr,
+                                                      container->iova_ranges, &err);
+            if (ret) {
+                g_free(giommu);
+                goto fail;
+            }
+        }
+
         ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
                                                     &err);
         if (ret) {

@@ -382,7 +382,7 @@ bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
 
     /* If the capability cannot be found, assume no DMA limiting */
     hdr = vfio_get_iommu_type1_info_cap(info,
                                         VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
-    if (hdr == NULL) {
+    if (!hdr) {
         return false;
     }
@@ -394,6 +394,32 @@ bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
     return true;
 }
 
+static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
+                                     VFIOContainer *container)
+{
+    struct vfio_info_cap_header *hdr;
+    struct vfio_iommu_type1_info_cap_iova_range *cap;
+
+    hdr = vfio_get_iommu_type1_info_cap(info,
+                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
+    if (!hdr) {
+        return false;
+    }
+
+    cap = (void *)hdr;
+
+    for (int i = 0; i < cap->nr_iovas; i++) {
+        Range *range = g_new(Range, 1);
+
+        range_set_bounds(range, cap->iova_ranges[i].start,
+                         cap->iova_ranges[i].end);
+        container->iova_ranges =
+            range_list_insert(container->iova_ranges, range);
+    }
+
+    return true;
+}
+
 static void vfio_kvm_device_add_group(VFIOGroup *group)
 {
     Error *err = NULL;
@@ -535,6 +561,12 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
     }
 }
 
+static void vfio_free_container(VFIOContainer *container)
+{
+    g_list_free_full(container->iova_ranges, g_free);
+    g_free(container);
+}
+
 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                   Error **errp)
 {
@@ -616,6 +648,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     container->error = NULL;
     container->dirty_pages_supported = false;
     container->dma_max_mappings = 0;
+    container->iova_ranges = NULL;
     QLIST_INIT(&container->giommu_list);
     QLIST_INIT(&container->hostwin_list);
     QLIST_INIT(&container->vrdl_list);
@@ -652,6 +685,9 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
 
         if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
             container->dma_max_mappings = 65535;
         }
+
+        vfio_get_info_iova_range(info, container);
+
         vfio_get_iommu_info_migration(container, info);
         g_free(info);
@@ -765,7 +801,7 @@ enable_discards_exit:
     vfio_ram_block_discard_disable(container, false);
 
 free_container_exit:
-    g_free(container);
+    vfio_free_container(container);
 
 close_fd_exit:
     close(fd);
@@ -819,7 +855,7 @@ static void vfio_disconnect_container(VFIOGroup *group)
 
         trace_vfio_disconnect_container(container->fd);
         close(container->fd);
-        g_free(container);
+        vfio_free_container(container);
 
         vfio_put_address_space(space);
     }

@@ -99,6 +99,7 @@ typedef struct VFIOContainer {
     QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
     QLIST_ENTRY(VFIOContainer) next;
     QLIST_HEAD(, VFIODevice) device_list;
+    GList *iova_ranges;
 } VFIOContainer;
 
 typedef struct VFIOGuestIOMMU {