Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

pc,pci,virtio,crypto: bugfixes

fixes all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmTMJ90PHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRprTAH/1YxxP9Dhn71BjkwGQ18SmpNp0wlmP9GRJEy
# 7aQNO7ativ8njAX1fLEo0ZRJ5qX1MCw+/ZuEvIUZD+0biwimsVCPjWVLs3Q8geUs
# LzQWuvUoRGp136BtaZUrlS/cWr8TQY+4/lyK/xOBUOiI+5AP1Yi7eL9162RDQR3D
# cV/0eH8QNY+93n+VnyFY6Y55YnHyH9EBkxdtnVkt7NOCms4qMRf9IBiWOMaktp4w
# iTfvOfKbTCKhWDsNWIJEJUtWItRFp6OIRdO3KoMXBHuE8S/0C19fc2eBfbeN/bUK
# I5b4xO181ibzoPGWkDfLYi1wFfvGDDxFe119EzvDKU8dDtNFBoY=
# =FRdM
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 03 Aug 2023 03:19:09 PM PDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (22 commits)
  cryptodev: Handle unexpected request to avoid crash
  virtio-crypto: verify src&dst buffer length for sym request
  include/hw/i386/x86-iommu: Fix struct X86IOMMU_MSIMessage for big endian hosts
  hw/i386/x86-iommu: Fix endianness issue in x86_iommu_irq_to_msi_message()
  hw/i386/intel_iommu: Fix index calculation in vtd_interrupt_remap_msi()
  hw/i386/intel_iommu: Fix struct VTDInvDescIEC on big endian hosts
  hw/i386/intel_iommu: Fix endianness problems related to VTD_IR_TableEntry
  hw/i386/intel_iommu: Fix trivial endianness problems
  vhost: fix the fd leak
  pci: do not respond config requests after PCI device eject
  virtio: Fix packed virtqueue used_idx mask
  hw/virtio: qmp: add RING_RESET to 'info virtio-status'
  tests: acpi: update expected blobs
  acpi: x86: remove _ADR on host bridges
  tests: acpi: whitelist expected blobs
  tests: acpi: x86: update expected blobs
  x86: acpi: workaround Windows not handling name references in Package properly
  tests: acpi: x86: whitelist expected blobs
  hw/virtio: Add a protection against duplicate vu_scmi_stop calls
  virtio-iommu: Standardize granule extraction and formatting
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

commit a089a73383 (Richard Henderson, 2023-08-03 18:48:20 -07:00)
53 changed files with 132 additions and 74 deletions

@@ -191,6 +191,11 @@ static int cryptodev_backend_account(CryptoDevBackend *backend,
if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) {
CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info;
len = asym_op_info->src_len;
if (unlikely(!backend->asym_stat)) {
error_report("cryptodev: Unexpected asym operation");
return -VIRTIO_CRYPTO_NOTSUPP;
}
switch (op_info->op_code) {
case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
CryptodevAsymStatIncEncrypt(backend, len);
@@ -210,6 +215,11 @@ static int cryptodev_backend_account(CryptoDevBackend *backend,
} else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) {
CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info;
len = sym_op_info->src_len;
if (unlikely(!backend->sym_stat)) {
error_report("cryptodev: Unexpected sym operation");
return -VIRTIO_CRYPTO_NOTSUPP;
}
switch (op_info->op_code) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
CryptodevSymStatIncEncrypt(backend, len);
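
The two new checks above guard the accounting path when a guest submits a request of a type the backend did not expect, in which case the matching stat structure has not been allocated and the increment would dereference a NULL pointer. A minimal standalone sketch of the same guard-before-account pattern (hypothetical names and types, not the QEMU code):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical per-service statistics, only allocated when the
     * service is actually offered to the guest. */
    struct sym_stat { unsigned long encrypted_bytes; };
    struct backend  { struct sym_stat *sym_stat; };

    #define NOTSUPP 3 /* stand-in for VIRTIO_CRYPTO_NOTSUPP */

    static int account_sym(struct backend *b, size_t len)
    {
        if (!b->sym_stat) {
            /* Unexpected request type: report and bail out instead of
             * crashing on a NULL stat pointer. */
            fprintf(stderr, "cryptodev: unexpected sym operation\n");
            return -NOTSUPP;
        }
        b->sym_stat->encrypted_bytes += len;
        return (int)len;
    }

    int main(void)
    {
        struct backend b = { .sym_stat = NULL };
        return account_sym(&b, 64) < 0 ? 0 : 1;
    }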


@@ -362,9 +362,13 @@ Aml *aml_pci_device_dsm(void)
{
Aml *params = aml_local(0);
Aml *pkg = aml_package(2);
aml_append(pkg, aml_name("BSEL"));
aml_append(pkg, aml_name("ASUN"));
aml_append(pkg, aml_int(0));
aml_append(pkg, aml_int(0));
aml_append(method, aml_store(pkg, params));
aml_append(method,
aml_store(aml_name("BSEL"), aml_index(params, aml_int(0))));
aml_append(method,
aml_store(aml_name("ASUN"), aml_index(params, aml_int(1))));
aml_append(method,
aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1),
aml_arg(2), aml_arg(3), params))
@@ -1460,7 +1464,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
sb_scope = aml_scope("_SB");
dev = aml_device("PCI0");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
aml_append(dev, aml_pci_edsm());
aml_append(sb_scope, dev);
@@ -1475,7 +1478,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
dev = aml_device("PCI0");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en));
aml_append(dev, aml_pci_edsm());
@@ -1589,7 +1591,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(pkg, aml_eisaid("PNP0A08"));
aml_append(pkg, aml_eisaid("PNP0A03"));
aml_append(dev, aml_name_decl("_CID", pkg));
aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
build_cxl_osc_method(dev);
} else if (pci_bus_is_express(bus)) {
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));


@@ -756,6 +756,8 @@ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
return -VTD_FR_PASID_TABLE_INV;
}
pdire->val = le64_to_cpu(pdire->val);
return 0;
}
@@ -780,6 +782,9 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
pe, entry_size, MEMTXATTRS_UNSPECIFIED)) {
return -VTD_FR_PASID_TABLE_INV;
}
for (size_t i = 0; i < ARRAY_SIZE(pe->val); i++) {
pe->val[i] = le64_to_cpu(pe->val[i]);
}
/* Do translation type check */
if (!vtd_pe_type_check(x86_iommu, pe)) {
@@ -3323,14 +3328,15 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
return -VTD_FR_IR_ROOT_INVAL;
}
trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
le64_to_cpu(entry->data[0]));
entry->data[0] = le64_to_cpu(entry->data[0]);
entry->data[1] = le64_to_cpu(entry->data[1]);
trace_vtd_ir_irte_get(index, entry->data[1], entry->data[0]);
if (!entry->irte.present) {
error_report_once("%s: detected non-present IRTE "
"(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
__func__, index, le64_to_cpu(entry->data[1]),
le64_to_cpu(entry->data[0]));
__func__, index, entry->data[1], entry->data[0]);
return -VTD_FR_IR_ENTRY_P;
}
@@ -3338,14 +3344,13 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
entry->irte.__reserved_2) {
error_report_once("%s: detected non-zero reserved IRTE "
"(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
__func__, index, le64_to_cpu(entry->data[1]),
le64_to_cpu(entry->data[0]));
__func__, index, entry->data[1], entry->data[0]);
return -VTD_FR_IR_IRTE_RSVD;
}
if (sid != X86_IOMMU_SID_INVALID) {
/* Validate IRTE SID */
source_id = le32_to_cpu(entry->irte.source_id);
source_id = entry->irte.source_id;
switch (entry->irte.sid_vtype) {
case VTD_SVT_NONE:
break;
@@ -3399,7 +3404,7 @@ static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
irq->trigger_mode = irte.irte.trigger_mode;
irq->vector = irte.irte.vector;
irq->delivery_mode = irte.irte.delivery_mode;
irq->dest = le32_to_cpu(irte.irte.dest_id);
irq->dest = irte.irte.dest_id;
if (!iommu->intr_eime) {
#define VTD_IR_APIC_DEST_MASK (0xff00ULL)
#define VTD_IR_APIC_DEST_SHIFT (8)
@@ -3454,7 +3459,7 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
goto out;
}
index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
index = addr.addr.index_h << 15 | addr.addr.index_l;
#define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff)
#define VTD_IR_MSI_DATA_RESERVED (0xffff0000)
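
For reference, the remapping-table index is split across the MSI address: index_l holds the low 15 bits and index_h the top bit, so the fixed line recombines them as index_h << 15 | index_l and drops the byte swap, since the extracted bit-field value is already a host-order integer. A tiny worked example (standalone, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical remapping-table index 0x8001: the top bit
         * travels in index_h, the low 15 bits in index_l. */
        uint16_t index_l = 0x0001;
        uint16_t index_h = 1;

        uint16_t index = (uint16_t)(index_h << 15 | index_l);
        printf("index = 0x%04x\n", index); /* prints 0x8001 */
        return 0;
    }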


@@ -321,12 +321,21 @@ typedef enum VTDFaultReason {
/* Interrupt Entry Cache Invalidation Descriptor: VT-d 6.5.2.7. */
struct VTDInvDescIEC {
#if HOST_BIG_ENDIAN
uint64_t reserved_2:16;
uint64_t index:16; /* Start index to invalidate */
uint64_t index_mask:5; /* 2^N for continuous int invalidation */
uint64_t resved_1:22;
uint64_t granularity:1; /* If set, it's global IR invalidation */
uint64_t type:4; /* Should always be 0x4 */
#else
uint32_t type:4; /* Should always be 0x4 */
uint32_t granularity:1; /* If set, it's global IR invalidation */
uint32_t resved_1:22;
uint32_t index_mask:5; /* 2^N for continuous int invalidation */
uint32_t index:16; /* Start index to invalidate */
uint32_t reserved_2:16;
#endif
};
typedef struct VTDInvDescIEC VTDInvDescIEC;
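
The field order is mirrored under HOST_BIG_ENDIAN because C bit-fields are allocated in an implementation-defined order: common little-endian ABIs fill a storage unit starting at the least significant bit, while common big-endian ABIs start at the most significant bit, so only a reversed declaration keeps every field on the same guest-visible bits. A minimal sketch of the little-endian declaration (standalone, not QEMU code; the printed value is host-dependent):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Low 32 bits of the IEC descriptor as declared for little-endian
     * hosts. */
    struct iec_lo {
        uint32_t type:4;        /* should always be 0x4 */
        uint32_t granularity:1; /* global IR invalidation if set */
        uint32_t resved_1:22;
        uint32_t index_mask:5;
    };

    int main(void)
    {
        struct iec_lo d = { .type = 0x4, .granularity = 1 };
        uint32_t raw;

        memcpy(&raw, &d, sizeof(raw));
        /* On x86 this prints 0x14: type lands in bits 0-3 and
         * granularity in bit 4, matching the guest-visible layout.  A
         * typical big-endian ABI would place type in the top nibble
         * instead, hence the mirrored HOST_BIG_ENDIAN branch. */
        printf("raw = 0x%08x\n", raw);
        return 0;
    }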


@@ -63,7 +63,7 @@ void x86_iommu_irq_to_msi_message(X86IOMMUIrq *irq, MSIMessage *msg_out)
msg.redir_hint = irq->redir_hint;
msg.dest = irq->dest;
msg.__addr_hi = irq->dest & 0xffffff00;
msg.__addr_head = cpu_to_le32(0xfee);
msg.__addr_head = 0xfee;
/* Keep this from original MSI address bits */
msg.__not_used = irq->msi_addr_last_bits;


@@ -274,10 +274,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
};
}
*cdat_table = g_malloc0(sizeof(*cdat_table) * CXL_USP_CDAT_NUM_ENTRIES);
if (!*cdat_table) {
return -ENOMEM;
}
*cdat_table = g_new0(CDATSubHeader *, CXL_USP_CDAT_NUM_ENTRIES);
/* Header always at start of structure */
(*cdat_table)[CXL_USP_CDAT_SSLBIS_LAT] = g_steal_pointer(&sslbis_latency);


@@ -62,6 +62,17 @@ static void pci_adjust_config_limit(PCIBus *bus, uint32_t *limit)
}
}
static bool is_pci_dev_ejected(PCIDevice *pci_dev)
{
/*
* device unplug was requested and the guest acked it,
* so we stop responding config accesses even if the
* device is not deleted (failover flow)
*/
return pci_dev && pci_dev->partially_hotplugged &&
!pci_dev->qdev.pending_deleted_event;
}
void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
uint32_t limit, uint32_t val, uint32_t len)
{
@@ -75,7 +86,7 @@ void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
* allowing direct removal of unexposed functions.
*/
if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) ||
!pci_dev->has_power) {
!pci_dev->has_power || is_pci_dev_ejected(pci_dev)) {
return;
}
@@ -100,7 +111,7 @@ uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr,
* allowing direct removal of unexposed functions.
*/
if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) ||
!pci_dev->has_power) {
!pci_dev->has_power || is_pci_dev_ejected(pci_dev)) {
return ~0x0;
}


@@ -63,6 +63,7 @@ static int vu_scmi_start(VirtIODevice *vdev)
error_report("Error starting vhost-user-scmi: %d", ret);
goto err_guest_notifiers;
}
scmi->started_vu = true;
/*
* guest_notifier_mask/pending not used yet, so just unmask
@@ -90,6 +91,12 @@ static void vu_scmi_stop(VirtIODevice *vdev)
struct vhost_dev *vhost_dev = &scmi->vhost_dev;
int ret;
/* vhost_dev_is_started() check in the callers is not fully reliable. */
if (!scmi->started_vu) {
return;
}
scmi->started_vu = false;
if (!k->set_guest_notifiers) {
return;
}
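
The started_vu flag simply makes the stop path idempotent: as the comment notes, the vhost_dev_is_started() check in the callers is not fully reliable, so a second stop request has to become a no-op rather than tearing the backend down twice. The pattern in isolation (hypothetical names, not the QEMU code):

    #include <stdbool.h>
    #include <stdio.h>

    struct scmi_dev {
        bool started_vu; /* set on start, cleared by the first stop */
    };

    static void scmi_stop(struct scmi_dev *d)
    {
        if (!d->started_vu) {
            return; /* duplicate stop: nothing left to undo */
        }
        d->started_vu = false;
        printf("tearing down the vhost backend once\n");
    }

    int main(void)
    {
        struct scmi_dev d = { .started_vu = true };
        scmi_stop(&d);
        scmi_stop(&d); /* second call is a harmless no-op */
        return 0;
    }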


@@ -2044,6 +2044,8 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
event_notifier_test_and_clear(
&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
event_notifier_test_and_clear(&vdev->config_notifier);
event_notifier_cleanup(
&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
trace_vhost_dev_stop(hdev, vdev->name, vrings);


@@ -634,6 +634,11 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
return NULL;
}
if (unlikely(src_len != dst_len)) {
virtio_error(vdev, "sym request src len is different from dst len");
return NULL;
}
max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
if (unlikely(max_len > vcrypto->conf.max_size)) {
virtio_error(vdev, "virtio-crypto too big length");


@@ -728,13 +728,15 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
struct virtio_iommu_req_head head;
struct virtio_iommu_req_tail tail = {};
size_t output_size = sizeof(tail), sz;
VirtQueueElement *elem;
unsigned int iov_cnt;
struct iovec *iov;
void *buf = NULL;
size_t sz;
for (;;) {
size_t output_size = sizeof(tail);
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
return;
@@ -852,17 +854,19 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
VirtIOIOMMUEndpoint *ep;
uint32_t sid, flags;
bool bypass_allowed;
int granule;
bool found;
int i;
interval.low = addr;
interval.high = addr + 1;
granule = ctz64(s->config.page_size_mask);
IOMMUTLBEntry entry = {
.target_as = &address_space_memory,
.iova = addr,
.translated_addr = addr,
.addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1,
.addr_mask = BIT_ULL(granule) - 1,
.perm = IOMMU_NONE,
};
@@ -1115,7 +1119,7 @@ static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
if (s->granule_frozen) {
int cur_granule = ctz64(cur_mask);
if (!(BIT(cur_granule) & new_mask)) {
if (!(BIT_ULL(cur_granule) & new_mask)) {
error_setg(errp, "virtio-iommu %s does not support frozen granule 0x%llx",
mr->parent_obj.name, BIT_ULL(cur_granule));
return -1;
@@ -1161,7 +1165,7 @@ static void virtio_iommu_freeze_granule(Notifier *notifier, void *data)
}
s->granule_frozen = true;
granule = ctz64(s->config.page_size_mask);
trace_virtio_iommu_freeze_granule(BIT(granule));
trace_virtio_iommu_freeze_granule(BIT_ULL(granule));
}
static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
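
The switch to ctz64()/BIT_ULL() matters because the virtio-iommu page_size_mask is 64 bits wide: once the lowest set bit sits at position 32 or above, a 32-bit count or a 32-bit shift no longer produces a valid mask. A standalone sketch with simplified stand-ins for the macros (the 4 GiB granule is purely hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)     (1U << (n))   /* 32-bit: breaks for n >= 32 */
    #define BIT_ULL(n) (1ULL << (n)) /* 64-bit: safe for n up to 63 */

    int main(void)
    {
        /* Hypothetical mask whose smallest supported page is 4 GiB. */
        uint64_t page_size_mask = 1ULL << 32;
        int granule = __builtin_ctzll(page_size_mask); /* GCC/Clang builtin */

        printf("granule = %d\n", granule);
        printf("addr_mask = 0x%llx\n",
               (unsigned long long)(BIT_ULL(granule) - 1));
        /* BIT(granule) - 1 here would shift a 32-bit constant by 32
         * bits, which is undefined behaviour. */
        return 0;
    }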


@@ -79,6 +79,8 @@ static const qmp_virtio_feature_map_t virtio_transport_map[] = {
"VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"),
FEATURE_ENTRY(VIRTIO_F_SR_IOV, \
"VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"),
FEATURE_ENTRY(VIRTIO_F_RING_RESET, \
"VIRTIO_F_RING_RESET: Driver can reset a queue individually"),
/* Virtio ring transport features */
FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \
"VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"),


@@ -3321,7 +3321,7 @@ static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
vq->last_avail_wrap_counter =
vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
idx >>= 16;
vq->used_idx = idx & 0x7ffff;
vq->used_idx = idx & 0x7fff;
vq->used_wrap_counter = !!(idx & 0x8000);
}
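
The mask fix is a one-bit change: the saved 32-bit value packs last_avail_idx into its low half and used_idx into its high half, each with its wrap counter in bit 15, so only 15 bits belong to the index itself. A standalone sketch with a hypothetical saved value shows how the old 0x7ffff mask let the wrap-counter bit leak into used_idx:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical saved state: used_idx = 5 with its wrap counter
         * set (bit 15), last_avail_idx = 3 in the low half. */
        uint32_t idx = ((0x8000u | 5u) << 16) | 0x0003u;
        uint32_t hi = idx >> 16;

        uint16_t old_used = hi & 0x7ffff; /* old mask keeps bit 15 */
        uint16_t new_used = hi & 0x7fff;  /* fixed 15-bit mask */

        printf("old mask: used_idx = 0x%04x\n", old_used); /* 0x8005 */
        printf("new mask: used_idx = 0x%04x, wrap = %d\n",
               new_used, !!(hi & 0x8000));
        return 0;
    }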


@@ -178,37 +178,39 @@ enum {
union VTD_IR_TableEntry {
struct {
#if HOST_BIG_ENDIAN
uint32_t __reserved_1:8; /* Reserved 1 */
uint32_t vector:8; /* Interrupt Vector */
uint32_t irte_mode:1; /* IRTE Mode */
uint32_t __reserved_0:3; /* Reserved 0 */
uint32_t __avail:4; /* Available spaces for software */
uint32_t delivery_mode:3; /* Delivery Mode */
uint32_t trigger_mode:1; /* Trigger Mode */
uint32_t redir_hint:1; /* Redirection Hint */
uint32_t dest_mode:1; /* Destination Mode */
uint32_t fault_disable:1; /* Fault Processing Disable */
uint32_t present:1; /* Whether entry present/available */
uint64_t dest_id:32; /* Destination ID */
uint64_t __reserved_1:8; /* Reserved 1 */
uint64_t vector:8; /* Interrupt Vector */
uint64_t irte_mode:1; /* IRTE Mode */
uint64_t __reserved_0:3; /* Reserved 0 */
uint64_t __avail:4; /* Available spaces for software */
uint64_t delivery_mode:3; /* Delivery Mode */
uint64_t trigger_mode:1; /* Trigger Mode */
uint64_t redir_hint:1; /* Redirection Hint */
uint64_t dest_mode:1; /* Destination Mode */
uint64_t fault_disable:1; /* Fault Processing Disable */
uint64_t present:1; /* Whether entry present/available */
#else
uint32_t present:1; /* Whether entry present/available */
uint32_t fault_disable:1; /* Fault Processing Disable */
uint32_t dest_mode:1; /* Destination Mode */
uint32_t redir_hint:1; /* Redirection Hint */
uint32_t trigger_mode:1; /* Trigger Mode */
uint32_t delivery_mode:3; /* Delivery Mode */
uint32_t __avail:4; /* Available spaces for software */
uint32_t __reserved_0:3; /* Reserved 0 */
uint32_t irte_mode:1; /* IRTE Mode */
uint32_t vector:8; /* Interrupt Vector */
uint32_t __reserved_1:8; /* Reserved 1 */
uint64_t present:1; /* Whether entry present/available */
uint64_t fault_disable:1; /* Fault Processing Disable */
uint64_t dest_mode:1; /* Destination Mode */
uint64_t redir_hint:1; /* Redirection Hint */
uint64_t trigger_mode:1; /* Trigger Mode */
uint64_t delivery_mode:3; /* Delivery Mode */
uint64_t __avail:4; /* Available spaces for software */
uint64_t __reserved_0:3; /* Reserved 0 */
uint64_t irte_mode:1; /* IRTE Mode */
uint64_t vector:8; /* Interrupt Vector */
uint64_t __reserved_1:8; /* Reserved 1 */
uint64_t dest_id:32; /* Destination ID */
#endif
uint32_t dest_id; /* Destination ID */
uint16_t source_id; /* Source-ID */
#if HOST_BIG_ENDIAN
uint64_t __reserved_2:44; /* Reserved 2 */
uint64_t sid_vtype:2; /* Source-ID Validation Type */
uint64_t sid_q:2; /* Source-ID Qualifier */
uint64_t source_id:16; /* Source-ID */
#else
uint64_t source_id:16; /* Source-ID */
uint64_t sid_q:2; /* Source-ID Qualifier */
uint64_t sid_vtype:2; /* Source-ID Validation Type */
uint64_t __reserved_2:44; /* Reserved 2 */


@@ -87,40 +87,42 @@ struct X86IOMMU_MSIMessage {
union {
struct {
#if HOST_BIG_ENDIAN
uint32_t __addr_head:12; /* 0xfee */
uint32_t dest:8;
uint32_t __reserved:8;
uint32_t redir_hint:1;
uint32_t dest_mode:1;
uint32_t __not_used:2;
uint64_t __addr_hi:32;
uint64_t __addr_head:12; /* 0xfee */
uint64_t dest:8;
uint64_t __reserved:8;
uint64_t redir_hint:1;
uint64_t dest_mode:1;
uint64_t __not_used:2;
#else
uint32_t __not_used:2;
uint32_t dest_mode:1;
uint32_t redir_hint:1;
uint32_t __reserved:8;
uint32_t dest:8;
uint32_t __addr_head:12; /* 0xfee */
uint64_t __not_used:2;
uint64_t dest_mode:1;
uint64_t redir_hint:1;
uint64_t __reserved:8;
uint64_t dest:8;
uint64_t __addr_head:12; /* 0xfee */
uint64_t __addr_hi:32;
#endif
uint32_t __addr_hi;
} QEMU_PACKED;
uint64_t msi_addr;
};
union {
struct {
#if HOST_BIG_ENDIAN
uint16_t trigger_mode:1;
uint16_t level:1;
uint16_t __resved:3;
uint16_t delivery_mode:3;
uint16_t vector:8;
uint32_t __resved1:16;
uint32_t trigger_mode:1;
uint32_t level:1;
uint32_t __resved:3;
uint32_t delivery_mode:3;
uint32_t vector:8;
#else
uint16_t vector:8;
uint16_t delivery_mode:3;
uint16_t __resved:3;
uint16_t level:1;
uint16_t trigger_mode:1;
uint32_t vector:8;
uint32_t delivery_mode:3;
uint32_t __resved:3;
uint32_t level:1;
uint32_t trigger_mode:1;
uint32_t __resved1:16;
#endif
uint16_t __resved1;
} QEMU_PACKED;
uint32_t msi_data;
};
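
Widening the bit-fields so that each half of the message lives in a single storage unit, declared separately for each endianness, keeps the guest-visible MSI layout stable on big-endian hosts. As a reminder of that layout (read off the little-endian declaration above), the 0xfee head occupies bits 31:20 of the low address dword and the destination ID bits 19:12. A small standalone check with hypothetical field values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical interrupt: destination APIC ID 0x12, physical
         * destination mode, no redirection hint. */
        uint32_t addr_head = 0xfee, dest = 0x12;
        uint32_t redir_hint = 0, dest_mode = 0;

        uint32_t addr_lo = (addr_head << 20) | (dest << 12) |
                           (redir_hint << 3) | (dest_mode << 2);

        printf("MSI address low dword = 0x%08x\n", addr_lo); /* 0xfee12000 */
        return 0;
    }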


@@ -25,6 +25,7 @@ struct VHostUserSCMI {
VirtQueue *cmd_vq;
VirtQueue *event_vq;
bool connected;
bool started_vu;
};
#endif /* _QEMU_VHOST_USER_SCMI_H */

(Binary files not shown: the remaining changed files are ACPI expected-blob test data updated by the commits above.)