mirror of https://github.com/xemu-project/xemu.git
pc,pci,virtio: cleanups, fixes, features
vhost-user-gpu: edid vhost-user-scmi device vhost-vdpa: _F_CTRL_RX and _F_CTRL_RX_EXTRA support for svq cleanups, fixes all over the place. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> -----BEGIN PGP SIGNATURE----- iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmSsjYMPHG1zdEByZWRo YXQuY29tAAoJECgfDbjSjVRp2vYH/20u6TAMssE/UAJoUU0ypbJkbHjDqiqDeuZN qDYazLUWIJTUbDnSfXAiRcdJuukEpEFcoHa9O6vgFE/SNod51IrvsJR9CbZxNmk6 D+Px9dkMckDE/yb8f6hhcHsi7/1v04I0oSXmJTVYxWSKQhD4Km6x8Larqsh0u4yd n6laZ+VK5H8sk6QvI5vMz+lYavACQVryiWV/GAigP21B0eQK79I5/N6y0q8/axD5 cpeTzUF+m33SfLfyd7PPmibCQFYrHDwosynSnr3qnKusPRJt2FzWkzOiZgbtgE2L UQ/S4sYTBy8dZJMc0wTywbs1bSwzNrkQ+uS0v74z9wCUYTgvQTA= =RsOh -----END PGP SIGNATURE----- Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging pc,pci,virtio: cleanups, fixes, features vhost-user-gpu: edid vhost-user-scmi device vhost-vdpa: _F_CTRL_RX and _F_CTRL_RX_EXTRA support for svq cleanups, fixes all over the place. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> # -----BEGIN PGP SIGNATURE----- # # iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmSsjYMPHG1zdEByZWRo # YXQuY29tAAoJECgfDbjSjVRp2vYH/20u6TAMssE/UAJoUU0ypbJkbHjDqiqDeuZN # qDYazLUWIJTUbDnSfXAiRcdJuukEpEFcoHa9O6vgFE/SNod51IrvsJR9CbZxNmk6 # D+Px9dkMckDE/yb8f6hhcHsi7/1v04I0oSXmJTVYxWSKQhD4Km6x8Larqsh0u4yd # n6laZ+VK5H8sk6QvI5vMz+lYavACQVryiWV/GAigP21B0eQK79I5/N6y0q8/axD5 # cpeTzUF+m33SfLfyd7PPmibCQFYrHDwosynSnr3qnKusPRJt2FzWkzOiZgbtgE2L # UQ/S4sYTBy8dZJMc0wTywbs1bSwzNrkQ+uS0v74z9wCUYTgvQTA= # =RsOh # -----END PGP SIGNATURE----- # gpg: Signature made Tue 11 Jul 2023 12:00:19 AM BST # gpg: using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469 # gpg: issuer "mst@redhat.com" # gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined] # gpg: aka "Michael S. Tsirkin <mst@redhat.com>" [undefined] # gpg: WARNING: This key is not certified with a trusted signature! # gpg: There is no indication that the signature belongs to the owner. 
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67 # Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469 * tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (66 commits) vdpa: Allow VIRTIO_NET_F_CTRL_RX_EXTRA in SVQ vdpa: Restore packet receive filtering state relative with _F_CTRL_RX_EXTRA feature vdpa: Allow VIRTIO_NET_F_CTRL_RX in SVQ vdpa: Avoid forwarding large CVQ command failures vdpa: Accessing CVQ header through its structure vhost: Fix false positive out-of-bounds vdpa: Restore packet receive filtering state relative with _F_CTRL_RX feature vdpa: Restore MAC address filtering state vdpa: Use iovec for vhost_vdpa_net_load_cmd() pcie: Specify 0 for ARI next function numbers pcie: Use common ARI next function number include/hw/virtio: document some more usage of notifiers include/hw/virtio: add kerneldoc for virtio_init include/hw/virtio: document virtio_notify_config hw/virtio: fix typo in VIRTIO_CONFIG_IRQ_IDX comments include/hw: document the device_class_set_parent_* fns include: attempt to document device_class_set_props vdpa: Fix possible use-after-free for VirtQueueElement pcie: Add hotplug detect state register to cmask virtio-iommu: Rework the traces in virtio_iommu_set_page_size_mask() ... Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
commit
2bb9d628a7
|
@ -2215,6 +2215,13 @@ F: hw/virtio/vhost-user-gpio*
|
|||
F: include/hw/virtio/vhost-user-gpio.h
|
||||
F: tests/qtest/libqos/virtio-gpio.*
|
||||
|
||||
vhost-user-scmi
|
||||
R: mzamazal@redhat.com
|
||||
S: Supported
|
||||
F: hw/virtio/vhost-user-scmi*
|
||||
F: include/hw/virtio/vhost-user-scmi.h
|
||||
F: tests/qtest/libqos/virtio-scmi.*
|
||||
|
||||
virtio-crypto
|
||||
M: Gonglei <arei.gonglei@huawei.com>
|
||||
S: Supported
|
||||
|
|
|
@ -303,6 +303,53 @@ vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
|
|||
cmd->state = VG_CMD_STATE_PENDING;
|
||||
}
|
||||
|
||||
static gboolean
|
||||
get_edid_cb(gint fd, GIOCondition condition, gpointer user_data)
|
||||
{
|
||||
struct virtio_gpu_resp_edid resp_edid;
|
||||
VuGpu *vg = user_data;
|
||||
struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);
|
||||
|
||||
g_debug("get edid cb");
|
||||
assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_EDID);
|
||||
if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_EDID,
|
||||
sizeof(resp_edid), &resp_edid)) {
|
||||
return G_SOURCE_CONTINUE;
|
||||
}
|
||||
|
||||
QTAILQ_REMOVE(&vg->fenceq, cmd, next);
|
||||
vg_ctrl_response(vg, cmd, &resp_edid.hdr, sizeof(resp_edid));
|
||||
|
||||
vg->wait_in = 0;
|
||||
vg_handle_ctrl(&vg->dev.parent, 0);
|
||||
|
||||
return G_SOURCE_REMOVE;
|
||||
}
|
||||
|
||||
void
|
||||
vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_cmd_get_edid get_edid;
|
||||
|
||||
VUGPU_FILL_CMD(get_edid);
|
||||
virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
|
||||
|
||||
VhostUserGpuMsg msg = {
|
||||
.request = VHOST_USER_GPU_GET_EDID,
|
||||
.size = sizeof(VhostUserGpuEdidRequest),
|
||||
.payload.edid_req = {
|
||||
.scanout_id = get_edid.scanout,
|
||||
},
|
||||
};
|
||||
|
||||
assert(vg->wait_in == 0);
|
||||
|
||||
vg_send_msg(vg, &msg, -1);
|
||||
vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
|
||||
get_edid_cb, vg);
|
||||
cmd->state = VG_CMD_STATE_PENDING;
|
||||
}
|
||||
|
||||
static void
|
||||
vg_resource_create_2d(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
|
@ -837,8 +884,9 @@ vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
|
|||
case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
|
||||
vg_resource_detach_backing(vg, cmd);
|
||||
break;
|
||||
/* case VIRTIO_GPU_CMD_GET_EDID: */
|
||||
/* break */
|
||||
case VIRTIO_GPU_CMD_GET_EDID:
|
||||
vg_get_edid(vg, cmd);
|
||||
break;
|
||||
default:
|
||||
g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
|
||||
|
@ -1022,26 +1070,36 @@ vg_queue_set_started(VuDev *dev, int qidx, bool started)
|
|||
static gboolean
|
||||
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
|
||||
{
|
||||
const uint64_t protocol_edid = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID);
|
||||
VuGpu *g = user_data;
|
||||
uint64_t u64;
|
||||
uint64_t protocol_features;
|
||||
VhostUserGpuMsg msg = {
|
||||
.request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
|
||||
};
|
||||
|
||||
if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
|
||||
if (!vg_recv_msg(g, msg.request,
|
||||
sizeof(protocol_features), &protocol_features)) {
|
||||
return G_SOURCE_CONTINUE;
|
||||
}
|
||||
|
||||
protocol_features &= protocol_edid;
|
||||
|
||||
msg = (VhostUserGpuMsg) {
|
||||
.request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
|
||||
.size = sizeof(uint64_t),
|
||||
.payload.u64 = 0
|
||||
.payload.u64 = protocol_features,
|
||||
};
|
||||
vg_send_msg(g, &msg, -1);
|
||||
|
||||
g->wait_in = 0;
|
||||
vg_handle_ctrl(&g->dev.parent, 0);
|
||||
|
||||
if (g->edid_inited && !(protocol_features & protocol_edid)) {
|
||||
g_printerr("EDID feature set by the frontend but it does not support "
|
||||
"the EDID vhost-user-gpu protocol.\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
return G_SOURCE_REMOVE;
|
||||
}
|
||||
|
||||
|
@ -1049,7 +1107,7 @@ static void
|
|||
set_gpu_protocol_features(VuGpu *g)
|
||||
{
|
||||
VhostUserGpuMsg msg = {
|
||||
.request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
|
||||
.request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
|
||||
};
|
||||
|
||||
vg_send_msg(g, &msg, -1);
|
||||
|
@ -1086,6 +1144,7 @@ vg_get_features(VuDev *dev)
|
|||
if (opt_virgl) {
|
||||
features |= 1 << VIRTIO_GPU_F_VIRGL;
|
||||
}
|
||||
features |= 1 << VIRTIO_GPU_F_EDID;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
@ -1103,6 +1162,8 @@ vg_set_features(VuDev *dev, uint64_t features)
|
|||
g->virgl_inited = true;
|
||||
}
|
||||
|
||||
g->edid_inited = !!(features & (1 << VIRTIO_GPU_F_EDID));
|
||||
|
||||
g->virgl = virgl;
|
||||
}
|
||||
|
||||
|
|
|
@ -495,6 +495,9 @@ void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
|
|||
case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
|
||||
vg_get_display_info(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_GET_EDID:
|
||||
vg_get_edid(g, cmd);
|
||||
break;
|
||||
default:
|
||||
g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
|
||||
|
|
|
@ -36,6 +36,7 @@ typedef enum VhostUserGpuRequest {
|
|||
VHOST_USER_GPU_UPDATE,
|
||||
VHOST_USER_GPU_DMABUF_SCANOUT,
|
||||
VHOST_USER_GPU_DMABUF_UPDATE,
|
||||
VHOST_USER_GPU_GET_EDID,
|
||||
} VhostUserGpuRequest;
|
||||
|
||||
typedef struct VhostUserGpuDisplayInfoReply {
|
||||
|
@ -83,6 +84,10 @@ typedef struct VhostUserGpuDMABUFScanout {
|
|||
int fd_drm_fourcc;
|
||||
} QEMU_PACKED VhostUserGpuDMABUFScanout;
|
||||
|
||||
typedef struct VhostUserGpuEdidRequest {
|
||||
uint32_t scanout_id;
|
||||
} QEMU_PACKED VhostUserGpuEdidRequest;
|
||||
|
||||
typedef struct VhostUserGpuMsg {
|
||||
uint32_t request; /* VhostUserGpuRequest */
|
||||
uint32_t flags;
|
||||
|
@ -93,6 +98,8 @@ typedef struct VhostUserGpuMsg {
|
|||
VhostUserGpuScanout scanout;
|
||||
VhostUserGpuUpdate update;
|
||||
VhostUserGpuDMABUFScanout dmabuf_scanout;
|
||||
VhostUserGpuEdidRequest edid_req;
|
||||
struct virtio_gpu_resp_edid resp_edid;
|
||||
struct virtio_gpu_resp_display_info display_info;
|
||||
uint64_t u64;
|
||||
} payload;
|
||||
|
@ -104,6 +111,8 @@ static VhostUserGpuMsg m __attribute__ ((unused));
|
|||
|
||||
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
|
||||
|
||||
#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
|
||||
|
||||
struct virtio_gpu_scanout {
|
||||
uint32_t width, height;
|
||||
int x, y;
|
||||
|
@ -122,6 +131,7 @@ typedef struct VuGpu {
|
|||
|
||||
bool virgl;
|
||||
bool virgl_inited;
|
||||
bool edid_inited;
|
||||
uint32_t inflight;
|
||||
|
||||
struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
|
||||
|
@ -171,6 +181,7 @@ int vg_create_mapping_iov(VuGpu *g,
|
|||
struct iovec **iov);
|
||||
void vg_cleanup_mapping_iov(VuGpu *g, struct iovec *iov, uint32_t count);
|
||||
void vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
|
||||
void vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
|
||||
|
||||
void vg_wait_ok(VuGpu *g);
|
||||
|
||||
|
|
|
@ -124,6 +124,16 @@ VhostUserGpuDMABUFScanout
|
|||
:fourcc: ``i32``, the DMABUF fourcc
|
||||
|
||||
|
||||
VhostUserGpuEdidRequest
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
+------------+
|
||||
| scanout-id |
|
||||
+------------+
|
||||
|
||||
:scanout-id: ``u32``, the scanout to get edid from
|
||||
|
||||
|
||||
C structure
|
||||
-----------
|
||||
|
||||
|
@ -141,6 +151,8 @@ In QEMU the vhost-user-gpu message is implemented with the following struct:
|
|||
VhostUserGpuScanout scanout;
|
||||
VhostUserGpuUpdate update;
|
||||
VhostUserGpuDMABUFScanout dmabuf_scanout;
|
||||
VhostUserGpuEdidRequest edid_req;
|
||||
struct virtio_gpu_resp_edid resp_edid;
|
||||
struct virtio_gpu_resp_display_info display_info;
|
||||
uint64_t u64;
|
||||
} payload;
|
||||
|
@ -149,10 +161,11 @@ In QEMU the vhost-user-gpu message is implemented with the following struct:
|
|||
Protocol features
|
||||
-----------------
|
||||
|
||||
None yet.
|
||||
.. code:: c
|
||||
|
||||
As the protocol may need to evolve, new messages and communication
|
||||
changes are negotiated thanks to preliminary
|
||||
#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
|
||||
|
||||
New messages and communication changes are negotiated thanks to the
|
||||
``VHOST_USER_GPU_GET_PROTOCOL_FEATURES`` and
|
||||
``VHOST_USER_GPU_SET_PROTOCOL_FEATURES`` requests.
|
||||
|
||||
|
@ -241,3 +254,12 @@ Message types
|
|||
Note: there is no data payload, since the scanout is shared thanks
|
||||
to DMABUF, that must have been set previously with
|
||||
``VHOST_USER_GPU_DMABUF_SCANOUT``.
|
||||
|
||||
``VHOST_USER_GPU_GET_EDID``
|
||||
:id: 11
|
||||
:request payload: ``struct VhostUserGpuEdidRequest``
|
||||
:reply payload: ``struct virtio_gpu_resp_edid`` (from virtio specification)
|
||||
|
||||
Retrieve the EDID data for a given scanout.
|
||||
This message requires the ``VHOST_USER_GPU_PROTOCOL_F_EDID`` protocol
|
||||
feature to be supported.
|
||||
|
|
|
@ -48,7 +48,7 @@ setting up a BAR for a VF.
|
|||
...
|
||||
int ret = pcie_endpoint_cap_init(d, 0x70);
|
||||
...
|
||||
pcie_ari_init(d, 0x100, 1);
|
||||
pcie_ari_init(d, 0x100);
|
||||
...
|
||||
|
||||
/* Add and initialize the SR/IOV capability */
|
||||
|
@ -78,7 +78,7 @@ setting up a BAR for a VF.
|
|||
...
|
||||
int ret = pcie_endpoint_cap_init(d, 0x60);
|
||||
...
|
||||
pcie_ari_init(d, 0x100, 1);
|
||||
pcie_ari_init(d, 0x100);
|
||||
...
|
||||
memory_region_init(mr, ... )
|
||||
pcie_sriov_vf_register_bar(d, bar_nr, mr);
|
||||
|
|
|
@ -197,3 +197,13 @@ void machine_parse_smp_config(MachineState *ms,
|
|||
return;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned int machine_topo_get_cores_per_socket(const MachineState *ms)
|
||||
{
|
||||
return ms->smp.cores * ms->smp.clusters * ms->smp.dies;
|
||||
}
|
||||
|
||||
unsigned int machine_topo_get_threads_per_socket(const MachineState *ms)
|
||||
{
|
||||
return ms->smp.threads * machine_topo_get_cores_per_socket(ms);
|
||||
}
|
||||
|
|
|
@ -41,6 +41,7 @@
|
|||
|
||||
GlobalProperty hw_compat_8_0[] = {
|
||||
{ "migration", "multifd-flush-after-each-section", "on"},
|
||||
{ TYPE_PCI_DEVICE, "x-pcie-ari-nextfn-1", "on" },
|
||||
};
|
||||
const size_t hw_compat_8_0_len = G_N_ELEMENTS(hw_compat_8_0);
|
||||
|
||||
|
|
|
@ -31,6 +31,7 @@ typedef enum VhostUserGpuRequest {
|
|||
VHOST_USER_GPU_UPDATE,
|
||||
VHOST_USER_GPU_DMABUF_SCANOUT,
|
||||
VHOST_USER_GPU_DMABUF_UPDATE,
|
||||
VHOST_USER_GPU_GET_EDID,
|
||||
} VhostUserGpuRequest;
|
||||
|
||||
typedef struct VhostUserGpuDisplayInfoReply {
|
||||
|
@ -78,6 +79,10 @@ typedef struct VhostUserGpuDMABUFScanout {
|
|||
int fd_drm_fourcc;
|
||||
} QEMU_PACKED VhostUserGpuDMABUFScanout;
|
||||
|
||||
typedef struct VhostUserGpuEdidRequest {
|
||||
uint32_t scanout_id;
|
||||
} QEMU_PACKED VhostUserGpuEdidRequest;
|
||||
|
||||
typedef struct VhostUserGpuMsg {
|
||||
uint32_t request; /* VhostUserGpuRequest */
|
||||
uint32_t flags;
|
||||
|
@ -88,6 +93,8 @@ typedef struct VhostUserGpuMsg {
|
|||
VhostUserGpuScanout scanout;
|
||||
VhostUserGpuUpdate update;
|
||||
VhostUserGpuDMABUFScanout dmabuf_scanout;
|
||||
VhostUserGpuEdidRequest edid_req;
|
||||
struct virtio_gpu_resp_edid resp_edid;
|
||||
struct virtio_gpu_resp_display_info display_info;
|
||||
uint64_t u64;
|
||||
} payload;
|
||||
|
@ -99,6 +106,8 @@ static VhostUserGpuMsg m __attribute__ ((unused));
|
|||
|
||||
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
|
||||
|
||||
#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
|
||||
|
||||
static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
|
||||
|
||||
static void
|
||||
|
@ -161,6 +170,9 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
|
|||
.request = msg->request,
|
||||
.flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
|
||||
.size = sizeof(uint64_t),
|
||||
.payload = {
|
||||
.u64 = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID)
|
||||
}
|
||||
};
|
||||
|
||||
vhost_user_gpu_send_msg(g, &reply);
|
||||
|
@ -184,6 +196,26 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
|
|||
vhost_user_gpu_send_msg(g, &reply);
|
||||
break;
|
||||
}
|
||||
case VHOST_USER_GPU_GET_EDID: {
|
||||
VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
|
||||
struct virtio_gpu_resp_edid resp = { {} };
|
||||
VhostUserGpuMsg reply = {
|
||||
.request = msg->request,
|
||||
.flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
|
||||
.size = sizeof(reply.payload.resp_edid),
|
||||
};
|
||||
|
||||
if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
|
||||
error_report("invalid scanout: %d", m->scanout_id);
|
||||
break;
|
||||
}
|
||||
|
||||
resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
|
||||
virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
|
||||
memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
|
||||
vhost_user_gpu_send_msg(g, &reply);
|
||||
break;
|
||||
}
|
||||
case VHOST_USER_GPU_SCANOUT: {
|
||||
VhostUserGpuScanout *m = &msg->payload.scanout;
|
||||
|
||||
|
@ -489,7 +521,7 @@ vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
@ -506,7 +538,7 @@ vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "migration/blocker.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "hw/display/edid.h"
|
||||
#include "trace.h"
|
||||
|
||||
void
|
||||
|
@ -51,6 +52,22 @@ virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
|
|||
}
|
||||
}
|
||||
|
||||
void
|
||||
virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
|
||||
struct virtio_gpu_resp_edid *edid)
|
||||
{
|
||||
qemu_edid_info info = {
|
||||
.width_mm = g->req_state[scanout].width_mm,
|
||||
.height_mm = g->req_state[scanout].height_mm,
|
||||
.prefx = g->req_state[scanout].width,
|
||||
.prefy = g->req_state[scanout].height,
|
||||
.refresh_rate = g->req_state[scanout].refresh_rate,
|
||||
};
|
||||
|
||||
edid->size = cpu_to_le32(sizeof(edid->edid));
|
||||
qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
|
||||
}
|
||||
|
||||
static void virtio_gpu_invalidate_display(void *opaque)
|
||||
{
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include "hw/virtio/virtio-gpu-bswap.h"
|
||||
#include "hw/virtio/virtio-gpu-pixman.h"
|
||||
#include "hw/virtio/virtio-bus.h"
|
||||
#include "hw/display/edid.h"
|
||||
#include "hw/qdev-properties.h"
|
||||
#include "qemu/log.h"
|
||||
#include "qemu/module.h"
|
||||
|
@ -207,23 +206,6 @@ void virtio_gpu_get_display_info(VirtIOGPU *g,
|
|||
sizeof(display_info));
|
||||
}
|
||||
|
||||
static void
|
||||
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
|
||||
struct virtio_gpu_resp_edid *edid)
|
||||
{
|
||||
VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
|
||||
qemu_edid_info info = {
|
||||
.width_mm = b->req_state[scanout].width_mm,
|
||||
.height_mm = b->req_state[scanout].height_mm,
|
||||
.prefx = b->req_state[scanout].width,
|
||||
.prefy = b->req_state[scanout].height,
|
||||
.refresh_rate = b->req_state[scanout].refresh_rate,
|
||||
};
|
||||
|
||||
edid->size = cpu_to_le32(sizeof(edid->edid));
|
||||
qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
|
||||
}
|
||||
|
||||
void virtio_gpu_get_edid(VirtIOGPU *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
|
@ -242,7 +224,7 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
|
|||
trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
|
||||
memset(&edid, 0, sizeof(edid));
|
||||
edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
|
||||
virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
|
||||
virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
|
||||
virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
|
||||
}
|
||||
|
||||
|
|
|
@ -114,7 +114,7 @@ static void pc_init1(MachineState *machine,
|
|||
X86MachineState *x86ms = X86_MACHINE(machine);
|
||||
MemoryRegion *system_memory = get_system_memory();
|
||||
MemoryRegion *system_io = get_system_io();
|
||||
PCIBus *pci_bus;
|
||||
PCIBus *pci_bus = NULL;
|
||||
ISABus *isa_bus;
|
||||
int piix3_devfn = -1;
|
||||
qemu_irq smi_irq;
|
||||
|
@ -122,11 +122,10 @@ static void pc_init1(MachineState *machine,
|
|||
BusState *idebus[MAX_IDE_BUS];
|
||||
ISADevice *rtc_state;
|
||||
MemoryRegion *ram_memory;
|
||||
MemoryRegion *pci_memory;
|
||||
MemoryRegion *rom_memory;
|
||||
MemoryRegion *pci_memory = NULL;
|
||||
MemoryRegion *rom_memory = system_memory;
|
||||
ram_addr_t lowmem;
|
||||
uint64_t hole64_size;
|
||||
DeviceState *i440fx_host;
|
||||
uint64_t hole64_size = 0;
|
||||
|
||||
/*
|
||||
* Calculate ram split, for memory below and above 4G. It's a bit
|
||||
|
@ -198,18 +197,39 @@ static void pc_init1(MachineState *machine,
|
|||
}
|
||||
|
||||
if (pcmc->pci_enabled) {
|
||||
Object *phb;
|
||||
|
||||
pci_memory = g_new(MemoryRegion, 1);
|
||||
memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
|
||||
rom_memory = pci_memory;
|
||||
i440fx_host = qdev_new(host_type);
|
||||
hole64_size = object_property_get_uint(OBJECT(i440fx_host),
|
||||
|
||||
phb = OBJECT(qdev_new(host_type));
|
||||
object_property_add_child(OBJECT(machine), "i440fx", phb);
|
||||
object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM,
|
||||
OBJECT(ram_memory), &error_fatal);
|
||||
object_property_set_link(phb, PCI_HOST_PROP_PCI_MEM,
|
||||
OBJECT(pci_memory), &error_fatal);
|
||||
object_property_set_link(phb, PCI_HOST_PROP_SYSTEM_MEM,
|
||||
OBJECT(system_memory), &error_fatal);
|
||||
object_property_set_link(phb, PCI_HOST_PROP_IO_MEM,
|
||||
OBJECT(system_io), &error_fatal);
|
||||
object_property_set_uint(phb, PCI_HOST_BELOW_4G_MEM_SIZE,
|
||||
x86ms->below_4g_mem_size, &error_fatal);
|
||||
object_property_set_uint(phb, PCI_HOST_ABOVE_4G_MEM_SIZE,
|
||||
x86ms->above_4g_mem_size, &error_fatal);
|
||||
object_property_set_str(phb, I440FX_HOST_PROP_PCI_TYPE, pci_type,
|
||||
&error_fatal);
|
||||
sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal);
|
||||
|
||||
pci_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pci.0"));
|
||||
pci_bus_map_irqs(pci_bus,
|
||||
xen_enabled() ? xen_pci_slot_get_pirq
|
||||
: pc_pci_slot_get_pirq);
|
||||
pcms->bus = pci_bus;
|
||||
|
||||
hole64_size = object_property_get_uint(phb,
|
||||
PCI_HOST_PROP_PCI_HOLE64_SIZE,
|
||||
&error_abort);
|
||||
} else {
|
||||
pci_memory = NULL;
|
||||
rom_memory = system_memory;
|
||||
i440fx_host = NULL;
|
||||
hole64_size = 0;
|
||||
}
|
||||
|
||||
pc_guest_info_init(pcms);
|
||||
|
@ -227,6 +247,9 @@ static void pc_init1(MachineState *machine,
|
|||
if (!xen_enabled()) {
|
||||
pc_memory_init(pcms, system_memory, rom_memory, hole64_size);
|
||||
} else {
|
||||
assert(machine->ram_size == x86ms->below_4g_mem_size +
|
||||
x86ms->above_4g_mem_size);
|
||||
|
||||
pc_system_flash_cleanup_unused(pcms);
|
||||
if (machine->kernel_filename != NULL) {
|
||||
/* For xen HVM direct kernel boot, load linux here */
|
||||
|
@ -240,19 +263,7 @@ static void pc_init1(MachineState *machine,
|
|||
PIIX3State *piix3;
|
||||
PCIDevice *pci_dev;
|
||||
|
||||
pci_bus = i440fx_init(pci_type,
|
||||
i440fx_host,
|
||||
system_memory, system_io, machine->ram_size,
|
||||
x86ms->below_4g_mem_size,
|
||||
x86ms->above_4g_mem_size,
|
||||
pci_memory, ram_memory);
|
||||
pci_bus_map_irqs(pci_bus,
|
||||
xen_enabled() ? xen_pci_slot_get_pirq
|
||||
: pc_pci_slot_get_pirq);
|
||||
pcms->bus = pci_bus;
|
||||
|
||||
pci_dev = pci_create_simple_multifunction(pci_bus, -1, true,
|
||||
TYPE_PIIX3_DEVICE);
|
||||
pci_dev = pci_create_simple_multifunction(pci_bus, -1, TYPE_PIIX3_DEVICE);
|
||||
|
||||
if (xen_enabled()) {
|
||||
pci_device_set_intx_routing_notifier(
|
||||
|
@ -275,7 +286,6 @@ static void pc_init1(MachineState *machine,
|
|||
rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev),
|
||||
"rtc"));
|
||||
} else {
|
||||
pci_bus = NULL;
|
||||
isa_bus = isa_bus_new(NULL, system_memory, system_io,
|
||||
&error_abort);
|
||||
|
||||
|
|
|
@ -100,12 +100,12 @@ static int ehci_create_ich9_with_companions(PCIBus *bus, int slot)
|
|||
return -1;
|
||||
}
|
||||
|
||||
ehci = pci_new_multifunction(PCI_DEVFN(slot, 7), true, name);
|
||||
ehci = pci_new_multifunction(PCI_DEVFN(slot, 7), name);
|
||||
pci_realize_and_unref(ehci, bus, &error_fatal);
|
||||
usbbus = QLIST_FIRST(&ehci->qdev.child_bus);
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
uhci = pci_new_multifunction(PCI_DEVFN(slot, comp[i].func), true,
|
||||
uhci = pci_new_multifunction(PCI_DEVFN(slot, comp[i].func),
|
||||
comp[i].name);
|
||||
qdev_prop_set_string(&uhci->qdev, "masterbus", usbbus->name);
|
||||
qdev_prop_set_uint32(&uhci->qdev, "firstport", comp[i].port);
|
||||
|
@ -120,8 +120,7 @@ static void pc_q35_init(MachineState *machine)
|
|||
PCMachineState *pcms = PC_MACHINE(machine);
|
||||
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
|
||||
X86MachineState *x86ms = X86_MACHINE(machine);
|
||||
Q35PCIHost *q35_host;
|
||||
PCIHostState *phb;
|
||||
Object *phb;
|
||||
PCIBus *host_bus;
|
||||
PCIDevice *lpc;
|
||||
DeviceState *lpc_dev;
|
||||
|
@ -207,10 +206,10 @@ static void pc_q35_init(MachineState *machine)
|
|||
}
|
||||
|
||||
/* create pci host bus */
|
||||
q35_host = Q35_HOST_DEVICE(qdev_new(TYPE_Q35_HOST_DEVICE));
|
||||
phb = OBJECT(qdev_new(TYPE_Q35_HOST_DEVICE));
|
||||
|
||||
if (pcmc->pci_enabled) {
|
||||
pci_hole64_size = object_property_get_uint(OBJECT(q35_host),
|
||||
pci_hole64_size = object_property_get_uint(phb,
|
||||
PCI_HOST_PROP_PCI_HOLE64_SIZE,
|
||||
&error_abort);
|
||||
}
|
||||
|
@ -218,25 +217,29 @@ static void pc_q35_init(MachineState *machine)
|
|||
/* allocate ram and load rom/bios */
|
||||
pc_memory_init(pcms, system_memory, rom_memory, pci_hole64_size);
|
||||
|
||||
object_property_add_child(OBJECT(machine), "q35", OBJECT(q35_host));
|
||||
object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_RAM_MEM,
|
||||
object_property_add_child(OBJECT(machine), "q35", phb);
|
||||
object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM,
|
||||
OBJECT(machine->ram), NULL);
|
||||
object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_PCI_MEM,
|
||||
object_property_set_link(phb, PCI_HOST_PROP_PCI_MEM,
|
||||
OBJECT(pci_memory), NULL);
|
||||
object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_SYSTEM_MEM,
|
||||
object_property_set_link(phb, PCI_HOST_PROP_SYSTEM_MEM,
|
||||
OBJECT(system_memory), NULL);
|
||||
object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_IO_MEM,
|
||||
object_property_set_link(phb, PCI_HOST_PROP_IO_MEM,
|
||||
OBJECT(system_io), NULL);
|
||||
object_property_set_int(OBJECT(q35_host), PCI_HOST_BELOW_4G_MEM_SIZE,
|
||||
object_property_set_int(phb, PCI_HOST_BELOW_4G_MEM_SIZE,
|
||||
x86ms->below_4g_mem_size, NULL);
|
||||
object_property_set_int(OBJECT(q35_host), PCI_HOST_ABOVE_4G_MEM_SIZE,
|
||||
object_property_set_int(phb, PCI_HOST_ABOVE_4G_MEM_SIZE,
|
||||
x86ms->above_4g_mem_size, NULL);
|
||||
object_property_set_bool(phb, PCI_HOST_BYPASS_IOMMU,
|
||||
pcms->default_bus_bypass_iommu, NULL);
|
||||
sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal);
|
||||
|
||||
/* pci */
|
||||
sysbus_realize_and_unref(SYS_BUS_DEVICE(q35_host), &error_fatal);
|
||||
phb = PCI_HOST_BRIDGE(q35_host);
|
||||
host_bus = phb->bus;
|
||||
host_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pcie.0"));
|
||||
pcms->bus = host_bus;
|
||||
|
||||
/* create ISA bus */
|
||||
lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), true,
|
||||
lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC),
|
||||
TYPE_ICH9_LPC_DEVICE);
|
||||
qdev_prop_set_bit(DEVICE(lpc), "smm-enabled",
|
||||
x86_machine_is_smm_enabled(x86ms));
|
||||
|
@ -301,7 +304,7 @@ static void pc_q35_init(MachineState *machine)
|
|||
ahci = pci_create_simple_multifunction(host_bus,
|
||||
PCI_DEVFN(ICH9_SATA1_DEV,
|
||||
ICH9_SATA1_FUNC),
|
||||
true, "ich9-ahci");
|
||||
"ich9-ahci");
|
||||
idebus[0] = qdev_get_child_bus(&ahci->qdev, "ide.0");
|
||||
idebus[1] = qdev_get_child_bus(&ahci->qdev, "ide.1");
|
||||
g_assert(MAX_SATA_PORTS == ahci_get_num_ports(ahci));
|
||||
|
@ -323,7 +326,7 @@ static void pc_q35_init(MachineState *machine)
|
|||
smb = pci_create_simple_multifunction(host_bus,
|
||||
PCI_DEVFN(ICH9_SMB_DEV,
|
||||
ICH9_SMB_FUNC),
|
||||
true, TYPE_ICH9_SMB_DEVICE);
|
||||
TYPE_ICH9_SMB_DEVICE);
|
||||
pcms->smbus = I2C_BUS(qdev_get_child_bus(DEVICE(smb), "i2c"));
|
||||
|
||||
smbus_eeprom_init(pcms->smbus, 8, NULL, 0);
|
||||
|
|
|
@ -770,8 +770,7 @@ static void boston_mach_init(MachineState *machine)
|
|||
boston_lcd_event, NULL, s, NULL, true);
|
||||
|
||||
ahci = pci_create_simple_multifunction(&PCI_BRIDGE(&pcie2->root)->sec_bus,
|
||||
PCI_DEVFN(0, 0),
|
||||
true, TYPE_ICH9_AHCI);
|
||||
PCI_DEVFN(0, 0), TYPE_ICH9_AHCI);
|
||||
g_assert(ARRAY_SIZE(hd) == ahci_get_num_ports(ahci));
|
||||
ide_drive_get(hd, ahci_get_num_ports(ahci));
|
||||
ahci_ide_create_devs(ahci, hd);
|
||||
|
|
|
@ -297,7 +297,7 @@ static void mips_fuloong2e_init(MachineState *machine)
|
|||
/* South bridge -> IP5 */
|
||||
pci_dev = pci_create_simple_multifunction(pci_bus,
|
||||
PCI_DEVFN(FULOONG2E_VIA_SLOT, 0),
|
||||
true, TYPE_VT82C686B_ISA);
|
||||
TYPE_VT82C686B_ISA);
|
||||
object_property_add_alias(OBJECT(machine), "rtc-time",
|
||||
object_resolve_path_component(OBJECT(pci_dev),
|
||||
"rtc"),
|
||||
|
|
|
@ -1251,7 +1251,7 @@ void mips_malta_init(MachineState *machine)
|
|||
pci_bus_map_irqs(pci_bus, malta_pci_slot_get_pirq);
|
||||
|
||||
/* Southbridge */
|
||||
piix4 = pci_create_simple_multifunction(pci_bus, PIIX4_PCI_DEVFN, true,
|
||||
piix4 = pci_create_simple_multifunction(pci_bus, PIIX4_PCI_DEVFN,
|
||||
TYPE_PIIX4_PCI_DEVICE);
|
||||
isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix4), "isa.0"));
|
||||
|
||||
|
|
|
@ -431,7 +431,7 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)
|
|||
hw_error("Failed to initialize AER capability");
|
||||
}
|
||||
|
||||
pcie_ari_init(pci_dev, 0x150, 1);
|
||||
pcie_ari_init(pci_dev, 0x150);
|
||||
|
||||
pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF,
|
||||
IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
|
||||
|
|
|
@ -270,7 +270,7 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
|
|||
hw_error("Failed to initialize AER capability");
|
||||
}
|
||||
|
||||
pcie_ari_init(dev, 0x150, 1);
|
||||
pcie_ari_init(dev, 0x150);
|
||||
}
|
||||
|
||||
static void igbvf_pci_uninit(PCIDevice *dev)
|
||||
|
|
|
@ -3362,7 +3362,7 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
|
|||
}
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return false
|
||||
*/
|
||||
|
||||
|
@ -3394,7 +3394,7 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
|
|||
}
|
||||
/*
|
||||
*Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
@ -3951,6 +3951,7 @@ static void virtio_net_class_init(ObjectClass *klass, void *data)
|
|||
vdc->vmsd = &vmstate_virtio_net_device;
|
||||
vdc->primary_unplug_pending = primary_unplug_pending;
|
||||
vdc->get_vhost = virtio_net_get_vhost;
|
||||
vdc->toggle_device_iotlb = vhost_toggle_device_iotlb;
|
||||
}
|
||||
|
||||
static const TypeInfo virtio_net_info = {
|
||||
|
|
|
@ -8120,7 +8120,7 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
|
|||
pcie_endpoint_cap_init(pci_dev, 0x80);
|
||||
pcie_cap_flr_init(pci_dev);
|
||||
if (n->params.sriov_max_vfs) {
|
||||
pcie_ari_init(pci_dev, 0x100, 1);
|
||||
pcie_ari_init(pci_dev, 0x100);
|
||||
}
|
||||
|
||||
/* add one to max_ioqpairs to account for the admin queue pair */
|
||||
|
|
|
@ -27,7 +27,6 @@
|
|||
#include "qemu/range.h"
|
||||
#include "hw/i386/pc.h"
|
||||
#include "hw/pci/pci.h"
|
||||
#include "hw/pci/pci_bus.h"
|
||||
#include "hw/pci/pci_host.h"
|
||||
#include "hw/pci-host/i440fx.h"
|
||||
#include "hw/qdev-properties.h"
|
||||
|
@ -47,10 +46,19 @@ OBJECT_DECLARE_SIMPLE_TYPE(I440FXState, I440FX_PCI_HOST_BRIDGE)
|
|||
|
||||
struct I440FXState {
|
||||
PCIHostState parent_obj;
|
||||
|
||||
MemoryRegion *system_memory;
|
||||
MemoryRegion *io_memory;
|
||||
MemoryRegion *pci_address_space;
|
||||
MemoryRegion *ram_memory;
|
||||
Range pci_hole;
|
||||
uint64_t below_4g_mem_size;
|
||||
uint64_t above_4g_mem_size;
|
||||
uint64_t pci_hole64_size;
|
||||
bool pci_hole64_fix;
|
||||
uint32_t short_root_bus;
|
||||
|
||||
char *pci_type;
|
||||
};
|
||||
|
||||
#define I440FX_PAM 0x59
|
||||
|
@ -65,6 +73,15 @@ struct I440FXState {
|
|||
*/
|
||||
#define I440FX_COREBOOT_RAM_SIZE 0x57
|
||||
|
||||
static void i440fx_realize(PCIDevice *dev, Error **errp)
|
||||
{
|
||||
dev->config[I440FX_SMRAM] = 0x02;
|
||||
|
||||
if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) {
|
||||
warn_report("i440fx doesn't support emulated iommu");
|
||||
}
|
||||
}
|
||||
|
||||
static void i440fx_update_memory_mappings(PCII440FXState *d)
|
||||
{
|
||||
int i;
|
||||
|
@ -205,80 +222,69 @@ static void i440fx_pcihost_get_pci_hole64_end(Object *obj, Visitor *v,
|
|||
|
||||
static void i440fx_pcihost_initfn(Object *obj)
|
||||
{
|
||||
PCIHostState *s = PCI_HOST_BRIDGE(obj);
|
||||
I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj);
|
||||
PCIHostState *phb = PCI_HOST_BRIDGE(obj);
|
||||
|
||||
memory_region_init_io(&s->conf_mem, obj, &pci_host_conf_le_ops, s,
|
||||
memory_region_init_io(&phb->conf_mem, obj, &pci_host_conf_le_ops, phb,
|
||||
"pci-conf-idx", 4);
|
||||
memory_region_init_io(&s->data_mem, obj, &pci_host_data_le_ops, s,
|
||||
memory_region_init_io(&phb->data_mem, obj, &pci_host_data_le_ops, phb,
|
||||
"pci-conf-data", 4);
|
||||
|
||||
object_property_add_link(obj, PCI_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->ram_memory,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
|
||||
object_property_add_link(obj, PCI_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->pci_address_space,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
|
||||
object_property_add_link(obj, PCI_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->system_memory,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
|
||||
object_property_add_link(obj, PCI_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->io_memory,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
}
|
||||
|
||||
static void i440fx_pcihost_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
PCIHostState *s = PCI_HOST_BRIDGE(dev);
|
||||
ERRP_GUARD();
|
||||
I440FXState *s = I440FX_PCI_HOST_BRIDGE(dev);
|
||||
PCIHostState *phb = PCI_HOST_BRIDGE(dev);
|
||||
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
|
||||
PCIBus *b;
|
||||
PCIDevice *d;
|
||||
PCII440FXState *f;
|
||||
unsigned i;
|
||||
|
||||
memory_region_add_subregion(s->bus->address_space_io, 0xcf8, &s->conf_mem);
|
||||
memory_region_add_subregion(s->io_memory, 0xcf8, &phb->conf_mem);
|
||||
sysbus_init_ioports(sbd, 0xcf8, 4);
|
||||
|
||||
memory_region_add_subregion(s->bus->address_space_io, 0xcfc, &s->data_mem);
|
||||
memory_region_add_subregion(s->io_memory, 0xcfc, &phb->data_mem);
|
||||
sysbus_init_ioports(sbd, 0xcfc, 4);
|
||||
|
||||
/* register i440fx 0xcf8 port as coalesced pio */
|
||||
memory_region_set_flush_coalesced(&s->data_mem);
|
||||
memory_region_add_coalescing(&s->conf_mem, 0, 4);
|
||||
}
|
||||
memory_region_set_flush_coalesced(&phb->data_mem);
|
||||
memory_region_add_coalescing(&phb->conf_mem, 0, 4);
|
||||
|
||||
static void i440fx_realize(PCIDevice *dev, Error **errp)
|
||||
{
|
||||
dev->config[I440FX_SMRAM] = 0x02;
|
||||
b = pci_root_bus_new(dev, NULL, s->pci_address_space,
|
||||
s->io_memory, 0, TYPE_PCI_BUS);
|
||||
phb->bus = b;
|
||||
|
||||
if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) {
|
||||
warn_report("i440fx doesn't support emulated iommu");
|
||||
}
|
||||
}
|
||||
|
||||
PCIBus *i440fx_init(const char *pci_type,
|
||||
DeviceState *dev,
|
||||
MemoryRegion *address_space_mem,
|
||||
MemoryRegion *address_space_io,
|
||||
ram_addr_t ram_size,
|
||||
ram_addr_t below_4g_mem_size,
|
||||
ram_addr_t above_4g_mem_size,
|
||||
MemoryRegion *pci_address_space,
|
||||
MemoryRegion *ram_memory)
|
||||
{
|
||||
PCIBus *b;
|
||||
PCIDevice *d;
|
||||
PCIHostState *s;
|
||||
PCII440FXState *f;
|
||||
unsigned i;
|
||||
I440FXState *i440fx;
|
||||
|
||||
s = PCI_HOST_BRIDGE(dev);
|
||||
b = pci_root_bus_new(dev, NULL, pci_address_space,
|
||||
address_space_io, 0, TYPE_PCI_BUS);
|
||||
s->bus = b;
|
||||
object_property_add_child(qdev_get_machine(), "i440fx", OBJECT(dev));
|
||||
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
|
||||
|
||||
d = pci_create_simple(b, 0, pci_type);
|
||||
d = pci_create_simple(b, 0, s->pci_type);
|
||||
f = I440FX_PCI_DEVICE(d);
|
||||
f->system_memory = address_space_mem;
|
||||
f->pci_address_space = pci_address_space;
|
||||
f->ram_memory = ram_memory;
|
||||
|
||||
i440fx = I440FX_PCI_HOST_BRIDGE(dev);
|
||||
range_set_bounds(&i440fx->pci_hole, below_4g_mem_size,
|
||||
range_set_bounds(&s->pci_hole, s->below_4g_mem_size,
|
||||
IO_APIC_DEFAULT_ADDRESS - 1);
|
||||
|
||||
/* setup pci memory mapping */
|
||||
pc_pci_as_mapping_init(f->system_memory, f->pci_address_space);
|
||||
pc_pci_as_mapping_init(s->system_memory, s->pci_address_space);
|
||||
|
||||
/* if *disabled* show SMRAM to all CPUs */
|
||||
memory_region_init_alias(&f->smram_region, OBJECT(d), "smram-region",
|
||||
f->pci_address_space, 0xa0000, 0x20000);
|
||||
memory_region_add_subregion_overlap(f->system_memory, 0xa0000,
|
||||
s->pci_address_space, SMRAM_C_BASE, SMRAM_C_SIZE);
|
||||
memory_region_add_subregion_overlap(s->system_memory, SMRAM_C_BASE,
|
||||
&f->smram_region, 1);
|
||||
memory_region_set_enabled(&f->smram_region, true);
|
||||
|
||||
|
@ -286,20 +292,21 @@ PCIBus *i440fx_init(const char *pci_type,
|
|||
memory_region_init(&f->smram, OBJECT(d), "smram", 4 * GiB);
|
||||
memory_region_set_enabled(&f->smram, true);
|
||||
memory_region_init_alias(&f->low_smram, OBJECT(d), "smram-low",
|
||||
f->ram_memory, 0xa0000, 0x20000);
|
||||
s->ram_memory, SMRAM_C_BASE, SMRAM_C_SIZE);
|
||||
memory_region_set_enabled(&f->low_smram, true);
|
||||
memory_region_add_subregion(&f->smram, 0xa0000, &f->low_smram);
|
||||
memory_region_add_subregion(&f->smram, SMRAM_C_BASE, &f->low_smram);
|
||||
object_property_add_const_link(qdev_get_machine(), "smram",
|
||||
OBJECT(&f->smram));
|
||||
|
||||
init_pam(&f->pam_regions[0], OBJECT(d), f->ram_memory, f->system_memory,
|
||||
f->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE);
|
||||
init_pam(&f->pam_regions[0], OBJECT(d), s->ram_memory, s->system_memory,
|
||||
s->pci_address_space, PAM_BIOS_BASE, PAM_BIOS_SIZE);
|
||||
for (i = 0; i < ARRAY_SIZE(f->pam_regions) - 1; ++i) {
|
||||
init_pam(&f->pam_regions[i + 1], OBJECT(d), f->ram_memory,
|
||||
f->system_memory, f->pci_address_space,
|
||||
init_pam(&f->pam_regions[i + 1], OBJECT(d), s->ram_memory,
|
||||
s->system_memory, s->pci_address_space,
|
||||
PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
|
||||
}
|
||||
|
||||
ram_addr_t ram_size = s->below_4g_mem_size + s->above_4g_mem_size;
|
||||
ram_size = ram_size / 8 / 1024 / 1024;
|
||||
if (ram_size > 255) {
|
||||
ram_size = 255;
|
||||
|
@ -307,8 +314,6 @@ PCIBus *i440fx_init(const char *pci_type,
|
|||
d->config[I440FX_COREBOOT_RAM_SIZE] = ram_size;
|
||||
|
||||
i440fx_update_memory_mappings(f);
|
||||
|
||||
return b;
|
||||
}
|
||||
|
||||
static void i440fx_class_init(ObjectClass *klass, void *data)
|
||||
|
@ -359,7 +364,12 @@ static Property i440fx_props[] = {
|
|||
DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, I440FXState,
|
||||
pci_hole64_size, I440FX_PCI_HOST_HOLE64_SIZE_DEFAULT),
|
||||
DEFINE_PROP_UINT32("short_root_bus", I440FXState, short_root_bus, 0),
|
||||
DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, I440FXState,
|
||||
below_4g_mem_size, 0),
|
||||
DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, I440FXState,
|
||||
above_4g_mem_size, 0),
|
||||
DEFINE_PROP_BOOL("x-pci-hole64-fix", I440FXState, pci_hole64_fix, true),
|
||||
DEFINE_PROP_STRING(I440FX_HOST_PROP_PCI_TYPE, I440FXState, pci_type),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
|
|
|
@ -66,9 +66,7 @@ static void q35_host_realize(DeviceState *dev, Error **errp)
|
|||
s->mch.pci_address_space,
|
||||
s->mch.address_space_io,
|
||||
0, TYPE_PCIE_BUS);
|
||||
PC_MACHINE(qdev_get_machine())->bus = pci->bus;
|
||||
pci->bypass_iommu =
|
||||
PC_MACHINE(qdev_get_machine())->default_bus_bypass_iommu;
|
||||
|
||||
qdev_realize(DEVICE(&s->mch), BUS(pci->bus), &error_fatal);
|
||||
}
|
||||
|
||||
|
@ -242,19 +240,19 @@ static void q35_host_initfn(Object *obj)
|
|||
object_property_add_uint64_ptr(obj, PCIE_HOST_MCFG_SIZE,
|
||||
&pehb->size, OBJ_PROP_FLAG_READ);
|
||||
|
||||
object_property_add_link(obj, MCH_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
|
||||
object_property_add_link(obj, PCI_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->mch.ram_memory,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
|
||||
object_property_add_link(obj, MCH_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
|
||||
object_property_add_link(obj, PCI_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->mch.pci_address_space,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
|
||||
object_property_add_link(obj, MCH_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
|
||||
object_property_add_link(obj, PCI_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->mch.system_memory,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
|
||||
object_property_add_link(obj, MCH_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
|
||||
object_property_add_link(obj, PCI_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
|
||||
(Object **) &s->mch.address_space_io,
|
||||
qdev_prop_allow_set_link_before_realize, 0);
|
||||
}
|
||||
|
@ -285,7 +283,6 @@ static void blackhole_write(void *opaque, hwaddr addr, uint64_t val,
|
|||
static const MemoryRegionOps blackhole_ops = {
|
||||
.read = blackhole_read,
|
||||
.write = blackhole_write,
|
||||
.endianness = DEVICE_NATIVE_ENDIAN,
|
||||
.valid.min_access_size = 1,
|
||||
.valid.max_access_size = 4,
|
||||
.impl.min_access_size = 4,
|
||||
|
|
|
@ -387,14 +387,12 @@ static void sabre_realize(DeviceState *dev, Error **errp)
|
|||
pci_setup_iommu(phb->bus, sabre_pci_dma_iommu, s->iommu);
|
||||
|
||||
/* APB secondary busses */
|
||||
pci_dev = pci_new_multifunction(PCI_DEVFN(1, 0), true,
|
||||
TYPE_SIMBA_PCI_BRIDGE);
|
||||
pci_dev = pci_new_multifunction(PCI_DEVFN(1, 0), TYPE_SIMBA_PCI_BRIDGE);
|
||||
s->bridgeB = PCI_BRIDGE(pci_dev);
|
||||
pci_bridge_map_irq(s->bridgeB, "pciB", pci_simbaB_map_irq);
|
||||
pci_realize_and_unref(pci_dev, phb->bus, &error_fatal);
|
||||
|
||||
pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), true,
|
||||
TYPE_SIMBA_PCI_BRIDGE);
|
||||
pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), TYPE_SIMBA_PCI_BRIDGE);
|
||||
s->bridgeA = PCI_BRIDGE(pci_dev);
|
||||
pci_bridge_map_irq(s->bridgeA, "pciA", pci_simbaA_map_irq);
|
||||
pci_realize_and_unref(pci_dev, phb->bus, &error_fatal);
|
||||
|
|
40
hw/pci/pci.c
40
hw/pci/pci.c
|
@ -65,6 +65,7 @@ bool pci_available = true;
|
|||
static char *pcibus_get_dev_path(DeviceState *dev);
|
||||
static char *pcibus_get_fw_dev_path(DeviceState *dev);
|
||||
static void pcibus_reset(BusState *qbus);
|
||||
static bool pcie_has_upstream_port(PCIDevice *dev);
|
||||
|
||||
static Property pci_props[] = {
|
||||
DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
|
||||
|
@ -82,6 +83,8 @@ static Property pci_props[] = {
|
|||
DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
|
||||
DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
|
||||
QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
|
||||
DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
|
||||
QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
|
||||
DEFINE_PROP_END_OF_LIST()
|
||||
};
|
||||
|
||||
|
@ -2121,6 +2124,25 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* A PCIe Downstream Port that do not have ARI Forwarding enabled must
|
||||
* associate only Device 0 with the device attached to the bus
|
||||
* representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
|
||||
* sec 7.3.1).
|
||||
* With ARI, PCI_SLOT() can return non-zero value as the traditional
|
||||
* 5-bit Device Number and 3-bit Function Number fields in its associated
|
||||
* Routing IDs, Requester IDs and Completer IDs are interpreted as a
|
||||
* single 8-bit Function Number. Hence, ignore ARI capable devices.
|
||||
*/
|
||||
if (pci_is_express(pci_dev) &&
|
||||
!pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
|
||||
pcie_has_upstream_port(pci_dev) &&
|
||||
PCI_SLOT(pci_dev->devfn)) {
|
||||
warn_report("PCI: slot %d is not valid for %s,"
|
||||
" parent device only allows plugging into slot 0.",
|
||||
PCI_SLOT(pci_dev->devfn), pci_dev->name);
|
||||
}
|
||||
|
||||
if (pci_dev->failover_pair_id) {
|
||||
if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
|
||||
error_setg(errp, "failover primary device must be on "
|
||||
|
@ -2164,8 +2186,8 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp)
|
|||
pci_dev->msi_trigger = pci_msi_trigger;
|
||||
}
|
||||
|
||||
PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
|
||||
const char *name)
|
||||
static PCIDevice *pci_new_internal(int devfn, bool multifunction,
|
||||
const char *name)
|
||||
{
|
||||
DeviceState *dev;
|
||||
|
||||
|
@ -2175,9 +2197,14 @@ PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
|
|||
return PCI_DEVICE(dev);
|
||||
}
|
||||
|
||||
PCIDevice *pci_new_multifunction(int devfn, const char *name)
|
||||
{
|
||||
return pci_new_internal(devfn, true, name);
|
||||
}
|
||||
|
||||
PCIDevice *pci_new(int devfn, const char *name)
|
||||
{
|
||||
return pci_new_multifunction(devfn, false, name);
|
||||
return pci_new_internal(devfn, false, name);
|
||||
}
|
||||
|
||||
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
|
||||
|
@ -2186,17 +2213,18 @@ bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
|
|||
}
|
||||
|
||||
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
|
||||
bool multifunction,
|
||||
const char *name)
|
||||
{
|
||||
PCIDevice *dev = pci_new_multifunction(devfn, multifunction, name);
|
||||
PCIDevice *dev = pci_new_multifunction(devfn, name);
|
||||
pci_realize_and_unref(dev, bus, &error_fatal);
|
||||
return dev;
|
||||
}
|
||||
|
||||
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
|
||||
{
|
||||
return pci_create_simple_multifunction(bus, devfn, false, name);
|
||||
PCIDevice *dev = pci_new(devfn, name);
|
||||
pci_realize_and_unref(dev, bus, &error_fatal);
|
||||
return dev;
|
||||
}
|
||||
|
||||
static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
|
||||
|
|
|
@ -232,7 +232,7 @@ const VMStateDescription vmstate_pcihost = {
|
|||
static Property pci_host_properties_common[] = {
|
||||
DEFINE_PROP_BOOL("x-config-reg-migration-enabled", PCIHostState,
|
||||
mig_enabled, true),
|
||||
DEFINE_PROP_BOOL("bypass-iommu", PCIHostState, bypass_iommu, false),
|
||||
DEFINE_PROP_BOOL(PCI_HOST_BYPASS_IOMMU, PCIHostState, bypass_iommu, false),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
|
|
|
@ -666,6 +666,10 @@ void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s)
|
|||
pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
|
||||
PCI_EXP_HP_EV_SUPPORTED);
|
||||
|
||||
/* Avoid migration abortion when this device hot-removed by guest */
|
||||
pci_word_test_and_clear_mask(dev->cmask + pos + PCI_EXP_SLTSTA,
|
||||
PCI_EXP_SLTSTA_PDS);
|
||||
|
||||
dev->exp.hpev_notified = false;
|
||||
|
||||
qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
|
||||
|
@ -1035,8 +1039,10 @@ void pcie_sync_bridge_lnk(PCIDevice *bridge_dev)
|
|||
*/
|
||||
|
||||
/* ARI */
|
||||
void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn)
|
||||
void pcie_ari_init(PCIDevice *dev, uint16_t offset)
|
||||
{
|
||||
uint16_t nextfn = dev->cap_present & QEMU_PCIE_ARI_NEXTFN_1 ? 1 : 0;
|
||||
|
||||
pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER,
|
||||
offset, PCI_ARI_SIZEOF);
|
||||
pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8);
|
||||
|
|
|
@ -211,6 +211,7 @@ static void unregister_vfs(PCIDevice *dev)
|
|||
error_free(local_err);
|
||||
}
|
||||
object_unparent(OBJECT(vf));
|
||||
object_unref(OBJECT(vf));
|
||||
}
|
||||
g_free(dev->exp.sriov_pf.vf);
|
||||
dev->exp.sriov_pf.vf = NULL;
|
||||
|
|
|
@ -180,7 +180,7 @@ static void pegasos2_init(MachineState *machine)
|
|||
|
||||
/* VIA VT8231 South Bridge (multifunction PCI device) */
|
||||
via = OBJECT(pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0),
|
||||
true, TYPE_VT8231_ISA));
|
||||
TYPE_VT8231_ISA));
|
||||
for (i = 0; i < PCI_NUM_PINS; i++) {
|
||||
pm->via_pirq[i] = qdev_get_gpio_in_named(DEVICE(via), "pirq", i);
|
||||
}
|
||||
|
|
|
@ -713,6 +713,8 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
|
|||
{
|
||||
char sock_str[128];
|
||||
size_t tbl_len = SMBIOS_TYPE_4_LEN_V28;
|
||||
unsigned threads_per_socket;
|
||||
unsigned cores_per_socket;
|
||||
|
||||
if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_64) {
|
||||
tbl_len = SMBIOS_TYPE_4_LEN_V30;
|
||||
|
@ -747,17 +749,20 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
|
|||
SMBIOS_TABLE_SET_STR(4, asset_tag_number_str, type4.asset);
|
||||
SMBIOS_TABLE_SET_STR(4, part_number_str, type4.part);
|
||||
|
||||
t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores;
|
||||
threads_per_socket = machine_topo_get_threads_per_socket(ms);
|
||||
cores_per_socket = machine_topo_get_cores_per_socket(ms);
|
||||
|
||||
t->core_count = (cores_per_socket > 255) ? 0xFF : cores_per_socket;
|
||||
t->core_enabled = t->core_count;
|
||||
|
||||
t->thread_count = (ms->smp.threads > 255) ? 0xFF : ms->smp.threads;
|
||||
t->thread_count = (threads_per_socket > 255) ? 0xFF : threads_per_socket;
|
||||
|
||||
t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */
|
||||
t->processor_family2 = cpu_to_le16(0x01); /* Other */
|
||||
|
||||
if (tbl_len == SMBIOS_TYPE_4_LEN_V30) {
|
||||
t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores);
|
||||
t->thread_count2 = cpu_to_le16(ms->smp.threads);
|
||||
t->core_count2 = t->core_enabled2 = cpu_to_le16(cores_per_socket);
|
||||
t->thread_count2 = cpu_to_le16(threads_per_socket);
|
||||
}
|
||||
|
||||
SMBIOS_BUILD_TABLE_POST;
|
||||
|
@ -1088,8 +1093,7 @@ void smbios_get_tables(MachineState *ms,
|
|||
smbios_build_type_2_table();
|
||||
smbios_build_type_3_table();
|
||||
|
||||
smbios_smp_sockets = DIV_ROUND_UP(ms->smp.cpus,
|
||||
ms->smp.cores * ms->smp.threads);
|
||||
smbios_smp_sockets = ms->smp.sockets;
|
||||
assert(smbios_smp_sockets >= 1);
|
||||
|
||||
for (i = 0; i < smbios_smp_sockets; i++) {
|
||||
|
|
|
@ -612,7 +612,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem,
|
|||
pci_bus_set_slot_reserved_mask(pci_busA, 0xfffffff1);
|
||||
pci_bus_set_slot_reserved_mask(pci_busB, 0xfffffff0);
|
||||
|
||||
ebus = pci_new_multifunction(PCI_DEVFN(1, 0), true, TYPE_EBUS);
|
||||
ebus = pci_new_multifunction(PCI_DEVFN(1, 0), TYPE_EBUS);
|
||||
qdev_prop_set_uint64(DEVICE(ebus), "console-serial-base",
|
||||
hwdef->console_serial_base);
|
||||
pci_realize_and_unref(ebus, pci_busA, &error_fatal);
|
||||
|
@ -648,8 +648,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem,
|
|||
|
||||
if (!nd->model || strcmp(nd->model, mc->default_nic) == 0) {
|
||||
if (!onboard_nic) {
|
||||
pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1),
|
||||
true, mc->default_nic);
|
||||
pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), mc->default_nic);
|
||||
bus = pci_busA;
|
||||
memcpy(&macaddr, &nd->macaddr.a, sizeof(MACAddr));
|
||||
onboard_nic = true;
|
||||
|
|
|
@ -96,3 +96,8 @@ config VHOST_VDPA_DEV
|
|||
bool
|
||||
default y
|
||||
depends on VIRTIO && VHOST_VDPA && LINUX
|
||||
|
||||
config VHOST_USER_SCMI
|
||||
bool
|
||||
default y
|
||||
depends on VIRTIO && VHOST_USER
|
||||
|
|
|
@ -35,6 +35,8 @@ specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user
|
|||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
|
||||
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c'))
|
||||
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c'))
|
||||
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c'))
|
||||
|
||||
virtio_pci_ss = ss.source_set()
|
||||
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))
|
||||
|
|
|
@ -34,7 +34,9 @@ vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_
|
|||
vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
|
||||
vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
|
||||
vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
|
||||
vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
|
||||
vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
|
||||
vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
|
||||
vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
|
||||
vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
|
||||
vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p"
|
||||
|
@ -44,7 +46,7 @@ vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %
|
|||
vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding) "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64" userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64
|
||||
vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
|
||||
vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32
|
||||
vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
|
||||
vhost_vdpa_reset_device(void *dev) "dev: %p"
|
||||
vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d"
|
||||
vhost_vdpa_set_vring_ready(void *dev) "dev: %p"
|
||||
vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
|
||||
|
@ -131,6 +133,7 @@ virtio_iommu_set_page_size_mask(const char *name, uint64_t old, uint64_t new) "m
|
|||
virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s"
|
||||
virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s"
|
||||
virtio_iommu_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
|
||||
virtio_iommu_freeze_granule(uint64_t page_size_mask) "granule set to 0x%"PRIx64
|
||||
|
||||
# virtio-mem.c
|
||||
virtio_mem_send_response(uint16_t type) "type=%" PRIu16
|
||||
|
|
|
@ -111,7 +111,7 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
|
|||
addrs[i] = map->iova + off;
|
||||
|
||||
needle_last = int128_add(int128_make64(needle.translated_addr),
|
||||
int128_make64(iovec[i].iov_len));
|
||||
int128_makes64(iovec[i].iov_len - 1));
|
||||
map_last = int128_make64(map->translated_addr + map->size);
|
||||
if (unlikely(int128_gt(needle_last, map_last))) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
|
|
|
@ -15,3 +15,7 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
|
|||
void vhost_user_cleanup(VhostUserState *user)
|
||||
{
|
||||
}
|
||||
|
||||
void vhost_toggle_device_iotlb(VirtIODevice *vdev)
|
||||
{
|
||||
}
|
||||
|
|
|
@ -161,7 +161,7 @@ static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
@ -177,7 +177,7 @@ static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
|
|
@ -194,7 +194,7 @@ static void vu_gpio_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
|
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Vhost-user SCMI virtio device PCI glue
|
||||
*
|
||||
* SPDX-FileCopyrightText: Red Hat, Inc.
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "hw/qdev-properties.h"
|
||||
#include "hw/virtio/vhost-user-scmi.h"
|
||||
#include "hw/virtio/virtio-pci.h"
|
||||
|
||||
struct VHostUserSCMIPCI {
|
||||
VirtIOPCIProxy parent_obj;
|
||||
VHostUserSCMI vdev;
|
||||
};
|
||||
|
||||
typedef struct VHostUserSCMIPCI VHostUserSCMIPCI;
|
||||
|
||||
#define TYPE_VHOST_USER_SCMI_PCI "vhost-user-scmi-pci-base"
|
||||
|
||||
DECLARE_INSTANCE_CHECKER(VHostUserSCMIPCI, VHOST_USER_SCMI_PCI,
|
||||
TYPE_VHOST_USER_SCMI_PCI)
|
||||
|
||||
static void vhost_user_scmi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
|
||||
{
|
||||
VHostUserSCMIPCI *dev = VHOST_USER_SCMI_PCI(vpci_dev);
|
||||
DeviceState *vdev = DEVICE(&dev->vdev);
|
||||
|
||||
vpci_dev->nvectors = 1;
|
||||
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
|
||||
}
|
||||
|
||||
static void vhost_user_scmi_pci_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
|
||||
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
|
||||
k->realize = vhost_user_scmi_pci_realize;
|
||||
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
|
||||
pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
|
||||
pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
|
||||
pcidev_k->revision = 0x00;
|
||||
pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
|
||||
}
|
||||
|
||||
static void vhost_user_scmi_pci_instance_init(Object *obj)
|
||||
{
|
||||
VHostUserSCMIPCI *dev = VHOST_USER_SCMI_PCI(obj);
|
||||
|
||||
virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
|
||||
TYPE_VHOST_USER_SCMI);
|
||||
}
|
||||
|
||||
static const VirtioPCIDeviceTypeInfo vhost_user_scmi_pci_info = {
|
||||
.base_name = TYPE_VHOST_USER_SCMI_PCI,
|
||||
.non_transitional_name = "vhost-user-scmi-pci",
|
||||
.instance_size = sizeof(VHostUserSCMIPCI),
|
||||
.instance_init = vhost_user_scmi_pci_instance_init,
|
||||
.class_init = vhost_user_scmi_pci_class_init,
|
||||
};
|
||||
|
||||
static void vhost_user_scmi_pci_register(void)
|
||||
{
|
||||
virtio_pci_types_register(&vhost_user_scmi_pci_info);
|
||||
}
|
||||
|
||||
type_init(vhost_user_scmi_pci_register);
|
|
@ -0,0 +1,306 @@
|
|||
/*
|
||||
* Vhost-user SCMI virtio device
|
||||
*
|
||||
* SPDX-FileCopyrightText: Red Hat, Inc.
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*
|
||||
* Implementation based on other vhost-user devices in QEMU.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "hw/virtio/virtio-bus.h"
|
||||
#include "hw/virtio/vhost-user-scmi.h"
|
||||
#include "standard-headers/linux/virtio_ids.h"
|
||||
#include "standard-headers/linux/virtio_scmi.h"
|
||||
#include "trace.h"
|
||||
|
||||
/*
|
||||
* In this version, we don't support VIRTIO_SCMI_F_SHARED_MEMORY.
|
||||
* Note that VIRTIO_SCMI_F_SHARED_MEMORY is currently not supported in
|
||||
* Linux VirtIO SCMI guest driver.
|
||||
*/
|
||||
static const int feature_bits[] = {
|
||||
VIRTIO_F_VERSION_1,
|
||||
VIRTIO_F_NOTIFY_ON_EMPTY,
|
||||
VIRTIO_RING_F_INDIRECT_DESC,
|
||||
VIRTIO_RING_F_EVENT_IDX,
|
||||
VIRTIO_F_RING_RESET,
|
||||
VIRTIO_SCMI_F_P2A_CHANNELS,
|
||||
VHOST_INVALID_FEATURE_BIT
|
||||
};
|
||||
|
||||
static int vu_scmi_start(VirtIODevice *vdev)
|
||||
{
|
||||
VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
|
||||
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
|
||||
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
|
||||
struct vhost_dev *vhost_dev = &scmi->vhost_dev;
|
||||
int ret, i;
|
||||
|
||||
if (!k->set_guest_notifiers) {
|
||||
error_report("binding does not support guest notifiers");
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
ret = vhost_dev_enable_notifiers(vhost_dev, vdev);
|
||||
if (ret < 0) {
|
||||
error_report("Error enabling host notifiers: %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true);
|
||||
if (ret < 0) {
|
||||
error_report("Error binding guest notifier: %d", ret);
|
||||
goto err_host_notifiers;
|
||||
}
|
||||
|
||||
vhost_ack_features(&scmi->vhost_dev, feature_bits, vdev->guest_features);
|
||||
|
||||
ret = vhost_dev_start(&scmi->vhost_dev, vdev, true);
|
||||
if (ret < 0) {
|
||||
error_report("Error starting vhost-user-scmi: %d", ret);
|
||||
goto err_guest_notifiers;
|
||||
}
|
||||
|
||||
/*
|
||||
* guest_notifier_mask/pending not used yet, so just unmask
|
||||
* everything here. virtio-pci will do the right thing by
|
||||
* enabling/disabling irqfd.
|
||||
*/
|
||||
for (i = 0; i < scmi->vhost_dev.nvqs; i++) {
|
||||
vhost_virtqueue_mask(&scmi->vhost_dev, vdev, i, false);
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_guest_notifiers:
|
||||
k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
|
||||
err_host_notifiers:
|
||||
vhost_dev_disable_notifiers(vhost_dev, vdev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Stop the vhost-user SCMI backend and release the guest/host
 * notifiers that vu_scmi_start() set up.
 */
static void vu_scmi_stop(VirtIODevice *vdev)
{
    VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
    BusState *bus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *vbc = VIRTIO_BUS_GET_CLASS(bus);
    struct vhost_dev *vhost_dev = &scmi->vhost_dev;
    int res;

    if (!vbc->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(vhost_dev, vdev, true);

    res = vbc->set_guest_notifiers(bus->parent, vhost_dev->nvqs, false);
    if (res < 0) {
        error_report("vhost guest notifier cleanup failed: %d", res);
        return;
    }
    vhost_dev_disable_notifiers(vhost_dev, vdev);
}
|
||||
|
||||
/*
 * VirtioDeviceClass::set_status hook: start or stop the backend so
 * that its running state tracks the guest-visible device status.
 * Nothing is done while the chardev is disconnected.
 */
static void vu_scmi_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
    bool start = virtio_device_should_start(vdev, status);

    if (!scmi->connected ||
        vhost_dev_is_started(&scmi->vhost_dev) == start) {
        /* Not connected, or already in the requested state. */
        return;
    }

    if (start) {
        vu_scmi_start(vdev);
    } else {
        vu_scmi_stop(vdev);
    }
}
|
||||
|
||||
static uint64_t vu_scmi_get_features(VirtIODevice *vdev, uint64_t features,
|
||||
Error **errp)
|
||||
{
|
||||
VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
|
||||
|
||||
return vhost_get_features(&scmi->vhost_dev, feature_bits, features);
|
||||
}
|
||||
|
||||
/*
 * Virtqueue handler stub.  The vhost-user daemon processes the queues;
 * QEMU never handles kicks itself, but virtio's cleanup path can still
 * invoke this callback, so it must exist and do nothing.
 */
static void vu_scmi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /*
     * Not normally called; it's the daemon that handles the queue;
     * however virtio's cleanup path can call this.
     */
}
|
||||
|
||||
/*
 * VirtioDeviceClass::guest_notifier_mask hook.  The config interrupt
 * (VIRTIO_CONFIG_IRQ_IDX) is not backed by a vhost virtqueue, so it is
 * skipped; all other indexes are forwarded to vhost.
 */
static void vu_scmi_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);

    if (idx != VIRTIO_CONFIG_IRQ_IDX) {
        vhost_virtqueue_mask(&scmi->vhost_dev, vdev, idx, mask);
    }
}
|
||||
|
||||
/*
 * VirtioDeviceClass::guest_notifier_pending hook: report whether the
 * vhost virtqueue at @idx has a pending notification.
 */
static bool vu_scmi_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    return vhost_virtqueue_pending(&VHOST_USER_SCMI(vdev)->vhost_dev, idx);
}
|
||||
|
||||
/*
 * Chardev OPENED handler: mark the backend connected and, if the guest
 * had already driven the device, restart the vhost backend so its state
 * is restored.  Idempotent for repeated OPENED events.
 */
static void vu_scmi_connect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);

    if (!scmi->connected) {
        scmi->connected = true;

        /* restore vhost state */
        if (virtio_device_started(vdev, vdev->status)) {
            vu_scmi_start(vdev);
        }
    }
}
|
||||
|
||||
/*
 * Chardev CLOSED handler: mark the backend disconnected and stop the
 * vhost device if it was running.  Idempotent for repeated CLOSED
 * events.
 */
static void vu_scmi_disconnect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);

    if (!scmi->connected) {
        return;
    }
    scmi->connected = false;

    if (vhost_dev_is_started(&scmi->vhost_dev)) {
        vu_scmi_stop(vdev);
    }
}
|
||||
|
||||
/*
 * Chardev event dispatcher registered in realize: OPENED/CLOSED track
 * the daemon connection; all other events are deliberately ignored.
 */
static void vu_scmi_event(void *opaque, QEMUChrEvent event)
{
    DeviceState *dev = opaque;

    if (event == CHR_EVENT_OPENED) {
        vu_scmi_connect(dev);
    } else if (event == CHR_EVENT_CLOSED) {
        vu_scmi_disconnect(dev);
    }
    /* CHR_EVENT_BREAK / CHR_EVENT_MUX_IN / CHR_EVENT_MUX_OUT: ignore */
}
|
||||
|
||||
/*
 * Tear down everything vu_scmi_device_realize() set up, in reverse
 * dependency order: the two virtqueues first, then the vhost_virtqueue
 * array allocated in realize, then the VirtIODevice core, and finally
 * the vhost-user connection state.  The order matters: the queues must
 * be deleted before virtio_cleanup() destroys the VirtIODevice.
 */
static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserSCMI *scmi)
{
    virtio_delete_queue(scmi->cmd_vq);
    virtio_delete_queue(scmi->event_vq);
    g_free(scmi->vhost_dev.vqs);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&scmi->vhost_user);
}
|
||||
|
||||
/*
 * Realize the vhost-user-scmi device.
 *
 * Validates the mandatory chardev, advertises the P2A-channels feature,
 * initialises the vhost-user state, creates the two SCMI virtqueues
 * (command and event), and initialises the vhost device.  On
 * vhost_dev_init() failure, everything created here is rolled back via
 * do_vhost_user_cleanup().
 *
 * Fix: dropped the redundant trailing `return;` at the end of this
 * void function.
 */
static void vu_scmi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCMI *scmi = VHOST_USER_SCMI(dev);
    int ret;

    if (!scmi->chardev.chr) {
        error_setg(errp, "vhost-user-scmi: chardev is mandatory");
        return;
    }

    vdev->host_features |= (1ULL << VIRTIO_SCMI_F_P2A_CHANNELS);

    if (!vhost_user_init(&scmi->vhost_user, &scmi->chardev, errp)) {
        return;
    }

    /* Config space is unused (size 0) for this device. */
    virtio_init(vdev, VIRTIO_ID_SCMI, 0);

    /* 256-entry command and event queues, handled by the daemon. */
    scmi->cmd_vq = virtio_add_queue(vdev, 256, vu_scmi_handle_output);
    scmi->event_vq = virtio_add_queue(vdev, 256, vu_scmi_handle_output);
    scmi->vhost_dev.nvqs = 2;
    scmi->vhost_dev.vqs = g_new0(struct vhost_virtqueue, scmi->vhost_dev.nvqs);

    ret = vhost_dev_init(&scmi->vhost_dev, &scmi->vhost_user,
                         VHOST_BACKEND_TYPE_USER, 0, errp);
    if (ret < 0) {
        /*
         * NOTE(review): vhost_dev_init() may itself have set *errp on
         * some failure paths; setting it again here could trip the
         * error-already-set assertion -- confirm against vhost.c.
         */
        error_setg_errno(errp, -ret,
                         "vhost-user-scmi: vhost_dev_init() failed");
        do_vhost_user_cleanup(vdev, scmi);
        return;
    }

    qemu_chr_fe_set_handlers(&scmi->chardev, NULL, NULL, vu_scmi_event, NULL,
                             dev, NULL, true);
}
|
||||
|
||||
/*
 * Unrealize: force the device to status 0 (which stops the vhost
 * backend via vu_scmi_set_status), clean up the vhost device, then
 * release everything else realize allocated.
 */
static void vu_scmi_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCMI *scmi = VHOST_USER_SCMI(dev);

    vu_scmi_set_status(vdev, 0);
    vhost_dev_cleanup(&scmi->vhost_dev);
    do_vhost_user_cleanup(vdev, scmi);
}
|
||||
|
||||
/*
 * Migration is blocked: the device state lives in the external
 * vhost-user daemon, which QEMU cannot serialize here.
 */
static const VMStateDescription vu_scmi_vmstate = {
    .name = "vhost-user-scmi",
    .unmigratable = 1,
};
|
||||
|
||||
/* Single user-visible property: the chardev connecting to the daemon. */
static Property vu_scmi_properties[] = {
    DEFINE_PROP_CHR("chardev", VHostUserSCMI, chardev),
    DEFINE_PROP_END_OF_LIST(),
};
|
||||
|
||||
/*
 * QOM class init: wire up device properties, migration blocker, and
 * the VirtioDeviceClass callbacks implemented above.
 */
static void vu_scmi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vu_scmi_properties);
    dc->vmsd = &vu_scmi_vmstate;
    /*
     * NOTE(review): DEVICE_CATEGORY_INPUT looks odd for an SCMI
     * (system control/management) device; possibly carried over from
     * the vhost-user device this was modelled on -- confirm.
     */
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    vdc->realize = vu_scmi_device_realize;
    vdc->unrealize = vu_scmi_device_unrealize;
    vdc->get_features = vu_scmi_get_features;
    vdc->set_status = vu_scmi_set_status;
    vdc->guest_notifier_mask = vu_scmi_guest_notifier_mask;
    vdc->guest_notifier_pending = vu_scmi_guest_notifier_pending;
}
|
||||
|
||||
/* QOM type registration record for the vhost-user-scmi device. */
static const TypeInfo vu_scmi_info = {
    .name = TYPE_VHOST_USER_SCMI,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserSCMI),
    .class_init = vu_scmi_class_init,
};
|
||||
|
||||
/* Register the device type with QOM at module init time. */
static void vu_scmi_register_types(void)
{
    type_register_static(&vu_scmi_info);
}

type_init(vu_scmi_register_types)
|
|
@ -367,7 +367,7 @@ static int process_message_reply(struct vhost_dev *dev,
|
|||
return msg_reply.payload.u64 ? -EIO : 0;
|
||||
}
|
||||
|
||||
static bool vhost_user_one_time_request(VhostUserRequest request)
|
||||
static bool vhost_user_per_device_request(VhostUserRequest request)
|
||||
{
|
||||
switch (request) {
|
||||
case VHOST_USER_SET_OWNER:
|
||||
|
@ -375,6 +375,7 @@ static bool vhost_user_one_time_request(VhostUserRequest request)
|
|||
case VHOST_USER_SET_MEM_TABLE:
|
||||
case VHOST_USER_GET_QUEUE_NUM:
|
||||
case VHOST_USER_NET_SET_MTU:
|
||||
case VHOST_USER_RESET_DEVICE:
|
||||
case VHOST_USER_ADD_MEM_REG:
|
||||
case VHOST_USER_REM_MEM_REG:
|
||||
return true;
|
||||
|
@ -392,11 +393,17 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
|
|||
int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
|
||||
|
||||
/*
|
||||
* For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
|
||||
* we just need send it once in the first time. For later such
|
||||
* request, we just ignore it.
|
||||
* Some devices, like virtio-scsi, are implemented as a single vhost_dev,
|
||||
* while others, like virtio-net, contain multiple vhost_devs. For
|
||||
* operations such as configuring device memory mappings or issuing device
|
||||
* resets, which affect the whole device instead of individual VQs,
|
||||
* vhost-user messages should only be sent once.
|
||||
*
|
||||
* Devices with multiple vhost_devs are given an associated dev->vq_index
|
||||
* so per_device requests are only sent if vq_index is 0.
|
||||
*/
|
||||
if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
|
||||
if (vhost_user_per_device_request(msg->hdr.request)
|
||||
&& dev->vq_index != 0) {
|
||||
msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
|
||||
return 0;
|
||||
}
|
||||
|
@ -1256,7 +1263,7 @@ static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
|
|||
.hdr.flags = VHOST_USER_VERSION,
|
||||
};
|
||||
|
||||
if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
|
||||
if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -323,7 +323,9 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
|
|||
|
||||
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
|
||||
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
|
||||
error_report("%s received unaligned region", __func__);
|
||||
trace_vhost_vdpa_listener_region_add_unaligned(v, section->mr->name,
|
||||
section->offset_within_address_space & ~TARGET_PAGE_MASK,
|
||||
section->offset_within_region & ~TARGET_PAGE_MASK);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -405,7 +407,9 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
|
|||
|
||||
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
|
||||
(section->offset_within_region & ~TARGET_PAGE_MASK))) {
|
||||
error_report("%s received unaligned region", __func__);
|
||||
trace_vhost_vdpa_listener_region_del_unaligned(v, section->mr->name,
|
||||
section->offset_within_address_space & ~TARGET_PAGE_MASK,
|
||||
section->offset_within_region & ~TARGET_PAGE_MASK);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -859,7 +863,7 @@ static int vhost_vdpa_reset_device(struct vhost_dev *dev)
|
|||
uint8_t status = 0;
|
||||
|
||||
ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
|
||||
trace_vhost_vdpa_reset_device(dev, status);
|
||||
trace_vhost_vdpa_reset_device(dev);
|
||||
v->suspended = false;
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -129,7 +129,7 @@ static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
@ -146,7 +146,7 @@ static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev,
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
|
|
@ -780,7 +780,6 @@ static void vhost_iommu_region_add(MemoryListener *listener,
|
|||
Int128 end;
|
||||
int iommu_idx;
|
||||
IOMMUMemoryRegion *iommu_mr;
|
||||
int ret;
|
||||
|
||||
if (!memory_region_is_iommu(section->mr)) {
|
||||
return;
|
||||
|
@ -795,7 +794,9 @@ static void vhost_iommu_region_add(MemoryListener *listener,
|
|||
iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
|
||||
MEMTXATTRS_UNSPECIFIED);
|
||||
iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
|
||||
IOMMU_NOTIFIER_DEVIOTLB_UNMAP,
|
||||
dev->vdev->device_iotlb_enabled ?
|
||||
IOMMU_NOTIFIER_DEVIOTLB_UNMAP :
|
||||
IOMMU_NOTIFIER_UNMAP,
|
||||
section->offset_within_region,
|
||||
int128_get64(end),
|
||||
iommu_idx);
|
||||
|
@ -803,16 +804,8 @@ static void vhost_iommu_region_add(MemoryListener *listener,
|
|||
iommu->iommu_offset = section->offset_within_address_space -
|
||||
section->offset_within_region;
|
||||
iommu->hdev = dev;
|
||||
ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
|
||||
if (ret) {
|
||||
/*
|
||||
* Some vIOMMUs do not support dev-iotlb yet. If so, try to use the
|
||||
* UNMAP legacy message
|
||||
*/
|
||||
iommu->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
|
||||
memory_region_register_iommu_notifier(section->mr, &iommu->n,
|
||||
&error_fatal);
|
||||
}
|
||||
memory_region_register_iommu_notifier(section->mr, &iommu->n,
|
||||
&error_fatal);
|
||||
QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
|
||||
/* TODO: can replay help performance here? */
|
||||
}
|
||||
|
@ -840,6 +833,27 @@ static void vhost_iommu_region_del(MemoryListener *listener,
|
|||
}
|
||||
}
|
||||
|
||||
/*
 * Re-register every IOMMU notifier of the device's vhost_dev so that
 * its notifier flags match the current device-IOTLB setting
 * (DEVIOTLB_UNMAP when enabled, plain UNMAP otherwise).  A no-op
 * unless vhost is currently started.
 */
void vhost_toggle_device_iotlb(VirtIODevice *vdev)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    struct vhost_dev *dev;
    struct vhost_iommu *iommu;

    if (vdev->vhost_started) {
        dev = vdc->get_vhost(vdev);
    } else {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        /* Swap the notifier's flags by unregistering and re-adding it. */
        memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n);
        iommu->n.notifier_flags = vdev->device_iotlb_enabled ?
                IOMMU_NOTIFIER_DEVIOTLB_UNMAP : IOMMU_NOTIFIER_UNMAP;
        memory_region_register_iommu_notifier(iommu->mr, &iommu->n,
                                              &error_fatal);
    }
}
|
||||
|
||||
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
|
||||
struct vhost_virtqueue *vq,
|
||||
unsigned idx, bool enable_log)
|
||||
|
|
|
@ -1210,7 +1210,7 @@ static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
@ -1229,7 +1229,7 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
|
|||
|
||||
/*
|
||||
* Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1
|
||||
* as the Marco of configure interrupt's IDX, If this driver does not
|
||||
* as the macro of configure interrupt's IDX, If this driver does not
|
||||
* support, the function will return
|
||||
*/
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include "hw/virtio/virtio.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/reset.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "trace.h"
|
||||
|
@ -1100,29 +1101,24 @@ static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
|
|||
new_mask);
|
||||
|
||||
if ((cur_mask & new_mask) == 0) {
|
||||
error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
|
||||
" is incompatible with mask 0x%"PRIx64, cur_mask, new_mask);
|
||||
error_setg(errp, "virtio-iommu %s reports a page size mask 0x%"PRIx64
|
||||
" incompatible with currently supported mask 0x%"PRIx64,
|
||||
mr->parent_obj.name, new_mask, cur_mask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* After the machine is finalized, we can't change the mask anymore. If by
|
||||
* Once the granule is frozen we can't change the mask anymore. If by
|
||||
* chance the hotplugged device supports the same granule, we can still
|
||||
* accept it. Having a different masks is possible but the guest will use
|
||||
* sub-optimal block sizes, so warn about it.
|
||||
* accept it.
|
||||
*/
|
||||
if (phase_check(PHASE_MACHINE_READY)) {
|
||||
int new_granule = ctz64(new_mask);
|
||||
if (s->granule_frozen) {
|
||||
int cur_granule = ctz64(cur_mask);
|
||||
|
||||
if (new_granule != cur_granule) {
|
||||
error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
|
||||
" is incompatible with mask 0x%"PRIx64, cur_mask,
|
||||
new_mask);
|
||||
if (!(BIT(cur_granule) & new_mask)) {
|
||||
error_setg(errp, "virtio-iommu %s does not support frozen granule 0x%llx",
|
||||
mr->parent_obj.name, BIT_ULL(cur_granule));
|
||||
return -1;
|
||||
} else if (new_mask != cur_mask) {
|
||||
warn_report("virtio-iommu page mask 0x%"PRIx64
|
||||
" does not match 0x%"PRIx64, cur_mask, new_mask);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1146,6 +1142,28 @@ static void virtio_iommu_system_reset(void *opaque)
|
|||
|
||||
}
|
||||
|
||||
/*
 * machine-init-done notifier: freeze the virtio-iommu page-size
 * granule.  Before freezing, briefly disable bypass so that
 * memory_region_iommu_set_page_size_mask() requirements from VFIO's
 * region_add() callback are collected, then restore the default.
 */
static void virtio_iommu_freeze_granule(Notifier *notifier, void *data)
{
    VirtIOIOMMU *s = container_of(notifier, VirtIOIOMMU, machine_done);
    int granule;

    if (likely(s->config.bypass)) {
        /*
         * Transient IOMMU MR enable to collect page_size_mask requirements
         * through memory_region_iommu_set_page_size_mask() called by
         * VFIO region_add() callback
         */
        s->config.bypass = false;
        virtio_iommu_switch_address_space_all(s);
        /* restore default */
        s->config.bypass = true;
        virtio_iommu_switch_address_space_all(s);
    }
    /* After this point virtio_iommu_set_page_size_mask() rejects changes. */
    s->granule_frozen = true;
    granule = ctz64(s->config.page_size_mask);
    trace_virtio_iommu_freeze_granule(BIT(granule));
}
|
||||
|
||||
static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
|
||||
|
@ -1189,6 +1207,9 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
|
|||
error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
|
||||
}
|
||||
|
||||
s->machine_done.notify = virtio_iommu_freeze_granule;
|
||||
qemu_add_machine_init_done_notifier(&s->machine_done);
|
||||
|
||||
qemu_register_reset(virtio_iommu_system_reset, s);
|
||||
}
|
||||
|
||||
|
@ -1198,6 +1219,7 @@ static void virtio_iommu_device_unrealize(DeviceState *dev)
|
|||
VirtIOIOMMU *s = VIRTIO_IOMMU(dev);
|
||||
|
||||
qemu_unregister_reset(virtio_iommu_system_reset, s);
|
||||
qemu_remove_machine_init_done_notifier(&s->machine_done);
|
||||
|
||||
g_hash_table_destroy(s->as_by_busptr);
|
||||
if (s->domains) {
|
||||
|
|
|
@ -35,6 +35,8 @@ void machine_set_cpu_numa_node(MachineState *machine,
|
|||
Error **errp);
|
||||
void machine_parse_smp_config(MachineState *ms,
|
||||
const SMPConfiguration *config, Error **errp);
|
||||
unsigned int machine_topo_get_cores_per_socket(const MachineState *ms);
|
||||
unsigned int machine_topo_get_threads_per_socket(const MachineState *ms);
|
||||
|
||||
/**
|
||||
* machine_class_allow_dynamic_sysbus_dev: Add type to list of valid devices
|
||||
|
|
|
@ -146,6 +146,10 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level);
|
|||
|
||||
void pc_guest_info_init(PCMachineState *pcms);
|
||||
|
||||
#define PCI_HOST_PROP_RAM_MEM "ram-mem"
|
||||
#define PCI_HOST_PROP_PCI_MEM "pci-mem"
|
||||
#define PCI_HOST_PROP_SYSTEM_MEM "system-mem"
|
||||
#define PCI_HOST_PROP_IO_MEM "io-mem"
|
||||
#define PCI_HOST_PROP_PCI_HOLE_START "pci-hole-start"
|
||||
#define PCI_HOST_PROP_PCI_HOLE_END "pci-hole-end"
|
||||
#define PCI_HOST_PROP_PCI_HOLE64_START "pci-hole64-start"
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
#include "hw/pci-host/pam.h"
|
||||
#include "qom/object.h"
|
||||
|
||||
#define I440FX_HOST_PROP_PCI_TYPE "pci-type"
|
||||
|
||||
#define TYPE_I440FX_PCI_HOST_BRIDGE "i440FX-pcihost"
|
||||
#define TYPE_I440FX_PCI_DEVICE "i440FX"
|
||||
|
||||
|
@ -25,9 +27,6 @@ struct PCII440FXState {
|
|||
PCIDevice parent_obj;
|
||||
/*< public >*/
|
||||
|
||||
MemoryRegion *system_memory;
|
||||
MemoryRegion *pci_address_space;
|
||||
MemoryRegion *ram_memory;
|
||||
PAMMemoryRegion pam_regions[PAM_REGIONS_COUNT];
|
||||
MemoryRegion smram_region;
|
||||
MemoryRegion smram, low_smram;
|
||||
|
@ -35,15 +34,4 @@ struct PCII440FXState {
|
|||
|
||||
#define TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE "igd-passthrough-i440FX"
|
||||
|
||||
PCIBus *i440fx_init(const char *pci_type,
|
||||
DeviceState *dev,
|
||||
MemoryRegion *address_space_mem,
|
||||
MemoryRegion *address_space_io,
|
||||
ram_addr_t ram_size,
|
||||
ram_addr_t below_4g_mem_size,
|
||||
ram_addr_t above_4g_mem_size,
|
||||
MemoryRegion *pci_memory,
|
||||
MemoryRegion *ram_memory);
|
||||
|
||||
|
||||
#endif
|
||||
|
|
|
@ -74,11 +74,6 @@ struct Q35PCIHost {
|
|||
* gmch part
|
||||
*/
|
||||
|
||||
#define MCH_HOST_PROP_RAM_MEM "ram-mem"
|
||||
#define MCH_HOST_PROP_PCI_MEM "pci-mem"
|
||||
#define MCH_HOST_PROP_SYSTEM_MEM "system-mem"
|
||||
#define MCH_HOST_PROP_IO_MEM "io-mem"
|
||||
|
||||
/* PCI configuration */
|
||||
#define MCH_HOST_BRIDGE "MCH"
|
||||
|
||||
|
|
|
@ -209,6 +209,8 @@ enum {
|
|||
QEMU_PCIE_CAP_CXL = (1 << QEMU_PCIE_CXL_BITNR),
|
||||
#define QEMU_PCIE_ERR_UNC_MASK_BITNR 11
|
||||
QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR),
|
||||
#define QEMU_PCIE_ARI_NEXTFN_1_BITNR 12
|
||||
QEMU_PCIE_ARI_NEXTFN_1 = (1 << QEMU_PCIE_ARI_NEXTFN_1_BITNR),
|
||||
};
|
||||
|
||||
typedef struct PCIINTxRoute {
|
||||
|
@ -577,13 +579,11 @@ pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg)
|
|||
pci_set_quad(config, (~mask & val) | (mask & rval));
|
||||
}
|
||||
|
||||
PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
|
||||
const char *name);
|
||||
PCIDevice *pci_new_multifunction(int devfn, const char *name);
|
||||
PCIDevice *pci_new(int devfn, const char *name);
|
||||
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp);
|
||||
|
||||
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
|
||||
bool multifunction,
|
||||
const char *name);
|
||||
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name);
|
||||
|
||||
|
|
|
@ -31,6 +31,8 @@
|
|||
#include "hw/sysbus.h"
|
||||
#include "qom/object.h"
|
||||
|
||||
#define PCI_HOST_BYPASS_IOMMU "bypass-iommu"
|
||||
|
||||
#define TYPE_PCI_HOST_BRIDGE "pci-host-bridge"
|
||||
OBJECT_DECLARE_TYPE(PCIHostState, PCIHostBridgeClass, PCI_HOST_BRIDGE)
|
||||
|
||||
|
|
|
@ -135,7 +135,7 @@ void pcie_sync_bridge_lnk(PCIDevice *dev);
|
|||
void pcie_acs_init(PCIDevice *dev, uint16_t offset);
|
||||
void pcie_acs_reset(PCIDevice *dev);
|
||||
|
||||
void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn);
|
||||
void pcie_ari_init(PCIDevice *dev, uint16_t offset);
|
||||
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num);
|
||||
void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned);
|
||||
|
||||
|
|
|
@ -926,6 +926,15 @@ BusState *sysbus_get_default(void);
|
|||
char *qdev_get_fw_dev_path(DeviceState *dev);
|
||||
char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev);
|
||||
|
||||
/**
|
||||
* device_class_set_props(): add a set of properties to an device
|
||||
* @dc: the parent DeviceClass all devices inherit
|
||||
* @props: an array of properties, terminate by DEFINE_PROP_END_OF_LIST()
|
||||
*
|
||||
* This will add a set of properties to the object. It will fault if
|
||||
* you attempt to add an existing property defined by a parent class.
|
||||
* To modify an inherited property you need to use????
|
||||
*/
|
||||
void device_class_set_props(DeviceClass *dc, Property *props);
|
||||
|
||||
/**
|
||||
|
@ -943,9 +952,36 @@ void device_class_set_props(DeviceClass *dc, Property *props);
|
|||
void device_class_set_parent_reset(DeviceClass *dc,
|
||||
DeviceReset dev_reset,
|
||||
DeviceReset *parent_reset);
|
||||
|
||||
/**
|
||||
* device_class_set_parent_realize() - set up for chaining realize fns
|
||||
* @dc: The device class
|
||||
* @dev_realize: the device realize function
|
||||
* @parent_realize: somewhere to save the parents realize function
|
||||
*
|
||||
* This is intended to be used when the new realize function will
|
||||
* eventually call its parent realization function during creation.
|
||||
* This requires storing the function call somewhere (usually in the
|
||||
* instance structure) so you can eventually call
|
||||
* dc->parent_realize(dev, errp)
|
||||
*/
|
||||
void device_class_set_parent_realize(DeviceClass *dc,
|
||||
DeviceRealize dev_realize,
|
||||
DeviceRealize *parent_realize);
|
||||
|
||||
|
||||
/**
|
||||
* device_class_set_parent_unrealize() - set up for chaining unrealize fns
|
||||
* @dc: The device class
|
||||
* @dev_unrealize: the device realize function
|
||||
* @parent_unrealize: somewhere to save the parents unrealize function
|
||||
*
|
||||
* This is intended to be used when the new unrealize function will
|
||||
* eventually call its parent unrealization function during the
|
||||
* unrealize phase. This requires storing the function call somewhere
|
||||
* (usually in the instance structure) so you can eventually call
|
||||
* dc->parent_unrealize(dev);
|
||||
*/
|
||||
void device_class_set_parent_unrealize(DeviceClass *dc,
|
||||
DeviceUnrealize dev_unrealize,
|
||||
DeviceUnrealize *parent_unrealize);
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
/*
 * Vhost-user SCMI virtio device
 *
 * Copyright (c) 2023 Red Hat, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

/*
 * Fix: the include guard used `_QEMU_VHOST_USER_SCMI_H`; identifiers
 * beginning with an underscore followed by an uppercase letter are
 * reserved for the implementation (C11 7.1.3).
 */
#ifndef QEMU_VHOST_USER_SCMI_H
#define QEMU_VHOST_USER_SCMI_H

#include "hw/virtio/virtio.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"

#define TYPE_VHOST_USER_SCMI "vhost-user-scmi"
OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCMI, VHOST_USER_SCMI);

struct VHostUserSCMI {
    VirtIODevice parent;
    /* Chardev carrying the vhost-user connection to the daemon */
    CharBackend chardev;
    /*
     * NOTE(review): vhost_vqs appears unused by the device code, which
     * allocates vhost_dev.vqs directly -- confirm and consider removing.
     */
    struct vhost_virtqueue *vhost_vqs;
    struct vhost_dev vhost_dev;
    VhostUserState vhost_user;
    /* SCMI command and event virtqueues (virtio-scmi spec layout) */
    VirtQueue *cmd_vq;
    VirtQueue *event_vq;
    /* True while the backend chardev is connected */
    bool connected;
};

#endif /* QEMU_VHOST_USER_SCMI_H */
|
|
@ -320,6 +320,7 @@ bool vhost_has_free_slot(void);
|
|||
int vhost_net_set_backend(struct vhost_dev *hdev,
|
||||
struct vhost_vring_file *file);
|
||||
|
||||
void vhost_toggle_device_iotlb(VirtIODevice *vdev);
|
||||
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
|
||||
|
||||
int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
|
||||
|
|
|
@ -242,6 +242,8 @@ void virtio_gpu_base_reset(VirtIOGPUBase *g);
|
|||
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
|
||||
struct virtio_gpu_resp_display_info *dpy_info);
|
||||
|
||||
void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
|
||||
struct virtio_gpu_resp_edid *edid);
|
||||
/* virtio-gpu.c */
|
||||
void virtio_gpu_ctrl_response(VirtIOGPU *g,
|
||||
struct virtio_gpu_ctrl_command *cmd,
|
||||
|
|
|
@ -61,6 +61,8 @@ struct VirtIOIOMMU {
|
|||
QemuRecMutex mutex;
|
||||
GTree *endpoints;
|
||||
bool boot_bypass;
|
||||
Notifier machine_done;
|
||||
bool granule_frozen;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -150,10 +150,18 @@ struct VirtIODevice
|
|||
VMChangeStateEntry *vmstate;
|
||||
char *bus_name;
|
||||
uint8_t device_endian;
|
||||
/**
|
||||
* @user_guest_notifier_mask: gate usage of ->guest_notifier_mask() callback.
|
||||
* This is used to suppress the masking of guest updates for
|
||||
* vhost-user devices which are asynchronous by design.
|
||||
*/
|
||||
bool use_guest_notifier_mask;
|
||||
AddressSpace *dma_as;
|
||||
QLIST_HEAD(, VirtQueue) *vector_queues;
|
||||
QTAILQ_ENTRY(VirtIODevice) next;
|
||||
/**
|
||||
* @config_notifier: the event notifier that handles config events
|
||||
*/
|
||||
EventNotifier config_notifier;
|
||||
bool device_iotlb_enabled;
|
||||
};
|
||||
|
@ -219,6 +227,12 @@ struct VirtioDeviceClass {
|
|||
void virtio_instance_init_common(Object *proxy_obj, void *data,
|
||||
size_t vdev_size, const char *vdev_name);
|
||||
|
||||
/**
|
||||
* virtio_init() - initialise the common VirtIODevice structure
|
||||
* @vdev: pointer to VirtIODevice
|
||||
* @device_id: the VirtIO device ID (see virtio_ids.h)
|
||||
* @config_size: size of the config space
|
||||
*/
|
||||
void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size);
|
||||
|
||||
void virtio_cleanup(VirtIODevice *vdev);
|
||||
|
@ -276,6 +290,13 @@ extern const VMStateInfo virtio_vmstate_info;
|
|||
|
||||
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id);
|
||||
|
||||
/**
|
||||
* virtio_notify_config() - signal a change to device config
|
||||
* @vdev: the virtio device
|
||||
*
|
||||
* Assuming the virtio device is up (VIRTIO_CONFIG_S_DRIVER_OK) this
|
||||
* will trigger a guest interrupt and update the config version.
|
||||
*/
|
||||
void virtio_notify_config(VirtIODevice *vdev);
|
||||
|
||||
bool virtio_queue_get_notification(VirtQueue *vq);
|
||||
|
|
451
net/vhost-vdpa.c
451
net/vhost-vdpa.c
|
@ -110,6 +110,8 @@ static const uint64_t vdpa_svq_device_features =
|
|||
BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
|
||||
BIT_ULL(VIRTIO_NET_F_STATUS) |
|
||||
BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
|
||||
BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
|
||||
BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
|
||||
BIT_ULL(VIRTIO_NET_F_MQ) |
|
||||
BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
|
||||
BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
|
||||
|
@ -626,34 +628,96 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
|
|||
}
|
||||
|
||||
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
|
||||
uint8_t cmd, const void *data,
|
||||
size_t data_size)
|
||||
uint8_t cmd, const struct iovec *data_sg,
|
||||
size_t data_num)
|
||||
{
|
||||
const struct virtio_net_ctrl_hdr ctrl = {
|
||||
.class = class,
|
||||
.cmd = cmd,
|
||||
};
|
||||
size_t data_size = iov_size(data_sg, data_num);
|
||||
|
||||
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
|
||||
|
||||
/* pack the CVQ command header */
|
||||
memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
|
||||
memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
|
||||
|
||||
return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
|
||||
/* pack the CVQ command command-specific-data */
|
||||
iov_to_buf(data_sg, data_num, 0,
|
||||
s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
|
||||
|
||||
return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
|
||||
sizeof(virtio_net_ctrl_ack));
|
||||
}
|
||||
|
||||
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
|
||||
{
|
||||
if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
|
||||
const struct iovec data = {
|
||||
.iov_base = (void *)n->mac,
|
||||
.iov_len = sizeof(n->mac),
|
||||
};
|
||||
ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
|
||||
VIRTIO_NET_CTRL_MAC_ADDR_SET,
|
||||
n->mac, sizeof(n->mac));
|
||||
&data, 1);
|
||||
if (unlikely(dev_written < 0)) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
return *s->status != VIRTIO_NET_OK;
|
||||
/*
|
||||
* According to VirtIO standard, "The device MUST have an
|
||||
* empty MAC filtering table on reset.".
|
||||
*
|
||||
* Therefore, there is no need to send this CVQ command if the
|
||||
* driver also sets an empty MAC filter table, which aligns with
|
||||
* the device's defaults.
|
||||
*
|
||||
* Note that the device's defaults can mismatch the driver's
|
||||
* configuration only at live migration.
|
||||
*/
|
||||
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
|
||||
n->mac_table.in_use == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t uni_entries = n->mac_table.first_multi,
|
||||
uni_macs_size = uni_entries * ETH_ALEN,
|
||||
mul_entries = n->mac_table.in_use - uni_entries,
|
||||
mul_macs_size = mul_entries * ETH_ALEN;
|
||||
struct virtio_net_ctrl_mac uni = {
|
||||
.entries = cpu_to_le32(uni_entries),
|
||||
};
|
||||
struct virtio_net_ctrl_mac mul = {
|
||||
.entries = cpu_to_le32(mul_entries),
|
||||
};
|
||||
const struct iovec data[] = {
|
||||
{
|
||||
.iov_base = &uni,
|
||||
.iov_len = sizeof(uni),
|
||||
}, {
|
||||
.iov_base = n->mac_table.macs,
|
||||
.iov_len = uni_macs_size,
|
||||
}, {
|
||||
.iov_base = &mul,
|
||||
.iov_len = sizeof(mul),
|
||||
}, {
|
||||
.iov_base = &n->mac_table.macs[uni_macs_size],
|
||||
.iov_len = mul_macs_size,
|
||||
},
|
||||
};
|
||||
ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
|
||||
VIRTIO_NET_CTRL_MAC,
|
||||
VIRTIO_NET_CTRL_MAC_TABLE_SET,
|
||||
data, ARRAY_SIZE(data));
|
||||
if (unlikely(dev_written < 0)) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -670,14 +734,21 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
|
|||
}
|
||||
|
||||
mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
|
||||
const struct iovec data = {
|
||||
.iov_base = &mq,
|
||||
.iov_len = sizeof(mq),
|
||||
};
|
||||
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
|
||||
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
|
||||
sizeof(mq));
|
||||
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
|
||||
&data, 1);
|
||||
if (unlikely(dev_written < 0)) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return *s->status != VIRTIO_NET_OK;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
|
||||
|
@ -708,14 +779,190 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
|
|||
}
|
||||
|
||||
offloads = cpu_to_le64(n->curr_guest_offloads);
|
||||
const struct iovec data = {
|
||||
.iov_base = &offloads,
|
||||
.iov_len = sizeof(offloads),
|
||||
};
|
||||
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
|
||||
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
|
||||
&offloads, sizeof(offloads));
|
||||
&data, 1);
|
||||
if (unlikely(dev_written < 0)) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return *s->status != VIRTIO_NET_OK;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
|
||||
uint8_t cmd,
|
||||
uint8_t on)
|
||||
{
|
||||
const struct iovec data = {
|
||||
.iov_base = &on,
|
||||
.iov_len = sizeof(on),
|
||||
};
|
||||
return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
|
||||
cmd, &data, 1);
|
||||
}
|
||||
|
||||
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
|
||||
const VirtIONet *n)
|
||||
{
|
||||
ssize_t dev_written;
|
||||
|
||||
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* According to virtio_net_reset(), device turns promiscuous mode
|
||||
* on by default.
|
||||
*
|
||||
* Addtionally, according to VirtIO standard, "Since there are
|
||||
* no guarantees, it can use a hash filter or silently switch to
|
||||
* allmulti or promiscuous mode if it is given too many addresses.".
|
||||
* QEMU marks `n->mac_table.uni_overflow` if guest sets too many
|
||||
* non-multicast MAC addresses, indicating that promiscuous mode
|
||||
* should be enabled.
|
||||
*
|
||||
* Therefore, QEMU should only send this CVQ command if the
|
||||
* `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
|
||||
* which sets promiscuous mode on, different from the device's defaults.
|
||||
*
|
||||
* Note that the device's defaults can mismatch the driver's
|
||||
* configuration only at live migration.
|
||||
*/
|
||||
if (!n->mac_table.uni_overflow && !n->promisc) {
|
||||
dev_written = vhost_vdpa_net_load_rx_mode(s,
|
||||
VIRTIO_NET_CTRL_RX_PROMISC, 0);
|
||||
if (unlikely(dev_written < 0)) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* According to virtio_net_reset(), device turns all-multicast mode
|
||||
* off by default.
|
||||
*
|
||||
* According to VirtIO standard, "Since there are no guarantees,
|
||||
* it can use a hash filter or silently switch to allmulti or
|
||||
* promiscuous mode if it is given too many addresses.". QEMU marks
|
||||
* `n->mac_table.multi_overflow` if guest sets too many
|
||||
* non-multicast MAC addresses.
|
||||
*
|
||||
* Therefore, QEMU should only send this CVQ command if the
|
||||
* `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
|
||||
* which sets all-multicast mode on, different from the device's defaults.
|
||||
*
|
||||
* Note that the device's defaults can mismatch the driver's
|
||||
* configuration only at live migration.
|
||||
*/
|
||||
if (n->mac_table.multi_overflow || n->allmulti) {
|
||||
dev_written = vhost_vdpa_net_load_rx_mode(s,
|
||||
VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
|
||||
if (unlikely(dev_written < 0)) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* According to virtio_net_reset(), device turns all-unicast mode
|
||||
* off by default.
|
||||
*
|
||||
* Therefore, QEMU should only send this CVQ command if the driver
|
||||
* sets all-unicast mode on, different from the device's defaults.
|
||||
*
|
||||
* Note that the device's defaults can mismatch the driver's
|
||||
* configuration only at live migration.
|
||||
*/
|
||||
if (n->alluni) {
|
||||
dev_written = vhost_vdpa_net_load_rx_mode(s,
|
||||
VIRTIO_NET_CTRL_RX_ALLUNI, 1);
|
||||
if (dev_written < 0) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* According to virtio_net_reset(), device turns non-multicast mode
|
||||
* off by default.
|
||||
*
|
||||
* Therefore, QEMU should only send this CVQ command if the driver
|
||||
* sets non-multicast mode on, different from the device's defaults.
|
||||
*
|
||||
* Note that the device's defaults can mismatch the driver's
|
||||
* configuration only at live migration.
|
||||
*/
|
||||
if (n->nomulti) {
|
||||
dev_written = vhost_vdpa_net_load_rx_mode(s,
|
||||
VIRTIO_NET_CTRL_RX_NOMULTI, 1);
|
||||
if (dev_written < 0) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* According to virtio_net_reset(), device turns non-unicast mode
|
||||
* off by default.
|
||||
*
|
||||
* Therefore, QEMU should only send this CVQ command if the driver
|
||||
* sets non-unicast mode on, different from the device's defaults.
|
||||
*
|
||||
* Note that the device's defaults can mismatch the driver's
|
||||
* configuration only at live migration.
|
||||
*/
|
||||
if (n->nouni) {
|
||||
dev_written = vhost_vdpa_net_load_rx_mode(s,
|
||||
VIRTIO_NET_CTRL_RX_NOUNI, 1);
|
||||
if (dev_written < 0) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* According to virtio_net_reset(), device turns non-broadcast mode
|
||||
* off by default.
|
||||
*
|
||||
* Therefore, QEMU should only send this CVQ command if the driver
|
||||
* sets non-broadcast mode on, different from the device's defaults.
|
||||
*
|
||||
* Note that the device's defaults can mismatch the driver's
|
||||
* configuration only at live migration.
|
||||
*/
|
||||
if (n->nobcast) {
|
||||
dev_written = vhost_vdpa_net_load_rx_mode(s,
|
||||
VIRTIO_NET_CTRL_RX_NOBCAST, 1);
|
||||
if (dev_written < 0) {
|
||||
return dev_written;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vhost_vdpa_net_load(NetClientState *nc)
|
||||
|
@ -744,6 +991,10 @@ static int vhost_vdpa_net_load(NetClientState *nc)
|
|||
if (unlikely(r)) {
|
||||
return r;
|
||||
}
|
||||
r = vhost_vdpa_net_load_rx(s, n);
|
||||
if (unlikely(r)) {
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -761,6 +1012,148 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
|
|||
.check_peer_type = vhost_vdpa_check_peer_type,
|
||||
};
|
||||
|
||||
/*
|
||||
* Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
|
||||
* vdpa device.
|
||||
*
|
||||
* Considering that QEMU cannot send the entire filter table to the
|
||||
* vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
|
||||
* command to enable promiscuous mode to receive all packets,
|
||||
* according to VirtIO standard, "Since there are no guarantees,
|
||||
* it can use a hash filter or silently switch to allmulti or
|
||||
* promiscuous mode if it is given too many addresses.".
|
||||
*
|
||||
* Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
|
||||
* marks `n->mac_table.x_overflow` accordingly, it should have
|
||||
* the same effect on the device model to receive
|
||||
* (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
|
||||
* The same applies to multicast MAC addresses.
|
||||
*
|
||||
* Therefore, QEMU can provide the device model with a fake
|
||||
* VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
|
||||
* non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
|
||||
* MAC addresses. This ensures that the device model marks
|
||||
* `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
|
||||
* allowing all packets to be received, which aligns with the
|
||||
* state of the vdpa device.
|
||||
*/
|
||||
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
|
||||
VirtQueueElement *elem,
|
||||
struct iovec *out)
|
||||
{
|
||||
struct virtio_net_ctrl_mac mac_data, *mac_ptr;
|
||||
struct virtio_net_ctrl_hdr *hdr_ptr;
|
||||
uint32_t cursor;
|
||||
ssize_t r;
|
||||
|
||||
/* parse the non-multicast MAC address entries from CVQ command */
|
||||
cursor = sizeof(*hdr_ptr);
|
||||
r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
|
||||
&mac_data, sizeof(mac_data));
|
||||
if (unlikely(r != sizeof(mac_data))) {
|
||||
/*
|
||||
* If the CVQ command is invalid, we should simulate the vdpa device
|
||||
* to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
|
||||
*/
|
||||
*s->status = VIRTIO_NET_ERR;
|
||||
return sizeof(*s->status);
|
||||
}
|
||||
cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
|
||||
|
||||
/* parse the multicast MAC address entries from CVQ command */
|
||||
r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
|
||||
&mac_data, sizeof(mac_data));
|
||||
if (r != sizeof(mac_data)) {
|
||||
/*
|
||||
* If the CVQ command is invalid, we should simulate the vdpa device
|
||||
* to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
|
||||
*/
|
||||
*s->status = VIRTIO_NET_ERR;
|
||||
return sizeof(*s->status);
|
||||
}
|
||||
cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
|
||||
|
||||
/* validate the CVQ command */
|
||||
if (iov_size(elem->out_sg, elem->out_num) != cursor) {
|
||||
/*
|
||||
* If the CVQ command is invalid, we should simulate the vdpa device
|
||||
* to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
|
||||
*/
|
||||
*s->status = VIRTIO_NET_ERR;
|
||||
return sizeof(*s->status);
|
||||
}
|
||||
|
||||
/*
|
||||
* According to VirtIO standard, "Since there are no guarantees,
|
||||
* it can use a hash filter or silently switch to allmulti or
|
||||
* promiscuous mode if it is given too many addresses.".
|
||||
*
|
||||
* Therefore, considering that QEMU is unable to send the entire
|
||||
* filter table to the vdpa device, it should send the
|
||||
* VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
|
||||
*/
|
||||
r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
|
||||
if (unlikely(r < 0)) {
|
||||
return r;
|
||||
}
|
||||
if (*s->status != VIRTIO_NET_OK) {
|
||||
return sizeof(*s->status);
|
||||
}
|
||||
|
||||
/*
|
||||
* QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
|
||||
* command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
|
||||
* non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
|
||||
* multicast MAC addresses.
|
||||
*
|
||||
* By doing so, the device model can mark `n->mac_table.uni_overflow`
|
||||
* and `n->mac_table.multi_overflow`, enabling all packets to be
|
||||
* received, which aligns with the state of the vdpa device.
|
||||
*/
|
||||
cursor = 0;
|
||||
uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
|
||||
fake_mul_entries = MAC_TABLE_ENTRIES + 1,
|
||||
fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
|
||||
sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
|
||||
sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
|
||||
|
||||
assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
|
||||
out->iov_len = fake_cvq_size;
|
||||
|
||||
/* pack the header for fake CVQ command */
|
||||
hdr_ptr = out->iov_base + cursor;
|
||||
hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
|
||||
hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
|
||||
cursor += sizeof(*hdr_ptr);
|
||||
|
||||
/*
|
||||
* Pack the non-multicast MAC addresses part for fake CVQ command.
|
||||
*
|
||||
* According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
|
||||
* addresses provieded in CVQ command. Therefore, only the entries
|
||||
* field need to be prepared in the CVQ command.
|
||||
*/
|
||||
mac_ptr = out->iov_base + cursor;
|
||||
mac_ptr->entries = cpu_to_le32(fake_uni_entries);
|
||||
cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
|
||||
|
||||
/*
|
||||
* Pack the multicast MAC addresses part for fake CVQ command.
|
||||
*
|
||||
* According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
|
||||
* addresses provieded in CVQ command. Therefore, only the entries
|
||||
* field need to be prepared in the CVQ command.
|
||||
*/
|
||||
mac_ptr = out->iov_base + cursor;
|
||||
mac_ptr->entries = cpu_to_le32(fake_mul_entries);
|
||||
|
||||
/*
|
||||
* Simulating QEMU poll a vdpa device used buffer
|
||||
* for VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
|
||||
*/
|
||||
return sizeof(*s->status);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate and copy control virtqueue commands.
|
||||
*
|
||||
|
@ -773,6 +1166,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
|
|||
{
|
||||
VhostVDPAState *s = opaque;
|
||||
size_t in_len;
|
||||
const struct virtio_net_ctrl_hdr *ctrl;
|
||||
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
|
||||
/* Out buffer sent to both the vdpa device and the device model */
|
||||
struct iovec out = {
|
||||
|
@ -787,14 +1181,34 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
|
|||
|
||||
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
|
||||
s->cvq_cmd_out_buffer,
|
||||
vhost_vdpa_net_cvq_cmd_len());
|
||||
if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
|
||||
vhost_vdpa_net_cvq_cmd_page_len());
|
||||
|
||||
ctrl = s->cvq_cmd_out_buffer;
|
||||
if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
|
||||
/*
|
||||
* Guest announce capability is emulated by qemu, so don't forward to
|
||||
* the device.
|
||||
*/
|
||||
dev_written = sizeof(status);
|
||||
*s->status = VIRTIO_NET_OK;
|
||||
} else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
|
||||
ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
|
||||
iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
|
||||
/*
|
||||
* Due to the size limitation of the out buffer sent to the vdpa device,
|
||||
* which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
|
||||
* MAC addresses set by the driver for the filter table can cause
|
||||
* truncation of the CVQ command in QEMU. As a result, the vdpa device
|
||||
* rejects the flawed CVQ command.
|
||||
*
|
||||
* Therefore, QEMU must handle this situation instead of sending
|
||||
* the CVQ command direclty.
|
||||
*/
|
||||
dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
|
||||
&out);
|
||||
if (unlikely(dev_written < 0)) {
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
|
||||
if (unlikely(dev_written < 0)) {
|
||||
|
@ -824,7 +1238,16 @@ out:
|
|||
error_report("Bad device CVQ written length");
|
||||
}
|
||||
vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
|
||||
g_free(elem);
|
||||
/*
|
||||
* `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
|
||||
* the function successfully forwards the CVQ command, indicated
|
||||
* by a non-negative value of `dev_written`. Otherwise, it still
|
||||
* belongs to SVQ.
|
||||
* This function should only free the `elem` when it owns.
|
||||
*/
|
||||
if (dev_written >= 0) {
|
||||
g_free(elem);
|
||||
}
|
||||
return dev_written < 0 ? dev_written : 0;
|
||||
}
|
||||
|
||||
|
|
Binary file not shown.
|
@ -1020,9 +1020,9 @@ static void test_acpi_q35_tcg_no_acpi_hotplug(void)
|
|||
" -device pci-testdev,bus=nohprp,acpi-index=501"
|
||||
" -device pcie-root-port,id=nohprpint,port=0x0,chassis=3,hotplug=off,"
|
||||
"multifunction=on,addr=8.0"
|
||||
" -device pci-testdev,bus=nohprpint,acpi-index=601,addr=8.1"
|
||||
" -device pci-testdev,bus=nohprpint,acpi-index=601,addr=0.1"
|
||||
" -device pcie-root-port,id=hprp2,port=0x0,chassis=4,bus=nohprpint,"
|
||||
"addr=9.0"
|
||||
"addr=0.2"
|
||||
" -device pci-testdev,bus=hprp2,acpi-index=602"
|
||||
, &data);
|
||||
free_test_data(&data);
|
||||
|
|
|
@ -784,14 +784,12 @@ static void test_override_scsi(void)
|
|||
test_override(args, "pc", expected);
|
||||
}
|
||||
|
||||
static void setup_pci_bridge(TestArgs *args, const char *id, const char *rootid)
|
||||
static void setup_pci_bridge(TestArgs *args, const char *id)
|
||||
{
|
||||
|
||||
char *root, *br;
|
||||
root = g_strdup_printf("-device pcie-root-port,id=%s", rootid);
|
||||
br = g_strdup_printf("-device pcie-pci-bridge,bus=%s,id=%s", rootid, id);
|
||||
char *br;
|
||||
br = g_strdup_printf("-device pcie-pci-bridge,bus=pcie.0,id=%s", id);
|
||||
|
||||
args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, root);
|
||||
args->argc = append_arg(args->argc, args->argv, ARGV_SIZE, br);
|
||||
}
|
||||
|
||||
|
@ -811,8 +809,8 @@ static void test_override_scsi_q35(void)
|
|||
add_drive_with_mbr(args, empty_mbr, 1);
|
||||
add_drive_with_mbr(args, empty_mbr, 1);
|
||||
add_drive_with_mbr(args, empty_mbr, 1);
|
||||
setup_pci_bridge(args, "pcie.0", "br");
|
||||
add_scsi_controller(args, "lsi53c895a", "br", 3);
|
||||
setup_pci_bridge(args, "pcie-pci-br");
|
||||
add_scsi_controller(args, "lsi53c895a", "pcie-pci-br", 3);
|
||||
add_scsi_disk(args, 0, 0, 0, 0, 0, 10000, 120, 30);
|
||||
add_scsi_disk(args, 1, 0, 0, 1, 0, 9000, 120, 30);
|
||||
add_scsi_disk(args, 2, 0, 0, 2, 0, 1, 0, 0);
|
||||
|
@ -868,9 +866,9 @@ static void test_override_virtio_blk_q35(void)
|
|||
};
|
||||
add_drive_with_mbr(args, empty_mbr, 1);
|
||||
add_drive_with_mbr(args, empty_mbr, 1);
|
||||
setup_pci_bridge(args, "pcie.0", "br");
|
||||
add_virtio_disk(args, 0, "br", 3, 10000, 120, 30);
|
||||
add_virtio_disk(args, 1, "br", 4, 9000, 120, 30);
|
||||
setup_pci_bridge(args, "pcie-pci-br");
|
||||
add_virtio_disk(args, 0, "pcie-pci-br", 3, 10000, 120, 30);
|
||||
add_virtio_disk(args, 1, "pcie-pci-br", 4, 9000, 120, 30);
|
||||
test_override(args, "q35", expected);
|
||||
}
|
||||
|
||||
|
|
|
@ -46,6 +46,7 @@ libqos_srcs = files(
|
|||
'virtio-serial.c',
|
||||
'virtio-iommu.c',
|
||||
'virtio-gpio.c',
|
||||
'virtio-scmi.c',
|
||||
'generic-pcihost.c',
|
||||
|
||||
# qgraph machines:
|
||||
|
|
|
@ -0,0 +1,174 @@
|
|||
/*
|
||||
* virtio-scmi nodes for testing
|
||||
*
|
||||
* SPDX-FileCopyrightText: Linaro Ltd
|
||||
* SPDX-FileCopyrightText: Red Hat, Inc.
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*
|
||||
* Based on virtio-gpio.c, doing basically the same thing.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "standard-headers/linux/virtio_config.h"
|
||||
#include "../libqtest.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qgraph.h"
|
||||
#include "virtio-scmi.h"
|
||||
|
||||
static QGuestAllocator *alloc;
|
||||
|
||||
static void virtio_scmi_cleanup(QVhostUserSCMI *scmi)
|
||||
{
|
||||
QVirtioDevice *vdev = scmi->vdev;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
qvirtqueue_cleanup(vdev->bus, scmi->queues[i], alloc);
|
||||
}
|
||||
g_free(scmi->queues);
|
||||
}
|
||||
|
||||
/*
|
||||
* This handles the VirtIO setup from the point of view of the driver
|
||||
* frontend and therefore doesn't present any vhost specific features
|
||||
* and in fact masks of the re-used bit.
|
||||
*/
|
||||
static void virtio_scmi_setup(QVhostUserSCMI *scmi)
|
||||
{
|
||||
QVirtioDevice *vdev = scmi->vdev;
|
||||
uint64_t features;
|
||||
int i;
|
||||
|
||||
features = qvirtio_get_features(vdev);
|
||||
features &= ~QVIRTIO_F_BAD_FEATURE;
|
||||
qvirtio_set_features(vdev, features);
|
||||
|
||||
scmi->queues = g_new(QVirtQueue *, 2);
|
||||
for (i = 0; i < 2; i++) {
|
||||
scmi->queues[i] = qvirtqueue_setup(vdev, alloc, i);
|
||||
}
|
||||
qvirtio_set_driver_ok(vdev);
|
||||
}
|
||||
|
||||
static void *qvirtio_scmi_get_driver(QVhostUserSCMI *v_scmi,
|
||||
const char *interface)
|
||||
{
|
||||
if (!g_strcmp0(interface, "vhost-user-scmi")) {
|
||||
return v_scmi;
|
||||
}
|
||||
if (!g_strcmp0(interface, "virtio")) {
|
||||
return v_scmi->vdev;
|
||||
}
|
||||
|
||||
g_assert_not_reached();
|
||||
}
|
||||
|
||||
static void *qvirtio_scmi_device_get_driver(void *object,
|
||||
const char *interface)
|
||||
{
|
||||
QVhostUserSCMIDevice *v_scmi = object;
|
||||
return qvirtio_scmi_get_driver(&v_scmi->scmi, interface);
|
||||
}
|
||||
|
||||
/* virtio-scmi (mmio) */
|
||||
static void qvirtio_scmi_device_destructor(QOSGraphObject *obj)
|
||||
{
|
||||
QVhostUserSCMIDevice *scmi_dev = (QVhostUserSCMIDevice *) obj;
|
||||
virtio_scmi_cleanup(&scmi_dev->scmi);
|
||||
}
|
||||
|
||||
static void qvirtio_scmi_device_start_hw(QOSGraphObject *obj)
|
||||
{
|
||||
QVhostUserSCMIDevice *scmi_dev = (QVhostUserSCMIDevice *) obj;
|
||||
virtio_scmi_setup(&scmi_dev->scmi);
|
||||
}
|
||||
|
||||
static void *virtio_scmi_device_create(void *virtio_dev,
|
||||
QGuestAllocator *t_alloc,
|
||||
void *addr)
|
||||
{
|
||||
QVhostUserSCMIDevice *virtio_device = g_new0(QVhostUserSCMIDevice, 1);
|
||||
QVhostUserSCMI *interface = &virtio_device->scmi;
|
||||
|
||||
interface->vdev = virtio_dev;
|
||||
alloc = t_alloc;
|
||||
|
||||
virtio_device->obj.get_driver = qvirtio_scmi_device_get_driver;
|
||||
virtio_device->obj.start_hw = qvirtio_scmi_device_start_hw;
|
||||
virtio_device->obj.destructor = qvirtio_scmi_device_destructor;
|
||||
|
||||
return &virtio_device->obj;
|
||||
}
|
||||
|
||||
/* virtio-scmi-pci */
|
||||
static void qvirtio_scmi_pci_destructor(QOSGraphObject *obj)
|
||||
{
|
||||
QVhostUserSCMIPCI *scmi_pci = (QVhostUserSCMIPCI *) obj;
|
||||
QOSGraphObject *pci_vobj = &scmi_pci->pci_vdev.obj;
|
||||
|
||||
virtio_scmi_cleanup(&scmi_pci->scmi);
|
||||
qvirtio_pci_destructor(pci_vobj);
|
||||
}
|
||||
|
||||
static void qvirtio_scmi_pci_start_hw(QOSGraphObject *obj)
|
||||
{
|
||||
QVhostUserSCMIPCI *scmi_pci = (QVhostUserSCMIPCI *) obj;
|
||||
QOSGraphObject *pci_vobj = &scmi_pci->pci_vdev.obj;
|
||||
|
||||
qvirtio_pci_start_hw(pci_vobj);
|
||||
virtio_scmi_setup(&scmi_pci->scmi);
|
||||
}
|
||||
|
||||
static void *qvirtio_scmi_pci_get_driver(void *object, const char *interface)
|
||||
{
|
||||
QVhostUserSCMIPCI *v_scmi = object;
|
||||
|
||||
if (!g_strcmp0(interface, "pci-device")) {
|
||||
return v_scmi->pci_vdev.pdev;
|
||||
}
|
||||
return qvirtio_scmi_get_driver(&v_scmi->scmi, interface);
|
||||
}
|
||||
|
||||
static void *virtio_scmi_pci_create(void *pci_bus, QGuestAllocator *t_alloc,
|
||||
void *addr)
|
||||
{
|
||||
QVhostUserSCMIPCI *virtio_spci = g_new0(QVhostUserSCMIPCI, 1);
|
||||
QVhostUserSCMI *interface = &virtio_spci->scmi;
|
||||
QOSGraphObject *obj = &virtio_spci->pci_vdev.obj;
|
||||
|
||||
virtio_pci_init(&virtio_spci->pci_vdev, pci_bus, addr);
|
||||
interface->vdev = &virtio_spci->pci_vdev.vdev;
|
||||
alloc = t_alloc;
|
||||
|
||||
obj->get_driver = qvirtio_scmi_pci_get_driver;
|
||||
obj->start_hw = qvirtio_scmi_pci_start_hw;
|
||||
obj->destructor = qvirtio_scmi_pci_destructor;
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
static void virtio_scmi_register_nodes(void)
|
||||
{
|
||||
QPCIAddress addr = {
|
||||
.devfn = QPCI_DEVFN(4, 0),
|
||||
};
|
||||
|
||||
QOSGraphEdgeOptions edge_opts = { };
|
||||
|
||||
/* vhost-user-scmi-device */
|
||||
edge_opts.extra_device_opts = "id=scmi,chardev=chr-vhost-user-test "
|
||||
"-global virtio-mmio.force-legacy=false";
|
||||
qos_node_create_driver("vhost-user-scmi-device",
|
||||
virtio_scmi_device_create);
|
||||
qos_node_consumes("vhost-user-scmi-device", "virtio-bus", &edge_opts);
|
||||
qos_node_produces("vhost-user-scmi-device", "vhost-user-scmi");
|
||||
|
||||
/* virtio-scmi-pci */
|
||||
edge_opts.extra_device_opts = "id=scmi,addr=04.0,chardev=chr-vhost-user-test";
|
||||
add_qpci_address(&edge_opts, &addr);
|
||||
qos_node_create_driver("vhost-user-scmi-pci", virtio_scmi_pci_create);
|
||||
qos_node_consumes("vhost-user-scmi-pci", "pci-bus", &edge_opts);
|
||||
qos_node_produces("vhost-user-scmi-pci", "vhost-user-scmi");
|
||||
}
|
||||
|
||||
libqos_init(virtio_scmi_register_nodes);
|
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* virtio-scmi structures
|
||||
*
|
||||
* SPDX-FileCopyrightText: Red Hat, Inc.
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*/
|
||||
|
||||
#ifndef TESTS_LIBQOS_VIRTIO_SCMI_H
|
||||
#define TESTS_LIBQOS_VIRTIO_SCMI_H
|
||||
|
||||
#include "qgraph.h"
|
||||
#include "virtio.h"
|
||||
#include "virtio-pci.h"
|
||||
|
||||
typedef struct QVhostUserSCMI QVhostUserSCMI;
|
||||
typedef struct QVhostUserSCMIPCI QVhostUserSCMIPCI;
|
||||
typedef struct QVhostUserSCMIDevice QVhostUserSCMIDevice;
|
||||
|
||||
struct QVhostUserSCMI {
|
||||
QVirtioDevice *vdev;
|
||||
QVirtQueue **queues;
|
||||
};
|
||||
|
||||
struct QVhostUserSCMIPCI {
|
||||
QVirtioPCIDevice pci_vdev;
|
||||
QVhostUserSCMI scmi;
|
||||
};
|
||||
|
||||
struct QVhostUserSCMIDevice {
|
||||
QOSGraphObject obj;
|
||||
QVhostUserSCMI scmi;
|
||||
};
|
||||
|
||||
#endif
|
|
@ -33,6 +33,7 @@
|
|||
#include "standard-headers/linux/virtio_ids.h"
|
||||
#include "standard-headers/linux/virtio_net.h"
|
||||
#include "standard-headers/linux/virtio_gpio.h"
|
||||
#include "standard-headers/linux/virtio_scmi.h"
|
||||
|
||||
#ifdef CONFIG_LINUX
|
||||
#include <sys/vfs.h>
|
||||
|
@ -145,6 +146,7 @@ enum {
|
|||
enum {
|
||||
VHOST_USER_NET,
|
||||
VHOST_USER_GPIO,
|
||||
VHOST_USER_SCMI,
|
||||
};
|
||||
|
||||
typedef struct TestServer {
|
||||
|
@ -1157,3 +1159,45 @@ static void register_vhost_gpio_test(void)
|
|||
"vhost-user-gpio", test_read_guest_mem, &opts);
|
||||
}
|
||||
libqos_init(register_vhost_gpio_test);
|
||||
|
||||
static uint64_t vu_scmi_get_features(TestServer *s)
|
||||
{
|
||||
return 0x1ULL << VIRTIO_F_VERSION_1 |
|
||||
0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS |
|
||||
0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
|
||||
}
|
||||
|
||||
static void vu_scmi_get_protocol_features(TestServer *s, CharBackend *chr,
|
||||
VhostUserMsg *msg)
|
||||
{
|
||||
msg->flags |= VHOST_USER_REPLY_MASK;
|
||||
msg->size = sizeof(m.payload.u64);
|
||||
msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ;
|
||||
|
||||
qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
|
||||
}
|
||||
|
||||
static struct vhost_user_ops g_vu_scmi_ops = {
|
||||
.type = VHOST_USER_SCMI,
|
||||
|
||||
.append_opts = append_vhost_gpio_opts,
|
||||
|
||||
.get_features = vu_scmi_get_features,
|
||||
.set_features = vu_net_set_features,
|
||||
.get_protocol_features = vu_scmi_get_protocol_features,
|
||||
};
|
||||
|
||||
static void register_vhost_scmi_test(void)
|
||||
{
|
||||
QOSGraphTestOptions opts = {
|
||||
.before = vhost_user_test_setup,
|
||||
.subprocess = true,
|
||||
.arg = &g_vu_scmi_ops,
|
||||
};
|
||||
|
||||
qemu_add_opts(&qemu_chardev_opts);
|
||||
|
||||
qos_add_test("scmi/read-guest-mem/memfile",
|
||||
"vhost-user-scmi", test_read_guest_mem, &opts);
|
||||
}
|
||||
libqos_init(register_vhost_scmi_test);
|
||||
|
|
Loading…
Reference in New Issue