mirror of https://github.com/xemu-project/xemu.git
vga: add vhost-user-gpu.
Merge remote-tracking branch 'remotes/kraxel/tags/vga-20190529-pull-request' into staging

vga: add vhost-user-gpu.

# gpg: Signature made Wed 29 May 2019 05:40:02 BST
# gpg:                using RSA key 4CB6D8EED3E87138
# gpg: Good signature from "Gerd Hoffmann (work) <kraxel@redhat.com>" [full]
# gpg:                 aka "Gerd Hoffmann <gerd@kraxel.org>" [full]
# gpg:                 aka "Gerd Hoffmann (private) <kraxel@gmail.com>" [full]
# Primary key fingerprint: A032 8CFF B93A 17A7 9901 FE7D 4CB6 D8EE D3E8 7138

* remotes/kraxel/tags/vga-20190529-pull-request:
  hw/display: add vhost-user-vga & gpu-pci
  virtio-gpu: split virtio-gpu-pci & virtio-vga
  virtio-gpu: split virtio-gpu, introduce virtio-gpu-base
  spice-app: fix running when !CONFIG_OPENGL
  contrib: add vhost-user-gpu
  util: compile drm.o on posix
  virtio-gpu: add a pixman helper header
  virtio-gpu: add bswap helpers header
  vhost-user: add vhost_user_gpu_set_socket()
  virtio-gpu: add sanity check

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 95172e2405

MAINTAINERS | 10
@@ -1675,9 +1675,17 @@ virtio-gpu
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/display/virtio-gpu*
F: hw/display/virtio-vga.c
F: hw/display/virtio-vga.*
F: include/hw/virtio/virtio-gpu.h

vhost-user-gpu
M: Marc-André Lureau <marcandre.lureau@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: docs/interop/vhost-user-gpu.rst
F: contrib/vhost-user-gpu
F: hw/display/vhost-user-*

Cirrus VGA
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
Makefile | 24

@@ -314,8 +314,20 @@ $(call set-vpath, $(SRC_PATH))

LIBS+=-lz $(LIBS_TOOLS)

vhost-user-json-y =
HELPERS-y =

HELPERS-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_LINUX)) = qemu-bridge-helper$(EXESUF)

ifdef CONFIG_LINUX
ifdef CONFIG_VIRGL
ifdef CONFIG_GBM
HELPERS-y += vhost-user-gpu$(EXESUF)
vhost-user-json-y += contrib/vhost-user-gpu/50-qemu-gpu.json
endif
endif
endif

ifdef BUILD_DOCS
DOCS=qemu-doc.html qemu-doc.txt qemu.1 qemu-img.1 qemu-nbd.8 qemu-ga.8
DOCS+=docs/interop/qemu-qmp-ref.html docs/interop/qemu-qmp-ref.txt docs/interop/qemu-qmp-ref.7
@@ -409,6 +421,7 @@ dummy := $(call unnest-vars,, \
                vhost-user-scsi-obj-y \
                vhost-user-blk-obj-y \
                vhost-user-input-obj-y \
                vhost-user-gpu-obj-y \
                qga-vss-dll-obj-y \
                block-obj-y \
                block-obj-m \
@@ -426,7 +439,7 @@ dummy := $(call unnest-vars,, \

include $(SRC_PATH)/tests/Makefile.include

all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) $(HELPERS-y) recurse-all modules
all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) $(HELPERS-y) recurse-all modules $(vhost-user-json-y)

qemu-version.h: FORCE
        $(call quiet-command, \
@@ -619,6 +632,9 @@ rdmacm-mux$(EXESUF): LIBS += "-libumad"
rdmacm-mux$(EXESUF): $(rdmacm-mux-obj-y) $(COMMON_LDADDS)
        $(call LINK, $^)

vhost-user-gpu$(EXESUF): $(vhost-user-gpu-obj-y) $(libvhost-user-obj-y) libqemuutil.a libqemustub.a
        $(call LINK, $^)

ifdef CONFIG_VHOST_USER_INPUT
ifdef CONFIG_LINUX
vhost-user-input$(EXESUF): $(vhost-user-input-obj-y) libvhost-user.a libqemuutil.a
@@ -827,6 +843,12 @@ endif
ifneq ($(HELPERS-y),)
        $(call install-prog,$(HELPERS-y),$(DESTDIR)$(libexecdir))
endif
ifneq ($(vhost-user-json-y),)
        $(INSTALL_DIR) "$(DESTDIR)$(qemu_datadir)/vhost-user/"
        for x in $(vhost-user-json-y); do \
                $(INSTALL_DATA) $$x "$(DESTDIR)$(qemu_datadir)/vhost-user/"; \
        done
endif
ifdef CONFIG_TRACE_SYSTEMTAP
        $(INSTALL_PROG) "scripts/qemu-trace-stap" $(DESTDIR)$(bindir)
endif
@@ -123,6 +123,7 @@ vhost-user-scsi-obj-y = contrib/vhost-user-scsi/
vhost-user-blk-obj-y = contrib/vhost-user-blk/
rdmacm-mux-obj-y = contrib/rdmacm-mux/
vhost-user-input-obj-y = contrib/vhost-user-input/
vhost-user-gpu-obj-y = contrib/vhost-user-gpu/

######################################################################
trace-events-subdirs =
@@ -4099,6 +4099,13 @@ libs_softmmu="$libs_softmmu $fdt_libs"
##########################################
# opengl probe (for sdl2, gtk, milkymist-tmu2)

gbm="no"
if $pkg_config gbm; then
    gbm_cflags="$($pkg_config --cflags gbm)"
    gbm_libs="$($pkg_config --libs gbm)"
    gbm="yes"
fi

if test "$opengl" != "no" ; then
    opengl_pkgs="epoxy gbm"
    if $pkg_config $opengl_pkgs; then
@@ -6964,6 +6971,13 @@ if test "$opengl" = "yes" ; then
    fi
fi

if test "$gbm" = "yes" ; then
    echo "CONFIG_GBM=y" >> $config_host_mak
    echo "GBM_LIBS=$gbm_libs" >> $config_host_mak
    echo "GBM_CFLAGS=$gbm_cflags" >> $config_host_mak
fi


if test "$malloc_trim" = "yes" ; then
    echo "CONFIG_MALLOC_TRIM=y" >> $config_host_mak
fi
@@ -7859,6 +7873,9 @@ echo "QEMU_CFLAGS+=$cflags" >> $config_target_mak

done # for target in $targets

echo "PIXMAN_CFLAGS=$pixman_cflags" >> $config_host_mak
echo "PIXMAN_LIBS=$pixman_libs" >> $config_host_mak

if test -n "$enabled_cross_compilers"; then
    echo
    echo "NOTE: cross-compilers enabled: $enabled_cross_compilers"
@@ -130,6 +130,7 @@ vu_request_to_string(unsigned int req)
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_MAX),
    };
#undef REQ
@@ -94,6 +94,7 @@ typedef enum VhostUserRequest {
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_MAX
} VhostUserRequest;
@@ -0,0 +1,5 @@
{
    "description": "QEMU vhost-user-gpu",
    "type": "gpu",
    "binary": "@libexecdir@/vhost-user-gpu",
}
@@ -0,0 +1,10 @@
vhost-user-gpu-obj-y = main.o virgl.o vugbm.o

main.o-cflags := $(PIXMAN_CFLAGS) $(GBM_CFLAGS)
main.o-libs := $(PIXMAN_LIBS)

virgl.o-cflags := $(VIRGL_CFLAGS) $(GBM_CFLAGS)
virgl.o-libs := $(VIRGL_LIBS)

vugbm.o-cflags := $(GBM_CFLAGS)
vugbm.o-libs := $(GBM_LIBS)

(File diff suppressed because it is too large.)
@ -0,0 +1,579 @@
|
|||
/*
|
||||
* Virtio vhost-user GPU Device
|
||||
*
|
||||
* Copyright Red Hat, Inc. 2013-2018
|
||||
*
|
||||
* Authors:
|
||||
* Dave Airlie <airlied@redhat.com>
|
||||
* Gerd Hoffmann <kraxel@redhat.com>
|
||||
* Marc-André Lureau <marcandre.lureau@redhat.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include <virglrenderer.h>
|
||||
#include "virgl.h"
|
||||
|
||||
void
|
||||
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
|
||||
gpointer data)
|
||||
{
|
||||
uint32_t width, height;
|
||||
uint32_t *cursor;
|
||||
|
||||
cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
|
||||
g_return_if_fail(cursor != NULL);
|
||||
g_return_if_fail(width == 64);
|
||||
g_return_if_fail(height == 64);
|
||||
|
||||
memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
|
||||
free(cursor);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_context_create(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_ctx_create cc;
|
||||
|
||||
VUGPU_FILL_CMD(cc);
|
||||
|
||||
virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
|
||||
cc.debug_name);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_context_destroy(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_ctx_destroy cd;
|
||||
|
||||
VUGPU_FILL_CMD(cd);
|
||||
|
||||
virgl_renderer_context_destroy(cd.hdr.ctx_id);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_create_resource_2d(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_resource_create_2d c2d;
|
||||
struct virgl_renderer_resource_create_args args;
|
||||
|
||||
VUGPU_FILL_CMD(c2d);
|
||||
|
||||
args.handle = c2d.resource_id;
|
||||
args.target = 2;
|
||||
args.format = c2d.format;
|
||||
args.bind = (1 << 1);
|
||||
args.width = c2d.width;
|
||||
args.height = c2d.height;
|
||||
args.depth = 1;
|
||||
args.array_size = 1;
|
||||
args.last_level = 0;
|
||||
args.nr_samples = 0;
|
||||
args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
|
||||
virgl_renderer_resource_create(&args, NULL, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_create_resource_3d(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_resource_create_3d c3d;
|
||||
struct virgl_renderer_resource_create_args args;
|
||||
|
||||
VUGPU_FILL_CMD(c3d);
|
||||
|
||||
args.handle = c3d.resource_id;
|
||||
args.target = c3d.target;
|
||||
args.format = c3d.format;
|
||||
args.bind = c3d.bind;
|
||||
args.width = c3d.width;
|
||||
args.height = c3d.height;
|
||||
args.depth = c3d.depth;
|
||||
args.array_size = c3d.array_size;
|
||||
args.last_level = c3d.last_level;
|
||||
args.nr_samples = c3d.nr_samples;
|
||||
args.flags = c3d.flags;
|
||||
virgl_renderer_resource_create(&args, NULL, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_resource_unref(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_resource_unref unref;
|
||||
|
||||
VUGPU_FILL_CMD(unref);
|
||||
|
||||
virgl_renderer_resource_unref(unref.resource_id);
|
||||
}
|
||||
|
||||
/* Not yet(?) defined in standard-headers, remove when possible */
|
||||
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
|
||||
#define VIRTIO_GPU_CAPSET_VIRGL2 2
|
||||
#endif
|
||||
|
||||
static void
|
||||
virgl_cmd_get_capset_info(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_get_capset_info info;
|
||||
struct virtio_gpu_resp_capset_info resp;
|
||||
|
||||
VUGPU_FILL_CMD(info);
|
||||
|
||||
if (info.capset_index == 0) {
|
||||
resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
|
||||
virgl_renderer_get_cap_set(resp.capset_id,
|
||||
&resp.capset_max_version,
|
||||
&resp.capset_max_size);
|
||||
} else if (info.capset_index == 1) {
|
||||
resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
|
||||
virgl_renderer_get_cap_set(resp.capset_id,
|
||||
&resp.capset_max_version,
|
||||
&resp.capset_max_size);
|
||||
} else {
|
||||
resp.capset_max_version = 0;
|
||||
resp.capset_max_size = 0;
|
||||
}
|
||||
resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
|
||||
vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
|
||||
}
|
||||
|
||||
uint32_t
|
||||
vg_virgl_get_num_capsets(void)
|
||||
{
|
||||
uint32_t capset2_max_ver, capset2_max_size;
|
||||
virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
|
||||
&capset2_max_ver,
|
||||
&capset2_max_size);
|
||||
|
||||
return capset2_max_ver ? 2 : 1;
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_get_capset(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_get_capset gc;
|
||||
struct virtio_gpu_resp_capset *resp;
|
||||
uint32_t max_ver, max_size;
|
||||
|
||||
VUGPU_FILL_CMD(gc);
|
||||
|
||||
virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
|
||||
&max_size);
|
||||
resp = g_malloc0(sizeof(*resp) + max_size);
|
||||
|
||||
resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
|
||||
virgl_renderer_fill_caps(gc.capset_id,
|
||||
gc.capset_version,
|
||||
(void *)resp->capset_data);
|
||||
vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
|
||||
g_free(resp);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_submit_3d(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_cmd_submit cs;
|
||||
void *buf;
|
||||
size_t s;
|
||||
|
||||
VUGPU_FILL_CMD(cs);
|
||||
|
||||
buf = g_malloc(cs.size);
|
||||
s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
|
||||
sizeof(cs), buf, cs.size);
|
||||
if (s != cs.size) {
|
||||
g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
|
||||
goto out;
|
||||
}
|
||||
|
||||
virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);
|
||||
|
||||
out:
|
||||
g_free(buf);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_transfer_to_host_2d(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_transfer_to_host_2d t2d;
|
||||
struct virtio_gpu_box box;
|
||||
|
||||
VUGPU_FILL_CMD(t2d);
|
||||
|
||||
box.x = t2d.r.x;
|
||||
box.y = t2d.r.y;
|
||||
box.z = 0;
|
||||
box.w = t2d.r.width;
|
||||
box.h = t2d.r.height;
|
||||
box.d = 1;
|
||||
|
||||
virgl_renderer_transfer_write_iov(t2d.resource_id,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
(struct virgl_box *)&box,
|
||||
t2d.offset, NULL, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_transfer_to_host_3d(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_transfer_host_3d t3d;
|
||||
|
||||
VUGPU_FILL_CMD(t3d);
|
||||
|
||||
virgl_renderer_transfer_write_iov(t3d.resource_id,
|
||||
t3d.hdr.ctx_id,
|
||||
t3d.level,
|
||||
t3d.stride,
|
||||
t3d.layer_stride,
|
||||
(struct virgl_box *)&t3d.box,
|
||||
t3d.offset, NULL, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_transfer_from_host_3d(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_transfer_host_3d tf3d;
|
||||
|
||||
VUGPU_FILL_CMD(tf3d);
|
||||
|
||||
virgl_renderer_transfer_read_iov(tf3d.resource_id,
|
||||
tf3d.hdr.ctx_id,
|
||||
tf3d.level,
|
||||
tf3d.stride,
|
||||
tf3d.layer_stride,
|
||||
(struct virgl_box *)&tf3d.box,
|
||||
tf3d.offset, NULL, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_resource_attach_backing(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_resource_attach_backing att_rb;
|
||||
struct iovec *res_iovs;
|
||||
int ret;
|
||||
|
||||
VUGPU_FILL_CMD(att_rb);
|
||||
|
||||
ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
|
||||
if (ret != 0) {
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
|
||||
return;
|
||||
}
|
||||
|
||||
virgl_renderer_resource_attach_iov(att_rb.resource_id,
|
||||
res_iovs, att_rb.nr_entries);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_resource_detach_backing(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_resource_detach_backing detach_rb;
|
||||
struct iovec *res_iovs = NULL;
|
||||
int num_iovs = 0;
|
||||
|
||||
VUGPU_FILL_CMD(detach_rb);
|
||||
|
||||
virgl_renderer_resource_detach_iov(detach_rb.resource_id,
|
||||
&res_iovs,
|
||||
&num_iovs);
|
||||
if (res_iovs == NULL || num_iovs == 0) {
|
||||
return;
|
||||
}
|
||||
g_free(res_iovs);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_set_scanout(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_set_scanout ss;
|
||||
struct virgl_renderer_resource_info info;
|
||||
int ret;
|
||||
|
||||
VUGPU_FILL_CMD(ss);
|
||||
|
||||
if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
|
||||
g_critical("%s: illegal scanout id specified %d",
|
||||
__func__, ss.scanout_id);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
|
||||
return;
|
||||
}
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
if (ss.resource_id && ss.r.width && ss.r.height) {
|
||||
ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
|
||||
if (ret == -1) {
|
||||
g_critical("%s: illegal resource specified %d\n",
|
||||
__func__, ss.resource_id);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
|
||||
return;
|
||||
}
|
||||
|
||||
int fd = -1;
|
||||
if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
|
||||
g_critical("%s: failed to get fd for texture\n", __func__);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
|
||||
return;
|
||||
}
|
||||
assert(fd >= 0);
|
||||
VhostUserGpuMsg msg = {
|
||||
.request = VHOST_USER_GPU_DMABUF_SCANOUT,
|
||||
.size = sizeof(VhostUserGpuDMABUFScanout),
|
||||
.payload.dmabuf_scanout.scanout_id = ss.scanout_id,
|
||||
.payload.dmabuf_scanout.x = ss.r.x,
|
||||
.payload.dmabuf_scanout.y = ss.r.y,
|
||||
.payload.dmabuf_scanout.width = ss.r.width,
|
||||
.payload.dmabuf_scanout.height = ss.r.height,
|
||||
.payload.dmabuf_scanout.fd_width = info.width,
|
||||
.payload.dmabuf_scanout.fd_height = info.height,
|
||||
.payload.dmabuf_scanout.fd_stride = info.stride,
|
||||
.payload.dmabuf_scanout.fd_flags = info.flags,
|
||||
.payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
|
||||
};
|
||||
vg_send_msg(g, &msg, fd);
|
||||
close(fd);
|
||||
} else {
|
||||
VhostUserGpuMsg msg = {
|
||||
.request = VHOST_USER_GPU_DMABUF_SCANOUT,
|
||||
.size = sizeof(VhostUserGpuDMABUFScanout),
|
||||
.payload.dmabuf_scanout.scanout_id = ss.scanout_id,
|
||||
};
|
||||
g_debug("disable scanout");
|
||||
vg_send_msg(g, &msg, -1);
|
||||
}
|
||||
g->scanout[ss.scanout_id].resource_id = ss.resource_id;
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_resource_flush(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_resource_flush rf;
|
||||
int i;
|
||||
|
||||
VUGPU_FILL_CMD(rf);
|
||||
|
||||
if (!rf.resource_id) {
|
||||
g_debug("bad resource id for flush..?");
|
||||
return;
|
||||
}
|
||||
for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
|
||||
if (g->scanout[i].resource_id != rf.resource_id) {
|
||||
continue;
|
||||
}
|
||||
VhostUserGpuMsg msg = {
|
||||
.request = VHOST_USER_GPU_DMABUF_UPDATE,
|
||||
.size = sizeof(VhostUserGpuUpdate),
|
||||
.payload.update.scanout_id = i,
|
||||
.payload.update.x = rf.r.x,
|
||||
.payload.update.y = rf.r.y,
|
||||
.payload.update.width = rf.r.width,
|
||||
.payload.update.height = rf.r.height
|
||||
};
|
||||
vg_send_msg(g, &msg, -1);
|
||||
vg_wait_ok(g);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_ctx_attach_resource(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_ctx_resource att_res;
|
||||
|
||||
VUGPU_FILL_CMD(att_res);
|
||||
|
||||
virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_cmd_ctx_detach_resource(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
struct virtio_gpu_ctx_resource det_res;
|
||||
|
||||
VUGPU_FILL_CMD(det_res);
|
||||
|
||||
virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
|
||||
}
|
||||
|
||||
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
virgl_renderer_force_ctx_0();
|
||||
switch (cmd->cmd_hdr.type) {
|
||||
case VIRTIO_GPU_CMD_CTX_CREATE:
|
||||
virgl_cmd_context_create(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_CTX_DESTROY:
|
||||
virgl_cmd_context_destroy(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
|
||||
virgl_cmd_create_resource_2d(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
|
||||
virgl_cmd_create_resource_3d(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_SUBMIT_3D:
|
||||
virgl_cmd_submit_3d(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
|
||||
virgl_cmd_transfer_to_host_2d(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
|
||||
virgl_cmd_transfer_to_host_3d(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
|
||||
virgl_cmd_transfer_from_host_3d(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
|
||||
virgl_resource_attach_backing(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
|
||||
virgl_resource_detach_backing(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_SET_SCANOUT:
|
||||
virgl_cmd_set_scanout(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
|
||||
virgl_cmd_resource_flush(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_RESOURCE_UNREF:
|
||||
virgl_cmd_resource_unref(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
|
||||
/* TODO add security */
|
||||
virgl_cmd_ctx_attach_resource(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
|
||||
/* TODO add security */
|
||||
virgl_cmd_ctx_detach_resource(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
|
||||
virgl_cmd_get_capset_info(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_GET_CAPSET:
|
||||
virgl_cmd_get_capset(g, cmd);
|
||||
break;
|
||||
case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
|
||||
vg_get_display_info(g, cmd);
|
||||
break;
|
||||
default:
|
||||
g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
|
||||
break;
|
||||
}
|
||||
|
||||
if (cmd->finished) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (cmd->error) {
|
||||
g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
|
||||
cmd->cmd_hdr.type, cmd->error);
|
||||
vg_ctrl_response_nodata(g, cmd, cmd->error);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
|
||||
vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
|
||||
return;
|
||||
}
|
||||
|
||||
g_debug("Creating fence id:%" PRId64 " type:%d",
|
||||
cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
|
||||
virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
|
||||
}
|
||||
|
||||
static void
|
||||
virgl_write_fence(void *opaque, uint32_t fence)
|
||||
{
|
||||
VuGpu *g = opaque;
|
||||
struct virtio_gpu_ctrl_command *cmd, *tmp;
|
||||
|
||||
QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
|
||||
/*
|
||||
* the guest can end up emitting fences out of order
|
||||
* so we should check all fenced cmds not just the first one.
|
||||
*/
|
||||
if (cmd->cmd_hdr.fence_id > fence) {
|
||||
continue;
|
||||
}
|
||||
g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
|
||||
vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
|
||||
QTAILQ_REMOVE(&g->fenceq, cmd, next);
|
||||
g_free(cmd);
|
||||
g->inflight--;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
|
||||
VIRGL_RENDERER_CALLBACKS_VERSION >= 2
|
||||
static int
|
||||
virgl_get_drm_fd(void *opaque)
|
||||
{
|
||||
VuGpu *g = opaque;
|
||||
|
||||
return g->drm_rnode_fd;
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct virgl_renderer_callbacks virgl_cbs = {
|
||||
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
|
||||
VIRGL_RENDERER_CALLBACKS_VERSION >= 2
|
||||
.get_drm_fd = virgl_get_drm_fd,
|
||||
.version = 2,
|
||||
#else
|
||||
.version = 1,
|
||||
#endif
|
||||
.write_fence = virgl_write_fence,
|
||||
};
|
||||
|
||||
static void
|
||||
vg_virgl_poll(VuDev *dev, int condition, void *data)
|
||||
{
|
||||
virgl_renderer_poll();
|
||||
}
|
||||
|
||||
bool
|
||||
vg_virgl_init(VuGpu *g)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (g->drm_rnode_fd && virgl_cbs.version == 1) {
|
||||
g_warning("virgl will use the default rendernode");
|
||||
}
|
||||
|
||||
ret = virgl_renderer_init(g,
|
||||
VIRGL_RENDERER_USE_EGL |
|
||||
VIRGL_RENDERER_THREAD_SYNC,
|
||||
&virgl_cbs);
|
||||
if (ret != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = virgl_renderer_get_poll_fd();
|
||||
if (ret != -1) {
|
||||
g->renderer_source =
|
||||
vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
@@ -0,0 +1,25 @@
/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#ifndef VUGPU_VIRGL_H_
#define VUGPU_VIRGL_H_

#include "vugpu.h"

bool vg_virgl_init(VuGpu *g);
uint32_t vg_virgl_get_num_capsets(void);
void vg_virgl_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
void vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                                 gpointer data);

#endif
@ -0,0 +1,328 @@
|
|||
/*
|
||||
* Virtio vhost-user GPU Device
|
||||
*
|
||||
* DRM helpers
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "vugbm.h"
|
||||
|
||||
static bool
|
||||
mem_alloc_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
buf->mmap = g_malloc(buf->width * buf->height * 4);
|
||||
buf->stride = buf->width * 4;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
mem_free_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
g_free(buf->mmap);
|
||||
}
|
||||
|
||||
static bool
|
||||
mem_map_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
return buf->mmap != NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
mem_unmap_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
mem_device_destroy(struct vugbm_device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MEMFD
|
||||
struct udmabuf_create {
|
||||
uint32_t memfd;
|
||||
uint32_t flags;
|
||||
uint64_t offset;
|
||||
uint64_t size;
|
||||
};
|
||||
|
||||
#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
|
||||
|
||||
static size_t
|
||||
udmabuf_get_size(struct vugbm_buffer *buf)
|
||||
{
|
||||
return ROUND_UP(buf->width * buf->height * 4, getpagesize());
|
||||
}
|
||||
|
||||
static bool
|
||||
udmabuf_alloc_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
int ret;
|
||||
|
||||
buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
|
||||
if (buf->memfd < 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
|
||||
if (ret < 0) {
|
||||
close(buf->memfd);
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
|
||||
if (ret < 0) {
|
||||
close(buf->memfd);
|
||||
return false;
|
||||
}
|
||||
|
||||
buf->stride = buf->width * 4;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
udmabuf_free_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
close(buf->memfd);
|
||||
}
|
||||
|
||||
static bool
|
||||
udmabuf_map_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
buf->mmap = mmap(NULL, udmabuf_get_size(buf),
|
||||
PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
|
||||
if (buf->mmap == MAP_FAILED) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
|
||||
{
|
||||
struct udmabuf_create create = {
|
||||
.memfd = buf->memfd,
|
||||
.offset = 0,
|
||||
.size = udmabuf_get_size(buf),
|
||||
};
|
||||
|
||||
*fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);
|
||||
|
||||
return *fd >= 0;
|
||||
}
|
||||
|
||||
static void
|
||||
udmabuf_unmap_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
munmap(buf->mmap, udmabuf_get_size(buf));
|
||||
}
|
||||
|
||||
static void
|
||||
udmabuf_device_destroy(struct vugbm_device *dev)
|
||||
{
|
||||
close(dev->fd);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_GBM
|
||||
static bool
|
||||
alloc_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
struct gbm_device *dev = buf->dev->dev;
|
||||
|
||||
assert(!buf->bo);
|
||||
|
||||
buf->bo = gbm_bo_create(dev, buf->width, buf->height,
|
||||
buf->format,
|
||||
GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);
|
||||
|
||||
if (buf->bo) {
|
||||
buf->stride = gbm_bo_get_stride(buf->bo);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void
|
||||
free_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
gbm_bo_destroy(buf->bo);
|
||||
}
|
||||
|
||||
static bool
|
||||
map_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
uint32_t stride;
|
||||
|
||||
buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
|
||||
GBM_BO_TRANSFER_READ_WRITE, &stride,
|
||||
&buf->mmap_data);
|
||||
|
||||
assert(stride == buf->stride);
|
||||
|
||||
return buf->mmap != NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
unmap_bo(struct vugbm_buffer *buf)
|
||||
{
|
||||
gbm_bo_unmap(buf->bo, buf->mmap_data);
|
||||
}
|
||||
|
||||
static bool
|
||||
get_fd(struct vugbm_buffer *buf, int *fd)
|
||||
{
|
||||
*fd = gbm_bo_get_fd(buf->bo);
|
||||
|
||||
return *fd >= 0;
|
||||
}
|
||||
|
||||
static void
|
||||
device_destroy(struct vugbm_device *dev)
|
||||
{
|
||||
gbm_device_destroy(dev->dev);
|
||||
}
|
||||
#endif
|
||||
|
||||
void
|
||||
vugbm_device_destroy(struct vugbm_device *dev)
|
||||
{
|
||||
if (!dev->inited) {
|
||||
return;
|
||||
}
|
||||
|
||||
dev->device_destroy(dev);
|
||||
}
|
||||
|
||||
bool
|
||||
vugbm_device_init(struct vugbm_device *dev, int fd)
|
||||
{
|
||||
dev->fd = fd;
|
||||
|
||||
#ifdef CONFIG_GBM
|
||||
dev->dev = gbm_create_device(fd);
|
||||
#endif
|
||||
|
||||
if (0) {
|
||||
/* nothing */
|
||||
}
|
||||
#ifdef CONFIG_GBM
|
||||
else if (dev->dev != NULL) {
|
||||
dev->alloc_bo = alloc_bo;
|
||||
dev->free_bo = free_bo;
|
||||
dev->get_fd = get_fd;
|
||||
dev->map_bo = map_bo;
|
||||
dev->unmap_bo = unmap_bo;
|
||||
dev->device_destroy = device_destroy;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_MEMFD
|
||||
else if (g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
|
||||
dev->fd = open("/dev/udmabuf", O_RDWR);
|
||||
if (dev->fd < 0) {
|
||||
return false;
|
||||
}
|
||||
g_debug("Using experimental udmabuf backend");
|
||||
dev->alloc_bo = udmabuf_alloc_bo;
|
||||
dev->free_bo = udmabuf_free_bo;
|
||||
dev->get_fd = udmabuf_get_fd;
|
||||
dev->map_bo = udmabuf_map_bo;
|
||||
dev->unmap_bo = udmabuf_unmap_bo;
|
||||
dev->device_destroy = udmabuf_device_destroy;
|
||||
}
|
||||
#endif
|
||||
else {
|
||||
g_debug("Using mem fallback");
|
||||
dev->alloc_bo = mem_alloc_bo;
|
||||
dev->free_bo = mem_free_bo;
|
||||
dev->map_bo = mem_map_bo;
|
||||
dev->unmap_bo = mem_unmap_bo;
|
||||
dev->device_destroy = mem_device_destroy;
|
||||
return false;
|
||||
}
|
||||
|
||||
dev->inited = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
vugbm_buffer_map(struct vugbm_buffer *buf)
|
||||
{
|
||||
struct vugbm_device *dev = buf->dev;
|
||||
|
||||
return dev->map_bo(buf);
|
||||
}
|
||||
|
||||
static void
|
||||
vugbm_buffer_unmap(struct vugbm_buffer *buf)
|
||||
{
|
||||
struct vugbm_device *dev = buf->dev;
|
||||
|
||||
dev->unmap_bo(buf);
|
||||
}
|
||||
|
||||
bool
|
||||
vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
|
||||
{
|
||||
if (!buffer->dev->get_fd) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
|
||||
{
|
||||
if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
|
||||
!buffer->dev->get_fd(buffer, fd)) {
|
||||
g_warning("Failed to get dmabuf");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (*fd < 0) {
|
||||
g_warning("error: dmabuf_fd < 0");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
|
||||
uint32_t width, uint32_t height)
|
||||
{
|
||||
buffer->dev = dev;
|
||||
buffer->width = width;
|
||||
buffer->height = height;
|
||||
buffer->format = GBM_FORMAT_XRGB8888;
|
||||
buffer->stride = 0; /* modified during alloc */
|
||||
if (!dev->alloc_bo(buffer)) {
|
||||
g_warning("alloc_bo failed");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!vugbm_buffer_map(buffer)) {
|
||||
g_warning("map_bo failed");
|
||||
goto err;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
err:
|
||||
dev->free_bo(buffer);
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
vugbm_buffer_destroy(struct vugbm_buffer *buffer)
|
||||
{
|
||||
struct vugbm_device *dev = buffer->dev;
|
||||
|
||||
vugbm_buffer_unmap(buffer);
|
||||
dev->free_bo(buffer);
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Virtio vhost-user GPU Device
|
||||
*
|
||||
* GBM helpers
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
#ifndef VHOST_USER_GPU_GBM_H
|
||||
#define VHOST_USER_GPU_GBM_H
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#ifdef CONFIG_MEMFD
|
||||
#include <sys/mman.h>
|
||||
#include <sys/ioctl.h>
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_GBM
|
||||
#include <gbm.h>
|
||||
#endif
|
||||
|
||||
struct vugbm_buffer;
|
||||
|
||||
struct vugbm_device {
|
||||
bool inited;
|
||||
int fd;
|
||||
#ifdef CONFIG_GBM
|
||||
struct gbm_device *dev;
|
||||
#endif
|
||||
|
||||
bool (*alloc_bo)(struct vugbm_buffer *buf);
|
||||
void (*free_bo)(struct vugbm_buffer *buf);
|
||||
bool (*get_fd)(struct vugbm_buffer *buf, int *fd);
|
||||
bool (*map_bo)(struct vugbm_buffer *buf);
|
||||
void (*unmap_bo)(struct vugbm_buffer *buf);
|
||||
void (*device_destroy)(struct vugbm_device *dev);
|
||||
};
|
||||
|
||||
struct vugbm_buffer {
|
||||
struct vugbm_device *dev;
|
||||
|
||||
#ifdef CONFIG_MEMFD
|
||||
int memfd;
|
||||
#endif
|
||||
#ifdef CONFIG_GBM
|
||||
struct gbm_bo *bo;
|
||||
void *mmap_data;
|
||||
#endif
|
||||
|
||||
uint8_t *mmap;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t stride;
|
||||
uint32_t format;
|
||||
};
|
||||
|
||||
bool vugbm_device_init(struct vugbm_device *dev, int fd);
|
||||
void vugbm_device_destroy(struct vugbm_device *dev);
|
||||
|
||||
bool vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
|
||||
uint32_t width, uint32_t height);
|
||||
bool vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer);
|
||||
bool vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd);
|
||||
void vugbm_buffer_destroy(struct vugbm_buffer *buffer);
|
||||
|
||||
#endif
|
|
@ -0,0 +1,177 @@
|
|||
/*
|
||||
* Virtio vhost-user GPU Device
|
||||
*
|
||||
* Copyright Red Hat, Inc. 2013-2018
|
||||
*
|
||||
* Authors:
|
||||
* Dave Airlie <airlied@redhat.com>
|
||||
* Gerd Hoffmann <kraxel@redhat.com>
|
||||
* Marc-André Lureau <marcandre.lureau@redhat.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
#ifndef VUGPU_H_
|
||||
#define VUGPU_H_
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include "contrib/libvhost-user/libvhost-user-glib.h"
|
||||
#include "standard-headers/linux/virtio_gpu.h"
|
||||
|
||||
#include "qemu/queue.h"
|
||||
#include "qemu/iov.h"
|
||||
#include "qemu/bswap.h"
|
||||
#include "vugbm.h"
|
||||
|
||||
typedef enum VhostUserGpuRequest {
|
||||
VHOST_USER_GPU_NONE = 0,
|
||||
VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
|
||||
VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
|
||||
VHOST_USER_GPU_GET_DISPLAY_INFO,
|
||||
VHOST_USER_GPU_CURSOR_POS,
|
||||
VHOST_USER_GPU_CURSOR_POS_HIDE,
|
||||
VHOST_USER_GPU_CURSOR_UPDATE,
|
||||
VHOST_USER_GPU_SCANOUT,
|
||||
VHOST_USER_GPU_UPDATE,
|
||||
VHOST_USER_GPU_DMABUF_SCANOUT,
|
||||
VHOST_USER_GPU_DMABUF_UPDATE,
|
||||
} VhostUserGpuRequest;
|
||||
|
||||
typedef struct VhostUserGpuDisplayInfoReply {
|
||||
struct virtio_gpu_resp_display_info info;
|
||||
} VhostUserGpuDisplayInfoReply;
|
||||
|
||||
typedef struct VhostUserGpuCursorPos {
|
||||
uint32_t scanout_id;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
} QEMU_PACKED VhostUserGpuCursorPos;
|
||||
|
||||
typedef struct VhostUserGpuCursorUpdate {
|
||||
VhostUserGpuCursorPos pos;
|
||||
uint32_t hot_x;
|
||||
uint32_t hot_y;
|
||||
uint32_t data[64 * 64];
|
||||
} QEMU_PACKED VhostUserGpuCursorUpdate;
|
||||
|
||||
typedef struct VhostUserGpuScanout {
|
||||
uint32_t scanout_id;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
} QEMU_PACKED VhostUserGpuScanout;
|
||||
|
||||
typedef struct VhostUserGpuUpdate {
|
||||
uint32_t scanout_id;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint8_t data[];
|
||||
} QEMU_PACKED VhostUserGpuUpdate;
|
||||
|
||||
typedef struct VhostUserGpuDMABUFScanout {
|
||||
uint32_t scanout_id;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t fd_width;
|
||||
uint32_t fd_height;
|
||||
uint32_t fd_stride;
|
||||
uint32_t fd_flags;
|
||||
int fd_drm_fourcc;
|
||||
} QEMU_PACKED VhostUserGpuDMABUFScanout;
|
||||
|
||||
typedef struct VhostUserGpuMsg {
|
||||
uint32_t request; /* VhostUserGpuRequest */
|
||||
uint32_t flags;
|
||||
uint32_t size; /* the following payload size */
|
||||
union {
|
||||
VhostUserGpuCursorPos cursor_pos;
|
||||
VhostUserGpuCursorUpdate cursor_update;
|
||||
VhostUserGpuScanout scanout;
|
||||
VhostUserGpuUpdate update;
|
||||
VhostUserGpuDMABUFScanout dmabuf_scanout;
|
||||
struct virtio_gpu_resp_display_info display_info;
|
||||
uint64_t u64;
|
||||
} payload;
|
||||
} QEMU_PACKED VhostUserGpuMsg;
|
||||
|
||||
static VhostUserGpuMsg m __attribute__ ((unused));
|
||||
#define VHOST_USER_GPU_HDR_SIZE \
|
||||
(sizeof(m.request) + sizeof(m.flags) + sizeof(m.size))
|
||||
|
||||
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
|
||||
|
||||
struct virtio_gpu_scanout {
|
||||
uint32_t width, height;
|
||||
int x, y;
|
||||
int invalidate;
|
||||
uint32_t resource_id;
|
||||
};
|
||||
|
||||
typedef struct VuGpu {
|
||||
VugDev dev;
|
||||
struct virtio_gpu_config virtio_config;
|
||||
struct vugbm_device gdev;
|
||||
int sock_fd;
|
||||
int drm_rnode_fd;
|
||||
GSource *renderer_source;
|
||||
guint wait_ok;
|
||||
|
||||
bool virgl;
|
||||
bool virgl_inited;
|
||||
uint32_t inflight;
|
||||
|
||||
struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
|
||||
QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
|
||||
QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;
|
||||
} VuGpu;
|
||||
|
||||
struct virtio_gpu_ctrl_command {
|
||||
VuVirtqElement elem;
|
||||
VuVirtq *vq;
|
||||
struct virtio_gpu_ctrl_hdr cmd_hdr;
|
||||
uint32_t error;
|
||||
bool finished;
|
||||
QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
|
||||
};
|
||||
|
||||
#define VUGPU_FILL_CMD(out) do { \
|
||||
size_t s; \
|
||||
s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0, \
|
||||
&out, sizeof(out)); \
|
||||
if (s != sizeof(out)) { \
|
||||
g_critical("%s: command size incorrect %zu vs %zu", \
|
||||
__func__, s, sizeof(out)); \
|
||||
return; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
void vg_ctrl_response(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd,
|
||||
struct virtio_gpu_ctrl_hdr *resp,
|
||||
size_t resp_len);
|
||||
|
||||
void vg_ctrl_response_nodata(VuGpu *g,
|
||||
struct virtio_gpu_ctrl_command *cmd,
|
||||
enum virtio_gpu_ctrl_type type);
|
||||
|
||||
int vg_create_mapping_iov(VuGpu *g,
|
||||
struct virtio_gpu_resource_attach_backing *ab,
|
||||
struct virtio_gpu_ctrl_command *cmd,
|
||||
struct iovec **iov);
|
||||
|
||||
void vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
|
||||
|
||||
void vg_wait_ok(VuGpu *g);
|
||||
|
||||
void vg_send_msg(VuGpu *g, const VhostUserGpuMsg *msg, int fd);
|
||||
|
||||
bool vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
|
||||
gpointer payload);
|
||||
|
||||
|
||||
#endif
|
@@ -16,3 +16,4 @@ Contents:
   live-block-operations
   pr-helper
   vhost-user
   vhost-user-gpu
@@ -0,0 +1,242 @@
=======================
Vhost-user-gpu Protocol
=======================

:Licence: This work is licensed under the terms of the GNU GPL,
          version 2 or later. See the COPYING file in the top-level
          directory.

.. contents:: Table of Contents

Introduction
============

The vhost-user-gpu protocol lets a vhost-user slave process share the
rendering result of a virtio-gpu with a vhost-user master process
(such as QEMU). In a very limited way it resembles a display server
protocol, with QEMU as the display server and the slave as the
client. Typically, the slave sets a scanout/display configuration and
then sends flush events for display updates; it also updates the
cursor shape and position.

The protocol is sent over a UNIX domain stream socket, since it uses
socket ancillary data to share opened file descriptors (DMABUF fds or
shared memory). The socket is usually obtained via
``VHOST_USER_GPU_SET_SOCKET``.

Requests are sent by the *slave*, and the optional replies by the
*master*.

Wire format
===========

Unless specified differently, numbers are in the machine native byte
order.

A vhost-user-gpu message (request and reply) consists of 3 header
fields and a payload.

+---------+-------+------+---------+
| request | flags | size | payload |
+---------+-------+------+---------+

Header
------

:request: ``u32``, type of the request

:flags: ``u32``, 32-bit bit field:

 - Bit 2 is the reply flag - needs to be set on each reply

:size: ``u32``, size of the payload

Payload types
-------------

Depending on the request type, **payload** can be:

VhostUserGpuCursorPos
^^^^^^^^^^^^^^^^^^^^^

+------------+---+---+
| scanout-id | x | y |
+------------+---+---+

:scanout-id: ``u32``, the scanout where the cursor is located

:x/y: ``u32``, the cursor position

VhostUserGpuCursorUpdate
^^^^^^^^^^^^^^^^^^^^^^^^

+-----+-------+-------+--------+
| pos | hot_x | hot_y | cursor |
+-----+-------+-------+--------+

:pos: a ``VhostUserGpuCursorPos``, the cursor location

:hot_x/hot_y: ``u32``, the cursor hotspot location

:cursor: ``[u32; 64 * 64]``, 64x64 RGBA cursor data (PIXMAN_a8r8g8b8 format)

VhostUserGpuScanout
^^^^^^^^^^^^^^^^^^^

+------------+---+---+
| scanout-id | w | h |
+------------+---+---+

:scanout-id: ``u32``, the scanout configuration to set

:w/h: ``u32``, the scanout width/height size

VhostUserGpuUpdate
^^^^^^^^^^^^^^^^^^

+------------+---+---+---+---+------+
| scanout-id | x | y | w | h | data |
+------------+---+---+---+---+------+

:scanout-id: ``u32``, the scanout content to update

:x/y/w/h: ``u32``, region of the update

:data: RGB data (PIXMAN_x8r8g8b8 format)

VhostUserGpuDMABUFScanout
^^^^^^^^^^^^^^^^^^^^^^^^^

+------------+---+---+---+---+-----+-----+--------+-------+--------+
| scanout-id | x | y | w | h | fdw | fdh | stride | flags | fourcc |
+------------+---+---+---+---+-----+-----+--------+-------+--------+

:scanout-id: ``u32``, the scanout configuration to set

:x/y: ``u32``, the location of the scanout within the DMABUF

:w/h: ``u32``, the scanout width/height size

:fdw/fdh/stride/flags: ``u32``, the DMABUF width/height/stride/flags

:fourcc: ``i32``, the DMABUF fourcc


C structure
-----------

In QEMU the vhost-user-gpu message is implemented with the following struct:

.. code:: c

  typedef struct VhostUserGpuMsg {
      uint32_t request; /* VhostUserGpuRequest */
      uint32_t flags;
      uint32_t size; /* the following payload size */
      union {
          VhostUserGpuCursorPos cursor_pos;
          VhostUserGpuCursorUpdate cursor_update;
          VhostUserGpuScanout scanout;
          VhostUserGpuUpdate update;
          VhostUserGpuDMABUFScanout dmabuf_scanout;
          struct virtio_gpu_resp_display_info display_info;
          uint64_t u64;
      } payload;
  } QEMU_PACKED VhostUserGpuMsg;
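As a minimal, illustrative sketch (not the in-tree implementation), a
slave could push one of these messages over the GPU socket as shown
below, assuming ``gpu_fd`` is a blocking socket obtained via
``VHOST_USER_GPU_SET_SOCKET``; ``VHOST_USER_GPU_HDR_SIZE`` is the size
of the three header fields as defined in the QEMU sources, and the
helper name ``gpu_send_msg`` is made up for this example.

.. code:: c

  #include <unistd.h>

  /* Write header + payload in one go; the struct is packed, so the
   * payload immediately follows the header fields in memory. */
  static bool gpu_send_msg(int gpu_fd, const VhostUserGpuMsg *msg)
  {
      size_t len = VHOST_USER_GPU_HDR_SIZE + msg->size;

      return write(gpu_fd, msg, len) == (ssize_t)len;
  }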
Protocol features
-----------------

None yet.

As the protocol may need to evolve, new messages and communication
changes are negotiated through the preliminary
``VHOST_USER_GPU_GET_PROTOCOL_FEATURES`` and
``VHOST_USER_GPU_SET_PROTOCOL_FEATURES`` requests.

Communication
=============

Message types
-------------

``VHOST_USER_GPU_GET_PROTOCOL_FEATURES``
  :id: 1
  :request payload: N/A
  :reply payload: ``u64``

  Get the supported protocol features bitmask.

``VHOST_USER_GPU_SET_PROTOCOL_FEATURES``
  :id: 2
  :request payload: ``u64``
  :reply payload: N/A

  Enable protocol features using a bitmask.

``VHOST_USER_GPU_GET_DISPLAY_INFO``
  :id: 3
  :request payload: N/A
  :reply payload: ``struct virtio_gpu_resp_display_info`` (from the virtio specification)

  Get the preferred display configuration.

``VHOST_USER_GPU_CURSOR_POS``
  :id: 4
  :request payload: ``VhostUserGpuCursorPos``
  :reply payload: N/A

  Set/show the cursor position.

``VHOST_USER_GPU_CURSOR_POS_HIDE``
  :id: 5
  :request payload: ``VhostUserGpuCursorPos``
  :reply payload: N/A

  Set/hide the cursor.

``VHOST_USER_GPU_CURSOR_UPDATE``
  :id: 6
  :request payload: ``VhostUserGpuCursorUpdate``
  :reply payload: N/A

  Update the cursor shape and location.

``VHOST_USER_GPU_SCANOUT``
  :id: 7
  :request payload: ``VhostUserGpuScanout``
  :reply payload: N/A

  Set the scanout resolution. To disable a scanout, the dimensions
  width/height are set to 0.

``VHOST_USER_GPU_UPDATE``
  :id: 8
  :request payload: ``VhostUserGpuUpdate``
  :reply payload: N/A

  Update the scanout content. The data payload contains the graphical bits.
  The display should be flushed and presented.

``VHOST_USER_GPU_DMABUF_SCANOUT``
  :id: 9
  :request payload: ``VhostUserGpuDMABUFScanout``
  :reply payload: N/A

  Set the scanout resolution/configuration, and share a DMABUF file
  descriptor for the scanout content, which is passed as ancillary
  data. To disable a scanout, the dimensions width/height are set
  to 0; in that case no file descriptor is passed.

``VHOST_USER_GPU_DMABUF_UPDATE``
  :id: 10
  :request payload: ``VhostUserGpuUpdate``
  :reply payload: empty payload

  The display should be flushed and presented according to the updated
  region from ``VhostUserGpuUpdate``.

  Note: there is no data payload, since the scanout is shared through
  the DMABUF that must have been set previously with
  ``VHOST_USER_GPU_DMABUF_SCANOUT``.
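To illustrate the flush sequence described above, here is a rough,
hypothetical slave-side helper; the in-tree backend uses its own
``vg_send_msg()``/``vg_wait_ok()`` helpers instead. It sends
``VHOST_USER_GPU_DMABUF_UPDATE`` and then blocks until the master
acknowledges with an empty reply that has the reply flag set. It
reuses the ``gpu_send_msg()`` sketch from the previous section.

.. code:: c

  #include <unistd.h>

  static void gpu_flush_scanout(int gpu_fd, uint32_t scanout_id,
                                uint32_t x, uint32_t y,
                                uint32_t w, uint32_t h)
  {
      VhostUserGpuMsg msg = {
          .request = VHOST_USER_GPU_DMABUF_UPDATE,
          .size = sizeof(VhostUserGpuUpdate),
          .payload.update = {
              .scanout_id = scanout_id,
              .x = x, .y = y, .width = w, .height = h,
          },
      };
      VhostUserGpuMsg reply;

      gpu_send_msg(gpu_fd, &msg);

      /* The reply carries no payload, only the header with bit 2 set. */
      if (read(gpu_fd, &reply, VHOST_USER_GPU_HDR_SIZE) ==
              (ssize_t)VHOST_USER_GPU_HDR_SIZE &&
          (reply.flags & VHOST_USER_GPU_MSG_FLAG_REPLY)) {
          /* update acknowledged; safe to touch the DMABUF again */
      }
  }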
@@ -1163,6 +1163,15 @@ Master message types
  send the shared inflight buffer back to slave so that slave could
  get inflight I/O after a crash or restart.

``VHOST_USER_GPU_SET_SOCKET``
  :id: 33
  :equivalent ioctl: N/A
  :master payload: N/A

  Sets the GPU protocol socket file descriptor, which is passed as
  ancillary data. The GPU protocol is used to inform the master of
  rendering state and updates. See vhost-user-gpu.rst for details.

Slave message types
-------------------
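For illustration only: the ancillary-data mechanism used here is the
standard ``SCM_RIGHTS`` control message. A hypothetical, simplified
helper that passes a descriptor this way (QEMU's real master wraps
this in a complete vhost-user message through its own socket helpers)
could look like the following sketch.

.. code:: c

  #include <string.h>
  #include <sys/socket.h>

  static int send_fd(int sock, int fd)
  {
      char byte = 0;
      struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
      union {
          struct cmsghdr align;
          char buf[CMSG_SPACE(sizeof(int))];
      } u;
      struct msghdr msg = {
          .msg_iov = &iov,
          .msg_iovlen = 1,
          .msg_control = u.buf,
          .msg_controllen = sizeof(u.buf),
      };
      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

      memset(u.buf, 0, sizeof(u.buf));
      cmsg->cmsg_level = SOL_SOCKET;
      cmsg->cmsg_type = SCM_RIGHTS;
      cmsg->cmsg_len = CMSG_LEN(sizeof(int));
      memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

      /* the fd rides along as ancillary data on a 1-byte message */
      return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
  }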
@@ -111,6 +111,16 @@ config VIRTIO_VGA
    depends on VIRTIO_PCI
    select VGA

config VHOST_USER_GPU
    bool
    default y
    depends on VIRTIO_GPU && VHOST_USER

config VHOST_USER_VGA
    bool
    default y
    depends on VIRTIO_VGA && VHOST_USER_GPU

config DPCD
    bool
    select AUX
@@ -43,9 +43,12 @@ obj-$(CONFIG_VGA) += vga.o

common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o

obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu.o virtio-gpu-3d.o
obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o
obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o
obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o
obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o
obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o
virtio-gpu.o-cflags := $(VIRGL_CFLAGS)
virtio-gpu.o-libs += $(VIRGL_LIBS)
virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS)
@@ -0,0 +1,51 @@
/*
 * vhost-user GPU PCI device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-gpu-pci.h"

#define TYPE_VHOST_USER_GPU_PCI "vhost-user-gpu-pci"
#define VHOST_USER_GPU_PCI(obj) \
    OBJECT_CHECK(VhostUserGPUPCI, (obj), TYPE_VHOST_USER_GPU_PCI)

typedef struct VhostUserGPUPCI {
    VirtIOGPUPCIBase parent_obj;

    VhostUserGPU vdev;
} VhostUserGPUPCI;

static void vhost_user_gpu_pci_initfn(Object *obj)
{
    VhostUserGPUPCI *dev = VHOST_USER_GPU_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_USER_GPU);

    VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);

    object_property_add_alias(obj, "chardev",
                              OBJECT(&dev->vdev), "chardev",
                              &error_abort);
}

static const VirtioPCIDeviceTypeInfo vhost_user_gpu_pci_info = {
    .generic_name = TYPE_VHOST_USER_GPU_PCI,
    .parent = TYPE_VIRTIO_GPU_PCI_BASE,
    .instance_size = sizeof(VhostUserGPUPCI),
    .instance_init = vhost_user_gpu_pci_initfn,
};

static void vhost_user_gpu_pci_register_types(void)
{
    virtio_pci_types_register(&vhost_user_gpu_pci_info);
}

type_init(vhost_user_gpu_pci_register_types)
@ -0,0 +1,607 @@
|
|||
/*
|
||||
* vhost-user GPU Device
|
||||
*
|
||||
* Copyright Red Hat, Inc. 2018
|
||||
*
|
||||
* Authors:
|
||||
* Marc-André Lureau <marcandre.lureau@redhat.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "hw/virtio/virtio-gpu.h"
|
||||
#include "chardev/char-fe.h"
|
||||
#include "qapi/error.h"
|
||||
#include "migration/blocker.h"
|
||||
|
||||
#define VHOST_USER_GPU(obj) \
|
||||
OBJECT_CHECK(VhostUserGPU, (obj), TYPE_VHOST_USER_GPU)
|
||||
|
||||
typedef enum VhostUserGpuRequest {
|
||||
VHOST_USER_GPU_NONE = 0,
|
||||
VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
|
||||
VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
|
||||
VHOST_USER_GPU_GET_DISPLAY_INFO,
|
||||
VHOST_USER_GPU_CURSOR_POS,
|
||||
VHOST_USER_GPU_CURSOR_POS_HIDE,
|
||||
VHOST_USER_GPU_CURSOR_UPDATE,
|
||||
VHOST_USER_GPU_SCANOUT,
|
||||
VHOST_USER_GPU_UPDATE,
|
||||
VHOST_USER_GPU_DMABUF_SCANOUT,
|
||||
VHOST_USER_GPU_DMABUF_UPDATE,
|
||||
} VhostUserGpuRequest;
|
||||
|
||||
typedef struct VhostUserGpuDisplayInfoReply {
|
||||
struct virtio_gpu_resp_display_info info;
|
||||
} VhostUserGpuDisplayInfoReply;
|
||||
|
||||
typedef struct VhostUserGpuCursorPos {
|
||||
uint32_t scanout_id;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
} QEMU_PACKED VhostUserGpuCursorPos;
|
||||
|
||||
typedef struct VhostUserGpuCursorUpdate {
|
||||
VhostUserGpuCursorPos pos;
|
||||
uint32_t hot_x;
|
||||
uint32_t hot_y;
|
||||
uint32_t data[64 * 64];
|
||||
} QEMU_PACKED VhostUserGpuCursorUpdate;
|
||||
|
||||
typedef struct VhostUserGpuScanout {
|
||||
uint32_t scanout_id;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
} QEMU_PACKED VhostUserGpuScanout;
|
||||
|
||||
typedef struct VhostUserGpuUpdate {
|
||||
uint32_t scanout_id;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint8_t data[];
|
||||
} QEMU_PACKED VhostUserGpuUpdate;
|
||||
|
||||
typedef struct VhostUserGpuDMABUFScanout {
|
||||
uint32_t scanout_id;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t fd_width;
|
||||
uint32_t fd_height;
|
||||
uint32_t fd_stride;
|
||||
uint32_t fd_flags;
|
||||
int fd_drm_fourcc;
|
||||
} QEMU_PACKED VhostUserGpuDMABUFScanout;
|
||||
|
||||
typedef struct VhostUserGpuMsg {
|
||||
uint32_t request; /* VhostUserGpuRequest */
|
||||
uint32_t flags;
|
||||
uint32_t size; /* the following payload size */
|
||||
union {
|
||||
VhostUserGpuCursorPos cursor_pos;
|
||||
VhostUserGpuCursorUpdate cursor_update;
|
||||
VhostUserGpuScanout scanout;
|
||||
VhostUserGpuUpdate update;
|
||||
VhostUserGpuDMABUFScanout dmabuf_scanout;
|
||||
struct virtio_gpu_resp_display_info display_info;
|
||||
uint64_t u64;
|
||||
} payload;
|
||||
} QEMU_PACKED VhostUserGpuMsg;
|
||||
|
||||
static VhostUserGpuMsg m __attribute__ ((unused));
|
||||
#define VHOST_USER_GPU_HDR_SIZE \
|
||||
(sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))
|
||||
|
||||
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
|
||||
|
||||
static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
|
||||
|
||||
static void
|
||||
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
|
||||
{
|
||||
VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
|
||||
struct virtio_gpu_scanout *s;
|
||||
|
||||
if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
|
||||
return;
|
||||
}
|
||||
s = &g->parent_obj.scanout[pos->scanout_id];
|
||||
|
||||
if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
|
||||
VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
|
||||
if (!s->current_cursor) {
|
||||
s->current_cursor = cursor_alloc(64, 64);
|
||||
}
|
||||
|
||||
s->current_cursor->hot_x = up->hot_x;
|
||||
s->current_cursor->hot_y = up->hot_y;
|
||||
|
||||
memcpy(s->current_cursor->data, up->data,
|
||||
64 * 64 * sizeof(uint32_t));
|
||||
|
||||
dpy_cursor_define(s->con, s->current_cursor);
|
||||
}
|
||||
|
||||
dpy_mouse_set(s->con, pos->x, pos->y,
|
||||
msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
|
||||
}
|
||||
|
||||
static void
|
||||
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
|
||||
{
|
||||
qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
|
||||
VHOST_USER_GPU_HDR_SIZE + msg->size);
|
||||
}
|
||||
|
||||
static void
|
||||
vhost_user_gpu_unblock(VhostUserGPU *g)
|
||||
{
|
||||
VhostUserGpuMsg msg = {
|
||||
.request = VHOST_USER_GPU_DMABUF_UPDATE,
|
||||
.flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
|
||||
};
|
||||
|
||||
vhost_user_gpu_send_msg(g, &msg);
|
||||
}
|
||||
|
||||
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->scanout_id == 0 && m->width == 0) {
            s->ds = qemu_create_message_surface(640, 480,
                                                "Guest disabled display.");
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }

        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = &g->dmabuf[m->scanout_id];
        if (dmabuf->fd >= 0) {
            close(dmabuf->fd);
            dmabuf->fd = -1;
        }
        if (!console_has_gl_dmabuf(con)) {
            /* it would be nice to report that error earlier */
            error_report("console doesn't support dmabuf!");
            break;
        }
        dpy_gl_release_dmabuf(con, dmabuf);
        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            break;
        }
        *dmabuf = (QemuDmaBuf) {
            .fd = fd,
            .width = m->fd_width,
            .height = m->fd_height,
            .stride = m->fd_stride,
            .fourcc = m->fd_drm_fourcc,
            .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
        };
        dpy_gl_scanout_dmabuf(con, dmabuf);
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        g->backend_blocked = true;
        break;
    }
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);
        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}

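/*
 * Read one backend message from the GPU channel: the three 32-bit header
 * words (request, flags, size) first, then size bytes of payload.  Cursor
 * requests go to the cursor handler, everything else to the display handler
 * above.
 */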
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);
    g_return_if_fail(msg != NULL);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}

static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}

static void
vhost_user_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(VHOST_USER_GPU(g));
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
}

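/*
 * Create the dedicated GPU protocol channel: one end of the socketpair is
 * wrapped in a socket chardev and read by QEMU (vhost_user_gpu_chr_read),
 * the other end is handed to the vhost-user backend with
 * vhost_user_gpu_set_socket().
 */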
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}

static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config));
    if (ret) {
        error_report("vhost-user-gpu: get device config space failed");
        return;
    }

    /* those fields are managed by qemu */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}

static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_MASTER);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}

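/*
 * DRIVER_OK with a running VM is the point where the GPU socket pair is set
 * up and the vhost-user backend is started; any other status transition
 * tears the channel down again and stops the backend.
 */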
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev", &error_abort);
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};

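/*
 * Realize order matters: the vhost-user backend is initialized first with the
 * device's two virtqueues (control and cursor), its feature set decides
 * whether the VIRGL flag is advertised, and only then does the common
 * virtio-gpu-base realize run, with NULL queue handlers since the queues are
 * processed by the external backend rather than by QEMU.
 */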
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = vhost_user_gpu_gl_unblock;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;

    dc->props = vhost_user_gpu_properties;
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)

@@ -0,0 +1,52 @@
/*
 * vhost-user VGA device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "virtio-vga.h"

#define TYPE_VHOST_USER_VGA "vhost-user-vga"

#define VHOST_USER_VGA(obj) \
    OBJECT_CHECK(VhostUserVGA, (obj), TYPE_VHOST_USER_VGA)

typedef struct VhostUserVGA {
    VirtIOVGABase parent_obj;

    VhostUserGPU vdev;
} VhostUserVGA;

static void vhost_user_vga_inst_initfn(Object *obj)
{
    VhostUserVGA *dev = VHOST_USER_VGA(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_USER_GPU);

    VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);

    object_property_add_alias(obj, "chardev",
                              OBJECT(&dev->vdev), "chardev",
                              &error_abort);
}

static const VirtioPCIDeviceTypeInfo vhost_user_vga_info = {
    .generic_name = TYPE_VHOST_USER_VGA,
    .parent = TYPE_VIRTIO_VGA_BASE,
    .instance_size = sizeof(struct VhostUserVGA),
    .instance_init = vhost_user_vga_inst_initfn,
};

static void vhost_user_vga_register_types(void)
{
    virtio_pci_types_register(&vhost_user_vga_info);
}

type_init(vhost_user_vga_register_types)

@ -118,11 +118,11 @@ static void virgl_cmd_context_destroy(VirtIOGPU *g,
|
|||
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
|
||||
int width, int height)
|
||||
{
|
||||
if (!g->scanout[idx].con) {
|
||||
if (!g->parent_obj.scanout[idx].con) {
|
||||
return;
|
||||
}
|
||||
|
||||
dpy_gl_update(g->scanout[idx].con, x, y, width, height);
|
||||
dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
|
||||
}
|
||||
|
||||
static void virgl_cmd_resource_flush(VirtIOGPU *g,
|
||||
|
@ -135,8 +135,8 @@ static void virgl_cmd_resource_flush(VirtIOGPU *g,
|
|||
trace_virtio_gpu_cmd_res_flush(rf.resource_id,
|
||||
rf.r.width, rf.r.height, rf.r.x, rf.r.y);
|
||||
|
||||
for (i = 0; i < g->conf.max_outputs; i++) {
|
||||
if (g->scanout[i].resource_id != rf.resource_id) {
|
||||
for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
|
||||
if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
|
||||
continue;
|
||||
}
|
||||
virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
|
||||
|
@ -154,13 +154,13 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
|
|||
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
|
||||
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
|
||||
|
||||
if (ss.scanout_id >= g->conf.max_outputs) {
|
||||
if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
|
||||
__func__, ss.scanout_id);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
|
||||
return;
|
||||
}
|
||||
g->enable = 1;
|
||||
g->parent_obj.enable = 1;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
|
@ -173,20 +173,22 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
|
|||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
|
||||
return;
|
||||
}
|
||||
qemu_console_resize(g->scanout[ss.scanout_id].con,
|
||||
qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
|
||||
ss.r.width, ss.r.height);
|
||||
virgl_renderer_force_ctx_0();
|
||||
dpy_gl_scanout_texture(g->scanout[ss.scanout_id].con, info.tex_id,
|
||||
info.flags & 1 /* FIXME: Y_0_TOP */,
|
||||
info.width, info.height,
|
||||
ss.r.x, ss.r.y, ss.r.width, ss.r.height);
|
||||
dpy_gl_scanout_texture(
|
||||
g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
|
||||
info.flags & 1 /* FIXME: Y_0_TOP */,
|
||||
info.width, info.height,
|
||||
ss.r.x, ss.r.y, ss.r.width, ss.r.height);
|
||||
} else {
|
||||
if (ss.scanout_id != 0) {
|
||||
dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
|
||||
dpy_gfx_replace_surface(
|
||||
g->parent_obj.scanout[ss.scanout_id].con, NULL);
|
||||
}
|
||||
dpy_gl_scanout_disable(g->scanout[ss.scanout_id].con);
|
||||
dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
|
||||
}
|
||||
g->scanout[ss.scanout_id].resource_id = ss.resource_id;
|
||||
g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
|
||||
}
|
||||
|
||||
static void virgl_cmd_submit_3d(VirtIOGPU *g,
|
||||
|
@ -209,7 +211,7 @@ static void virgl_cmd_submit_3d(VirtIOGPU *g,
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (virtio_gpu_stats_enabled(g->conf)) {
|
||||
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
|
||||
g->stats.req_3d++;
|
||||
g->stats.bytes_3d += cs.size;
|
||||
}
|
||||
|
@ -507,7 +509,7 @@ static void virgl_write_fence(void *opaque, uint32_t fence)
|
|||
QTAILQ_REMOVE(&g->fenceq, cmd, next);
|
||||
g_free(cmd);
|
||||
g->inflight--;
|
||||
if (virtio_gpu_stats_enabled(g->conf)) {
|
||||
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
|
||||
fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
|
||||
}
|
||||
}
|
||||
|
@ -524,7 +526,7 @@ virgl_create_context(void *opaque, int scanout_idx,
|
|||
qparams.major_ver = params->major_ver;
|
||||
qparams.minor_ver = params->minor_ver;
|
||||
|
||||
ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
|
||||
ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
|
||||
return (virgl_renderer_gl_context)ctx;
|
||||
}
|
||||
|
||||
|
@ -533,7 +535,7 @@ static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
|
|||
VirtIOGPU *g = opaque;
|
||||
QEMUGLContext qctx = (QEMUGLContext)ctx;
|
||||
|
||||
dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
|
||||
dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
|
||||
}
|
||||
|
||||
static int virgl_make_context_current(void *opaque, int scanout_idx,
|
||||
|
@ -542,7 +544,8 @@ static int virgl_make_context_current(void *opaque, int scanout_idx,
|
|||
VirtIOGPU *g = opaque;
|
||||
QEMUGLContext qctx = (QEMUGLContext)ctx;
|
||||
|
||||
return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
|
||||
return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
|
||||
qctx);
|
||||
}
|
||||
|
||||
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
|
||||
|
@ -594,11 +597,11 @@ void virtio_gpu_virgl_reset(VirtIOGPU *g)
|
|||
int i;
|
||||
|
||||
/* virgl_renderer_reset() ??? */
|
||||
for (i = 0; i < g->conf.max_outputs; i++) {
|
||||
for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
|
||||
if (i != 0) {
|
||||
dpy_gfx_replace_surface(g->scanout[i].con, NULL);
|
||||
dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
|
||||
}
|
||||
dpy_gl_scanout_disable(g->scanout[i].con);
|
||||
dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -614,7 +617,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
|
|||
g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
|
||||
virtio_gpu_fence_poll, g);
|
||||
|
||||
if (virtio_gpu_stats_enabled(g->conf)) {
|
||||
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
|
||||
g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
|
||||
virtio_gpu_print_stats, g);
|
||||
timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
|
||||
|
|
|
@@ -0,0 +1,268 @@
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "hw/virtio/virtio-gpu.h"
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

void
virtio_gpu_base_reset(VirtIOGPUBase *g)
{
    int i;

    g->enable = 0;
    g->use_virgl_renderer = false;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
}

void
virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
                                  struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPUBase *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

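/*
 * gl_block calls from the UI code may nest, so renderer_blocked is a counter
 * rather than a flag; the subclass hook gl_unblock only runs once the last
 * block has been lifted.
 */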
static void
virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPUBase *g = opaque;
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        vgc->gl_unblock(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

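/*
 * Common realize shared by virtio-gpu and vhost-user-gpu: validate
 * max_outputs, register a migration blocker when virgl is enabled, create the
 * control and cursor virtqueues (with a larger control queue in 3d mode) and
 * set up one QemuConsole per configured scanout.
 */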
bool
virtio_gpu_base_device_realize(DeviceState *qdev,
                               VirtIOHandleOutput ctrl_cb,
                               VirtIOHandleOutput cursor_cb,
                               Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return false;
    }

    g->use_virgl_renderer = false;
    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return false;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        virtio_add_queue(vdev, 256, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    } else {
        virtio_add_queue(vdev, 64, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    }

    g->enabled_output_bitmask = 1;

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    return true;
}

static uint64_t
virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
                             Error **errp)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    if (virtio_gpu_edid_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_EDID);
    }

    return features;
}

static void
virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void
virtio_gpu_base_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);

    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void
virtio_gpu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->unrealize = virtio_gpu_base_device_unrealize;
    vdc->get_features = virtio_gpu_base_get_features;
    vdc->set_features = virtio_gpu_base_set_features;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_base_info = {
    .name = TYPE_VIRTIO_GPU_BASE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPUBase),
    .class_size = sizeof(VirtIOGPUBaseClass),
    .class_init = virtio_gpu_base_class_init,
    .abstract = true
};

static void
virtio_register_types(void)
{
    type_register_static(&virtio_gpu_base_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);

@ -16,33 +16,18 @@
|
|||
#include "hw/pci/pci.h"
|
||||
#include "hw/virtio/virtio.h"
|
||||
#include "hw/virtio/virtio-bus.h"
|
||||
#include "hw/virtio/virtio-pci.h"
|
||||
#include "hw/virtio/virtio-gpu.h"
|
||||
#include "hw/virtio/virtio-gpu-pci.h"
|
||||
|
||||
typedef struct VirtIOGPUPCI VirtIOGPUPCI;
|
||||
|
||||
/*
|
||||
* virtio-gpu-pci: This extends VirtioPCIProxy.
|
||||
*/
|
||||
#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
|
||||
#define VIRTIO_GPU_PCI(obj) \
|
||||
OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
|
||||
|
||||
struct VirtIOGPUPCI {
|
||||
VirtIOPCIProxy parent_obj;
|
||||
VirtIOGPU vdev;
|
||||
};
|
||||
|
||||
static Property virtio_gpu_pci_properties[] = {
|
||||
static Property virtio_gpu_pci_base_properties[] = {
|
||||
DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
|
||||
static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
|
||||
{
|
||||
VirtIOGPUPCI *vgpu = VIRTIO_GPU_PCI(vpci_dev);
|
||||
VirtIOGPU *g = &vgpu->vdev;
|
||||
DeviceState *vdev = DEVICE(&vgpu->vdev);
|
||||
VirtIOGPUPCIBase *vgpu = VIRTIO_GPU_PCI_BASE(vpci_dev);
|
||||
VirtIOGPUBase *g = vgpu->vgpu;
|
||||
DeviceState *vdev = DEVICE(g);
|
||||
int i;
|
||||
Error *local_error = NULL;
|
||||
|
||||
|
@ -64,36 +49,56 @@ static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
|
|||
}
|
||||
}
|
||||
|
||||
static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data)
|
||||
static void virtio_gpu_pci_base_class_init(ObjectClass *klass, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(klass);
|
||||
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
|
||||
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
|
||||
|
||||
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
|
||||
dc->props = virtio_gpu_pci_properties;
|
||||
dc->props = virtio_gpu_pci_base_properties;
|
||||
dc->hotpluggable = false;
|
||||
k->realize = virtio_gpu_pci_realize;
|
||||
k->realize = virtio_gpu_pci_base_realize;
|
||||
pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER;
|
||||
}
|
||||
|
||||
static const TypeInfo virtio_gpu_pci_base_info = {
|
||||
.name = TYPE_VIRTIO_GPU_PCI_BASE,
|
||||
.parent = TYPE_VIRTIO_PCI,
|
||||
.instance_size = sizeof(VirtIOGPUPCIBase),
|
||||
.class_init = virtio_gpu_pci_base_class_init,
|
||||
.abstract = true
|
||||
};
|
||||
|
||||
#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
|
||||
#define VIRTIO_GPU_PCI(obj) \
|
||||
OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
|
||||
|
||||
typedef struct VirtIOGPUPCI {
|
||||
VirtIOGPUPCIBase parent_obj;
|
||||
VirtIOGPU vdev;
|
||||
} VirtIOGPUPCI;
|
||||
|
||||
static void virtio_gpu_initfn(Object *obj)
|
||||
{
|
||||
VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj);
|
||||
|
||||
virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
|
||||
TYPE_VIRTIO_GPU);
|
||||
VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
|
||||
}
|
||||
|
||||
static const VirtioPCIDeviceTypeInfo virtio_gpu_pci_info = {
|
||||
.generic_name = TYPE_VIRTIO_GPU_PCI,
|
||||
.parent = TYPE_VIRTIO_GPU_PCI_BASE,
|
||||
.instance_size = sizeof(VirtIOGPUPCI),
|
||||
.instance_init = virtio_gpu_initfn,
|
||||
.class_init = virtio_gpu_pci_class_init,
|
||||
};
|
||||
|
||||
static void virtio_gpu_pci_register_types(void)
|
||||
{
|
||||
type_register_static(&virtio_gpu_pci_base_info);
|
||||
virtio_pci_types_register(&virtio_gpu_pci_info);
|
||||
}
|
||||
|
||||
type_init(virtio_gpu_pci_register_types)
|
||||
|
|
|
@ -20,11 +20,13 @@
|
|||
#include "sysemu/dma.h"
|
||||
#include "hw/virtio/virtio.h"
|
||||
#include "hw/virtio/virtio-gpu.h"
|
||||
#include "hw/virtio/virtio-gpu-bswap.h"
|
||||
#include "hw/virtio/virtio-gpu-pixman.h"
|
||||
#include "hw/virtio/virtio-bus.h"
|
||||
#include "hw/display/edid.h"
|
||||
#include "migration/blocker.h"
|
||||
#include "qemu/log.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
||||
#define VIRTIO_GPU_VM_VERSION 1
|
||||
|
||||
|
@ -34,53 +36,11 @@ virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
|
|||
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
|
||||
struct virtio_gpu_simple_resource *res);
|
||||
|
||||
static void
|
||||
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
|
||||
{
|
||||
le32_to_cpus(&hdr->type);
|
||||
le32_to_cpus(&hdr->flags);
|
||||
le64_to_cpus(&hdr->fence_id);
|
||||
le32_to_cpus(&hdr->ctx_id);
|
||||
le32_to_cpus(&hdr->padding);
|
||||
}
|
||||
|
||||
static void virtio_gpu_bswap_32(void *ptr,
|
||||
size_t size)
|
||||
{
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
|
||||
size_t i;
|
||||
struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;
|
||||
|
||||
virtio_gpu_ctrl_hdr_bswap(hdr);
|
||||
|
||||
i = sizeof(struct virtio_gpu_ctrl_hdr);
|
||||
while (i < size) {
|
||||
le32_to_cpus((uint32_t *)(ptr + i));
|
||||
i = i + sizeof(uint32_t);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
|
||||
{
|
||||
virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
|
||||
le32_to_cpus(&t2d->r.x);
|
||||
le32_to_cpus(&t2d->r.y);
|
||||
le32_to_cpus(&t2d->r.width);
|
||||
le32_to_cpus(&t2d->r.height);
|
||||
le64_to_cpus(&t2d->offset);
|
||||
le32_to_cpus(&t2d->resource_id);
|
||||
le32_to_cpus(&t2d->padding);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_VIRGL
|
||||
#include <virglrenderer.h>
|
||||
#define VIRGL(_g, _virgl, _simple, ...) \
|
||||
do { \
|
||||
if (_g->use_virgl_renderer) { \
|
||||
if (_g->parent_obj.use_virgl_renderer) { \
|
||||
_virgl(__VA_ARGS__); \
|
||||
} else { \
|
||||
_simple(__VA_ARGS__); \
|
||||
|
@ -148,10 +108,10 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
|
|||
struct virtio_gpu_scanout *s;
|
||||
bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
|
||||
|
||||
if (cursor->pos.scanout_id >= g->conf.max_outputs) {
|
||||
if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
|
||||
return;
|
||||
}
|
||||
s = &g->scanout[cursor->pos.scanout_id];
|
||||
s = &g->parent_obj.scanout[cursor->pos.scanout_id];
|
||||
|
||||
trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
|
||||
cursor->pos.x,
|
||||
|
@ -182,53 +142,6 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
|
|||
cursor->resource_id ? 1 : 0);
|
||||
}
|
||||
|
||||
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
|
||||
{
|
||||
VirtIOGPU *g = VIRTIO_GPU(vdev);
|
||||
memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
|
||||
}
|
||||
|
||||
static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
|
||||
{
|
||||
VirtIOGPU *g = VIRTIO_GPU(vdev);
|
||||
struct virtio_gpu_config vgconfig;
|
||||
|
||||
memcpy(&vgconfig, config, sizeof(g->virtio_config));
|
||||
|
||||
if (vgconfig.events_clear) {
|
||||
g->virtio_config.events_read &= ~vgconfig.events_clear;
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
|
||||
Error **errp)
|
||||
{
|
||||
VirtIOGPU *g = VIRTIO_GPU(vdev);
|
||||
|
||||
if (virtio_gpu_virgl_enabled(g->conf)) {
|
||||
features |= (1 << VIRTIO_GPU_F_VIRGL);
|
||||
}
|
||||
if (virtio_gpu_edid_enabled(g->conf)) {
|
||||
features |= (1 << VIRTIO_GPU_F_EDID);
|
||||
}
|
||||
return features;
|
||||
}
|
||||
|
||||
static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
|
||||
{
|
||||
static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
|
||||
VirtIOGPU *g = VIRTIO_GPU(vdev);
|
||||
|
||||
g->use_virgl_renderer = ((features & virgl) == virgl);
|
||||
trace_virtio_gpu_features(g->use_virgl_renderer);
|
||||
}
|
||||
|
||||
static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
|
||||
{
|
||||
g->virtio_config.events_read |= event_type;
|
||||
virtio_notify_config(&g->parent_obj);
|
||||
}
|
||||
|
||||
static struct virtio_gpu_simple_resource *
|
||||
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
|
||||
{
|
||||
|
@ -277,21 +190,6 @@ void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
|
|||
virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
|
||||
}
|
||||
|
||||
static void
|
||||
virtio_gpu_fill_display_info(VirtIOGPU *g,
|
||||
struct virtio_gpu_resp_display_info *dpy_info)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < g->conf.max_outputs; i++) {
|
||||
if (g->enabled_output_bitmask & (1 << i)) {
|
||||
dpy_info->pmodes[i].enabled = 1;
|
||||
dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
|
||||
dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void virtio_gpu_get_display_info(VirtIOGPU *g,
|
||||
struct virtio_gpu_ctrl_command *cmd)
|
||||
{
|
||||
|
@ -300,7 +198,7 @@ void virtio_gpu_get_display_info(VirtIOGPU *g,
|
|||
trace_virtio_gpu_cmd_get_display_info();
|
||||
memset(&display_info, 0, sizeof(display_info));
|
||||
display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
|
||||
virtio_gpu_fill_display_info(g, &display_info);
|
||||
virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
|
||||
virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
|
||||
sizeof(display_info));
|
||||
}
|
||||
|
@ -309,9 +207,10 @@ static void
|
|||
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
|
||||
struct virtio_gpu_resp_edid *edid)
|
||||
{
|
||||
VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
|
||||
qemu_edid_info info = {
|
||||
.prefx = g->req_state[scanout].width,
|
||||
.prefy = g->req_state[scanout].height,
|
||||
.prefx = b->req_state[scanout].width,
|
||||
.prefy = b->req_state[scanout].height,
|
||||
};
|
||||
|
||||
edid->size = cpu_to_le32(sizeof(edid->edid));
|
||||
|
@ -323,11 +222,12 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
|
|||
{
|
||||
struct virtio_gpu_resp_edid edid;
|
||||
struct virtio_gpu_cmd_get_edid get_edid;
|
||||
VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
|
||||
|
||||
VIRTIO_GPU_FILL_CMD(get_edid);
|
||||
virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
|
||||
|
||||
if (get_edid.scanout >= g->conf.max_outputs) {
|
||||
if (get_edid.scanout >= b->conf.max_outputs) {
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
|
||||
return;
|
||||
}
|
||||
|
@ -339,30 +239,6 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
|
|||
virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
|
||||
}
|
||||
|
||||
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
|
||||
{
|
||||
switch (virtio_gpu_format) {
|
||||
case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
|
||||
return PIXMAN_BE_b8g8r8x8;
|
||||
case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
|
||||
return PIXMAN_BE_b8g8r8a8;
|
||||
case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
|
||||
return PIXMAN_BE_x8r8g8b8;
|
||||
case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
|
||||
return PIXMAN_BE_a8r8g8b8;
|
||||
case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
|
||||
return PIXMAN_BE_r8g8b8x8;
|
||||
case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
|
||||
return PIXMAN_BE_r8g8b8a8;
|
||||
case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
|
||||
return PIXMAN_BE_x8b8g8r8;
|
||||
case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
|
||||
return PIXMAN_BE_a8b8g8r8;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
|
||||
uint32_t width, uint32_t height)
|
||||
{
|
||||
|
@ -409,7 +285,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
|
|||
res->format = c2d.format;
|
||||
res->resource_id = c2d.resource_id;
|
||||
|
||||
pformat = get_pixman_format(c2d.format);
|
||||
pformat = virtio_gpu_get_pixman_format(c2d.format);
|
||||
if (!pformat) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
"%s: host couldn't handle guest format %d\n",
|
||||
|
@ -420,7 +296,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
|
|||
}
|
||||
|
||||
res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
|
||||
if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
|
||||
if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
|
||||
res->image = pixman_image_create_bits(pformat,
|
||||
c2d.width,
|
||||
c2d.height,
|
||||
|
@ -442,7 +318,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
|
|||
|
||||
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
|
||||
{
|
||||
struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
|
||||
struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
|
||||
struct virtio_gpu_simple_resource *res;
|
||||
DisplaySurface *ds = NULL;
|
||||
|
||||
|
@ -474,7 +350,7 @@ static void virtio_gpu_resource_destroy(VirtIOGPU *g,
|
|||
int i;
|
||||
|
||||
if (res->scanout_bitmask) {
|
||||
for (i = 0; i < g->conf.max_outputs; i++) {
|
||||
for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
|
||||
if (res->scanout_bitmask & (1 << i)) {
|
||||
virtio_gpu_disable_scanout(g, i);
|
||||
}
|
||||
|
@ -604,7 +480,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
|
|||
|
||||
pixman_region_init_rect(&flush_region,
|
||||
rf.r.x, rf.r.y, rf.r.width, rf.r.height);
|
||||
for (i = 0; i < g->conf.max_outputs; i++) {
|
||||
for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
|
||||
struct virtio_gpu_scanout *scanout;
|
||||
pixman_region16_t region, finalregion;
|
||||
pixman_box16_t *extents;
|
||||
|
@ -612,7 +488,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
|
|||
if (!(res->scanout_bitmask & (1 << i))) {
|
||||
continue;
|
||||
}
|
||||
scanout = &g->scanout[i];
|
||||
scanout = &g->parent_obj.scanout[i];
|
||||
|
||||
pixman_region_init(&finalregion);
|
||||
pixman_region_init_rect(®ion, scanout->x, scanout->y,
|
||||
|
@ -622,7 +498,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
|
|||
pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
|
||||
extents = pixman_region_extents(&finalregion);
|
||||
/* work out the area we need to update for each console */
|
||||
dpy_gfx_update(g->scanout[i].con,
|
||||
dpy_gfx_update(g->parent_obj.scanout[i].con,
|
||||
extents->x1, extents->y1,
|
||||
extents->x2 - extents->x1,
|
||||
extents->y2 - extents->y1);
|
||||
|
@ -653,14 +529,14 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
|
|||
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
|
||||
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
|
||||
|
||||
if (ss.scanout_id >= g->conf.max_outputs) {
|
||||
if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
|
||||
__func__, ss.scanout_id);
|
||||
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
|
||||
return;
|
||||
}
|
||||
|
||||
g->enable = 1;
|
||||
g->parent_obj.enable = 1;
|
||||
if (ss.resource_id == 0) {
|
||||
virtio_gpu_disable_scanout(g, ss.scanout_id);
|
||||
return;
|
||||
|
@ -677,6 +553,8 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
|
|||
|
||||
if (ss.r.x > res->width ||
|
||||
ss.r.y > res->height ||
|
||||
ss.r.width < 16 ||
|
||||
ss.r.height < 16 ||
|
||||
ss.r.width > res->width ||
|
||||
ss.r.height > res->height ||
|
||||
ss.r.x + ss.r.width > res->width ||
|
||||
|
@ -689,7 +567,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
|
|||
return;
|
||||
}
|
||||
|
||||
scanout = &g->scanout[ss.scanout_id];
|
||||
scanout = &g->parent_obj.scanout[ss.scanout_id];
|
||||
|
||||
format = pixman_image_get_format(res->image);
|
||||
bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
|
||||
|
@ -712,7 +590,8 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
|
|||
return;
|
||||
}
|
||||
pixman_image_unref(rect);
|
||||
dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
|
||||
dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
|
||||
scanout->ds);
|
||||
}
|
||||
|
||||
ores = virtio_gpu_find_resource(g, scanout->resource_id);
|
||||
|
@ -930,7 +809,7 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
|
|||
while (!QTAILQ_EMPTY(&g->cmdq)) {
|
||||
cmd = QTAILQ_FIRST(&g->cmdq);
|
||||
|
||||
if (g->renderer_blocked) {
|
||||
if (g->parent_obj.renderer_blocked) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -939,14 +818,14 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
|
|||
g, cmd);
|
||||
|
||||
QTAILQ_REMOVE(&g->cmdq, cmd, next);
|
||||
if (virtio_gpu_stats_enabled(g->conf)) {
|
||||
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
|
||||
g->stats.requests++;
|
||||
}
|
||||
|
||||
if (!cmd->finished) {
|
||||
QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
|
||||
g->inflight++;
|
||||
if (virtio_gpu_stats_enabled(g->conf)) {
|
||||
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
|
||||
if (g->stats.max_inflight < g->inflight) {
|
||||
g->stats.max_inflight = g->inflight;
|
||||
}
|
||||
|
@ -958,6 +837,19 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
|
|||
}
|
||||
}
|
||||
|
||||
static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
|
||||
{
|
||||
VirtIOGPU *g = VIRTIO_GPU(b);
|
||||
|
||||
#ifdef CONFIG_VIRGL
|
||||
if (g->renderer_reset) {
|
||||
g->renderer_reset = false;
|
||||
virtio_gpu_virgl_reset(g);
|
||||
}
|
||||
#endif
|
||||
virtio_gpu_process_cmdq(g);
|
||||
}
|
||||
|
||||
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
|
||||
{
|
||||
VirtIOGPU *g = VIRTIO_GPU(vdev);
|
||||
|
@ -968,7 +860,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_VIRGL
|
||||
if (!g->renderer_inited && g->use_virgl_renderer) {
|
||||
if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
|
||||
virtio_gpu_virgl_init(g);
|
||||
g->renderer_inited = true;
|
||||
}
|
||||
|
@ -986,7 +878,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
|
|||
virtio_gpu_process_cmdq(g);
|
||||
|
||||
#ifdef CONFIG_VIRGL
|
||||
if (g->use_virgl_renderer) {
|
||||
if (g->parent_obj.use_virgl_renderer) {
|
||||
virtio_gpu_virgl_fence_poll(g);
|
||||
}
|
||||
#endif
|
||||
|
@ -995,7 +887,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
|
|||
static void virtio_gpu_ctrl_bh(void *opaque)
|
||||
{
|
||||
VirtIOGPU *g = opaque;
|
||||
virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
|
||||
virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
|
||||
}
|
||||
|
||||
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
|
||||
|
@ -1033,75 +925,9 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
|
|||
static void virtio_gpu_cursor_bh(void *opaque)
|
||||
{
|
||||
VirtIOGPU *g = opaque;
|
||||
virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
|
||||
virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
|
||||
}
|
||||
|
||||
static void virtio_gpu_invalidate_display(void *opaque)
|
||||
{
|
||||
}
|
||||
|
||||
static void virtio_gpu_update_display(void *opaque)
|
||||
{
|
||||
}
|
||||
|
||||
static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
|
||||
{
|
||||
}
|
||||
|
||||
static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
|
||||
{
|
||||
VirtIOGPU *g = opaque;
|
||||
|
||||
if (idx >= g->conf.max_outputs) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
g->req_state[idx].x = info->xoff;
|
||||
g->req_state[idx].y = info->yoff;
|
||||
g->req_state[idx].width = info->width;
|
||||
g->req_state[idx].height = info->height;
|
||||
|
||||
if (info->width && info->height) {
|
||||
g->enabled_output_bitmask |= (1 << idx);
|
||||
} else {
|
||||
g->enabled_output_bitmask &= ~(1 << idx);
|
||||
}
|
||||
|
||||
/* send event to guest */
|
||||
virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void virtio_gpu_gl_block(void *opaque, bool block)
|
||||
{
|
||||
VirtIOGPU *g = opaque;
|
||||
|
||||
if (block) {
|
||||
g->renderer_blocked++;
|
||||
} else {
|
||||
g->renderer_blocked--;
|
||||
}
|
||||
assert(g->renderer_blocked >= 0);
|
||||
|
||||
if (g->renderer_blocked == 0) {
|
||||
#ifdef CONFIG_VIRGL
|
||||
if (g->renderer_reset) {
|
||||
g->renderer_reset = false;
|
||||
virtio_gpu_virgl_reset(g);
|
||||
}
|
||||
#endif
|
||||
virtio_gpu_process_cmdq(g);
|
||||
}
|
||||
}
|
||||
|
||||
const GraphicHwOps virtio_gpu_ops = {
|
||||
.invalidate = virtio_gpu_invalidate_display,
|
||||
.gfx_update = virtio_gpu_update_display,
|
||||
.text_update = virtio_gpu_text_update,
|
||||
.ui_info = virtio_gpu_ui_info,
|
||||
.gl_block = virtio_gpu_gl_block,
|
||||
};
|
||||
|
||||
static const VMStateDescription vmstate_virtio_gpu_scanout = {
|
||||
.name = "virtio-gpu-one-scanout",
|
||||
.version_id = 1,
|
||||
|
@ -1124,10 +950,11 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = {
|
|||
.name = "virtio-gpu-scanouts",
|
||||
.version_id = 1,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_INT32(enable, struct VirtIOGPU),
|
||||
VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
|
||||
VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
|
||||
conf.max_outputs, 1,
|
||||
VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
|
||||
VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
|
||||
struct VirtIOGPU, NULL),
|
||||
VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
|
||||
parent_obj.conf.max_outputs, 1,
|
||||
vmstate_virtio_gpu_scanout,
|
||||
struct virtio_gpu_scanout),
|
||||
VMSTATE_END_OF_LIST()
|
||||
|
@ -1183,7 +1010,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
|
|||
res->iov_cnt = qemu_get_be32(f);
|
||||
|
||||
/* allocate */
|
||||
pformat = get_pixman_format(res->format);
|
||||
pformat = virtio_gpu_get_pixman_format(res->format);
|
||||
if (!pformat) {
|
||||
g_free(res);
|
||||
return -EINVAL;
|
||||
|
@ -1242,8 +1069,8 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
|
|||
|
||||
/* load & apply scanout state */
|
||||
vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
|
||||
for (i = 0; i < g->conf.max_outputs; i++) {
|
||||
scanout = &g->scanout[i];
|
||||
for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
|
||||
scanout = &g->parent_obj.scanout[i];
|
||||
if (!scanout->resource_id) {
|
||||
continue;
|
||||
}
|
||||
|
@ -1272,84 +1099,35 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
|
|||
VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
|
||||
VirtIOGPU *g = VIRTIO_GPU(qdev);
|
||||
bool have_virgl;
|
||||
Error *local_err = NULL;
|
||||
int i;
|
||||
|
||||
if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
|
||||
error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
|
||||
return;
|
||||
}
|
||||
|
||||
g->use_virgl_renderer = false;
|
||||
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
|
||||
have_virgl = false;
|
||||
#else
|
||||
have_virgl = display_opengl;
|
||||
#endif
|
||||
if (!have_virgl) {
|
||||
g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
|
||||
}
|
||||
|
||||
if (virtio_gpu_virgl_enabled(g->conf)) {
|
||||
error_setg(&g->migration_blocker, "virgl is not yet migratable");
|
||||
migrate_add_blocker(g->migration_blocker, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
error_free(g->migration_blocker);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
|
||||
virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
|
||||
sizeof(struct virtio_gpu_config));
|
||||
|
||||
g->req_state[0].width = g->conf.xres;
|
||||
g->req_state[0].height = g->conf.yres;
|
||||
|
||||
if (virtio_gpu_virgl_enabled(g->conf)) {
|
||||
/* use larger control queue in 3d mode */
|
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)

@@ -1357,21 +1135,16 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;
    int i;

    g->enable = 0;
#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

@@ -1387,15 +1160,37 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        if (g->renderer_blocked) {
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->use_virgl_renderer = 0;
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*

@@ -1426,18 +1221,15 @@ static const VMStateDescription vmstate_virtio_gpu = {
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_BIT("edid", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_EDID_ENABLED, false),
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

@@ -1445,27 +1237,22 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = virtio_gpu_gl_unblock;
    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

@@ -1475,26 +1262,3 @@ static void virtio_register_types(void)
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);

@@ -1,63 +1,42 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "vga_int.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-gpu.h"
#include "qapi/error.h"
#include "virtio-vga.h"

/*
 * virtio-vga: This extends VirtioPCIProxy.
 */
#define TYPE_VIRTIO_VGA "virtio-vga"
#define VIRTIO_VGA(obj) \
        OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)
#define VIRTIO_VGA_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtIOVGAClass, obj, TYPE_VIRTIO_VGA)
#define VIRTIO_VGA_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtIOVGAClass, klass, TYPE_VIRTIO_VGA)

typedef struct VirtIOVGA {
    VirtIOPCIProxy parent_obj;
    VirtIOGPU vdev;
    VGACommonState vga;
    MemoryRegion vga_mrs[3];
} VirtIOVGA;

typedef struct VirtIOVGAClass {
    VirtioPCIClass parent_class;
    DeviceReset parent_reset;
} VirtIOVGAClass;

static void virtio_vga_invalidate_display(void *opaque)
static void virtio_vga_base_invalidate_display(void *opaque)
{
    VirtIOVGA *vvga = opaque;
    VirtIOVGABase *vvga = opaque;
    VirtIOGPUBase *g = vvga->vgpu;

    if (vvga->vdev.enable) {
        virtio_gpu_ops.invalidate(&vvga->vdev);
    if (g->enable) {
        virtio_gpu_ops.invalidate(g);
    } else {
        vvga->vga.hw_ops->invalidate(&vvga->vga);
    }
}

static void virtio_vga_update_display(void *opaque)
static void virtio_vga_base_update_display(void *opaque)
{
    VirtIOVGA *vvga = opaque;
    VirtIOVGABase *vvga = opaque;
    VirtIOGPUBase *g = vvga->vgpu;

    if (vvga->vdev.enable) {
        virtio_gpu_ops.gfx_update(&vvga->vdev);
    if (g->enable) {
        virtio_gpu_ops.gfx_update(g);
    } else {
        vvga->vga.hw_ops->gfx_update(&vvga->vga);
    }
}

static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
static void virtio_vga_base_text_update(void *opaque, console_ch_t *chardata)
{
    VirtIOVGA *vvga = opaque;
    VirtIOVGABase *vvga = opaque;
    VirtIOGPUBase *g = vvga->vgpu;

    if (vvga->vdev.enable) {
    if (g->enable) {
        if (virtio_gpu_ops.text_update) {
            virtio_gpu_ops.text_update(&vvga->vdev, chardata);
            virtio_gpu_ops.text_update(g, chardata);
        }
    } else {
        if (vvga->vga.hw_ops->text_update) {

@@ -66,49 +45,52 @@ static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
    }
}

static int virtio_vga_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
static int virtio_vga_base_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOVGA *vvga = opaque;
    VirtIOVGABase *vvga = opaque;
    VirtIOGPUBase *g = vvga->vgpu;

    if (virtio_gpu_ops.ui_info) {
        return virtio_gpu_ops.ui_info(&vvga->vdev, idx, info);
        return virtio_gpu_ops.ui_info(g, idx, info);
    }
    return -1;
}

static void virtio_vga_gl_block(void *opaque, bool block)
static void virtio_vga_base_gl_block(void *opaque, bool block)
{
    VirtIOVGA *vvga = opaque;
    VirtIOVGABase *vvga = opaque;
    VirtIOGPUBase *g = vvga->vgpu;

    if (virtio_gpu_ops.gl_block) {
        virtio_gpu_ops.gl_block(&vvga->vdev, block);
        virtio_gpu_ops.gl_block(g, block);
    }
}

static const GraphicHwOps virtio_vga_ops = {
    .invalidate = virtio_vga_invalidate_display,
    .gfx_update = virtio_vga_update_display,
    .text_update = virtio_vga_text_update,
    .ui_info = virtio_vga_ui_info,
    .gl_block = virtio_vga_gl_block,
static const GraphicHwOps virtio_vga_base_ops = {
    .invalidate = virtio_vga_base_invalidate_display,
    .gfx_update = virtio_vga_base_update_display,
    .text_update = virtio_vga_base_text_update,
    .ui_info = virtio_vga_base_ui_info,
    .gl_block = virtio_vga_base_gl_block,
};

static const VMStateDescription vmstate_virtio_vga = {
static const VMStateDescription vmstate_virtio_vga_base = {
    .name = "virtio-vga",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        /* no pci stuff here, saving the virtio device will handle that */
        VMSTATE_STRUCT(vga, VirtIOVGA, 0, vmstate_vga_common, VGACommonState),
        VMSTATE_STRUCT(vga, VirtIOVGABase, 0,
                       vmstate_vga_common, VGACommonState),
        VMSTATE_END_OF_LIST()
    }
};

/* VGA device wrapper around PCI device around virtio GPU */
static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
static void virtio_vga_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOVGA *vvga = VIRTIO_VGA(vpci_dev);
    VirtIOGPU *g = &vvga->vdev;
    VirtIOVGABase *vvga = VIRTIO_VGA_BASE(vpci_dev);
    VirtIOGPUBase *g = vvga->vgpu;
    VGACommonState *vga = &vvga->vga;
    Error *err = NULL;
    uint32_t offset;

@@ -168,7 +150,7 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
                               vvga->vga_mrs, true, false);

    vga->con = g->scanout[0].con;
    graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga);
    graphic_console_set_hwops(vga->con, &virtio_vga_base_ops, vvga);

    for (i = 0; i < g->conf.max_outputs; i++) {
        object_property_set_link(OBJECT(g->scanout[i].con),

@@ -177,10 +159,10 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
    }
}

static void virtio_vga_reset(DeviceState *dev)
static void virtio_vga_base_reset(DeviceState *dev)
{
    VirtIOVGAClass *klass = VIRTIO_VGA_GET_CLASS(dev);
    VirtIOVGA *vvga = VIRTIO_VGA(dev);
    VirtIOVGABaseClass *klass = VIRTIO_VGA_BASE_GET_CLASS(dev);
    VirtIOVGABase *vvga = VIRTIO_VGA_BASE(dev);

    /* reset virtio-gpu */
    klass->parent_reset(dev);

@@ -190,48 +172,70 @@ static void virtio_vga_reset(DeviceState *dev)
    vga_dirty_log_start(&vvga->vga);
}

static Property virtio_vga_properties[] = {
static Property virtio_vga_base_properties[] = {
    DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_vga_class_init(ObjectClass *klass, void *data)
static void virtio_vga_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    VirtIOVGAClass *v = VIRTIO_VGA_CLASS(klass);
    VirtIOVGABaseClass *v = VIRTIO_VGA_BASE_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_vga_properties;
    dc->vmsd = &vmstate_virtio_vga;
    dc->props = virtio_vga_base_properties;
    dc->vmsd = &vmstate_virtio_vga_base;
    dc->hotpluggable = false;
    device_class_set_parent_reset(dc, virtio_vga_reset,
    device_class_set_parent_reset(dc, virtio_vga_base_reset,
                                  &v->parent_reset);

    k->realize = virtio_vga_realize;
    k->realize = virtio_vga_base_realize;
    pcidev_k->romfile = "vgabios-virtio.bin";
    pcidev_k->class_id = PCI_CLASS_DISPLAY_VGA;
}

static TypeInfo virtio_vga_base_info = {
    .name = TYPE_VIRTIO_VGA_BASE,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(struct VirtIOVGABase),
    .class_size = sizeof(struct VirtIOVGABaseClass),
    .class_init = virtio_vga_base_class_init,
    .abstract = true,
};

#define TYPE_VIRTIO_VGA "virtio-vga"

#define VIRTIO_VGA(obj) \
        OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)

typedef struct VirtIOVGA {
    VirtIOVGABase parent_obj;

    VirtIOGPU vdev;
} VirtIOVGA;

static void virtio_vga_inst_initfn(Object *obj)
{
    VirtIOVGA *dev = VIRTIO_VGA(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_GPU);
    VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}


static VirtioPCIDeviceTypeInfo virtio_vga_info = {
    .generic_name = TYPE_VIRTIO_VGA,
    .parent = TYPE_VIRTIO_VGA_BASE,
    .instance_size = sizeof(struct VirtIOVGA),
    .instance_init = virtio_vga_inst_initfn,
    .class_size = sizeof(struct VirtIOVGAClass),
    .class_init = virtio_vga_class_init,
};

static void virtio_vga_register_types(void)
{
    type_register_static(&virtio_vga_base_info);
    virtio_pci_types_register(&virtio_vga_info);
}

@@ -0,0 +1,32 @@
#ifndef VIRTIO_VGA_H_
#define VIRTIO_VGA_H_

#include "hw/virtio/virtio-gpu-pci.h"
#include "vga_int.h"

/*
 * virtio-vga-base: This extends VirtioPCIProxy.
 */
#define TYPE_VIRTIO_VGA_BASE "virtio-vga-base"
#define VIRTIO_VGA_BASE(obj) \
    OBJECT_CHECK(VirtIOVGABase, (obj), TYPE_VIRTIO_VGA_BASE)
#define VIRTIO_VGA_BASE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(VirtIOVGABaseClass, obj, TYPE_VIRTIO_VGA_BASE)
#define VIRTIO_VGA_BASE_CLASS(klass) \
    OBJECT_CLASS_CHECK(VirtIOVGABaseClass, klass, TYPE_VIRTIO_VGA_BASE)

typedef struct VirtIOVGABase {
    VirtIOPCIProxy parent_obj;

    VirtIOGPUBase *vgpu;
    VGACommonState vga;
    MemoryRegion vga_mrs[3];
} VirtIOVGABase;

typedef struct VirtIOVGABaseClass {
    VirtioPCIClass parent_class;

    DeviceReset parent_reset;
} VirtIOVGABaseClass;

#endif /* VIRTIO_VGA_H_ */

@@ -96,6 +96,7 @@ typedef enum VhostUserRequest {
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_MAX
} VhostUserRequest;

@@ -353,6 +354,16 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
    return 0;
}

int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{

@@ -170,4 +170,6 @@ int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *imsg);

int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd);

#endif /* VHOST_BACKEND_H */
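
Note: VHOST_USER_GPU_SET_SOCKET hands the vhost-user backend one end of a dedicated socket used for GPU/display messages, alongside the regular vhost-user channel. A minimal sketch of how a device might wire this up follows; it assumes a QEMU build context, and everything except vhost_user_gpu_set_socket() itself is illustrative.

/* Sketch: create a socket pair and give one end to the vhost-user backend. */
#include <sys/socket.h>
#include <unistd.h>

static int example_gpu_connect(struct vhost_dev *dev)
{
    int sv[2];

    if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) < 0) {
        return -1;
    }
    /* the backend end travels in a VHOST_USER_GPU_SET_SOCKET message */
    if (vhost_user_gpu_set_socket(dev, sv[1]) < 0) {
        close(sv[0]);
        close(sv[1]);
        return -1;
    }
    close(sv[1]);   /* the message carried its own copy of the fd */
    return sv[0];   /* caller keeps this end for GPU protocol messages */
}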

@@ -0,0 +1,61 @@
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_VIRTIO_GPU_BSWAP_H
#define HW_VIRTIO_GPU_BSWAP_H

#include "qemu/bswap.h"

static inline void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static inline void
virtio_gpu_bswap_32(void *ptr, size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static inline void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

#endif
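
Note: these helpers convert the little-endian command structures written by the guest into host byte order; on little-endian hosts the conversions compile down to no-ops. A rough usage sketch for a TRANSFER_TO_HOST_2D command is below; the function and variable names are illustrative, only the helper and the struct come from this commit.

#include "standard-headers/linux/virtio_gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"

static void example_handle_t2d(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    /* the guest filled the structure in little-endian byte order */
    virtio_gpu_t2d_bswap(t2d);

    /* fields are now in host byte order and safe to use */
    if (t2d->r.width == 0 || t2d->r.height == 0) {
        return;
    }
    /* ... copy the t2d->r rectangle from guest memory at t2d->offset ... */
}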

@@ -0,0 +1,40 @@
/*
 * Virtio GPU PCI Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_VIRTIO_GPU_PCI_H
#define HW_VIRTIO_GPU_PCI_H

#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-gpu.h"

typedef struct VirtIOGPUPCIBase VirtIOGPUPCIBase;

/*
 * virtio-gpu-pci-base: This extends VirtioPCIProxy.
 */
#define TYPE_VIRTIO_GPU_PCI_BASE "virtio-gpu-pci-base"
#define VIRTIO_GPU_PCI_BASE(obj) \
    OBJECT_CHECK(VirtIOGPUPCIBase, (obj), TYPE_VIRTIO_GPU_PCI_BASE)

struct VirtIOGPUPCIBase {
    VirtIOPCIProxy parent_obj;
    VirtIOGPUBase *vgpu;
};

/* to share between PCI and VGA */
#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state) \
    DEFINE_PROP_BIT("ioeventfd", _state, flags, \
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
    DEFINE_PROP_UINT32("vectors", _state, nvectors, 3)

#endif /* HW_VIRTIO_GPU_PCI_H */
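
Note: virtio-gpu-pci.h gives the PCI wrappers (and the VGA variant above) a shared proxy type that only stores a pointer to the embedded VirtIOGPUBase. A concrete wrapper embeds the real device and points vgpu at it from its instance_init, in the same way virtio_vga_inst_initfn does earlier in this diff; a sketch with illustrative type and function names:

typedef struct ExampleGPUPCI {
    VirtIOGPUPCIBase parent_obj;
    VirtIOGPU vdev;                 /* the actual virtio-gpu device */
} ExampleGPUPCI;

static void example_gpu_pci_initfn(Object *obj)
{
    ExampleGPUPCI *dev = (ExampleGPUPCI *)obj;

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_GPU);
    /* let the shared wrapper code reach the embedded device */
    VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}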

@@ -0,0 +1,45 @@
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_VIRTIO_GPU_PIXMAN_H
#define HW_VIRTIO_GPU_PIXMAN_H

#include "ui/qemu-pixman.h"
#include "standard-headers/linux/virtio_gpu.h"

static inline pixman_format_code_t
virtio_gpu_get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

#endif
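
Note: virtio_gpu_get_pixman_format() maps the guest-visible VIRTIO_GPU_FORMAT_* values onto pixman format codes and returns 0 for anything it does not recognise, so resource creation can allocate a matching host image. A hedged sketch of that use follows; names other than the helper and the pixman call are illustrative.

#include "hw/virtio/virtio-gpu-pixman.h"

static pixman_image_t *example_create_image(uint32_t virtio_format,
                                            uint32_t width, uint32_t height)
{
    pixman_format_code_t pformat = virtio_gpu_get_pixman_format(virtio_format);

    if (!pformat) {
        return NULL;    /* unsupported guest format */
    }
    /* NULL bits and stride 0 let pixman allocate and size the buffer itself */
    return pixman_image_create_bits(pformat, width, height, NULL, 0);
}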

@@ -19,13 +19,24 @@
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "qemu/log.h"
#include "sysemu/vhost-user-backend.h"

#include "standard-headers/linux/virtio_gpu.h"

#define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base"
#define VIRTIO_GPU_BASE(obj) \
    OBJECT_CHECK(VirtIOGPUBase, (obj), TYPE_VIRTIO_GPU_BASE)
#define VIRTIO_GPU_BASE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(VirtIOGPUBaseClass, obj, TYPE_VIRTIO_GPU_BASE)
#define VIRTIO_GPU_BASE_CLASS(klass) \
    OBJECT_CLASS_CHECK(VirtIOGPUBaseClass, klass, TYPE_VIRTIO_GPU_BASE)

#define TYPE_VIRTIO_GPU "virtio-gpu-device"
#define VIRTIO_GPU(obj) \
    OBJECT_CHECK(VirtIOGPU, (obj), TYPE_VIRTIO_GPU)

#define TYPE_VHOST_USER_GPU "vhost-user-gpu"

#define VIRTIO_ID_GPU 16

struct virtio_gpu_simple_resource {

@@ -58,7 +69,7 @@ struct virtio_gpu_requested_state {
    int x, y;
};

enum virtio_gpu_conf_flags {
enum virtio_gpu_base_conf_flags {
    VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
    VIRTIO_GPU_FLAG_STATS_ENABLED,
    VIRTIO_GPU_FLAG_EDID_ENABLED,

@@ -71,8 +82,7 @@ enum virtio_gpu_conf_flags {
#define virtio_gpu_edid_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_EDID_ENABLED))

struct virtio_gpu_conf {
    uint64_t max_hostmem;
struct virtio_gpu_base_conf {
    uint32_t max_outputs;
    uint32_t flags;
    uint32_t xres;

@@ -88,31 +98,55 @@ struct virtio_gpu_ctrl_command {
    QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};

typedef struct VirtIOGPU {
typedef struct VirtIOGPUBase {
    VirtIODevice parent_obj;

    QEMUBH *ctrl_bh;
    QEMUBH *cursor_bh;
    Error *migration_blocker;

    struct virtio_gpu_base_conf conf;
    struct virtio_gpu_config virtio_config;

    bool use_virgl_renderer;
    int renderer_blocked;
    int enable;

    struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];

    int enabled_output_bitmask;
    struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
} VirtIOGPUBase;

typedef struct VirtIOGPUBaseClass {
    VirtioDeviceClass parent;

    void (*gl_unblock)(VirtIOGPUBase *g);
} VirtIOGPUBaseClass;

#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf) \
    DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \
    DEFINE_PROP_BIT("edid", _state, _conf.flags, \
                    VIRTIO_GPU_FLAG_EDID_ENABLED, false), \
    DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1024), \
    DEFINE_PROP_UINT32("yres", _state, _conf.yres, 768)

typedef struct VirtIOGPU {
    VirtIOGPUBase parent_obj;

    uint64_t conf_max_hostmem;

    VirtQueue *ctrl_vq;
    VirtQueue *cursor_vq;

    int enable;
    QEMUBH *ctrl_bh;
    QEMUBH *cursor_bh;

    QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
    QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
    QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;

    struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
    struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];

    struct virtio_gpu_conf conf;
    uint64_t hostmem;
    int enabled_output_bitmask;
    struct virtio_gpu_config virtio_config;

    bool use_virgl_renderer;
    bool renderer_inited;
    int renderer_blocked;
    bool renderer_reset;
    QEMUTimer *fence_poll;
    QEMUTimer *print_stats;

@@ -124,17 +158,19 @@ typedef struct VirtIOGPU {
        uint32_t req_3d;
        uint32_t bytes_3d;
    } stats;

    Error *migration_blocker;
} VirtIOGPU;

extern const GraphicHwOps virtio_gpu_ops;
typedef struct VhostUserGPU {
    VirtIOGPUBase parent_obj;

/* to share between PCI and VGA */
#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state) \
    DEFINE_PROP_BIT("ioeventfd", _state, flags, \
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
    DEFINE_PROP_UINT32("vectors", _state, nvectors, 3)
    VhostUserBackend *vhost;
    int vhost_gpu_fd; /* closed by the chardev */
    CharBackend vhost_chr;
    QemuDmaBuf dmabuf[VIRTIO_GPU_MAX_SCANOUTS];
    bool backend_blocked;
} VhostUserGPU;

extern const GraphicHwOps virtio_gpu_ops;

#define VIRTIO_GPU_FILL_CMD(out) do { \
        size_t s; \

@@ -148,6 +184,15 @@ extern const GraphicHwOps virtio_gpu_ops;
    } \
    } while (0)

/* virtio-gpu-base.c */
bool virtio_gpu_base_device_realize(DeviceState *qdev,
                                    VirtIOHandleOutput ctrl_cb,
                                    VirtIOHandleOutput cursor_cb,
                                    Error **errp);
void virtio_gpu_base_reset(VirtIOGPUBase *g);
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
                        struct virtio_gpu_resp_display_info *dpy_info);

/* virtio-gpu.c */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,

@@ -175,4 +220,5 @@ void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g);

#endif
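
Note: VirtIOGPUBaseClass exposes a single hook, gl_unblock, which the base code appears to invoke once the console unblocks GL rendering; virtio_gpu_class_init earlier in this diff installs virtio_gpu_gl_unblock there. A minimal sketch of a subclass doing the same, with illustrative subclass and callback names:

static void example_gpu_gl_unblock(VirtIOGPUBase *b)
{
    /* resume whatever command processing was deferred while blocked */
}

static void example_gpu_class_init(ObjectClass *klass, void *data)
{
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = example_gpu_gl_unblock;
}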

@@ -144,7 +144,7 @@ cc-option = $(if $(shell $(CC) $1 $2 -S -o /dev/null -xc /dev/null \
cc-c-option = $(if $(shell $(CC) $1 $2 -c -o /dev/null -xc /dev/null \
              >/dev/null 2>&1 && echo OK), $2, $3)

VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc Kconfig%
VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc Kconfig% %.json.in
set-vpath = $(if $1,$(foreach PATTERN,$(VPATH_SUFFIXES),$(eval vpath $(PATTERN) $1)))

# install-prog list, dir

@@ -392,3 +392,10 @@ TEXI2MAN = $(call quiet-command, \
	$(call TEXI2MAN)
%.8:
	$(call TEXI2MAN)

GEN_SUBST = $(call quiet-command, \
	sed -e "s!@libexecdir@!$(libexecdir)!g" < $< > $@, \
	"GEN","$@")

%.json: %.json.in
	$(call GEN_SUBST)

@@ -157,9 +157,10 @@ static void spice_app_display_early_init(DisplayOptions *opts)
    qemu_opt_set(qopts, "addr", sock_path, &error_abort);
    qemu_opt_set(qopts, "image-compression", "off", &error_abort);
    qemu_opt_set(qopts, "streaming-video", "off", &error_abort);
#ifdef CONFIG_OPENGL
    qemu_opt_set(qopts, "gl", opts->has_gl ? "on" : "off", &error_abort);
    display_opengl = opts->has_gl;

#endif
    be->u.spiceport.data->fqdn = g_strdup("org.qemu.monitor.qmp.0");
    qemu_chardev_new("org.qemu.monitor.qmp", TYPE_CHARDEV_SPICEPORT,
                     be, NULL, &error_abort);

@@ -53,7 +53,7 @@ util-obj-y += systemd.o
util-obj-y += iova-tree.o
util-obj-$(CONFIG_INOTIFY1) += filemonitor-inotify.o
util-obj-$(CONFIG_LINUX) += vfio-helpers.o
util-obj-$(CONFIG_OPENGL) += drm.o
util-obj-$(CONFIG_POSIX) += drm.o
util-obj-y += guest-random.o

stub-obj-y += filemonitor-stub.o