vhost-user: add multi queue support
Based on patch by Nikolay Nikolaev:

Vhost-user will implement the multi queue support in a similar way
to what vhost already has - a separate thread for each queue.

To enable the multi queue functionality - a new command line parameter
"queues" is introduced for the vhost-user netdev.

Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent 019a3edbb2
commit 830d70db69
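A minimal usage sketch of the new "queues" parameter; the socket path, the
chardev/netdev ids, and the virtio-net-pci multiqueue flags below are
illustrative assumptions, not part of this commit:

    qemu-system-x86_64 ... \
        -chardev socket,id=chr0,path=/tmp/vhost-user.sock \
        -netdev vhost-user,id=net0,chardev=chr0,queues=2 \
        -device virtio-net-pci,netdev=net0,mq=on,vectors=6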
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -127,6 +127,11 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
+Multi queue support
+-------------------
+The protocol supports multiple queues by setting all index fields in the sent
+messages to a properly calculated value.
+
 Message types
 -------------
 
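As a reading aid, a compilable C sketch of what "properly calculated value"
means here, pieced together from the hw/virtio/vhost-user.c hunks below; the
struct and function names are illustrative, not QEMU API:

    /* Illustrative stand-in for the vhost device state involved. */
    struct dev_view {
        unsigned vq_index;   /* first ring index owned by this device */
    };

    /* The master offsets every device-relative ring index by vq_index
     * before placing it in a message, so each queue's messages address
     * a distinct ring from the slave's point of view. */
    static unsigned calc_ring_index(const struct dev_view *dev, unsigned ring)
    {
        return ring + dev->vq_index;
    }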
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -157,6 +157,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
 
     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
+    net->dev.vq_index = net->nc->queue_index;
 
     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type, options->force);
@@ -267,7 +268,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
             const VhostOps *vhost_ops = net->dev.vhost_ops;
             int r = vhost_ops->vhost_call(&net->dev, VHOST_RESET_OWNER,
-                                          NULL);
+                                          &file);
             assert(r >= 0);
         }
     }
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -210,7 +210,12 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
         break;
 
     case VHOST_SET_OWNER:
+        break;
+
     case VHOST_RESET_OWNER:
+        memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
+        msg.size = sizeof(m.state);
         break;
 
     case VHOST_SET_MEM_TABLE:
@@ -253,17 +258,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
     case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;
 
     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;
 
     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
+        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;
 
@@ -271,7 +279,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
@@ -313,6 +321,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
             error_report("Received bad msg size.");
             return -1;
         }
+        msg.state.index -= dev->vq_index;
         memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
         break;
     default:
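A hedged sketch of the index arithmetic these vhost_user_call hunks implement:
device-relative ring indexes are globalized by adding vq_index on the send side
(masked into the u64 payload for VHOST_SET_VRING_CALL/ERR), and the offset is
subtracted again on the GET_VRING_BASE reply so callers keep seeing
device-relative values. The 0xff mask mirrors QEMU's VHOST_USER_VRING_IDX_MASK;
treat the exact value as an assumption:

    #include <assert.h>
    #include <stdint.h>

    #define VRING_IDX_MASK 0xffu  /* assumed equal to VHOST_USER_VRING_IDX_MASK */

    /* Send side: device-relative ring index -> global index on the wire. */
    static uint64_t encode_ring_index(unsigned ring, unsigned vq_index)
    {
        return (ring + vq_index) & VRING_IDX_MASK;
    }

    /* GET_VRING_BASE reply: global index -> device-relative again. */
    static unsigned decode_ring_index(unsigned wire, unsigned vq_index)
    {
        return wire - vq_index;
    }

    int main(void)
    {
        unsigned vq_index = 1;  /* device handling the second queue */
        uint64_t wire = encode_ring_index(0, vq_index);
        assert(decode_ring_index((unsigned)wire, vq_index) == 0);
        return 0;
    }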
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -121,35 +121,39 @@ static void net_vhost_user_event(void *opaque, int event)
     case CHR_EVENT_OPENED:
         vhost_user_start(s);
         net_vhost_link_down(s, false);
-        error_report("chardev \"%s\" went up", s->chr->label);
+        error_report("chardev \"%s\" went up", s->nc.info_str);
         break;
     case CHR_EVENT_CLOSED:
         net_vhost_link_down(s, true);
         vhost_user_stop(s);
-        error_report("chardev \"%s\" went down", s->chr->label);
+        error_report("chardev \"%s\" went down", s->nc.info_str);
         break;
     }
 }
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                               const char *name, CharDriverState *chr)
+                               const char *name, CharDriverState *chr,
+                               uint32_t queues)
 {
     NetClientState *nc;
     VhostUserState *s;
+    int i;
 
-    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    for (i = 0; i < queues; i++) {
+        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
 
-    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
-             chr->label);
+        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
+                 i, chr->label);
 
-    s = DO_UPCAST(VhostUserState, nc, nc);
+        s = DO_UPCAST(VhostUserState, nc, nc);
 
-    /* We don't provide a receive callback */
-    s->nc.receive_disabled = 1;
-    s->chr = chr;
-
-    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+        /* We don't provide a receive callback */
+        s->nc.receive_disabled = 1;
+        s->chr = chr;
+        s->nc.queue_index = i;
+
+        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+    }
     return 0;
 }
@@ -226,6 +230,7 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer, Error **errp)
 {
     /* FIXME error_setg(errp, ...) on failure */
+    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;
 
@@ -244,6 +249,12 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         return -1;
     }
 
+    /* number of queues for multiqueue */
+    if (vhost_user_opts->has_queues) {
+        queues = vhost_user_opts->queues;
+    } else {
+        queues = 1;
+    }
+
-    return net_vhost_user_init(peer, "vhost_user", name, chr);
+    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
 }
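Tying the pieces together, the queue count flows from the option into per-queue
net clients and from there into each vhost device's ring offset; a comment-only
summary assembled from the hunks in this commit:

    /*
     * net_init_vhost_user()       queues = vhost_user_opts->queues (default 1)
     *   -> net_vhost_user_init()  one NetClientState per queue:
     *          s->nc.queue_index = i;
     *   -> vhost_net_init()       one vhost_net per net client:
     *          net->dev.vq_index = net->nc->queue_index;
     *   -> vhost_user_call()      device-relative ring index -> global:
     *          msg.state.index += dev->vq_index;
     */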
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2444,12 +2444,16 @@
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
+# @queues: #optional number of queues to be created for multiqueue vhost-user
+#          (default: 1) (Since 2.4)
+#
 # Since 2.1
 ##
 { 'struct': 'NetdevVhostUserOptions',
   'data': {
     'chardev': 'str',
-    '*vhostforce': 'bool' } }
+    '*vhostforce': 'bool',
+    '*queues': 'uint32' } }
 
 ##
 # @NetClientOptions
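For reference, a sketch of the C struct the QAPI generator derives from this
schema entry; the has_queues flag is how net_init_vhost_user() above can tell
an omitted "queues" from an explicit value (field layout abridged and assumed,
not copied from generated sources):

    typedef struct NetdevVhostUserOptions {
        char *chardev;
        bool has_vhostforce;
        bool vhostforce;
        bool has_queues;   /* true iff the user supplied "queues" */
        uint32_t queues;
    } NetdevVhostUserOptions;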
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1955,13 +1955,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev. @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
 
 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}.
+@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
+be created for multiqueue vhost-user.
 
 Example:
 @example