-----BEGIN PGP SIGNATURE-----

Version: GnuPG v1
 
 iQEcBAABAgAGBQJjMqL9AAoJEO8Ells5jWIRUKcH/iNuJpxMXG18pGteBiTu3/ut
 KRR9u1nLZZXA2/02NTOYnrrHcplFQkEBXNHaEintWfctKHIP/llY8LDVriDFM+6N
 4PzwLGLe7R9S7rfgt/xMDY0nFESFph5XyVTCxAAUm3Exhm8vIg1FM8Tep8lW/taW
 pliDa0K/9pQAfIN+eCnMUtH2JUttak8RwvAg5rXBg7XUx48ZTQn1o7aYYTPOAC2v
 RWkX0BKc7FVK5maAhe6Ugrcf6v4R2mDIAvnr+Anvo67SfgFZ5MtCllr0liJ4h3Vd
 +/PlsBDJotvht3QZVva1tn1Jk5rhN8N8lZbVOuMsklU/tX3Xrj99HJNETLXks2k=
 =82t6
 -----END PGP SIGNATURE-----

Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging

# -----BEGIN PGP SIGNATURE-----
# Version: GnuPG v1
#
# iQEcBAABAgAGBQJjMqL9AAoJEO8Ells5jWIRUKcH/iNuJpxMXG18pGteBiTu3/ut
# KRR9u1nLZZXA2/02NTOYnrrHcplFQkEBXNHaEintWfctKHIP/llY8LDVriDFM+6N
# 4PzwLGLe7R9S7rfgt/xMDY0nFESFph5XyVTCxAAUm3Exhm8vIg1FM8Tep8lW/taW
# pliDa0K/9pQAfIN+eCnMUtH2JUttak8RwvAg5rXBg7XUx48ZTQn1o7aYYTPOAC2v
# RWkX0BKc7FVK5maAhe6Ugrcf6v4R2mDIAvnr+Anvo67SfgFZ5MtCllr0liJ4h3Vd
# +/PlsBDJotvht3QZVva1tn1Jk5rhN8N8lZbVOuMsklU/tX3Xrj99HJNETLXks2k=
# =82t6
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 27 Sep 2022 03:15:09 EDT
# gpg:                using RSA key EF04965B398D6211
# gpg: Good signature from "Jason Wang (Jason Wang on RedHat) <jasowang@redhat.com>" [full]
# Primary key fingerprint: 215D 46F4 8246 689E C77F  3562 EF04 965B 398D 6211

* tag 'net-pull-request' of https://github.com/jasowang/qemu:
  virtio: del net client if net_init_tap_one failed
  vdpa: Allow MQ feature in SVQ
  virtio-net: Update virtio-net curr_queue_pairs in vdpa backends
  vdpa: validate MQ CVQ commands
  vdpa: Add vhost_vdpa_net_load_mq
  vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load
  vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type
  e1000e: set RX desc status with DD flag in a separate operation

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
Stefan Hajnoczi 2022-09-27 11:08:36 -04:00
commit dbc4f48b5a
4 changed files with 158 additions and 51 deletions

View File

@ -1364,6 +1364,57 @@ struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info,
} }
} }
/*
 * DMA-write an RX descriptor back to guest memory.
 *
 * If the descriptor's DD (Descriptor Done) status bit is set, the
 * descriptor is first written with DD cleared, and the status field is
 * then updated by a second, separate DMA write that sets DD.  This keeps
 * the guest from observing DD=1 before the rest of the descriptor
 * contents have been written (see commit subject: "set RX desc status
 * with DD flag in a separate operation").
 *
 * @core: device state (selects descriptor layout and owning PCI device)
 * @addr: guest physical address of the descriptor
 * @desc: host copy of the descriptor to write back
 * @len:  descriptor length in bytes
 */
static inline void
e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr,
uint8_t *desc, dma_addr_t len)
{
PCIDevice *dev = core->owner;
/* Three descriptor layouts: legacy, packet-split, extended. */
if (e1000e_rx_use_legacy_descriptor(core)) {
struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
size_t offset = offsetof(struct e1000_rx_desc, status);
uint8_t status = d->status;
/* Write the descriptor with DD cleared first. */
d->status &= ~E1000_RXD_STAT_DD;
pci_dma_write(dev, addr, desc, len);
if (status & E1000_RXD_STAT_DD) {
/* Publish DD via a separate write of just the status byte. */
d->status = status;
pci_dma_write(dev, addr + offset, &status, sizeof(status));
}
} else {
if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
/* Packet-split layout: status lives in wb.middle. */
union e1000_rx_desc_packet_split *d =
(union e1000_rx_desc_packet_split *) desc;
size_t offset = offsetof(union e1000_rx_desc_packet_split,
wb.middle.status_error);
uint32_t status = d->wb.middle.status_error;
d->wb.middle.status_error &= ~E1000_RXD_STAT_DD;
pci_dma_write(dev, addr, desc, len);
if (status & E1000_RXD_STAT_DD) {
/* Same two-step DD publication as the legacy path. */
d->wb.middle.status_error = status;
pci_dma_write(dev, addr + offset, &status, sizeof(status));
}
} else {
/* Extended layout: status lives in wb.upper. */
union e1000_rx_desc_extended *d =
(union e1000_rx_desc_extended *) desc;
size_t offset = offsetof(union e1000_rx_desc_extended,
wb.upper.status_error);
uint32_t status = d->wb.upper.status_error;
d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
pci_dma_write(dev, addr, desc, len);
if (status & E1000_RXD_STAT_DD) {
d->wb.upper.status_error = status;
pci_dma_write(dev, addr + offset, &status, sizeof(status));
}
}
}
}
typedef struct e1000e_ba_state_st { typedef struct e1000e_ba_state_st {
uint16_t written[MAX_PS_BUFFERS]; uint16_t written[MAX_PS_BUFFERS];
uint8_t cur_idx; uint8_t cur_idx;
@ -1600,7 +1651,7 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL, e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL,
rss_info, do_ps ? ps_hdr_len : 0, &bastate.written); rss_info, do_ps ? ps_hdr_len : 0, &bastate.written);
pci_dma_write(d, base, &desc, core->rx_desc_len); e1000e_pci_dma_write_rx_desc(core, base, desc, core->rx_desc_len);
e1000e_ring_advance(core, rxi, e1000e_ring_advance(core, rxi,
core->rx_desc_len / E1000_MIN_RX_DESC_LEN); core->rx_desc_len / E1000_MIN_RX_DESC_LEN);

View File

@ -1412,19 +1412,14 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
return VIRTIO_NET_ERR; return VIRTIO_NET_ERR;
} }
/* Avoid changing the number of queue_pairs for vdpa device in
* userspace handler. A future fix is needed to handle the mq
* change in userspace handler with vhost-vdpa. Let's disable
* the mq handling from userspace for now and only allow get
* done through the kernel. Ripples may be seen when falling
* back to userspace, but without doing it qemu process would
* crash on a recursive entry to virtio_net_set_status().
*/
if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
return VIRTIO_NET_ERR;
}
n->curr_queue_pairs = queue_pairs; n->curr_queue_pairs = queue_pairs;
if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
/*
* Avoid updating the backend for a vdpa device: We're only interested
* in updating the device model queues.
*/
return VIRTIO_NET_OK;
}
/* stop the backend before changing the number of queue_pairs to avoid handling a /* stop the backend before changing the number of queue_pairs to avoid handling a
* disabled queue */ * disabled queue */
virtio_net_set_status(vdev, vdev->status); virtio_net_set_status(vdev, vdev->status);

View File

@ -686,7 +686,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
tap_set_sndbuf(s->fd, tap, &err); tap_set_sndbuf(s->fd, tap, &err);
if (err) { if (err) {
error_propagate(errp, err); error_propagate(errp, err);
return; goto failed;
} }
if (tap->has_fd || tap->has_fds) { if (tap->has_fd || tap->has_fds) {
@ -726,12 +726,12 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
} else { } else {
warn_report_err(err); warn_report_err(err);
} }
return; goto failed;
} }
if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) {
error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", error_setg_errno(errp, errno, "%s: Can't use file descriptor %d",
name, fd); name, fd);
return; goto failed;
} }
} else { } else {
vhostfd = open("/dev/vhost-net", O_RDWR); vhostfd = open("/dev/vhost-net", O_RDWR);
@ -743,11 +743,11 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
warn_report("tap: open vhost char device failed: %s", warn_report("tap: open vhost char device failed: %s",
strerror(errno)); strerror(errno));
} }
return; goto failed;
} }
if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) {
error_setg_errno(errp, errno, "Failed to set FD nonblocking"); error_setg_errno(errp, errno, "Failed to set FD nonblocking");
return; goto failed;
} }
} }
options.opaque = (void *)(uintptr_t)vhostfd; options.opaque = (void *)(uintptr_t)vhostfd;
@ -760,11 +760,17 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
} else { } else {
warn_report(VHOST_NET_INIT_FAILED); warn_report(VHOST_NET_INIT_FAILED);
} }
return; goto failed;
} }
} else if (vhostfdname) { } else if (vhostfdname) {
error_setg(errp, "vhostfd(s)= is not valid without vhost"); error_setg(errp, "vhostfd(s)= is not valid without vhost");
goto failed;
} }
return;
failed:
qemu_del_net_client(&s->nc);
} }
static int get_fds(char *str, char *fds[], int max) static int get_fds(char *str, char *fds[], int max)

View File

@ -35,7 +35,9 @@ typedef struct VhostVDPAState {
VHostNetState *vhost_net; VHostNetState *vhost_net;
/* Control commands shadow buffers */ /* Control commands shadow buffers */
void *cvq_cmd_out_buffer, *cvq_cmd_in_buffer; void *cvq_cmd_out_buffer;
virtio_net_ctrl_ack *status;
bool started; bool started;
} VhostVDPAState; } VhostVDPAState;
@ -92,6 +94,7 @@ static const uint64_t vdpa_svq_device_features =
BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
BIT_ULL(VIRTIO_NET_F_STATUS) | BIT_ULL(VIRTIO_NET_F_STATUS) |
BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
BIT_ULL(VIRTIO_NET_F_MQ) |
BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
@ -158,7 +161,7 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
struct vhost_dev *dev = &s->vhost_net->dev; struct vhost_dev *dev = &s->vhost_net->dev;
qemu_vfree(s->cvq_cmd_out_buffer); qemu_vfree(s->cvq_cmd_out_buffer);
qemu_vfree(s->cvq_cmd_in_buffer); qemu_vfree(s->status);
if (dev->vq_index + dev->nvqs == dev->vq_index_end) { if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
} }
@ -310,7 +313,7 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
return r; return r;
} }
r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer, r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
vhost_vdpa_net_cvq_cmd_page_len(), true); vhost_vdpa_net_cvq_cmd_page_len(), true);
if (unlikely(r < 0)) { if (unlikely(r < 0)) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
@ -327,7 +330,7 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
if (s->vhost_vdpa.shadow_vqs_enabled) { if (s->vhost_vdpa.shadow_vqs_enabled) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer); vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
} }
} }
@ -340,7 +343,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
.iov_len = out_len, .iov_len = out_len,
}; };
const struct iovec in = { const struct iovec in = {
.iov_base = s->cvq_cmd_in_buffer, .iov_base = s->status,
.iov_len = sizeof(virtio_net_ctrl_ack), .iov_len = sizeof(virtio_net_ctrl_ack),
}; };
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
@ -363,12 +366,69 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
return vhost_svq_poll(svq); return vhost_svq_poll(svq);
} }
/*
 * Compose a control-virtqueue command (header followed by payload) in the
 * shadow out buffer and submit it to the device.
 *
 * @s:         vhost-vdpa net client state owning the shadow buffers
 * @class:     virtio-net control command class
 * @cmd:       virtio-net control command
 * @data:      command payload
 * @data_size: payload length in bytes
 *
 * Returns the value of vhost_vdpa_net_cvq_add() (negative on failure).
 */
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    struct virtio_net_ctrl_hdr hdr;
    uint8_t *cursor = s->cvq_cmd_out_buffer;

    hdr.class = class;
    hdr.cmd = cmd;

    /* Header plus payload must fit in a single shadow-buffer page. */
    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(hdr));

    memcpy(cursor, &hdr, sizeof(hdr));
    cursor += sizeof(hdr);
    memcpy(cursor, data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(hdr) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}
/*
 * Restore the device MAC address via the control virtqueue, if the guest
 * negotiated VIRTIO_NET_F_CTRL_MAC_ADDR.
 *
 * Returns 0 on success (or when the feature was not negotiated), a
 * negative errno-style value if submitting the command failed, or a
 * positive value if the device did not acknowledge it.
 */
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    ssize_t dev_written;

    /* Nothing to restore unless the guest negotiated CTRL_MAC_ADDR. */
    if (!(n->parent_obj.guest_features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR))) {
        return 0;
    }

    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                          VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                          n->mac, sizeof(n->mac));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    /* Non-zero when the device rejected the command. */
    return *s->status != VIRTIO_NET_OK;
}
/*
 * Restore the number of active queue pairs via the control virtqueue, if
 * the guest negotiated VIRTIO_NET_F_MQ.
 *
 * Returns 0 on success (or when the feature was not negotiated), a
 * negative errno-style value if submitting the command failed, or a
 * positive value if the device did not acknowledge it.
 */
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    uint64_t features = n->parent_obj.guest_features;

    if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
        /* VQ pair count travels little-endian on the control queue. */
        const struct virtio_net_ctrl_mq mq = {
            .virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                                      VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                                      &mq, sizeof(mq));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        /* Non-zero when the device rejected the command. */
        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}
static int vhost_vdpa_net_load(NetClientState *nc) static int vhost_vdpa_net_load(NetClientState *nc)
{ {
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
const struct vhost_vdpa *v = &s->vhost_vdpa; struct vhost_vdpa *v = &s->vhost_vdpa;
const VirtIONet *n; const VirtIONet *n;
uint64_t features; int r;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
@ -377,26 +437,13 @@ static int vhost_vdpa_net_load(NetClientState *nc)
} }
n = VIRTIO_NET(v->dev->vdev); n = VIRTIO_NET(v->dev->vdev);
features = n->parent_obj.guest_features; r = vhost_vdpa_net_load_mac(s, n);
if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) { if (unlikely(r < 0)) {
const struct virtio_net_ctrl_hdr ctrl = { return r;
.class = VIRTIO_NET_CTRL_MAC, }
.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET, r = vhost_vdpa_net_load_mq(s, n);
}; if (unlikely(r)) {
char *cursor = s->cvq_cmd_out_buffer; return r;
ssize_t dev_written;
memcpy(cursor, &ctrl, sizeof(ctrl));
cursor += sizeof(ctrl);
memcpy(cursor, n->mac, sizeof(n->mac));
dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + sizeof(n->mac),
sizeof(virtio_net_ctrl_ack));
if (unlikely(dev_written < 0)) {
return dev_written;
}
return *((virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer) != VIRTIO_NET_OK;
} }
return 0; return 0;
@ -440,6 +487,15 @@ static bool vhost_vdpa_net_cvq_validate_cmd(const void *out_buf, size_t len)
__func__, ctrl.cmd); __func__, ctrl.cmd);
}; };
break; break;
case VIRTIO_NET_CTRL_MQ:
switch (ctrl.cmd) {
case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
return true;
default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid mq cmd %u\n",
__func__, ctrl.cmd);
};
break;
default: default:
qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid control class %u\n", qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid control class %u\n",
__func__, ctrl.class); __func__, ctrl.class);
@ -491,8 +547,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
goto out; goto out;
} }
memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status)); if (*s->status != VIRTIO_NET_OK) {
if (status != VIRTIO_NET_OK) {
return VIRTIO_NET_ERR; return VIRTIO_NET_ERR;
} }
@ -549,9 +604,9 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(), s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
vhost_vdpa_net_cvq_cmd_page_len()); vhost_vdpa_net_cvq_cmd_page_len());
memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
s->cvq_cmd_in_buffer = qemu_memalign(qemu_real_host_page_size(), s->status = qemu_memalign(qemu_real_host_page_size(),
vhost_vdpa_net_cvq_cmd_page_len()); vhost_vdpa_net_cvq_cmd_page_len());
memset(s->cvq_cmd_in_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
s->vhost_vdpa.shadow_vq_ops_opaque = s; s->vhost_vdpa.shadow_vq_ops_opaque = s;