Merge tag 'pull-nbd-2023-09-25' of https://repo.or.cz/qemu/ericb into staging

NBD patches through 2023-09-25

- Denis V. Lunev: iotest improvements
- Eric Blake: further work towards 64-bit NBD extensions

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEccLMIrHEYCkn0vOqp6FrSiUnQ2oFAmUR2MUACgkQp6FrSiUn
# Q2q6jAf+PT65XzMAhgKvu1vIeMSQqyCocNB2MCOzNp+46uB9bNbPPLQSH2EX+t6p
# kQfHyHUl4YMi0EqgCfodiewlaUKeMxP3cPWMGYaYZ16uNMOIYL1boreDAcM25rb5
# P3TV3DAWTWSclUxrkTC2DxAIBPgsPsGG/2daqOMDEdinxlIywCMJDEIHc9gwwd/t
# 7laz9V1cOW9NbQXrM7eTofJKPKIeqZ+w0kvqrf9HBvZl9CqwHADi7xoz9xP+fN+f
# 713ED/hwt0FIlixtIm2/8vu7nn09cu6m9NaKsMOomsYg9Z6wU3ctivViG5NLq3MD
# OOUu51dV8gRRAXAFU5vKb0d93D27zQ==
# =Ik02
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 25 Sep 2023 15:00:21 EDT
# gpg:                using RSA key 71C2CC22B1C4602927D2F3AAA7A16B4A2527436A
# gpg: Good signature from "Eric Blake <eblake@redhat.com>" [full]
# gpg:                 aka "Eric Blake (Free Software Programmer) <ebb9@byu.net>" [full]
# gpg:                 aka "[jpeg image of size 6874]" [full]
# Primary key fingerprint: 71C2 CC22 B1C4 6029 27D2  F3AA A7A1 6B4A 2527 436A

* tag 'pull-nbd-2023-09-25' of https://repo.or.cz/qemu/ericb:
  nbd/server: Refactor handling of command sanity checks
  nbd: Prepare for 64-bit request effect lengths
  nbd: Add types for extended headers
  nbd/client: Pass mode through to nbd_send_request
  nbd: Replace bool structured_reply with mode enum
  iotests: improve 'not run' message for nbd-multiconn test
  iotests: use TEST_IMG_FILE instead of TEST_IMG in _require_large_file

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2023-09-26 09:04:23 -04:00
commit 11a629d246
12 changed files with 281 additions and 151 deletions


@ -339,7 +339,7 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
* We have connected, but must fail for other reasons.
* Send NBD_CMD_DISC as a courtesy to the server.
*/
NBDRequest request = { .type = NBD_CMD_DISC };
NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };
nbd_send_request(s->ioc, &request);
@ -463,7 +463,8 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie)
nbd_channel_error(s, ret);
return ret;
}
if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
if (nbd_reply_is_structured(&s->reply) &&
s->info.mode < NBD_MODE_STRUCTURED) {
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}
@ -519,6 +520,7 @@ nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
qemu_co_mutex_lock(&s->send_mutex);
request->cookie = INDEX_TO_COOKIE(i);
request->mode = s->info.mode;
assert(s->ioc);
@ -608,7 +610,7 @@ static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
NBDStructuredReplyChunk *chunk,
uint8_t *payload, uint64_t orig_length,
NBDExtent *extent, Error **errp)
NBDExtent32 *extent, Error **errp)
{
uint32_t context_id;
@ -866,7 +868,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
}
/* handle structured reply chunk */
assert(s->info.structured_reply);
assert(s->info.mode >= NBD_MODE_STRUCTURED);
chunk = &s->reply.structured;
if (chunk->type == NBD_REPLY_TYPE_NONE) {
@ -1070,7 +1072,8 @@ nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
void *payload = NULL;
Error *local_err = NULL;
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, s->info.structured_reply,
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
s->info.mode >= NBD_MODE_STRUCTURED,
qiov, &reply, &payload)
{
int ret;
@ -1115,7 +1118,7 @@ nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
uint64_t length, NBDExtent *extent,
uint64_t length, NBDExtent32 *extent,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;
@ -1302,10 +1305,11 @@ nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
NBDRequest request = {
.type = NBD_CMD_WRITE_ZEROES,
.from = offset,
.len = bytes, /* .len is uint32_t actually */
.len = bytes,
};
assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */
/* rely on max_pwrite_zeroes */
assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
@ -1352,10 +1356,11 @@ nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
NBDRequest request = {
.type = NBD_CMD_TRIM,
.from = offset,
.len = bytes, /* len is uint32_t */
.len = bytes,
};
assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */
/* rely on max_pdiscard */
assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
@ -1370,15 +1375,14 @@ static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
int ret, request_ret;
NBDExtent extent = { 0 };
NBDExtent32 extent = { 0 };
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
Error *local_err = NULL;
NBDRequest request = {
.type = NBD_CMD_BLOCK_STATUS,
.from = offset,
.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
MIN(bytes, s->info.size - offset)),
.len = MIN(bytes, s->info.size - offset),
.flags = NBD_CMD_FLAG_REQ_ONE,
};
@ -1388,6 +1392,10 @@ static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
*file = bs;
return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}
if (s->info.mode < NBD_MODE_EXTENDED) {
request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
request.len);
}
/*
* Work around the fact that the block layer doesn't do
@ -1463,7 +1471,7 @@ static void nbd_yank(void *opaque)
static void nbd_client_close(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = { .type = NBD_CMD_DISC };
NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };
if (s->ioc) {
nbd_send_request(s->ioc, &request);
@ -1952,6 +1960,14 @@ static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.max_pwrite_zeroes = max;
bs->bl.max_transfer = max;
/*
* Assume that if the server supports extended headers, it also
* supports unlimited size zero and trim commands.
*/
if (s->info.mode >= NBD_MODE_EXTENDED) {
bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
}
if (s->info.opt_block &&
s->info.opt_block > bs->bl.opt_transfer) {
bs->bl.opt_transfer = s->info.opt_block;


@ -167,7 +167,7 @@ iscsi_xcopy(void *src_lun, uint64_t src_off, void *dst_lun, uint64_t dst_off, ui
nbd_parse_blockstatus_compliance(const char *err) "ignoring extra data from non-compliant server: %s"
nbd_structured_read_compliance(const char *type) "server sent non-compliant unaligned read %s chunk"
nbd_read_reply_entry_fail(int ret, const char *err) "ret = %d, err: %s"
nbd_co_request_fail(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name, int ret, const char *err) "Request failed { .from = %" PRIu64", .len = %" PRIu32 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s"
nbd_co_request_fail(uint64_t from, uint64_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name, int ret, const char *err) "Request failed { .from = %" PRIu64", .len = %" PRIu64 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s"
nbd_client_handshake(const char *export_name) "export '%s'"
nbd_client_handshake_success(const char *export_name) "export '%s'"
nbd_reconnect_attempt(unsigned in_flight) "in_flight %u"


@ -60,20 +60,22 @@ typedef enum NBDMode {
NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
NBD_MODE_SIMPLE, /* newstyle but only simple replies */
NBD_MODE_STRUCTURED, /* newstyle, structured replies enabled */
/* TODO add NBD_MODE_EXTENDED */
NBD_MODE_EXTENDED, /* newstyle, extended headers enabled */
} NBDMode;
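
The values of this enum are deliberately ordered from least to most capable, which is what lets the surrounding hunks replace boolean checks such as structured_reply with ordered comparisons like mode >= NBD_MODE_STRUCTURED. A minimal standalone sketch of that gating pattern, assuming only the enum shown above (the helper functions are illustrative, not QEMU code):

#include <stdbool.h>

typedef enum NBDMode {
    NBD_MODE_OLDSTYLE,      /* oldstyle negotiation */
    NBD_MODE_EXPORT_NAME,   /* newstyle but only OPT_EXPORT_NAME safe */
    NBD_MODE_SIMPLE,        /* newstyle but only simple replies */
    NBD_MODE_STRUCTURED,    /* newstyle, structured replies enabled */
    NBD_MODE_EXTENDED,      /* newstyle, extended headers enabled */
} NBDMode;

/* Each mode implies every capability of the modes before it. */
static bool can_use_structured_reply(NBDMode mode)
{
    return mode >= NBD_MODE_STRUCTURED;
}

static bool can_use_64bit_effect_length(NBDMode mode)
{
    return mode >= NBD_MODE_EXTENDED;
}

The later hunks that drop client->structured_reply and info->structured_reply rely on exactly this ordering.
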
/* Transmission phase structs
*
* Note: these are _NOT_ the same as the network representation of an NBD
* request and reply!
/* Transmission phase structs */
/*
* Note: NBDRequest is _NOT_ the same as the network representation of an NBD
* request!
*/
typedef struct NBDRequest {
uint64_t cookie;
uint64_t from;
uint32_t len;
uint64_t from; /* Offset touched by the command */
uint64_t len; /* Effect length; 32 bit limit without extended headers */
uint16_t flags; /* NBD_CMD_FLAG_* */
uint16_t type; /* NBD_CMD_* */
uint16_t type; /* NBD_CMD_* */
NBDMode mode; /* Determines which network representation to use */
} NBDRequest;
typedef struct NBDSimpleReply {
@ -91,20 +93,36 @@ typedef struct NBDStructuredReplyChunk {
uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
typedef struct NBDExtendedReplyChunk {
uint32_t magic; /* NBD_EXTENDED_REPLY_MAGIC */
uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
uint16_t type; /* NBD_REPLY_TYPE_* */
uint64_t cookie; /* request handle */
uint64_t offset; /* request offset */
uint64_t length; /* length of payload */
} QEMU_PACKED NBDExtendedReplyChunk;
typedef union NBDReply {
NBDSimpleReply simple;
NBDStructuredReplyChunk structured;
NBDExtendedReplyChunk extended;
struct {
/*
* @magic and @cookie fields have the same offset and size both in
* simple reply and structured reply chunk, so let them be accessible
* without ".simple." or ".structured." specification
* @magic and @cookie fields have the same offset and size in all
* forms of replies, so let them be accessible without ".simple.",
* ".structured.", or ".extended." specifications.
*/
uint32_t magic;
uint32_t _skip;
uint64_t cookie;
} QEMU_PACKED;
};
} NBDReply;
QEMU_BUILD_BUG_ON(offsetof(NBDReply, simple.cookie) !=
offsetof(NBDReply, cookie));
QEMU_BUILD_BUG_ON(offsetof(NBDReply, structured.cookie) !=
offsetof(NBDReply, cookie));
QEMU_BUILD_BUG_ON(offsetof(NBDReply, extended.cookie) !=
offsetof(NBDReply, cookie));
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
@ -131,14 +149,34 @@ typedef struct NBDStructuredError {
typedef struct NBDStructuredMeta {
/* header's length >= 12 (at least one extent) */
uint32_t context_id;
/* extents follows */
/* NBDExtent32 extents[] follows, array length implied by header */
} QEMU_PACKED NBDStructuredMeta;
/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent {
/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent32 {
uint32_t length;
uint32_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent;
} QEMU_PACKED NBDExtent32;
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
typedef struct NBDExtendedMeta {
/* header's length >= 24 (at least one extent) */
uint32_t context_id;
uint32_t count; /* header length must be count * 16 + 8 */
/* NBDExtent64 extents[count] follows */
} QEMU_PACKED NBDExtendedMeta;
/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
typedef struct NBDExtent64 {
uint64_t length;
uint64_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent64;
/* Client payload for limiting NBD_CMD_BLOCK_STATUS reply */
typedef struct NBDBlockStatusPayload {
uint64_t effect_length;
/* uint32_t ids[] follows, array length implied by header */
} QEMU_PACKED NBDBlockStatusPayload;
/* Transmission (export) flags: sent from server to client during handshake,
but describe what will happen during transmission */
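
NBDExtent64 mirrors the existing 32-bit extent but doubles both fields; code that feeds 64-bit block-status data to a consumer that still speaks the 32-bit reply format has to clamp the length. A standalone narrowing helper as a sketch (the function name and clamping policy are assumptions, and the packing attribute is omitted):

#include <stdint.h>

typedef struct NBDExtent32 {
    uint32_t length;
    uint32_t flags;   /* NBD_STATE_* */
} NBDExtent32;

typedef struct NBDExtent64 {
    uint64_t length;
    uint64_t flags;   /* NBD_STATE_* */
} NBDExtent64;

/* Clamp a 64-bit extent so it fits the 32-bit reply format. */
static NBDExtent32 nbd_extent_narrow(NBDExtent64 ext)
{
    NBDExtent32 out = {
        .length = ext.length > UINT32_MAX ? UINT32_MAX : (uint32_t)ext.length,
        .flags = (uint32_t)ext.flags,
    };
    return out;
}
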
@ -156,20 +194,22 @@ enum {
NBD_FLAG_SEND_RESIZE_BIT = 9, /* Send resize */
NBD_FLAG_SEND_CACHE_BIT = 10, /* Send CACHE (prefetch) */
NBD_FLAG_SEND_FAST_ZERO_BIT = 11, /* FAST_ZERO flag for WRITE_ZEROES */
NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT = 12, /* PAYLOAD flag for BLOCK_STATUS */
};
#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
#define NBD_FLAG_BLOCK_STAT_PAYLOAD (1 << NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT)
/* New-style handshake (global) flags, sent from server to client, and
control what will happen during handshake phase. */
@ -192,6 +232,7 @@ enum {
#define NBD_OPT_STRUCTURED_REPLY (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT (10)
#define NBD_OPT_EXTENDED_HEADERS (11)
/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))
@ -209,6 +250,8 @@ enum {
#define NBD_REP_ERR_UNKNOWN NBD_REP_ERR(6) /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN NBD_REP_ERR(7) /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8) /* Need INFO_BLOCK_SIZE */
#define NBD_REP_ERR_TOO_BIG NBD_REP_ERR(9) /* Payload size overflow */
#define NBD_REP_ERR_EXT_HEADER_REQD NBD_REP_ERR(10) /* Need extended headers */
/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT 0
@ -217,12 +260,14 @@ enum {
#define NBD_INFO_BLOCK_SIZE 3
/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE (1 << 3) /* only one extent in BLOCK_STATUS
* reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE (1 << 3) \
/* only one extent in BLOCK_STATUS reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
#define NBD_CMD_FLAG_PAYLOAD_LEN (1 << 5) \
/* length describes payload, not effect; only with ext header */
/* Supported request types */
enum {
@ -248,22 +293,31 @@ enum {
*/
#define NBD_MAX_STRING_SIZE 4096
/* Two types of reply structures */
/* Two types of request structures, a given client will only use 1 */
#define NBD_REQUEST_MAGIC 0x25609513
#define NBD_EXTENDED_REQUEST_MAGIC 0x21e41c71
/*
* Three types of reply structures, but what a client expects depends
* on NBD_OPT_STRUCTURED_REPLY and NBD_OPT_EXTENDED_HEADERS.
*/
#define NBD_SIMPLE_REPLY_MAGIC 0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef
#define NBD_EXTENDED_REPLY_MAGIC 0x6e8a278c
/* Structured reply flags */
/* Chunk reply flags (for structured and extended replies) */
#define NBD_REPLY_FLAG_DONE (1 << 0) /* This reply-chunk is last */
/* Structured reply types */
/* Chunk reply types */
#define NBD_REPLY_ERR(value) ((1 << 15) | (value))
#define NBD_REPLY_TYPE_NONE 0
#define NBD_REPLY_TYPE_OFFSET_DATA 1
#define NBD_REPLY_TYPE_OFFSET_HOLE 2
#define NBD_REPLY_TYPE_BLOCK_STATUS 5
#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
#define NBD_REPLY_TYPE_NONE 0
#define NBD_REPLY_TYPE_OFFSET_DATA 1
#define NBD_REPLY_TYPE_OFFSET_HOLE 2
#define NBD_REPLY_TYPE_BLOCK_STATUS 5
#define NBD_REPLY_TYPE_BLOCK_STATUS_EXT 6
#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
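
With three reply magic numbers in play, a receiver has to read the leading 32-bit magic before it knows how much more header to expect: 16 bytes total for a simple reply, 20 for a structured chunk, 32 for an extended chunk, matching the struct definitions earlier in this header. A hedged dispatch sketch, not QEMU's actual receive path:

#include <stddef.h>
#include <stdint.h>

#define NBD_SIMPLE_REPLY_MAGIC     0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef
#define NBD_EXTENDED_REPLY_MAGIC   0x6e8a278c

/* Bytes of reply header still to read once the 4-byte magic is in hand. */
static size_t reply_header_remaining(uint32_t magic)
{
    switch (magic) {
    case NBD_SIMPLE_REPLY_MAGIC:
        return 16 - 4;  /* error, cookie */
    case NBD_STRUCTURED_REPLY_MAGIC:
        return 20 - 4;  /* flags, type, cookie, payload length */
    case NBD_EXTENDED_REPLY_MAGIC:
        return 32 - 4;  /* flags, type, cookie, offset, payload length */
    default:
        return 0;       /* unknown magic: protocol error */
    }
}
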
@ -305,7 +359,7 @@ typedef struct NBDExportInfo {
/* In-out fields, set by client before nbd_receive_negotiate() and
* updated by server results during nbd_receive_negotiate() */
bool structured_reply;
NBDMode mode; /* input maximum mode tolerated; output actual mode chosen */
bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */
/* Set by server results during nbd_receive_negotiate() and


@ -1,5 +1,5 @@
/*
* QEMU Block driver for NBD
* QEMU Block driver for NBD
*
* Copyright (c) 2021 Virtuozzo International GmbH.
*
@ -93,7 +93,7 @@ NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
.do_negotiation = do_negotiation,
.initial_info.request_sizes = true,
.initial_info.structured_reply = true,
.initial_info.mode = NBD_MODE_STRUCTURED,
.initial_info.base_allocation = true,
.initial_info.x_dirty_bitmap = g_strdup(x_dirty_bitmap),
.initial_info.name = g_strdup(export_name ?: "")


@ -879,7 +879,7 @@ static int nbd_list_meta_contexts(QIOChannel *ioc,
*/
static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
const char *hostname, QIOChannel **outioc,
bool structured_reply, bool *zeroes,
NBDMode max_mode, bool *zeroes,
Error **errp)
{
ERRP_GUARD();
@ -953,7 +953,7 @@ static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
if (fixedNewStyle) {
int result = 0;
if (structured_reply) {
if (max_mode >= NBD_MODE_STRUCTURED) {
result = nbd_request_simple_option(ioc,
NBD_OPT_STRUCTURED_REPLY,
false, errp);
@ -1022,20 +1022,19 @@ int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
trace_nbd_receive_negotiate_name(info->name);
result = nbd_start_negotiate(ioc, tlscreds, hostname, outioc,
info->structured_reply, &zeroes, errp);
info->mode, &zeroes, errp);
if (result < 0) {
return result;
}
info->structured_reply = false;
info->mode = result;
info->base_allocation = false;
if (tlscreds && *outioc) {
ioc = *outioc;
}
switch ((NBDMode)result) {
switch (info->mode) {
case NBD_MODE_STRUCTURED:
info->structured_reply = true;
if (base_allocation) {
result = nbd_negotiate_simple_meta_context(ioc, info, errp);
if (result < 0) {
@ -1144,8 +1143,8 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
QIOChannel *sioc = NULL;
*info = NULL;
result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc, true,
NULL, errp);
result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc,
NBD_MODE_STRUCTURED, NULL, errp);
if (tlscreds && sioc) {
ioc = sioc;
}
@ -1176,7 +1175,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
memset(&array[count - 1], 0, sizeof(*array));
array[count - 1].name = name;
array[count - 1].description = desc;
array[count - 1].structured_reply = result == NBD_MODE_STRUCTURED;
array[count - 1].mode = result;
}
for (i = 0; i < count; i++) {
@ -1209,6 +1208,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
/* Lone export name is implied, but we can parse length and flags */
array = g_new0(NBDExportInfo, 1);
array->name = g_strdup("");
array->mode = NBD_MODE_OLDSTYLE;
count = 1;
if (nbd_negotiate_finish_oldstyle(ioc, array, errp) < 0) {
@ -1218,7 +1218,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
/* Send NBD_CMD_DISC as a courtesy to the server, but ignore all
* errors now that we have the information we wanted. */
if (nbd_drop(ioc, 124, NULL) == 0) {
NBDRequest request = { .type = NBD_CMD_DISC };
NBDRequest request = { .type = NBD_CMD_DISC, .mode = result };
nbd_send_request(ioc, &request);
}
@ -1348,6 +1348,8 @@ int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
{
uint8_t buf[NBD_REQUEST_SIZE];
assert(request->mode <= NBD_MODE_STRUCTURED); /* TODO handle extended */
assert(request->len <= UINT32_MAX);
trace_nbd_send_request(request->from, request->len, request->cookie,
request->flags, request->type,
nbd_cmd_lookup(request->type));
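
The new assertions record that nbd_send_request still emits only the compact header; serializing the extended form is left to a follow-up series. For orientation, here is a hedged sketch of the two on-wire request layouts (28 bytes with a 32-bit length versus 32 bytes with a 64-bit length, per the extended-headers proposal); the byte-store helpers and function names are mine, not QEMU's marshalling code:

#include <stdint.h>

#define NBD_REQUEST_MAGIC          0x25609513
#define NBD_EXTENDED_REQUEST_MAGIC 0x21e41c71

static void store_be16(uint8_t *p, uint16_t v)
{
    p[0] = (uint8_t)(v >> 8);
    p[1] = (uint8_t)v;
}

static void store_be32(uint8_t *p, uint32_t v)
{
    store_be16(p, (uint16_t)(v >> 16));
    store_be16(p + 2, (uint16_t)v);
}

static void store_be64(uint8_t *p, uint64_t v)
{
    store_be32(p, (uint32_t)(v >> 32));
    store_be32(p + 4, (uint32_t)v);
}

/* Compact request: 28 bytes, 32-bit effect length. */
static void pack_compact_request(uint8_t buf[28], uint16_t flags, uint16_t type,
                                 uint64_t cookie, uint64_t from, uint32_t len)
{
    store_be32(buf + 0, NBD_REQUEST_MAGIC);
    store_be16(buf + 4, flags);
    store_be16(buf + 6, type);
    store_be64(buf + 8, cookie);
    store_be64(buf + 16, from);
    store_be32(buf + 24, len);
}

/* Extended request: 32 bytes, 64-bit effect (or payload) length. */
static void pack_extended_request(uint8_t buf[32], uint16_t flags, uint16_t type,
                                  uint64_t cookie, uint64_t from, uint64_t len)
{
    store_be32(buf + 0, NBD_EXTENDED_REQUEST_MAGIC);
    store_be16(buf + 4, flags);
    store_be16(buf + 6, type);
    store_be64(buf + 8, cookie);
    store_be64(buf + 16, from);
    store_be64(buf + 24, len);
}

The server-side parser works from the same 28-byte layout, which is why buf + 6, buf + 8, buf + 16 and buf + 24 appear in the nbd_receive_request hunk further down.
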


@ -79,6 +79,8 @@ const char *nbd_opt_lookup(uint32_t opt)
return "list meta context";
case NBD_OPT_SET_META_CONTEXT:
return "set meta context";
case NBD_OPT_EXTENDED_HEADERS:
return "extended headers";
default:
return "<unknown>";
}
@ -112,6 +114,10 @@ const char *nbd_rep_lookup(uint32_t rep)
return "server shutting down";
case NBD_REP_ERR_BLOCK_SIZE_REQD:
return "block size required";
case NBD_REP_ERR_TOO_BIG:
return "option payload too big";
case NBD_REP_ERR_EXT_HEADER_REQD:
return "extended headers required";
default:
return "<unknown>";
}
@ -170,7 +176,9 @@ const char *nbd_reply_type_lookup(uint16_t type)
case NBD_REPLY_TYPE_OFFSET_HOLE:
return "hole";
case NBD_REPLY_TYPE_BLOCK_STATUS:
return "block status";
return "block status (32-bit)";
case NBD_REPLY_TYPE_BLOCK_STATUS_EXT:
return "block status (64-bit)";
case NBD_REPLY_TYPE_ERROR:
return "generic error";
case NBD_REPLY_TYPE_ERROR_OFFSET:
@ -261,6 +269,8 @@ const char *nbd_mode_lookup(NBDMode mode)
return "simple headers";
case NBD_MODE_STRUCTURED:
return "structured replies";
case NBD_MODE_EXTENDED:
return "extended headers";
default:
return "<unknown>";
}


@ -1,7 +1,7 @@
/*
* NBD Internal Declarations
*
* Copyright (C) 2016 Red Hat, Inc.
* Copyright Red Hat
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
@ -44,7 +44,6 @@
#define NBD_OLDSTYLE_NEGOTIATE_SIZE (8 + 8 + 8 + 4 + 124)
#define NBD_INIT_MAGIC 0x4e42444d41474943LL /* ASCII "NBDMAGIC" */
#define NBD_REQUEST_MAGIC 0x25609513
#define NBD_OPTS_MAGIC 0x49484156454F5054LL /* ASCII "IHAVEOPT" */
#define NBD_CLIENT_MAGIC 0x0000420281861253LL
#define NBD_REP_MAGIC 0x0003e889045565a9LL


@ -143,7 +143,7 @@ struct NBDClient {
uint32_t check_align; /* If non-zero, check for aligned client requests */
bool structured_reply;
NBDMode mode;
NBDExportMetaContexts export_meta;
uint32_t opt; /* Current option being negotiated */
@ -502,7 +502,7 @@ static int nbd_negotiate_handle_export_name(NBDClient *client, bool no_zeroes,
}
myflags = client->exp->nbdflags;
if (client->structured_reply) {
if (client->mode >= NBD_MODE_STRUCTURED) {
myflags |= NBD_FLAG_SEND_DF;
}
trace_nbd_negotiate_new_style_size_flags(client->exp->size, myflags);
@ -687,7 +687,7 @@ static int nbd_negotiate_handle_info(NBDClient *client, Error **errp)
/* Send NBD_INFO_EXPORT always */
myflags = exp->nbdflags;
if (client->structured_reply) {
if (client->mode >= NBD_MODE_STRUCTURED) {
myflags |= NBD_FLAG_SEND_DF;
}
trace_nbd_negotiate_new_style_size_flags(exp->size, myflags);
@ -985,7 +985,8 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
size_t i;
size_t count = 0;
if (client->opt == NBD_OPT_SET_META_CONTEXT && !client->structured_reply) {
if (client->opt == NBD_OPT_SET_META_CONTEXT &&
client->mode < NBD_MODE_STRUCTURED) {
return nbd_opt_invalid(client, errp,
"request option '%s' when structured reply "
"is not negotiated",
@ -1122,10 +1123,12 @@ static int nbd_negotiate_options(NBDClient *client, Error **errp)
if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) {
return -EIO;
}
client->mode = NBD_MODE_EXPORT_NAME;
trace_nbd_negotiate_options_flags(flags);
if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) {
fixedNewstyle = true;
flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE;
client->mode = NBD_MODE_SIMPLE;
}
if (flags & NBD_FLAG_C_NO_ZEROES) {
no_zeroes = true;
@ -1162,7 +1165,7 @@ static int nbd_negotiate_options(NBDClient *client, Error **errp)
client->optlen = length;
if (length > NBD_MAX_BUFFER_SIZE) {
error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
error_setg(errp, "len (%" PRIu32 ") is larger than max len (%u)",
length, NBD_MAX_BUFFER_SIZE);
return -EINVAL;
}
@ -1261,13 +1264,13 @@ static int nbd_negotiate_options(NBDClient *client, Error **errp)
case NBD_OPT_STRUCTURED_REPLY:
if (length) {
ret = nbd_reject_length(client, false, errp);
} else if (client->structured_reply) {
} else if (client->mode >= NBD_MODE_STRUCTURED) {
ret = nbd_negotiate_send_rep_err(
client, NBD_REP_ERR_INVALID, errp,
"structured reply already negotiated");
} else {
ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
client->structured_reply = true;
client->mode = NBD_MODE_STRUCTURED;
}
break;
@ -1434,7 +1437,7 @@ static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *reque
request->type = lduw_be_p(buf + 6);
request->cookie = ldq_be_p(buf + 8);
request->from = ldq_be_p(buf + 16);
request->len = ldl_be_p(buf + 24);
request->len = (uint32_t)ldl_be_p(buf + 24); /* widen 32 to 64 bits */
trace_nbd_receive_request(magic, request->flags, request->type,
request->from, request->len);
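
The cast to uint32_t before widening matters because ldl_be_p() returns a signed int in QEMU; assigning the raw result straight into the now 64-bit request->len would sign-extend any length of 2 GiB or more. A self-contained illustration of the pitfall, using a plain C stand-in for the load helper (two's-complement wrap assumed, as on the platforms QEMU targets):

#include <assert.h>
#include <stdint.h>

/* Stand-in for a 32-bit big-endian load whose return type is signed int. */
static int load_be32_as_int(const uint8_t *p)
{
    uint32_t v = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                 ((uint32_t)p[2] << 8) | (uint32_t)p[3];
    return (int)v;  /* wraps to a negative value for lengths >= 2 GiB */
}

int main(void)
{
    const uint8_t wire_len[4] = { 0x80, 0x00, 0x00, 0x00 };   /* 2 GiB */

    uint64_t wrong = load_be32_as_int(wire_len);              /* sign-extended */
    uint64_t right = (uint32_t)load_be32_as_int(wire_len);    /* widened cleanly */

    assert(wrong == 0xffffffff80000000ull);
    assert(right == 0x80000000ull);
    return 0;
}
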
@ -1884,7 +1887,7 @@ static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
NBDRequest *request,
uint32_t error,
void *data,
size_t len,
uint64_t len,
Error **errp)
{
NBDSimpleReply reply;
@ -1895,7 +1898,10 @@ static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
};
assert(!len || !nbd_err);
assert(!client->structured_reply || request->type != NBD_CMD_READ);
assert(len <= NBD_MAX_BUFFER_SIZE);
assert(client->mode < NBD_MODE_STRUCTURED ||
(client->mode == NBD_MODE_STRUCTURED &&
request->type != NBD_CMD_READ));
trace_nbd_co_send_simple_reply(request->cookie, nbd_err,
nbd_err_lookup(nbd_err), len);
set_be_simple_reply(&reply, nbd_err, request->cookie);
@ -1951,7 +1957,7 @@ static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
NBDRequest *request,
uint64_t offset,
void *data,
size_t size,
uint64_t size,
bool final,
Error **errp)
{
@ -1963,7 +1969,7 @@ static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
{.iov_base = data, .iov_len = size}
};
assert(size);
assert(size && size <= NBD_MAX_BUFFER_SIZE);
trace_nbd_co_send_chunk_read(request->cookie, offset, data, size);
set_be_chunk(client, iov, 3, final ? NBD_REPLY_FLAG_DONE : 0,
NBD_REPLY_TYPE_OFFSET_DATA, request);
@ -1971,7 +1977,7 @@ static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
return nbd_co_send_iov(client, iov, 3, errp);
}
static int coroutine_fn nbd_co_send_chunk_error(NBDClient *client,
NBDRequest *request,
uint32_t error,
@ -2006,13 +2012,14 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
NBDRequest *request,
uint64_t offset,
uint8_t *data,
size_t size,
uint64_t size,
Error **errp)
{
int ret = 0;
NBDExport *exp = client->exp;
size_t progress = 0;
assert(size <= NBD_MAX_BUFFER_SIZE);
while (progress < size) {
int64_t pnum;
int status = blk_co_block_status_above(exp->common.blk, NULL,
@ -2067,7 +2074,7 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
}
typedef struct NBDExtentArray {
NBDExtent *extents;
NBDExtent32 *extents;
unsigned int nb_alloc;
unsigned int count;
uint64_t total_length;
@ -2080,7 +2087,7 @@ static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc)
NBDExtentArray *ea = g_new0(NBDExtentArray, 1);
ea->nb_alloc = nb_alloc;
ea->extents = g_new(NBDExtent, nb_alloc);
ea->extents = g_new(NBDExtent32, nb_alloc);
ea->can_add = true;
return ea;
@ -2143,7 +2150,7 @@ static int nbd_extent_array_add(NBDExtentArray *ea,
}
ea->total_length += length;
ea->extents[ea->count] = (NBDExtent) {.length = length, .flags = flags};
ea->extents[ea->count] = (NBDExtent32) {.length = length, .flags = flags};
ea->count++;
return 0;
@ -2310,11 +2317,16 @@ static int coroutine_fn nbd_co_send_bitmap(NBDClient *client,
* to the client (although the caller may still need to disconnect after
* reporting the error).
*/
static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
static int coroutine_fn nbd_co_receive_request(NBDRequestData *req,
NBDRequest *request,
Error **errp)
{
NBDClient *client = req->client;
int valid_flags;
bool check_length = false;
bool check_rofs = false;
bool allocate_buffer = false;
unsigned payload_len = 0;
int valid_flags = NBD_CMD_FLAG_FUA;
int ret;
g_assert(qemu_in_coroutine());
@ -2326,60 +2338,94 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *
trace_nbd_co_receive_request_decode_type(request->cookie, request->type,
nbd_cmd_lookup(request->type));
if (request->type != NBD_CMD_WRITE) {
/* No payload, we are ready to read the next request. */
req->complete = true;
}
if (request->type == NBD_CMD_DISC) {
switch (request->type) {
case NBD_CMD_DISC:
/* Special case: we're going to disconnect without a reply,
* whether or not flags, from, or len are bogus */
req->complete = true;
return -EIO;
case NBD_CMD_READ:
if (client->mode >= NBD_MODE_STRUCTURED) {
valid_flags |= NBD_CMD_FLAG_DF;
}
check_length = true;
allocate_buffer = true;
break;
case NBD_CMD_WRITE:
payload_len = request->len;
check_length = true;
allocate_buffer = true;
check_rofs = true;
break;
case NBD_CMD_FLUSH:
break;
case NBD_CMD_TRIM:
check_rofs = true;
break;
case NBD_CMD_CACHE:
check_length = true;
break;
case NBD_CMD_WRITE_ZEROES:
valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
check_rofs = true;
break;
case NBD_CMD_BLOCK_STATUS:
valid_flags |= NBD_CMD_FLAG_REQ_ONE;
break;
default:
/* Unrecognized, will fail later */
;
}
if (request->type == NBD_CMD_READ || request->type == NBD_CMD_WRITE ||
request->type == NBD_CMD_CACHE)
{
if (request->len > NBD_MAX_BUFFER_SIZE) {
error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
request->len, NBD_MAX_BUFFER_SIZE);
return -EINVAL;
}
if (request->type != NBD_CMD_CACHE) {
req->data = blk_try_blockalign(client->exp->common.blk,
request->len);
if (req->data == NULL) {
error_setg(errp, "No memory");
return -ENOMEM;
}
/* Payload and buffer handling. */
if (!payload_len) {
req->complete = true;
}
if (check_length && request->len > NBD_MAX_BUFFER_SIZE) {
/* READ, WRITE, CACHE */
error_setg(errp, "len (%" PRIu64 ") is larger than max len (%u)",
request->len, NBD_MAX_BUFFER_SIZE);
return -EINVAL;
}
if (allocate_buffer) {
/* READ, WRITE */
req->data = blk_try_blockalign(client->exp->common.blk,
request->len);
if (req->data == NULL) {
error_setg(errp, "No memory");
return -ENOMEM;
}
}
if (request->type == NBD_CMD_WRITE) {
if (nbd_read(client->ioc, req->data, request->len, "CMD_WRITE data",
errp) < 0)
{
if (payload_len) {
/* WRITE */
assert(req->data);
ret = nbd_read(client->ioc, req->data, payload_len,
"CMD_WRITE data", errp);
if (ret < 0) {
return -EIO;
}
req->complete = true;
trace_nbd_co_receive_request_payload_received(request->cookie,
request->len);
payload_len);
}
/* Sanity checks. */
if (client->exp->nbdflags & NBD_FLAG_READ_ONLY &&
(request->type == NBD_CMD_WRITE ||
request->type == NBD_CMD_WRITE_ZEROES ||
request->type == NBD_CMD_TRIM)) {
if (client->exp->nbdflags & NBD_FLAG_READ_ONLY && check_rofs) {
/* WRITE, TRIM, WRITE_ZEROES */
error_setg(errp, "Export is read-only");
return -EROFS;
}
if (request->from > client->exp->size ||
request->len > client->exp->size - request->from) {
error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu32
error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu64
", Size: %" PRIu64, request->from, request->len,
client->exp->size);
return (request->type == NBD_CMD_WRITE ||
@ -2396,14 +2442,6 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *
request->len,
client->check_align);
}
valid_flags = NBD_CMD_FLAG_FUA;
if (request->type == NBD_CMD_READ && client->structured_reply) {
valid_flags |= NBD_CMD_FLAG_DF;
} else if (request->type == NBD_CMD_WRITE_ZEROES) {
valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
} else if (request->type == NBD_CMD_BLOCK_STATUS) {
valid_flags |= NBD_CMD_FLAG_REQ_ONE;
}
if (request->flags & ~valid_flags) {
error_setg(errp, "unsupported flags for command %s (got 0x%x)",
nbd_cmd_lookup(request->type), request->flags);
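
The refactor gathers per-command policy (accepted flags, length limits, buffer allocation, read-only checks) into a single switch instead of the earlier scattered if/else chains, and the request is then rejected whenever request->flags has bits outside the computed mask. A condensed standalone sketch of the flag-mask part alone, mirroring the hunk above but not intended as a drop-in for QEMU:

#include <stdbool.h>
#include <stdint.h>

#define NBD_CMD_FLAG_FUA       (1 << 0)
#define NBD_CMD_FLAG_NO_HOLE   (1 << 1)
#define NBD_CMD_FLAG_DF        (1 << 2)
#define NBD_CMD_FLAG_REQ_ONE   (1 << 3)
#define NBD_CMD_FLAG_FAST_ZERO (1 << 4)

enum {
    NBD_CMD_READ, NBD_CMD_WRITE, NBD_CMD_DISC, NBD_CMD_FLUSH,
    NBD_CMD_TRIM, NBD_CMD_CACHE, NBD_CMD_WRITE_ZEROES, NBD_CMD_BLOCK_STATUS,
};

/* Flags a server accepts for a command; structured_ok says whether at
 * least structured replies were negotiated. */
static uint16_t nbd_valid_flags(uint16_t type, bool structured_ok)
{
    uint16_t valid = NBD_CMD_FLAG_FUA;

    switch (type) {
    case NBD_CMD_READ:
        if (structured_ok) {
            valid |= NBD_CMD_FLAG_DF;
        }
        break;
    case NBD_CMD_WRITE_ZEROES:
        valid |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
        break;
    case NBD_CMD_BLOCK_STATUS:
        valid |= NBD_CMD_FLAG_REQ_ONE;
        break;
    default:
        break;
    }
    return valid;
}

A request with (request->flags & ~nbd_valid_flags(type, structured_ok)) set is then refused as unsupported, which is exactly the check that closes the hunk.
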
@ -2423,7 +2461,7 @@ static coroutine_fn int nbd_send_generic_reply(NBDClient *client,
const char *error_msg,
Error **errp)
{
if (client->structured_reply && ret < 0) {
if (client->mode >= NBD_MODE_STRUCTURED && ret < 0) {
return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp);
} else {
return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
@ -2441,6 +2479,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
NBDExport *exp = client->exp;
assert(request->type == NBD_CMD_READ);
assert(request->len <= NBD_MAX_BUFFER_SIZE);
/* XXX: NBD Protocol only documents use of FUA with WRITE */
if (request->flags & NBD_CMD_FLAG_FUA) {
@ -2451,8 +2490,8 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
}
}
if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
request->len)
if (client->mode >= NBD_MODE_STRUCTURED &&
!(request->flags & NBD_CMD_FLAG_DF) && request->len)
{
return nbd_co_send_sparse_read(client, request, request->from,
data, request->len, errp);
@ -2464,7 +2503,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
"reading from file failed", errp);
}
if (client->structured_reply) {
if (client->mode >= NBD_MODE_STRUCTURED) {
if (request->len) {
return nbd_co_send_chunk_read(client, request, request->from, data,
request->len, true, errp);
@ -2491,6 +2530,7 @@ static coroutine_fn int nbd_do_cmd_cache(NBDClient *client, NBDRequest *request,
NBDExport *exp = client->exp;
assert(request->type == NBD_CMD_CACHE);
assert(request->len <= NBD_MAX_BUFFER_SIZE);
ret = blk_co_preadv(exp->common.blk, request->from, request->len,
NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
@ -2524,6 +2564,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
if (request->flags & NBD_CMD_FLAG_FUA) {
flags |= BDRV_REQ_FUA;
}
assert(request->len <= NBD_MAX_BUFFER_SIZE);
ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
flags);
return nbd_send_generic_reply(client, request, ret,
@ -2567,6 +2608,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
return nbd_send_generic_reply(client, request, -EINVAL,
"need non-zero length", errp);
}
assert(request->len <= UINT32_MAX);
if (client->export_meta.count) {
bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
int contexts_remaining = client->export_meta.count;


@ -31,7 +31,7 @@ nbd_client_loop(void) "Doing NBD loop"
nbd_client_loop_ret(int ret, const char *error) "NBD loop returned %d: %s"
nbd_client_clear_queue(void) "Clearing NBD queue"
nbd_client_clear_socket(void) "Clearing NBD socket"
nbd_send_request(uint64_t from, uint32_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu32 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
nbd_send_request(uint64_t from, uint64_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu64 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
nbd_receive_simple_reply(int32_t error, const char *errname, uint64_t cookie) "Got simple reply: { .error = %" PRId32 " (%s), cookie = %" PRIu64" }"
nbd_receive_structured_reply_chunk(uint16_t flags, uint16_t type, const char *name, uint64_t cookie, uint32_t length) "Got structured reply chunk: { flags = 0x%" PRIx16 ", type = %d (%s), cookie = %" PRIu64 ", length = %" PRIu32 " }"
@ -60,18 +60,18 @@ nbd_negotiate_options_check_option(uint32_t option, const char *name) "Checking
nbd_negotiate_begin(void) "Beginning negotiation"
nbd_negotiate_new_style_size_flags(uint64_t size, unsigned flags) "advertising size %" PRIu64 " and flags 0x%x"
nbd_negotiate_success(void) "Negotiation succeeded"
nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint32_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu32 " }"
nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint64_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu64 " }"
nbd_blk_aio_attached(const char *name, void *ctx) "Export %s: Attaching clients to AIO context %p"
nbd_blk_aio_detach(const char *name, void *ctx) "Export %s: Detaching clients from AIO context %p"
nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, int len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, uint64_t len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %" PRIu64
nbd_co_send_chunk_done(uint64_t cookie) "Send structured reply done: cookie = %" PRIu64
nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, size_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, size_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, uint64_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %" PRIu64
nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, uint64_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %" PRIu64
nbd_co_send_extents(uint64_t cookie, unsigned int extents, uint32_t id, uint64_t length, int last) "Send block status reply: cookie = %" PRIu64 ", extents = %u, context = %d (extents cover %" PRIu64 " bytes, last chunk = %d)"
nbd_co_send_chunk_error(uint64_t cookie, int err, const char *errname, const char *msg) "Send structured error reply: cookie = %" PRIu64 ", error = %d (%s), msg = '%s'"
nbd_co_receive_request_decode_type(uint64_t cookie, uint16_t type, const char *name) "Decoding type: cookie = %" PRIu64 ", type = %" PRIu16 " (%s)"
nbd_co_receive_request_payload_received(uint64_t cookie, uint32_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu32
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32
nbd_co_receive_request_payload_received(uint64_t cookie, uint64_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu64
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint64_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx64 ", align=0x%" PRIx32
nbd_trip(void) "Reading request"
# client-connection.c


@ -295,7 +295,9 @@ static void *show_parts(void *arg)
static void *nbd_client_thread(void *arg)
{
struct NbdClientOpts *opts = arg;
NBDExportInfo info = { .request_sizes = false, .name = g_strdup("") };
/* TODO: Revisit this if nbd.ko ever gains support for structured reply */
NBDExportInfo info = { .request_sizes = false, .name = g_strdup(""),
.mode = NBD_MODE_SIMPLE };
QIOChannelSocket *sioc;
int fd = -1;
int ret = EXIT_FAILURE;


@ -979,10 +979,15 @@ _require_drivers()
#
_require_large_file()
{
if ! truncate --size="$1" "$TEST_IMG"; then
if [ -z "$TEST_IMG_FILE" ]; then
FILENAME="$TEST_IMG"
else
FILENAME="$TEST_IMG_FILE"
fi
if ! truncate --size="$1" "$FILENAME"; then
_notrun "file system on $TEST_DIR does not support large enough files"
fi
rm "$TEST_IMG"
rm "$FILENAME"
}
# Check that a set of devices is available in the QEMU binary


@ -142,4 +142,4 @@ if __name__ == '__main__':
iotests.main(supported_fmts=['qcow2'])
except ImportError:
iotests.notrun('libnbd not installed')
iotests.notrun('Python bindings to libnbd are not installed')