mirror of https://github.com/xemu-project/xemu.git
-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJY+dQoAAoJEJykq7OBq3PIBbEH/AvihEVi7qCh2q0hMu3ZNUT6
y4t+T4lZXfKZUTb4XHL+wtC/Db24jXUP4V0lvQJbe7VlMNUP4RIq+Wv+TKg67RZM
GB7S75nMACYWvDxMn9QCr1rW3BC9S57C12bpNvUhszpJZy3WVfM4q0IEJT1uq18u
mc9oqMSffnaSBn1tfTZcY9SDs2f9kU0hkwUPIZ0SznFOcVFsJKy9ABQCZHvg/4ut
j4Xnqaj+v81j9G1xhssGP1BgOoQw93ybSWcBV7XTbS7xprYCiLuq823VVsYLAwrf
5t6fN0XDKQV0f03nRkl8VZ3Xf3UWD2/l8S7Jo2wvnEIvArX40vyuseca85F3qrE=
=tddG
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Fri 21 Apr 2017 10:43:04 BST
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  MAINTAINERS: update my email address
  MAINTAINERS: update Wen's email address
  migration/block: use blk_pwrite_zeroes for each zero cluster
  throttle: make throttle_config(throttle_get_config()) symmetric
  throttle: do not use invalid config in test
  qemu-options: explain disk I/O throttling options

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

commit 09fc586db3

diff --git a/MAINTAINERS b/MAINTAINERS
@@ -1817,8 +1817,8 @@ S: Supported
 F: tests/image-fuzzer/
 
 Replication
-M: Wen Congyang <wency@cn.fujitsu.com>
-M: Changlong Xie <xiecl.fnst@cn.fujitsu.com>
+M: Wen Congyang <wencongyang2@huawei.com>
+M: Xie Changlong <xiechanglong.d@gmail.com>
 S: Supported
 F: replication*
 F: block/replication.c

diff --git a/migration/block.c b/migration/block.c
@@ -885,6 +885,8 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
     int64_t total_sectors = 0;
     int nr_sectors;
     int ret;
+    BlockDriverInfo bdi;
+    int cluster_size = BLOCK_SIZE;
 
     do {
         addr = qemu_get_be64(f);
@@ -919,6 +921,15 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
                 error_report_err(local_err);
                 return -EINVAL;
             }
+
+            ret = bdrv_get_info(blk_bs(blk), &bdi);
+            if (ret == 0 && bdi.cluster_size > 0 &&
+                bdi.cluster_size <= BLOCK_SIZE &&
+                BLOCK_SIZE % bdi.cluster_size == 0) {
+                cluster_size = bdi.cluster_size;
+            } else {
+                cluster_size = BLOCK_SIZE;
+            }
         }
 
         if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
@@ -932,10 +943,30 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
                                         nr_sectors * BDRV_SECTOR_SIZE,
                                         BDRV_REQ_MAY_UNMAP);
             } else {
+                int i;
+                int64_t cur_addr;
+                uint8_t *cur_buf;
+
                 buf = g_malloc(BLOCK_SIZE);
                 qemu_get_buffer(f, buf, BLOCK_SIZE);
-                ret = blk_pwrite(blk, addr * BDRV_SECTOR_SIZE, buf,
-                                 nr_sectors * BDRV_SECTOR_SIZE, 0);
+                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
+                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
+                    cur_buf = buf + i * cluster_size;
+
+                    if ((!block_mig_state.zero_blocks ||
+                        cluster_size < BLOCK_SIZE) &&
+                        buffer_is_zero(cur_buf, cluster_size)) {
+                        ret = blk_pwrite_zeroes(blk, cur_addr,
+                                                cluster_size,
+                                                BDRV_REQ_MAY_UNMAP);
+                    } else {
+                        ret = blk_pwrite(blk, cur_addr, cur_buf,
+                                         cluster_size, 0);
+                    }
+                    if (ret < 0) {
+                        break;
+                    }
+                }
                 g_free(buf);
             }
 
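
The loop above splits each migrated BLOCK_SIZE chunk (1 MiB in
migration/block.c) into clusters sized to the destination image and routes
all-zero clusters through blk_pwrite_zeroes() with BDRV_REQ_MAY_UNMAP, so a
sparse source image can stay sparse on the target. A minimal standalone
sketch of that chunking logic (the naive zero test stands in for QEMU's
vectorized buffer_is_zero(); the sizes and the dirtied offset are
illustrative):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE (1 << 20)    /* migration chunk size, 1 MiB */

/* Naive stand-in for QEMU's vectorized buffer_is_zero(). */
static bool buf_is_zero(const uint8_t *buf, size_t len)
{
    return buf[0] == 0 && memcmp(buf, buf + 1, len - 1) == 0;
}

/* Walk one migrated chunk cluster by cluster, as the new loop does:
 * zero clusters would go to blk_pwrite_zeroes(), the rest to
 * blk_pwrite(). Here we only count them. */
static void scan_chunk(const uint8_t *buf, int cluster_size)
{
    int zero = 0, data = 0;

    for (int i = 0; i < BLOCK_SIZE / cluster_size; i++) {
        const uint8_t *cur_buf = buf + i * cluster_size;

        if (buf_is_zero(cur_buf, cluster_size)) {
            zero++;     /* -> blk_pwrite_zeroes(blk, cur_addr, ...) */
        } else {
            data++;     /* -> blk_pwrite(blk, cur_addr, cur_buf, ...) */
        }
    }
    printf("clusters: %d zero, %d data\n", zero, data);
}

int main(void)
{
    uint8_t *buf = calloc(1, BLOCK_SIZE);

    if (!buf) {
        return 1;
    }
    buf[300 * 1024] = 0xff;     /* dirty a single byte in cluster 4 */
    scan_chunk(buf, 64 * 1024); /* e.g. a qcow2 image with 64 KiB clusters */
    free(buf);
    return 0;
}

This prints "clusters: 15 zero, 1 data": only the cluster containing the
dirtied byte would be written with blk_pwrite(); the other fifteen would be
punched as zeroes, which the destination can allocate lazily or unmap.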

diff --git a/qemu-options.hx b/qemu-options.hx
@@ -635,6 +635,30 @@ file sectors into the image file.
 conversion of plain zero writes by the OS to driver specific optimized
 zero write commands. You may even choose "unmap" if @var{discard} is set
 to "unmap" to allow a zero write to be converted to an UNMAP operation.
+@item bps=@var{b},bps_rd=@var{r},bps_wr=@var{w}
+Specify bandwidth throttling limits in bytes per second, either for all request
+types or for reads or writes only. Small values can lead to timeouts or hangs
+inside the guest. A safe minimum for disks is 2 MB/s.
+@item bps_max=@var{bm},bps_rd_max=@var{rm},bps_wr_max=@var{wm}
+Specify bursts in bytes per second, either for all request types or for reads
+or writes only. Bursts allow the guest I/O to spike above the limit
+temporarily.
+@item iops=@var{i},iops_rd=@var{r},iops_wr=@var{w}
+Specify request rate limits in requests per second, either for all request
+types or for reads or writes only.
+@item iops_max=@var{bm},iops_rd_max=@var{rm},iops_wr_max=@var{wm}
+Specify bursts in requests per second, either for all request types or for reads
+or writes only. Bursts allow the guest I/O to spike above the limit
+temporarily.
+@item iops_size=@var{is}
+Let every @var{is} bytes of a request count as a new request for iops
+throttling purposes. Use this option to prevent guests from circumventing iops
+limits by sending fewer but larger requests.
+@item group=@var{g}
+Join a throttling quota group with given name @var{g}. All drives that are
+members of the same group are accounted for together. Use this option to
+prevent guests from circumventing throttling limits by using many small disks
+instead of a single larger disk.
 @end table
 
 By default, the @option{cache=writeback} mode is used. It will report data
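
As a hypothetical invocation combining these suboptions (the image name and
numbers are made up): cap a virtio disk at 2 MB/s sustained with 4 MB/s
bursts, 100 requests per second counted in 4 KiB slices, drawing from a
shared quota group:

qemu-system-x86_64 ... \
    -drive file=disk0.qcow2,if=virtio,bps=2097152,bps_max=4194304,\
iops=100,iops_size=4096,group=shared0

Any other -drive that also specifies group=shared0 is accounted against the
same budget, so a guest cannot multiply its allowance by splitting storage
across several small disks.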

diff --git a/tests/test-throttle.c b/tests/test-throttle.c
@@ -205,8 +205,8 @@ static void test_config_functions(void)
     orig_cfg.buckets[THROTTLE_OPS_READ].avg = 69;
     orig_cfg.buckets[THROTTLE_OPS_WRITE].avg = 23;
 
-    orig_cfg.buckets[THROTTLE_BPS_TOTAL].max = 0; /* should be corrected */
-    orig_cfg.buckets[THROTTLE_BPS_READ].max = 1; /* should not be corrected */
+    orig_cfg.buckets[THROTTLE_BPS_TOTAL].max = 0;
+    orig_cfg.buckets[THROTTLE_BPS_READ].max = 56;
     orig_cfg.buckets[THROTTLE_BPS_WRITE].max = 120;
 
     orig_cfg.buckets[THROTTLE_OPS_TOTAL].max = 150;
@@ -246,8 +246,8 @@ static void test_config_functions(void)
     g_assert(final_cfg.buckets[THROTTLE_OPS_READ].avg == 69);
     g_assert(final_cfg.buckets[THROTTLE_OPS_WRITE].avg == 23);
 
-    g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].max == 15.3);/* fixed */
-    g_assert(final_cfg.buckets[THROTTLE_BPS_READ].max == 1);  /* not fixed */
+    g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].max == 0);
+    g_assert(final_cfg.buckets[THROTTLE_BPS_READ].max == 56);
     g_assert(final_cfg.buckets[THROTTLE_BPS_WRITE].max == 120);
 
     g_assert(final_cfg.buckets[THROTTLE_OPS_TOTAL].max == 150);

diff --git a/util/throttle.c b/util/throttle.c
@@ -380,6 +380,14 @@ static void throttle_fix_bucket(LeakyBucket *bkt)
     }
 }
 
+/* undo internal bucket parameter changes (see throttle_fix_bucket()) */
+static void throttle_unfix_bucket(LeakyBucket *bkt)
+{
+    if (bkt->max < bkt->avg) {
+        bkt->max = 0;
+    }
+}
+
 /* take care of canceling a timer */
 static void throttle_cancel_timer(QEMUTimer *timer)
 {
@@ -420,7 +428,13 @@ void throttle_config(ThrottleState *ts,
  */
 void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
 {
+    int i;
+
     *cfg = ts->cfg;
+
+    for (i = 0; i < BUCKETS_COUNT; i++) {
+        throttle_unfix_bucket(&cfg->buckets[i]);
+    }
 }
 
 
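
throttle_unfix_bucket() is the inverse of throttle_fix_bucket(), which in
this version of util/throttle.c substitutes a small implicit burst
(max = avg / 10, hence the old "== 15.3" assertion for what is presumably an
avg of 153) when the user leaves max at 0. Because that substitute is always
below avg, max < avg reliably marks an internally fixed bucket, and
throttle_get_config() can report the user's original 0 again; it also
explains why the test above drops max = 1, a below-avg value the round trip
cannot preserve. A reduced sketch of the round trip, with the struct trimmed
to the relevant fields and the fix logic paraphrased from this era's code:

#include <assert.h>

typedef struct {
    double avg;     /* sustained rate configured by the user */
    double max;     /* burst rate; 0 means "not set" */
} LeakyBucket;

/* Paraphrase of throttle_fix_bucket(): give a bucket with no burst
 * limit a small implicit one, so I/O is not throttled per request. */
static void fix_bucket(LeakyBucket *bkt)
{
    if (bkt->avg && !bkt->max) {
        bkt->max = bkt->avg / 10;
    }
}

/* The new throttle_unfix_bucket(): an internally chosen max is always
 * below avg, so max < avg means the user originally passed 0. */
static void unfix_bucket(LeakyBucket *bkt)
{
    if (bkt->max < bkt->avg) {
        bkt->max = 0;
    }
}

int main(void)
{
    LeakyBucket bkt = { .avg = 153, .max = 0 };

    fix_bucket(&bkt);       /* internal state: max becomes 15.3 */
    unfix_bucket(&bkt);     /* reported state: max back to 0 */
    assert(bkt.max == 0);   /* throttle_config(throttle_get_config())
                             * now round-trips what the user set */
    return 0;
}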