From fcca5dce203b8df0817bcca4c23e4c6846df5a56 Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Mon, 25 Dec 2017 10:51:07 +0800
Subject: [PATCH 1/5] iotests: Test creating overlay when guest running

Signed-off-by: Fam Zheng
Message-id: 20171225025107.23985-1-famz@redhat.com
Reviewed-by: Eric Blake
Signed-off-by: Max Reitz
---
 tests/qemu-iotests/153     | 8 +++++---
 tests/qemu-iotests/153.out | 7 ++++---
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/tests/qemu-iotests/153 b/tests/qemu-iotests/153
index fa25eb24bd..adfd02695b 100755
--- a/tests/qemu-iotests/153
+++ b/tests/qemu-iotests/153
@@ -32,6 +32,7 @@ _cleanup()
 {
     _cleanup_test_img
     rm -f "${TEST_IMG}.base"
+    rm -f "${TEST_IMG}.overlay"
     rm -f "${TEST_IMG}.convert"
     rm -f "${TEST_IMG}.a"
     rm -f "${TEST_IMG}.b"
@@ -177,8 +178,6 @@ rm -f "${TEST_IMG}.lnk" &>/dev/null
 ln -s ${TEST_IMG} "${TEST_IMG}.lnk" || echo "Failed to create link"
 _run_qemu_with_images "${TEST_IMG}.lnk" "${TEST_IMG}"

-echo
-echo "== Closing an image should unlock it =="
 _launch_qemu

 _send_qemu_cmd $QEMU_HANDLE \
@@ -193,7 +192,10 @@ _send_qemu_cmd $QEMU_HANDLE \

 _run_cmd $QEMU_IO "${TEST_IMG}" -c 'write 0 512'

-echo "Closing drive"
+echo "Creating overlay with qemu-img when the guest is running should be allowed"
+_run_cmd $QEMU_IMG create -f $IMGFMT -b "${TEST_IMG}" "${TEST_IMG}.overlay"
+
+echo "== Closing an image should unlock it =="
 _send_qemu_cmd $QEMU_HANDLE \
     "{ 'execute': 'human-monitor-command',
        'arguments': { 'command-line': 'drive_del d0' } }" \
diff --git a/tests/qemu-iotests/153.out b/tests/qemu-iotests/153.out
index 5b917b177c..34309cfb20 100644
--- a/tests/qemu-iotests/153.out
+++ b/tests/qemu-iotests/153.out
@@ -372,15 +372,16 @@ Is another process using the image?
 == Symbolic link ==
 QEMU_PROG: -drive if=none,file=TEST_DIR/t.qcow2: Failed to get "write" lock
 Is another process using the image?
-
-== Closing an image should unlock it ==
 {"return": {}}
 Adding drive

 _qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512
 can't open device TEST_DIR/t.qcow2: Failed to get "write" lock
 Is another process using the image?
-Closing drive
+Creating overlay with qemu-img when the guest is running should be allowed
+
+_qemu_img_wrapper create -f qcow2 -b TEST_DIR/t.qcow2 TEST_DIR/t.qcow2.overlay
+
+== Closing an image should unlock it ==
 _qemu_io_wrapper TEST_DIR/t.qcow2 -c write 0 512

 Adding two and closing one

From 990dc39cfa9b72fbe743a850db5542870caf7e05 Mon Sep 17 00:00:00 2001
From: Eric Blake
Date: Mon, 5 Mar 2018 10:18:24 -0600
Subject: [PATCH 2/5] iotests: Mark all tests executable

The majority of our iotests have the executable bit set; fix the few
outliers for consistency.

Signed-off-by: Eric Blake
Message-id: 20180305161824.7188-1-eblake@redhat.com
Signed-off-by: Max Reitz
---
 tests/qemu-iotests/096 | 0
 tests/qemu-iotests/124 | 0
 tests/qemu-iotests/129 | 0
 tests/qemu-iotests/132 | 0
 tests/qemu-iotests/136 | 0
 tests/qemu-iotests/139 | 0
 tests/qemu-iotests/148 | 0
 tests/qemu-iotests/152 | 0
 tests/qemu-iotests/163 | 0
 tests/qemu-iotests/205 | 0
 10 files changed, 0 insertions(+), 0 deletions(-)
 mode change 100644 => 100755 tests/qemu-iotests/096
 mode change 100644 => 100755 tests/qemu-iotests/124
 mode change 100644 => 100755 tests/qemu-iotests/129
 mode change 100644 => 100755 tests/qemu-iotests/132
 mode change 100644 => 100755 tests/qemu-iotests/136
 mode change 100644 => 100755 tests/qemu-iotests/139
 mode change 100644 => 100755 tests/qemu-iotests/148
 mode change 100644 => 100755 tests/qemu-iotests/152
 mode change 100644 => 100755 tests/qemu-iotests/163
 mode change 100644 => 100755 tests/qemu-iotests/205

diff --git a/tests/qemu-iotests/096 b/tests/qemu-iotests/096
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/129 b/tests/qemu-iotests/129
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/132 b/tests/qemu-iotests/132
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/136 b/tests/qemu-iotests/136
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/139 b/tests/qemu-iotests/139
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/148 b/tests/qemu-iotests/148
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/152 b/tests/qemu-iotests/152
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/163 b/tests/qemu-iotests/163
old mode 100644
new mode 100755
diff --git a/tests/qemu-iotests/205 b/tests/qemu-iotests/205
old mode 100644
new mode 100755

From 0bfed484a51e602ec77361c79c1caede396fb242 Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Thu, 1 Mar 2018 09:14:13 +0800
Subject: [PATCH 3/5] iotests: Skip test for ENOMEM error

The AFL image is meant to exercise the code that validates the image
size, which doesn't work on 32-bit builds or when the host is out of
memory (there is a large allocation before the interesting point). So
detect that case and skip the test, instead of faking the result.

Signed-off-by: Fam Zheng
Message-id: 20180301011413.11531-1-famz@redhat.com
Reviewed-by: Eric Blake
Signed-off-by: Max Reitz
---
 tests/qemu-iotests/059 | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/qemu-iotests/059 b/tests/qemu-iotests/059
index 40f89eae18..530bbbe6ce 100755
--- a/tests/qemu-iotests/059
+++ b/tests/qemu-iotests/059
@@ -152,9 +152,8 @@ done
 echo
 echo "=== Testing afl image with a very large capacity ==="
 _use_sample_img afl9.vmdk.bz2
-# The sed makes this test pass on machines with little RAM
-# (and also with 32 bit builds)
-_img_info | sed -e 's/Cannot allocate memory/Invalid argument/'
+_img_info | grep -q 'Cannot allocate memory' && _notrun "Insufficient memory, skipped test"
+_img_info
 _cleanup_test_img

 # success, all done

From 39eaefcedb9af9131543e6b35386627780ac3f41 Mon Sep 17 00:00:00 2001
From: Alberto Garcia
Date: Tue, 6 Mar 2018 15:01:21 +0200
Subject: [PATCH 4/5] iotests: Tweak 030 in order to trigger a race condition
 with parallel jobs

This patch tweaks TestParallelOps in iotest 030 so it allocates data
in smaller regions (256KB/512KB instead of 512KB/1MB) and the
block-stream job in test_stream_commit() only needs to copy data that
is at the very end of the image.

This way when the block-stream job is awakened it will finish right
away without any chance of being stopped by block_job_sleep_ns(). This
triggers the bug that was fixed by 3d5d319e1221082974711af1d09d82f and
1a63a907507fbbcfaee3f622907ec24 and is therefore a more useful test
case for parallel block jobs.

After this patch the aforementioned bug can also be reproduced with
the test_stream_parallel() test case.

Since with this change the stream job in test_stream_commit() finishes
early, this patch introduces a similar test case where both jobs are
slowed down so they can actually run in parallel.

Signed-off-by: Alberto Garcia
Cc: John Snow
Message-id: 20180306130121.30243-1-berto@igalia.com
Signed-off-by: Max Reitz
---
 tests/qemu-iotests/030     | 52 +++++++++++++++++++++++++++++++-------
 tests/qemu-iotests/030.out |  4 +--
 2 files changed, 45 insertions(+), 11 deletions(-)

diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
index 457984b8e9..b5f88959aa 100755
--- a/tests/qemu-iotests/030
+++ b/tests/qemu-iotests/030
@@ -156,7 +156,7 @@ class TestSingleDrive(iotests.QMPTestCase):
 class TestParallelOps(iotests.QMPTestCase):
     num_ops = 4 # Number of parallel block-stream operations
     num_imgs = num_ops * 2 + 1
-    image_len = num_ops * 1024 * 1024
+    image_len = num_ops * 512 * 1024
     imgs = []

     def setUp(self):
@@ -176,14 +176,14 @@ class TestParallelOps(iotests.QMPTestCase):
                      '-o', 'backing_file=%s' % self.imgs[i-1], self.imgs[i])

         # Put data into the images we are copying data from
-        for i in range(self.num_imgs / 2):
-            img_index = i * 2 + 1
-            # Alternate between 512k and 1M.
+        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
+        for i in range(len(odd_img_indexes)):
+            # Alternate between 256KB and 512KB.
             # This way jobs will not finish in the same order they were created
-            num_kb = 512 + 512 * (i % 2)
+            num_kb = 256 + 256 * (i % 2)
             qemu_io('-f', iotests.imgfmt,
-                    '-c', 'write -P %d %d %d' % (i, i*1024*1024, num_kb * 1024),
-                    self.imgs[img_index])
+                    '-c', 'write -P 0xFF %dk %dk' % (i * 512, num_kb),
+                    self.imgs[odd_img_indexes[i]])

         # Attach the drive to the VM
         self.vm = iotests.VM()
@@ -318,12 +318,14 @@ class TestParallelOps(iotests.QMPTestCase):
         self.wait_until_completed(drive='commit-drive0')

     # Test a block-stream and a block-commit job in parallel
-    def test_stream_commit(self):
+    # Here the stream job is supposed to finish quickly in order to reproduce
+    # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
+    def test_stream_commit_1(self):
         self.assertLessEqual(8, self.num_imgs)
         self.assert_no_active_block_jobs()

         # Stream from node0 into node2
-        result = self.vm.qmp('block-stream', device='node2', job_id='node2')
+        result = self.vm.qmp('block-stream', device='node2', base_node='node0', job_id='node2')
         self.assert_qmp(result, 'return', {})

         # Commit from the active layer into node3
@@ -348,6 +350,38 @@ class TestParallelOps(iotests.QMPTestCase):

         self.assert_no_active_block_jobs()

+    # This is similar to test_stream_commit_1 but both jobs are slowed
+    # down so they can run in parallel for a little while.
+    def test_stream_commit_2(self):
+        self.assertLessEqual(8, self.num_imgs)
+        self.assert_no_active_block_jobs()
+
+        # Stream from node0 into node4
+        result = self.vm.qmp('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)
+        self.assert_qmp(result, 'return', {})
+
+        # Commit from the active layer into node5
+        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
+        self.assert_qmp(result, 'return', {})
+
+        # Wait for all jobs to be finished.
+        pending_jobs = ['node4', 'drive0']
+        while len(pending_jobs) > 0:
+            for event in self.vm.get_qmp_events(wait=True):
+                if event['event'] == 'BLOCK_JOB_COMPLETED':
+                    node_name = self.dictpath(event, 'data/device')
+                    self.assertTrue(node_name in pending_jobs)
+                    self.assert_qmp_absent(event, 'data/error')
+                    pending_jobs.remove(node_name)
+                if event['event'] == 'BLOCK_JOB_READY':
+                    self.assert_qmp(event, 'data/device', 'drive0')
+                    self.assert_qmp(event, 'data/type', 'commit')
+                    self.assert_qmp_absent(event, 'data/error')
+                    self.assertTrue('drive0' in pending_jobs)
+                    self.vm.qmp('block-job-complete', device='drive0')
+
+        self.assert_no_active_block_jobs()
+
     # Test the base_node parameter
     def test_stream_base_node_name(self):
         self.assert_no_active_block_jobs()
diff --git a/tests/qemu-iotests/030.out b/tests/qemu-iotests/030.out
index 391c8573ca..42314e9c00 100644
--- a/tests/qemu-iotests/030.out
+++ b/tests/qemu-iotests/030.out
@@ -1,5 +1,5 @@
-.......................
+........................
 ----------------------------------------------------------------------
-Ran 23 tests
+Ran 24 tests

 OK

From 21794244d4e9c5f81132e4574e5bd10ef5066715 Mon Sep 17 00:00:00 2001
From: Stefan Hajnoczi
Date: Mon, 5 Mar 2018 15:59:26 +0000
Subject: [PATCH 5/5] qemu-iotests: fix 203 migration completion race

There is a race between the test's 'query-migrate' QMP command after
the QMP 'STOP' event and completing the migration:

The test case invokes 'query-migrate' upon receiving 'STOP'. At this
point the migration thread may still be in the process of completing.
Therefore 'query-migrate' can return 'status': 'active' for a brief
window of time instead of 'status': 'completed'. This results in
qemu-iotests 203 hanging.

Solve the race by enabling the 'events' migration capability, which
causes QEMU to emit migration-specific QMP events that do not suffer
from this race condition. Wait for the QMP 'MIGRATION' event with
'status': 'completed'.

Reported-by: Max Reitz
Signed-off-by: Stefan Hajnoczi
Message-id: 20180305155926.25858-1-stefanha@redhat.com
Reviewed-by: Max Reitz
Signed-off-by: Max Reitz
---
 tests/qemu-iotests/203     | 15 +++++++++++----
 tests/qemu-iotests/203.out |  5 +++++
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/tests/qemu-iotests/203 b/tests/qemu-iotests/203
index 2c811917d8..4874a1a0d8 100755
--- a/tests/qemu-iotests/203
+++ b/tests/qemu-iotests/203
@@ -49,11 +49,18 @@ with iotests.FilePath('disk0.img') as disk0_img_path, \
                        node_name='drive1-node', iothread='iothread0',
                        force=True))

+    iotests.log('Enabling migration QMP events...')
+    iotests.log(vm.qmp('migrate-set-capabilities', capabilities=[
+        {
+            'capability': 'events',
+            'state': True
+        }
+    ]))
+
     iotests.log('Starting migration...')
     iotests.log(vm.qmp('migrate', uri='exec:cat >/dev/null'))
     while True:
-        vm.get_qmp_event(wait=60.0)
-        result = vm.qmp('query-migrate')
-        status = result.get('return', {}).get('status', None)
-        if status == 'completed':
+        event = vm.event_wait('MIGRATION')
+        iotests.log(event, filters=[iotests.filter_qmp_event])
+        if event['data']['status'] == 'completed':
             break
diff --git a/tests/qemu-iotests/203.out b/tests/qemu-iotests/203.out
index 3f1ff900e4..1a11f0975c 100644
--- a/tests/qemu-iotests/203.out
+++ b/tests/qemu-iotests/203.out
@@ -2,5 +2,10 @@ Launching VM...
 Setting IOThreads...
 {u'return': {}}
 {u'return': {}}
+Enabling migration QMP events...
+{u'return': {}}
 Starting migration...
 {u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'setup'}, u'event': u'MIGRATION'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'active'}, u'event': u'MIGRATION'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'completed'}, u'event': u'MIGRATION'}
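
A note on the pattern used in the last patch: once the 'events' migration
capability is enabled, every change of migration status is delivered as a QMP
'MIGRATION' event, so a test can simply consume events until it reaches a
terminal status instead of polling 'query-migrate' and racing with the
migration thread. A minimal sketch of that pattern follows, assuming the
iotests VM wrapper used above (vm.qmp() and vm.event_wait()); the helper name
wait_for_migration() is illustrative and not part of the series:

    # Illustrative sketch: wait for migration to finish via MIGRATION events
    # instead of polling 'query-migrate' (which may briefly report 'active').
    def wait_for_migration(vm):
        # The capability must be set before 'migrate', otherwise QEMU does
        # not emit MIGRATION events for this migration.
        vm.qmp('migrate-set-capabilities', capabilities=[
            {'capability': 'events', 'state': True}
        ])
        vm.qmp('migrate', uri='exec:cat >/dev/null')
        while True:
            event = vm.event_wait('MIGRATION')
            status = event['data']['status']
            # 'completed' and 'failed' are terminal; anything else means the
            # migration is still in progress, so keep waiting.
            if status in ('completed', 'failed'):
                return status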