#!/usr/bin/env python3
# group: rw backing
#
# Tests for image streaming.
#
# Copyright (C) 2012 IBM Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import time
import os
import iotests
import unittest
from iotests import qemu_img, qemu_io

backing_img = os.path.join(iotests.test_dir, 'backing.img')
mid_img = os.path.join(iotests.test_dir, 'mid.img')
test_img = os.path.join(iotests.test_dir, 'test.img')
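
# TestSingleDrive builds a three-image chain from these files:
#     backing.img (raw) <- mid.img <- test.img (attached as drive0)
# Later classes reuse backing_img and test_img for their own chains.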

class TestSingleDrive(iotests.QMPTestCase):
    image_len = 1 * 1024 * 1024 # MB

    def setUp(self):
        iotests.create_image(backing_img, TestSingleDrive.image_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', mid_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % mid_img,
                 '-F', iotests.imgfmt, test_img)
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 524288 512', mid_img)
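        # blkdebug (with an empty rule file) wraps test_img so that
        # pause_drive()/resume_drive() can suspend and resume its I/O.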
|  | self.vm = iotests.VM().add_drive("blkdebug::" + test_img, | 
|  | "backing.node-name=mid," + | 
|  | "backing.backing.node-name=base") | 
|  | self.vm.launch() | 
|  |  | 
|  | def tearDown(self): | 
|  | self.vm.shutdown() | 
|  | os.remove(test_img) | 
|  | os.remove(mid_img) | 
|  | os.remove(backing_img) | 
|  |  | 
|  | def test_stream(self): | 
|  | self.assert_no_active_block_jobs() | 
|  |  | 
|  | self.vm.cmd('block-stream', device='drive0') | 
|  |  | 
|  | self.wait_until_completed() | 
|  |  | 
|  | self.assert_no_active_block_jobs() | 
|  | self.vm.shutdown() | 
|  |  | 
|  | self.assertEqual( | 
|  | qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout, | 
|  | qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout, | 
|  | 'image file map does not match backing file after streaming') | 
|  |  | 
|  | def test_stream_intermediate(self): | 
|  | self.assert_no_active_block_jobs() | 
|  |  | 
|  | self.assertNotEqual( | 
|  | qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img).stdout, | 
|  | qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img).stdout, | 
|  | 'image file map matches backing file before streaming') | 
|  |  | 
|  | self.vm.cmd('block-stream', device='mid', job_id='stream-mid') | 
|  |  | 
|  | self.wait_until_completed(drive='stream-mid') | 
|  |  | 
|  | self.assert_no_active_block_jobs() | 
|  | self.vm.shutdown() | 
|  |  | 
|  | self.assertEqual( | 
|  | qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout, | 
|  | qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout, | 
|  | 'image file map does not match backing file after streaming') | 
|  |  | 
|  | def test_stream_pause(self): | 
|  | self.assert_no_active_block_jobs() | 
|  |  | 
|  | self.vm.pause_drive('drive0') | 
|  | self.vm.cmd('block-stream', device='drive0') | 
|  |  | 
|  | self.pause_job('drive0', wait=False) | 
|  | self.vm.resume_drive('drive0') | 
|  | self.pause_wait('drive0') | 
|  |  | 
|  | result = self.vm.qmp('query-block-jobs') | 
|  | offset = self.dictpath(result, 'return[0]/offset') | 
|  |  | 
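        # If the job is really paused, its offset must not change while
        # we sleep.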
        time.sleep(0.5)
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/offset', offset)

        self.vm.cmd('block-job-resume', device='drive0')

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', 'raw', '-c', 'map', backing_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')

    def test_stream_no_op(self):
        self.assert_no_active_block_jobs()

        # The image map is empty before the operation
        empty_map = qemu_io(
            '-f', iotests.imgfmt, '-rU', '-c', 'map', test_img).stdout

        # This is a no-op: no data should ever be copied from the base image
        self.vm.cmd('block-stream', device='drive0', base=mid_img)

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            empty_map, 'image file map changed after a no-op')

    def test_stream_partial(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', base=backing_img)

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img).stdout,
            'image file map does not match backing file after streaming')

    def test_device_not_found(self):
        result = self.vm.qmp('block-stream', device='nonexistent')
        self.assert_qmp(result, 'error/desc',
            'Cannot find device=\'nonexistent\' nor node-name=\'nonexistent\'')

    def test_job_id_missing(self):
        result = self.vm.qmp('block-stream', device='mid')
        self.assert_qmp(result, 'error/desc', "Invalid job ID ''")

    def test_read_only(self):
        # Create a new file that we can attach (we need a read-only top)
        with iotests.FilePath('ro-top.img') as ro_top_path:
            qemu_img('create', '-f', iotests.imgfmt, ro_top_path,
                     str(self.image_len))

            self.vm.cmd('blockdev-add',
                        node_name='ro-top',
                        driver=iotests.imgfmt,
                        read_only=True,
                        file={
                            'driver': 'file',
                            'filename': ro_top_path,
                            'read-only': True
                        },
                        backing='mid')

            result = self.vm.qmp('block-stream', job_id='stream',
                                 device='ro-top', base_node='base')
            self.assert_qmp(result, 'error/desc', 'Block node is read-only')

            self.vm.cmd('blockdev-del', node_name='ro-top')


class TestParallelOps(iotests.QMPTestCase):
    num_ops = 4 # Number of parallel block-stream operations
    num_imgs = num_ops * 2 + 1
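    # With num_ops = 4 this gives a 9-image chain; node0 is the base
    # and node8 is the active layer attached as drive0.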
    image_len = num_ops * 4 * 1024 * 1024
    imgs = []

    def setUp(self):
        opts = []
        self.imgs = []

        # Initialize file names and command-line options
        for i in range(self.num_imgs):
            img_depth = self.num_imgs - i - 1
            opts.append("backing." * img_depth + "node-name=node%d" % i)
            self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))

        # Create all images
        iotests.create_image(self.imgs[0], self.image_len)
        for i in range(1, self.num_imgs):
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % self.imgs[i-1],
                     '-F', 'raw' if i == 1 else iotests.imgfmt, self.imgs[i])

        # Put data into the images we are copying data from
        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
        for i in range(len(odd_img_indexes)):
            # Alternate between 2MB and 4MB.
            # This way jobs will not finish in the same order they were created
            num_mb = 2 + 2 * (i % 2)
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0xFF %dM %dM' % (i * 4, num_mb),
                    self.imgs[odd_img_indexes[i]])

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(self.imgs[-1], ','.join(opts))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in self.imgs:
            os.remove(img)

    # Test that it's possible to run several block-stream operations
    # in parallel in the same snapshot chain
    @unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
    def test_stream_parallel(self):
        self.assert_no_active_block_jobs()

        # Check that the maps don't match before the streaming operations
        for i in range(2, self.num_imgs, 2):
            self.assertNotEqual(
                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]).stdout,
                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]).stdout,
                'image file map matches backing file before streaming')

        # Create all streaming jobs
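        # Each job is throttled to 1 KB/s so that all of them are still
        # running by the time the last one has been created.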
        pending_jobs = []
        for i in range(2, self.num_imgs, 2):
            node_name = 'node%d' % i
            job_id = 'stream-%s' % node_name
            pending_jobs.append(job_id)
            self.vm.cmd('block-stream', device=node_name,
                        job_id=job_id, bottom=f'node{i-1}',
                        speed=1024)

        # Do this in reverse: After unthrottling them, some jobs may finish
        # before we have unthrottled all of them.  This will drain their
        # subgraph, and this will make jobs above them advance (despite those
        # jobs on top being throttled).  In the worst case, all jobs below the
        # top one are finished before we can unthrottle it, and this makes it
        # advance so far that it completes before we can unthrottle it - which
        # results in an error.
        # Starting from the top (i.e. in reverse) does not have this problem:
        # When a job finishes, the ones below it are not advanced.
        for job in reversed(pending_jobs):
            self.vm.cmd('block-job-set-speed', device=job, speed=0)

        # Wait for all jobs to be finished.
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    job_id = self.dictpath(event, 'data/device')
                    self.assertTrue(job_id in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(job_id)

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # Check that all maps match now
        for i in range(2, self.num_imgs, 2):
            self.assertEqual(
                qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]).stdout,
                qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]).stdout,
                'image file map does not match backing file after streaming')

    # Test that it's not possible to perform two block-stream
    # operations if there are nodes involved in both.
    def test_overlapping_1(self):
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        self.vm.cmd('block-stream', device='node4',
                    job_id='stream-node4', base=self.imgs[1],
                    filter_node_name='stream-filter', speed=1024*1024)

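        # The stream job inserts an implicit filter node ('stream-filter')
        # above node4; jobs that would touch that node or any other node
        # in the stream job's chain must fail.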
        result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2])
        self.assert_qmp(result, 'error/desc',
            "Node 'stream-filter' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2])
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4-v2')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is busy: block device is in use by block job: stream")

        # block-commit should also fail if it touches nodes used by the stream job
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4')
        self.assert_qmp(result, 'error/desc',
            "Node 'stream-filter' is busy: block device is in use by block job: stream")

        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1')
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: stream")

        # This fails because it needs to modify the backing string in node2, which is blocked
        result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0')
        self.assert_qmp(result, 'error/desc',
            "Node 'node2' is busy: block device is in use by block job: stream")

        self.vm.cmd('block-job-set-speed', device='stream-node4', speed=0)

        self.wait_until_completed(drive='stream-node4')
        self.assert_no_active_block_jobs()

    # Similar to test_overlapping_1, but with block-commit
    # blocking the other jobs
    def test_overlapping_2(self):
        self.assertLessEqual(9, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        self.vm.cmd('block-commit', device='drive0', top=self.imgs[5], base=self.imgs[3], job_id='commit-node3', speed=1024*1024)

        result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3')
        self.assert_qmp(result, 'error/desc',
            "Node 'node3' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[2], job_id='stream-node6')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], job_id='stream-node4')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is busy: block device is in use by block job: commit")

        result = self.vm.qmp('block-stream', device='node6', base=self.imgs[4], job_id='stream-node6-v2')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        # This fails because block-commit currently blocks the active layer even if it's not used
        result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0')
        self.assert_qmp(result, 'error/desc',
            "Node 'drive0' is busy: block device is in use by block job: commit")

        self.vm.cmd('block-job-set-speed', device='commit-node3', speed=0)

        self.wait_until_completed(drive='commit-node3')

    # Similar to test_overlapping_2, but here block-commit doesn't use the
    # 'top' parameter.  Internally this uses a mirror block job, hence the
    # separate test case.
    def test_overlapping_3(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Set a speed limit to make sure that this job blocks the rest
        self.vm.cmd('block-commit', device='drive0', base=self.imgs[3], job_id='commit-drive0', speed=1024*1024)

        result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6')
        self.assert_qmp(result, 'error/desc',
            "Node 'node5' is busy: block device is in use by block job: commit")

        self.vm.cmd('block-job-set-speed', device='commit-drive0', speed=0)

        event = self.vm.event_wait(name='BLOCK_JOB_READY')
        self.assert_qmp(event, 'data/device', 'commit-drive0')
        self.assert_qmp(event, 'data/type', 'commit')
        self.assert_qmp_absent(event, 'data/error')

        self.vm.cmd('block-job-complete', device='commit-drive0')

        self.wait_until_completed(drive='commit-drive0')

    # In this case the base node of the stream job is the same as the
    # top node of the commit job.  Since this results in the commit filter
    # node being part of the stream chain, this is not allowed.
    def test_overlapping_4(self):
        self.assert_no_active_block_jobs()

        # Commit from node2 into node0
        self.vm.cmd('block-commit', device='drive0',
                    top=self.imgs[2], base=self.imgs[0],
                    filter_node_name='commit-filter', speed=1024*1024)

        # Stream from node2 into node4
        result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4')
        self.assert_qmp(result, 'error/desc',
            "Cannot freeze 'backing' link to 'commit-filter'")

        self.vm.cmd('block-job-set-speed', device='drive0', speed=0)

        self.wait_until_completed()
        self.assert_no_active_block_jobs()

    # In this case the base node of the stream job is the commit job's
    # filter node.  stream does not have a real dependency on its base
    # node, so even though commit removes it when it is done, there is
    # no conflict.
    def test_overlapping_5(self):
        self.assert_no_active_block_jobs()

        # Commit from node2 into node0
        self.vm.cmd('block-commit', device='drive0',
                    top_node='node2', base_node='node0',
                    filter_node_name='commit-filter', speed=1024*1024)

        # Stream from node2 into node4
        self.vm.cmd('block-stream', device='node4',
                    base_node='commit-filter', job_id='node4')

        self.vm.cmd('block-job-set-speed', device='drive0', speed=0)

        self.vm.run_job(job='drive0', auto_dismiss=True)
        self.vm.run_job(job='node4', auto_dismiss=True)
        self.assert_no_active_block_jobs()

        # Assert that node0 is now the backing node of node4
        result = self.vm.qmp('query-named-block-nodes')
        node4 = next(node for node in result['return'] if node['node-name'] == 'node4')
        self.assertEqual(node4['image']['backing-image']['filename'], self.imgs[0])

    # Test a block-stream and a block-commit job in parallel
    # Here the stream job is supposed to finish quickly in order to reproduce
    # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507
    def test_stream_commit_1(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node2
        self.vm.cmd('block-stream', device='node2', base_node='node0', job_id='node2')

        # Commit from the active layer into node3
        self.vm.cmd('block-commit', device='drive0', base=self.imgs[3])

        # Wait for all jobs to be finished.
        pending_jobs = ['node2', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # This is similar to test_stream_commit_1 but both jobs are slowed
    # down so they can run in parallel for a little while.
    def test_stream_commit_2(self):
        self.assertLessEqual(8, self.num_imgs)
        self.assert_no_active_block_jobs()

        # Stream from node0 into node4
        self.vm.cmd('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024)

        # Commit from the active layer into node5
        self.vm.cmd('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)

        for job in ['drive0', 'node4']:
            self.vm.cmd('block-job-set-speed', device=job, speed=0)

        # Wait for all jobs to be finished.
        pending_jobs = ['node4', 'drive0']
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    node_name = self.dictpath(event, 'data/device')
                    self.assertTrue(node_name in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(node_name)
                if event['event'] == 'BLOCK_JOB_READY':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/type', 'commit')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assertTrue('drive0' in pending_jobs)
                    self.vm.qmp('block-job-complete', device='drive0')

        self.assert_no_active_block_jobs()

    # Test the base_node parameter
    def test_stream_base_node_name(self):
        self.assert_no_active_block_jobs()

        self.assertNotEqual(
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]).stdout,
            'image file map matches backing file before streaming')

        # Error: the base node does not exist
        result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            'Cannot find device=\'\' nor node-name=\'none\'')

        # Error: the base node is not a backing file of the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node6' is not a backing image of 'node4'")

        # Error: the base node is the same as the top node
        result = self.vm.qmp('block-stream', device='node4', base_node='node4', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "Node 'node4' is not a backing image of 'node4'")

        # Error: cannot specify 'base' and 'base-node' at the same time
        result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], base_node='node2', job_id='stream')
        self.assert_qmp(result, 'error/desc',
            "'base' and 'base-node' cannot be specified at the same time")

        # Success: the base node is a backing file of the top node
        self.vm.cmd('block-stream', device='node4', base_node='node2', job_id='stream')

        self.wait_until_completed(drive='stream')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]).stdout,
            'image file map does not match backing file after streaming')

class TestQuorum(iotests.QMPTestCase):
    num_children = 3
    children = []
    backing = []

    @iotests.skip_if_unsupported(['quorum'])
    def setUp(self):
        opts = ['driver=quorum', 'vote-threshold=2']
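        # A quorum node with three children; with vote-threshold=2, reads
        # succeed as long as at least two children agree.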

        # Initialize file names and command-line options
        for i in range(self.num_children):
            child_img = os.path.join(iotests.test_dir, 'img-%d.img' % i)
            backing_img = os.path.join(iotests.test_dir, 'backing-%d.img' % i)
            self.children.append(child_img)
            self.backing.append(backing_img)
            qemu_img('create', '-f', iotests.imgfmt, backing_img, '1M')
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0x55 0 1024', backing_img)
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % backing_img,
                     '-F', iotests.imgfmt, child_img)
            opts.append("children.%d.file.filename=%s" % (i, child_img))
            opts.append("children.%d.node-name=node%d" % (i, i))

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(path=None, opts=','.join(opts))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in self.children:
            os.remove(img)
        for img in self.backing:
            os.remove(img)

    def test_stream_quorum(self):
        self.assertNotEqual(
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]).stdout,
            'image file map matches backing file before streaming')

        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='node0', job_id='stream-node0')

        self.wait_until_completed(drive='stream-node0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]).stdout,
            qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]).stdout,
            'image file map does not match backing file after streaming')

class TestSmallerBackingFile(iotests.QMPTestCase):
    backing_len = 1 * 1024 * 1024 # MB
    image_len = 2 * backing_len
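    # The top image is twice as long as its backing file, so streaming
    # has to cope with reads past the end of the backing file.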

    def setUp(self):
        iotests.create_image(backing_img, self.backing_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img, str(self.image_len))
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    # If this hangs, then you are missing a fix to complete streaming when the
    # end of the backing file is reached.
    def test_stream(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestErrors(iotests.QMPTestCase):
    image_len = 2 * 1024 * 1024 # MB

    # this should match STREAM_BUFFER_SIZE/512 in block/stream.c
    STREAM_BUFFER_SIZE = 512 * 1024

    def create_blkdebug_file(self, name, event, errno):
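        # Generate a blkdebug config that injects the given errno exactly
        # once ('once = "on"'), on the read of the first sector past
        # STREAM_BUFFER_SIZE.  The [set-state] sections toggle between
        # states 1 and 2 on each event, so the rule is only armed on every
        # other read and a retried request can succeed.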
        file = open(name, 'w')
        file.write('''
[inject-error]
state = "1"
event = "%s"
errno = "%d"
immediately = "off"
once = "on"
sector = "%d"

[set-state]
state = "1"
event = "%s"
new_state = "2"

[set-state]
state = "2"
event = "%s"
new_state = "1"
''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
        file.close()

class TestEIO(TestErrors):
    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 5)
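        # errno 5 is EIO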
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_report(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_ignore(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='ignore')

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Job finished too quickly
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_stop(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='stop')

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'failed')

                    self.vm.cmd('block-job-resume', device='drive0')

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_enospc(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='enospc')

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestENOSPC(TestErrors):
    def setUp(self):
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
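        # errno 28 is ENOSPC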
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_enospc(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0', on_error='enospc')

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True

                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    self.vm.cmd('block-job-resume', device='drive0')

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestStreamStop(iotests.QMPTestCase):
    image_len = 8 * 1024 * 1024 * 1024 # GB

    def setUp(self):
        qemu_img('create', backing_img, str(TestStreamStop.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    def test_stream_stop(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        time.sleep(0.1)
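        # While the drive is paused the job cannot make progress, so the
        # only events that may have arrived are JOB_STATUS_CHANGE ones.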
        events = self.vm.get_qmp_events(wait=False)
        for e in events:
            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(e, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)

class TestSetSpeed(iotests.QMPTestCase):
    image_len = 80 * 1024 * 1024 # MB

    def setUp(self):
        qemu_img('create', backing_img, str(TestSetSpeed.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    # This is a short performance test which is not run by default.
    # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
    def perf_test_throughput(self):
        self.assert_no_active_block_jobs()

        self.vm.cmd('block-stream', device='drive0')

        self.vm.cmd('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

    def test_set_speed(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        # Default speed is 0
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 0)

        self.vm.cmd('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)

        # Ensure the speed we set was accepted
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 8 * 1024 * 1024)

        self.cancel_and_wait(resume=True)
        self.vm.pause_drive('drive0')

        # Check setting speed in block-stream works
        self.vm.cmd('block-stream', device='drive0', speed=4 * 1024 * 1024)

        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 4 * 1024 * 1024)

        self.cancel_and_wait(resume=True)

    def test_set_speed_invalid(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        self.vm.cmd('block-stream', device='drive0')

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.cancel_and_wait(resume=True)

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'qed'],
                 supported_protocols=['file'])