| #!/usr/bin/env python3 |
| # group: rw |
| # |
| # Tests for active mirroring |
| # |
| # Copyright (C) 2018 Red Hat, Inc. |
| # |
| # This program is free software; you can redistribute it and/or modify |
| # it under the terms of the GNU General Public License as published by |
| # the Free Software Foundation; either version 2 of the License, or |
| # (at your option) any later version. |
| # |
| # This program is distributed in the hope that it will be useful, |
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| # GNU General Public License for more details. |
| # |
| # You should have received a copy of the GNU General Public License |
| # along with this program. If not, see <http://www.gnu.org/licenses/>. |
| # |
| |
| import math |
| import os |
| import subprocess |
| import time |
| from typing import List, Optional |
| import iotests |
| from iotests import qemu_img |
| |
| source_img = os.path.join(iotests.test_dir, 'source.' + iotests.imgfmt) |
| target_img = os.path.join(iotests.test_dir, 'target.' + iotests.imgfmt) |
| |
| class TestActiveMirror(iotests.QMPTestCase): |
    image_len = 128 * 1024 * 1024  # bytes (128 MiB)
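    # Tests set this to False once they know that no guest writes can still be
    # in flight; only then does tearDown() compare source and target contents.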
| potential_writes_in_flight = True |
| |
| def setUp(self): |
| qemu_img('create', '-f', iotests.imgfmt, source_img, '128M') |
| qemu_img('create', '-f', iotests.imgfmt, target_img, '128M') |
| |
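        # Wrap the source file in blkdebug so that the tests can suspend and
        # resume individual write requests (see the break/resume/wait_break
        # qemu-io commands in testIntersectingActiveIO below).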
| blk_source = {'id': 'source', |
| 'if': 'none', |
| 'node-name': 'source-node', |
| 'driver': iotests.imgfmt, |
| 'file': {'driver': 'blkdebug', |
| 'image': {'driver': 'file', |
| 'filename': source_img}}} |
| |
| blk_target = {'node-name': 'target-node', |
| 'driver': iotests.imgfmt, |
| 'file': {'driver': 'file', |
| 'filename': target_img}} |
| |
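        # The source is attached to a virtio-blk device via -drive, so qemu-io
        # commands against 'source' go through its BlockBackend like guest
        # writes; the target is added as a bare blockdev that only serves as
        # the mirror destination.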
| self.vm = iotests.VM() |
| self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source)) |
| self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target)) |
| self.vm.add_device('virtio-blk,id=vblk,drive=source') |
| self.vm.launch() |
| |
| def tearDown(self): |
| self.vm.shutdown() |
| |
| if not self.potential_writes_in_flight: |
| self.assertTrue(iotests.compare_images(source_img, target_img), |
| 'mirror target does not match source') |
| |
| os.remove(source_img) |
| os.remove(target_img) |
| |
| def doActiveIO(self, sync_source_and_target): |
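        # Common helper for the testActiveIO* cases: flood the source with
        # writes before, during, and after a write-blocking mirror job.  If
        # sync_source_and_target is True, flush before completing the job so
        # that tearDown() can compare source and target.
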
| # Fill the source image |
| self.vm.hmp_qemu_io('source', |
                            'write -P 1 0 %i' % self.image_len)
| |
| # Start some background requests |
| for offset in range(1 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024): |
| self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset) |
| for offset in range(2 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024): |
| self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset) |
| |
| # Start the block job |
| result = self.vm.qmp('blockdev-mirror', |
| job_id='mirror', |
| filter_node_name='mirror-node', |
| device='source-node', |
| target='target-node', |
| sync='full', |
| copy_mode='write-blocking') |
| self.assert_qmp(result, 'return', {}) |
| |
| # Start some more requests |
| for offset in range(3 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024): |
| self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset) |
| for offset in range(4 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024): |
| self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset) |
| |
| # Wait for the READY event |
| self.wait_ready(drive='mirror') |
| |
| # Now start some final requests; all of these (which land on |
| # the source) should be settled using the active mechanism. |
| # The mirror code itself asserts that the source BDS's dirty |
| # bitmap will stay clean between READY and COMPLETED. |
| for offset in range(5 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024): |
| self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset) |
| for offset in range(6 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024): |
| self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset) |
| |
| if sync_source_and_target: |
| # If source and target should be in sync after the mirror, |
| # we have to flush before completion |
| self.vm.hmp_qemu_io('source', 'aio_flush') |
| self.potential_writes_in_flight = False |
| |
| self.complete_and_wait(drive='mirror', wait_ready=False) |
| |
| def testActiveIO(self): |
| self.doActiveIO(False) |
| |
| def testActiveIOFlushed(self): |
| self.doActiveIO(True) |
| |
| def testUnalignedActiveIO(self): |
| # Fill the source image |
        self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')
| |
| # Start the block job (very slowly) |
| result = self.vm.qmp('blockdev-mirror', |
| job_id='mirror', |
| filter_node_name='mirror-node', |
| device='source-node', |
| target='target-node', |
| sync='full', |
| copy_mode='write-blocking', |
| buf_size=(1048576 // 4), |
| speed=1) |
| self.assert_qmp(result, 'return', {}) |
| |
| # Start an unaligned request to a dirty area |
        self.vm.hmp_qemu_io('source', 'write -P 2 %i 1' % (1048576 + 42))
| |
| # Let the job finish |
| result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0) |
| self.assert_qmp(result, 'return', {}) |
| self.complete_and_wait(drive='mirror') |
| |
| self.potential_writes_in_flight = False |
| |
| def testIntersectingActiveIO(self): |
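        # Regression test for a deadlock between intersecting guest writes in
        # write-blocking mode; see the step-by-step comments below.
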
| # Fill the source image |
        self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')
| |
| # Start the block job (very slowly) |
| result = self.vm.qmp('blockdev-mirror', |
| job_id='mirror', |
| filter_node_name='mirror-node', |
| device='source-node', |
| target='target-node', |
| sync='full', |
| copy_mode='write-blocking', |
                             speed=1)
        self.assert_qmp(result, 'return', {})

| self.vm.hmp_qemu_io('source', 'break write_aio A') |
| self.vm.hmp_qemu_io('source', 'aio_write 0 1M') # 1 |
| self.vm.hmp_qemu_io('source', 'wait_break A') |
| self.vm.hmp_qemu_io('source', 'aio_write 0 2M') # 2 |
| self.vm.hmp_qemu_io('source', 'aio_write 0 2M') # 3 |
| |
| # Now 2 and 3 are in mirror_wait_on_conflicts, waiting for 1 |
| |
| self.vm.hmp_qemu_io('source', 'break write_aio B') |
| self.vm.hmp_qemu_io('source', 'aio_write 1M 2M') # 4 |
| self.vm.hmp_qemu_io('source', 'wait_break B') |
| |
        # 4 does not wait for 2 and 3, because they have not yet set their
        # bits in in_flight_bitmap.  So nothing prevents 4 from proceeding
        # except our break point B.
| |
| self.vm.hmp_qemu_io('source', 'resume A') |
| |
        # Now that 1 has been resumed, 2 and 3 move on to the next iteration
        # of the while loop in mirror_wait_on_conflicts().  They do not exit,
        # as the bitmap is still dirty due to request 4.
        # In the past, 2 and 3 would then wait for each other, producing a
        # deadlock.  This is fixed now, and they wait for request 4 instead.
| |
| self.vm.hmp_qemu_io('source', 'resume B') |
| |
        # After resuming 4, one of 2 and 3 goes first and sets its bits in
        # in_flight_bitmap, so the other waits for it.
| |
| result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0) |
| self.assert_qmp(result, 'return', {}) |
| self.complete_and_wait(drive='mirror') |
| |
| self.potential_writes_in_flight = False |
| |
| |
| class TestThrottledWithNbdExportBase(iotests.QMPTestCase): |
    image_len = 128 * 1024 * 1024  # bytes (128 MiB)
| iops: Optional[int] = None |
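    # External qemu-io/qemu-img processes started by the tests; tearDown()
    # steps the virtual clock until all of them have exited.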
| background_processes: List['subprocess.Popen[str]'] = [] |
| |
| def setUp(self): |
| # Must be set by subclasses |
| self.assertIsNotNone(self.iops) |
| |
| qemu_img('create', '-f', iotests.imgfmt, source_img, '128M') |
| qemu_img('create', '-f', iotests.imgfmt, target_img, '128M') |
| |
| self.vm = iotests.VM() |
| self.vm.launch() |
| |
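        # Limit the source node to self.iops operations per second so that
        # request completion is driven by the virtual clock (clock_step).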
| result = self.vm.qmp('object-add', **{ |
| 'qom-type': 'throttle-group', |
| 'id': 'thrgr', |
| 'limits': { |
| 'iops-total': self.iops, |
| 'iops-total-max': self.iops |
| } |
| }) |
| self.assert_qmp(result, 'return', {}) |
| |
| result = self.vm.qmp('blockdev-add', **{ |
| 'node-name': 'source-node', |
| 'driver': 'throttle', |
| 'throttle-group': 'thrgr', |
| 'file': { |
| 'driver': iotests.imgfmt, |
| 'file': { |
| 'driver': 'file', |
| 'filename': source_img |
| } |
| } |
| }) |
| self.assert_qmp(result, 'return', {}) |
| |
| result = self.vm.qmp('blockdev-add', **{ |
| 'node-name': 'target-node', |
| 'driver': iotests.imgfmt, |
| 'file': { |
| 'driver': 'file', |
| 'filename': target_img |
| } |
| }) |
| self.assert_qmp(result, 'return', {}) |
| |
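        # Export the throttled source node over an NBD Unix socket so that
        # external qemu-io and qemu-img processes can issue requests to it.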
| self.nbd_sock = iotests.file_path('nbd.sock', |
| base_dir=iotests.sock_dir) |
| self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}' |
| |
| result = self.vm.qmp('nbd-server-start', addr={ |
| 'type': 'unix', |
| 'data': { |
| 'path': self.nbd_sock |
| } |
| }) |
| self.assert_qmp(result, 'return', {}) |
| |
| result = self.vm.qmp('block-export-add', id='exp0', type='nbd', |
| node_name='source-node', writable=True) |
| self.assert_qmp(result, 'return', {}) |
| |
| def tearDown(self): |
| # Wait for background requests to settle |
| try: |
| while True: |
| p = self.background_processes.pop() |
| while True: |
| try: |
| p.wait(timeout=0.0) |
| break |
| except subprocess.TimeoutExpired: |
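                        # clock_step takes nanoseconds; advance the virtual
                        # clock by one second so throttled requests can finish.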
| self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') |
| except IndexError: |
| pass |
| |
| # Cancel ongoing block jobs |
| for job in self.vm.qmp('query-jobs')['return']: |
| self.vm.qmp('block-job-cancel', device=job['id'], force=True) |
| |
| while True: |
| self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') |
| if len(self.vm.qmp('query-jobs')['return']) == 0: |
| break |
| |
| self.vm.shutdown() |
| os.remove(source_img) |
| os.remove(target_img) |
| |
| |
| class TestLowThrottledWithNbdExport(TestThrottledWithNbdExportBase): |
| iops = 16 |
| |
| def testUnderLoad(self): |
| ''' |
| Throttle the source node, then issue a whole bunch of external requests |
| while the mirror job (in write-blocking mode) is running. We want to |
| see background requests being issued even while the source is under |
        full load from active writes, so that progress can be made towards
        READY.
| ''' |
| |
        # Fill the first half of the source image; do not fill the second
        # half, which is where the active requests will land.  This ensures
        # that active mirroring itself will not directly contribute to the
        # job's progress (when the job was started, those areas were not meant
        # to be copied, so active mirroring only keeps us from losing
        # progress, without making any).
| self.vm.hmp_qemu_io('source-node', |
| f'aio_write -P 1 0 {self.image_len // 2}') |
| self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') |
| |
| # Launch the mirror job |
| mirror_buf_size = 65536 |
| result = self.vm.qmp('blockdev-mirror', |
| job_id='mirror', |
| filter_node_name='mirror-node', |
| device='source-node', |
| target='target-node', |
| sync='full', |
| copy_mode='write-blocking', |
| buf_size=mirror_buf_size) |
| self.assert_qmp(result, 'return', {}) |
| |
| # We create the external requests via qemu-io processes on the NBD |
| # server. Have their offset start in the middle of the image so they |
| # do not overlap with the background requests (which start from the |
| # beginning). |
| active_request_offset = self.image_len // 2 |
| active_request_len = 4096 |
| |
| # Create enough requests to saturate the node for 5 seconds |
| for _ in range(0, 5 * self.iops): |
| req = f'write -P 42 {active_request_offset} {active_request_len}' |
| active_request_offset += active_request_len |
| p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req) |
| self.background_processes += [p] |
| |
        # Now advance the clock one I/O operation at a time for 4 seconds
        # (i.e. one second less than the requests queued above will take).
        # We expect the mirror job to issue background operations here, even
        # though active requests are still in flight.  The active requests
        # take precedence, however, because they were issued earlier than the
        # mirror's background requests.
        # Once the active requests started above are done (i.e. after 5
        # virtual seconds), we expect those background requests to be worked
        # on.  We only advance 4 seconds here to avoid race conditions.
| for _ in range(0, 4 * self.iops): |
| step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops) |
| self.vm.qtest(f'clock_step {step}') |
| |
| # Note how much remains to be done until the mirror job is finished |
| job_status = self.vm.qmp('query-jobs')['return'][0] |
| start_remaining = job_status['total-progress'] - \ |
| job_status['current-progress'] |
| |
        # Create a whole bunch more active requests
| for _ in range(0, 10 * self.iops): |
| req = f'write -P 42 {active_request_offset} {active_request_len}' |
| active_request_offset += active_request_len |
| p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req) |
| self.background_processes += [p] |
| |
        # Let the clock advance further.  After 1 second, as noted above, we
        # expect the background requests to be worked on.  Give them a few
        # more seconds (specifically 4) to see their impact.
| for _ in range(0, 5 * self.iops): |
| step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops) |
| self.vm.qtest(f'clock_step {step}') |
| |
| # Note how much remains to be done now. We expect this number to be |
| # reduced thanks to those background requests. |
| job_status = self.vm.qmp('query-jobs')['return'][0] |
| end_remaining = job_status['total-progress'] - \ |
| job_status['current-progress'] |
| |
| # See that indeed progress was being made on the job, even while the |
| # node was saturated with active requests |
| self.assertGreater(start_remaining - end_remaining, 0) |
| |
| |
| class TestHighThrottledWithNbdExport(TestThrottledWithNbdExportBase): |
| iops = 1024 |
| |
| def testActiveOnCreation(self): |
| ''' |
| Issue requests on the mirror source node right as the mirror is |
| instated. It's possible that requests occur before the actual job is |
| created, but after the node has been put into the graph. Write |
| requests across the node must in that case be forwarded to the source |
| node without attempting to mirror them (there is no job object yet, so |
| attempting to access it would cause a segfault). |
| We do this with a lightly throttled node (i.e. quite high IOPS limit). |
        Using throttling seems to increase reproducibility, but if the limit is
| too low, all requests allowed per second will be submitted before |
| mirror_start_job() gets to the problematic point. |
| ''' |
| |
| # Let qemu-img bench create write requests (enough for two seconds on |
| # the virtual clock) |
| bench_args = ['bench', '-w', '-d', '1024', '-f', 'nbd', |
| '-c', str(self.iops * 2), self.nbd_url] |
| p = iotests.qemu_tool_popen(iotests.qemu_img_args + bench_args) |
| self.background_processes += [p] |
| |
| # Give qemu-img bench time to start up and issue requests |
| time.sleep(1.0) |
| # Flush the request queue, so new requests can come in right as we |
| # start blockdev-mirror |
| self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') |
| |
| result = self.vm.qmp('blockdev-mirror', |
| job_id='mirror', |
| device='source-node', |
| target='target-node', |
| sync='full', |
| copy_mode='write-blocking') |
| self.assert_qmp(result, 'return', {}) |
| |
| |
| if __name__ == '__main__': |
| iotests.main(supported_fmts=['qcow2', 'raw'], |
| supported_protocols=['file']) |