#!/usr/bin/env python3
| # |
| # Tests for incremental drive-backup |
| # |
| # Copyright (C) 2015 John Snow for Red Hat, Inc. |
| # |
| # Based on 056. |
| # |
| # This program is free software; you can redistribute it and/or modify |
| # it under the terms of the GNU General Public License as published by |
| # the Free Software Foundation; either version 2 of the License, or |
| # (at your option) any later version. |
| # |
| # This program is distributed in the hope that it will be useful, |
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| # GNU General Public License for more details. |
| # |
| # You should have received a copy of the GNU General Public License |
| # along with this program. If not, see <http://www.gnu.org/licenses/>. |
| # |
| |
| import os |
| import iotests |
| |
| |
| def io_write_patterns(img, patterns): |
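    """Write each (pattern, offset, size) tuple into img using qemu-io."""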
| for pattern in patterns: |
| iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img) |
| |
| |
| def try_remove(img): |
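    """Remove a file, ignoring OSError (e.g. if it does not exist)."""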
| try: |
| os.remove(img) |
| except OSError: |
| pass |
| |
| |
| def transaction_action(action, **kwargs): |
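    """Build a QMP transaction action dict for 'action', converting
    foo_bar keyword names into their QMP 'foo-bar' spellings."""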
| return { |
| 'type': action, |
        'data': {k.replace('_', '-'): v for k, v in kwargs.items()}
| } |
| |
| |
| def transaction_bitmap_clear(node, name, **kwargs): |
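    """Return a block-dirty-bitmap-clear action for a transaction."""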
| return transaction_action('block-dirty-bitmap-clear', |
| node=node, name=name, **kwargs) |
| |
| |
| def transaction_drive_backup(device, target, **kwargs): |
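    """Return a drive-backup action for a transaction."""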
| return transaction_action('drive-backup', job_id=device, device=device, |
| target=target, **kwargs) |
| |
| |
| class Bitmap: |
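    """Track the chain of incremental backups and reference images
    created for a single dirty bitmap on a single drive."""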
| def __init__(self, name, drive): |
| self.name = name |
| self.drive = drive |
| self.num = 0 |
| self.backups = list() |
| |
| def base_target(self): |
| return (self.drive['backup'], None) |
| |
| def new_target(self, num=None): |
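        """Allocate and record a new (incremental, reference) target pair."""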
| if num is None: |
| num = self.num |
| self.num = num + 1 |
| base = os.path.join(iotests.test_dir, |
| "%s.%s." % (self.drive['id'], self.name)) |
| suff = "%i.%s" % (num, self.drive['fmt']) |
| target = base + "inc" + suff |
| reference = base + "ref" + suff |
| self.backups.append((target, reference)) |
| return (target, reference) |
| |
| def last_target(self): |
| if self.backups: |
| return self.backups[-1] |
| return self.base_target() |
| |
| def del_target(self): |
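        """Delete the most recent target pair from disk and our records."""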
| for image in self.backups.pop(): |
| try_remove(image) |
| self.num -= 1 |
| |
| def cleanup(self): |
| for backup in self.backups: |
| for image in backup: |
| try_remove(image) |
| |
| |
| class TestIncrementalBackupBase(iotests.QMPTestCase): |
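    """Shared machinery for incremental backup tests: drive and bitmap
    setup, backup-job plumbing, and verification helpers."""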
| def __init__(self, *args): |
        super().__init__(*args)
| self.bitmaps = list() |
| self.files = list() |
| self.drives = list() |
| self.vm = iotests.VM() |
| self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt) |
| |
| |
| def setUp(self): |
| # Create a base image with a distinctive patterning |
| drive0 = self.add_node('drive0') |
| self.img_create(drive0['file'], drive0['fmt']) |
| self.vm.add_drive(drive0['file'], opts='node-name=node0') |
| self.write_default_pattern(drive0['file']) |
| self.vm.launch() |
| |
| |
| def write_default_pattern(self, target): |
| io_write_patterns(target, (('0x41', 0, 512), |
| ('0xd5', '1M', '32k'), |
| ('0xdc', '32M', '124k'))) |
| |
| |
| def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None): |
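        """Record a new drive: its image path, anchor-backup path and format."""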
| if path is None: |
| path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt)) |
| if backup is None: |
| backup = os.path.join(iotests.test_dir, |
| '%s.full.backup.%s' % (node_id, fmt)) |
| |
| self.drives.append({ |
| 'id': node_id, |
| 'file': path, |
| 'backup': backup, |
| 'fmt': fmt }) |
| return self.drives[-1] |
| |
| |
| def img_create(self, img, fmt=iotests.imgfmt, size='64M', |
| parent=None, parentFormat=None, **kwargs): |
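        """Create an image with qemu-img and remember it for cleanup."""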
| optargs = [] |
        for k, v in kwargs.items():
            optargs += ['-o', '%s=%s' % (k, v)]
| args = ['create', '-f', fmt] + optargs + [img, size] |
| if parent: |
| if parentFormat is None: |
| parentFormat = fmt |
| args = args + ['-b', parent, '-F', parentFormat] |
| iotests.qemu_img(*args) |
| self.files.append(img) |
| |
| |
| def do_qmp_backup(self, error='Input/output error', **kwargs): |
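        """Run drive-backup and wait for the job to finish; return True
        on success, False if it failed with the expected error."""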
| res = self.vm.qmp('drive-backup', **kwargs) |
| self.assert_qmp(res, 'return', {}) |
| return self.wait_qmp_backup(kwargs['device'], error) |
| |
| |
| def ignore_job_status_change_events(self): |
| while True: |
| e = self.vm.event_wait(name="JOB_STATUS_CHANGE") |
| if e['data']['status'] == 'null': |
| break |
| |
| def wait_qmp_backup(self, device, error='Input/output error'): |
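        """Wait for BLOCK_JOB_COMPLETED on device; return True if the job
        completed, False if it reported the expected error."""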
| event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED", |
| match={'data': {'device': device}}) |
| self.assertNotEqual(event, None) |
| self.ignore_job_status_change_events() |
| |
| try: |
| failure = self.dictpath(event, 'data/error') |
| except AssertionError: |
| # Backup succeeded. |
| self.assert_qmp(event, 'data/offset', event['data']['len']) |
| return True |
| else: |
| # Backup failed. |
| self.assert_qmp(event, 'data/error', error) |
| return False |
| |
| |
| def wait_qmp_backup_cancelled(self, device): |
| event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED', |
| match={'data': {'device': device}}) |
| self.assertNotEqual(event, None) |
| self.ignore_job_status_change_events() |
| |
| |
| def create_anchor_backup(self, drive=None): |
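        """Create a full backup to anchor an incremental backup chain."""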
| if drive is None: |
| drive = self.drives[-1] |
| res = self.do_qmp_backup(job_id=drive['id'], |
| device=drive['id'], sync='full', |
| format=drive['fmt'], target=drive['backup']) |
| self.assertTrue(res) |
| self.files.append(drive['backup']) |
| return drive['backup'] |
| |
| |
| def make_reference_backup(self, bitmap=None): |
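        """Create a full backup to serve as the reference image for the
        most recently created incremental target."""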
| if bitmap is None: |
| bitmap = self.bitmaps[-1] |
| _, reference = bitmap.last_target() |
| res = self.do_qmp_backup(job_id=bitmap.drive['id'], |
| device=bitmap.drive['id'], sync='full', |
| format=bitmap.drive['fmt'], target=reference) |
| self.assertTrue(res) |
| |
| |
| def add_bitmap(self, name, drive, **kwargs): |
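        """Create a dirty bitmap on a drive via block-dirty-bitmap-add."""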
| bitmap = Bitmap(name, drive) |
| self.bitmaps.append(bitmap) |
| result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'], |
| name=bitmap.name, **kwargs) |
| self.assert_qmp(result, 'return', {}) |
| return bitmap |
| |
| |
| def prepare_backup(self, bitmap=None, parent=None, **kwargs): |
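        """Create a new backup target image backed by the previous target."""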
| if bitmap is None: |
| bitmap = self.bitmaps[-1] |
| if parent is None: |
| parent, _ = bitmap.last_target() |
| |
| target, _ = bitmap.new_target() |
| self.img_create(target, bitmap.drive['fmt'], parent=parent, |
| **kwargs) |
| return target |
| |
| |
| def create_incremental(self, bitmap=None, parent=None, |
| parentFormat=None, validate=True, |
| target=None): |
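        """Run an incremental backup into a (possibly pre-created) target.
        On failure, discard the target; on success, create a reference
        backup for later comparison. Return the job's success."""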
| if bitmap is None: |
| bitmap = self.bitmaps[-1] |
| if parent is None: |
| parent, _ = bitmap.last_target() |
| |
| if target is None: |
| target = self.prepare_backup(bitmap, parent) |
| res = self.do_qmp_backup(job_id=bitmap.drive['id'], |
| device=bitmap.drive['id'], |
| sync='incremental', bitmap=bitmap.name, |
| format=bitmap.drive['fmt'], target=target, |
| mode='existing') |
| if not res: |
            bitmap.del_target()
| self.assertFalse(validate) |
| else: |
| self.make_reference_backup(bitmap) |
| return res |
| |
| |
| def check_backups(self): |
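        """Compare each incremental backup against its reference image and
        the end of each chain against the current drive contents."""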
| for bitmap in self.bitmaps: |
| for incremental, reference in bitmap.backups: |
| self.assertTrue(iotests.compare_images(incremental, reference)) |
| last = bitmap.last_target()[0] |
| self.assertTrue(iotests.compare_images(last, bitmap.drive['file'])) |
| |
| |
| def hmp_io_writes(self, drive, patterns): |
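        """Issue qemu-io write commands over HMP, followed by a flush."""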
| for pattern in patterns: |
| self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern) |
| self.vm.hmp_qemu_io(drive, 'flush') |
| |
| |
| def do_incremental_simple(self, **kwargs): |
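        """Create an anchor backup and a bitmap, then three incremental
        backups with guest writes in between; verify all of them."""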
| self.create_anchor_backup() |
| self.add_bitmap('bitmap0', self.drives[0], **kwargs) |
| |
| # Sanity: Create a "hollow" incremental backup |
| self.create_incremental() |
| # Three writes: One complete overwrite, one new segment, |
| # and one partial overlap. |
| self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| self.create_incremental() |
| # Three more writes, one of each kind, like above |
| self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512), |
| ('0x55', '8M', '352k'), |
| ('0x78', '15872k', '1M'))) |
| self.create_incremental() |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| def tearDown(self): |
| self.vm.shutdown() |
| for bitmap in self.bitmaps: |
| bitmap.cleanup() |
| for filename in self.files: |
| try_remove(filename) |
| |
| |
| |
| class TestIncrementalBackup(TestIncrementalBackupBase): |
| def test_incremental_simple(self): |
| ''' |
| Test: Create and verify three incremental backups. |
| |
| Create a bitmap and a full backup before VM execution begins, |
| then create a series of three incremental backups "during execution," |
        i.e., after IO requests begin modifying the drive.
| ''' |
| return self.do_incremental_simple() |
| |
| |
| def test_small_granularity(self): |
| ''' |
| Test: Create and verify backups made with a small granularity bitmap. |
| |
| Perform the same test as test_incremental_simple, but with a granularity |
| of only 32KiB instead of the present default of 64KiB. |
| ''' |
| return self.do_incremental_simple(granularity=32768) |
| |
| |
| def test_large_granularity(self): |
| ''' |
| Test: Create and verify backups made with a large granularity bitmap. |
| |
| Perform the same test as test_incremental_simple, but with a granularity |
| of 128KiB instead of the present default of 64KiB. |
| ''' |
| return self.do_incremental_simple(granularity=131072) |
| |
| |
| def test_larger_cluster_target(self): |
| ''' |
| Test: Create and verify backups made to a larger cluster size target. |
| |
        With a default granularity of 64KiB, verify that backups made to a
        target with a larger cluster size of 128KiB, and no backing file,
        work correctly.
| ''' |
| drive0 = self.drives[0] |
| |
| # Create a cluster_size=128k full backup / "anchor" backup |
| self.img_create(drive0['backup'], cluster_size='128k') |
| self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full', |
| format=drive0['fmt'], |
| target=drive0['backup'], |
| mode='existing')) |
| |
        # Create a bitmap and dirty it with some new writes. The 64KiB write
        # at 32736k (KiB range [32736k, 32799k]) dirties the bitmap clusters
        # at 32M-64K and 32M; the cluster at 32M+64K is left undirtied.
| bitmap0 = self.add_bitmap('bitmap0', drive0) |
| self.hmp_io_writes(drive0['id'], |
| (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| # Check the dirty bitmap stats |
| self.assertTrue(self.vm.check_bitmap_status( |
| 'node0', bitmap0.name, { |
| 'name': 'bitmap0', |
| 'count': 458752, |
| 'granularity': 65536, |
| 'status': 'active', |
| 'persistent': False |
| })) |
| |
| # Prepare a cluster_size=128k backup target without a backing file. |
| (target, _) = bitmap0.new_target() |
| self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k') |
| |
| # Perform Incremental Backup |
| self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'], |
| sync='incremental', |
| bitmap=bitmap0.name, |
| format=bitmap0.drive['fmt'], |
| target=target, |
| mode='existing')) |
| self.make_reference_backup(bitmap0) |
| |
| # Add the backing file, then compare and exit. |
| iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b', |
| drive0['backup'], '-F', drive0['fmt'], target) |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| def test_incremental_transaction(self): |
| '''Test: Verify backups made from transactionally created bitmaps. |
| |
| Create a bitmap "before" VM execution begins, then create a second |
| bitmap AFTER writes have already occurred. Use transactions to create |
| a full backup and synchronize both bitmaps to this backup. |
| Create an incremental backup through both bitmaps and verify that |
| both backups match the current drive0 image. |
| ''' |
| |
| drive0 = self.drives[0] |
| bitmap0 = self.add_bitmap('bitmap0', drive0) |
| self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| bitmap1 = self.add_bitmap('bitmap1', drive0) |
| |
| result = self.vm.qmp('transaction', actions=[ |
| transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name), |
| transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name), |
| transaction_drive_backup(drive0['id'], drive0['backup'], |
| sync='full', format=drive0['fmt']) |
| ]) |
| self.assert_qmp(result, 'return', {}) |
| self.wait_until_completed(drive0['id']) |
| self.files.append(drive0['backup']) |
| |
| self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512), |
| ('0x55', '8M', '352k'), |
| ('0x78', '15872k', '1M'))) |
| # Both bitmaps should be correctly in sync. |
| self.create_incremental(bitmap0) |
| self.create_incremental(bitmap1) |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| def do_transaction_failure_test(self, race=False): |
| # Create a second drive, with pattern: |
| drive1 = self.add_node('drive1') |
| self.img_create(drive1['file'], drive1['fmt']) |
| io_write_patterns(drive1['file'], (('0x14', 0, 512), |
| ('0x5d', '1M', '32k'), |
| ('0xcd', '32M', '124k'))) |
| |
| # Create a blkdebug interface to this img as 'drive1' |
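        # The blkdebug node starts in state 1; the first flush_to_disk event
        # advances it to state 2, after which the first read_aio request
        # fails exactly once with EIO (errno 5).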
| result = self.vm.qmp('blockdev-add', |
| node_name=drive1['id'], |
| driver=drive1['fmt'], |
| file={ |
| 'driver': 'blkdebug', |
| 'image': { |
| 'driver': 'file', |
| 'filename': drive1['file'] |
| }, |
| 'set-state': [{ |
| 'event': 'flush_to_disk', |
| 'state': 1, |
| 'new_state': 2 |
| }], |
| 'inject-error': [{ |
| 'event': 'read_aio', |
| 'errno': 5, |
| 'state': 2, |
| 'immediately': False, |
| 'once': True |
| }], |
| } |
| ) |
| self.assert_qmp(result, 'return', {}) |
| |
| # Create bitmaps and full backups for both drives |
| drive0 = self.drives[0] |
| dr0bm0 = self.add_bitmap('bitmap0', drive0) |
| dr1bm0 = self.add_bitmap('bitmap0', drive1) |
| self.create_anchor_backup(drive0) |
| self.create_anchor_backup(drive1) |
| self.assert_no_active_block_jobs() |
| self.assertFalse(self.vm.get_qmp_events(wait=False)) |
| |
| # Emulate some writes |
| if not race: |
| self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| self.hmp_io_writes(drive1['id'], (('0xba', 0, 512), |
| ('0xef', '16M', '256k'), |
| ('0x46', '32736k', '64k'))) |
| |
| # Create incremental backup targets |
| target0 = self.prepare_backup(dr0bm0) |
| target1 = self.prepare_backup(dr1bm0) |
| |
| # Ask for a new incremental backup per-each drive, |
| # expecting drive1's backup to fail. In the 'race' test, |
| # we expect drive1 to attempt to cancel the empty drive0 job. |
| transaction = [ |
| transaction_drive_backup(drive0['id'], target0, sync='incremental', |
| format=drive0['fmt'], mode='existing', |
| bitmap=dr0bm0.name), |
| transaction_drive_backup(drive1['id'], target1, sync='incremental', |
| format=drive1['fmt'], mode='existing', |
| bitmap=dr1bm0.name) |
| ] |
| result = self.vm.qmp('transaction', actions=transaction, |
                             properties={'completion-mode': 'grouped'})
| self.assert_qmp(result, 'return', {}) |
| |
| # Observe that drive0's backup is cancelled and drive1 completes with |
| # an error. |
| self.wait_qmp_backup_cancelled(drive0['id']) |
| self.assertFalse(self.wait_qmp_backup(drive1['id'])) |
| error = self.vm.event_wait('BLOCK_JOB_ERROR') |
| self.assert_qmp(error, 'data', {'device': drive1['id'], |
| 'action': 'report', |
| 'operation': 'read'}) |
| self.assertFalse(self.vm.get_qmp_events(wait=False)) |
| self.assert_no_active_block_jobs() |
| |
        # Neither backup succeeded: delete drive0's cancelled target and
        # drive1's failed target, and drop our records of both.
| dr0bm0.del_target() |
| dr1bm0.del_target() |
| if race: |
| # Don't re-run the transaction, we only wanted to test the race. |
| self.vm.shutdown() |
| return |
| |
        # Prepare new targets:
| target0 = self.prepare_backup(dr0bm0) |
| target1 = self.prepare_backup(dr1bm0) |
| |
| # Re-run the exact same transaction. |
| result = self.vm.qmp('transaction', actions=transaction, |
                             properties={'completion-mode': 'grouped'})
| self.assert_qmp(result, 'return', {}) |
| |
| # Both should complete successfully this time. |
| self.assertTrue(self.wait_qmp_backup(drive0['id'])) |
| self.assertTrue(self.wait_qmp_backup(drive1['id'])) |
| self.make_reference_backup(dr0bm0) |
| self.make_reference_backup(dr1bm0) |
| self.assertFalse(self.vm.get_qmp_events(wait=False)) |
| self.assert_no_active_block_jobs() |
| |
| # And the images should of course validate. |
| self.vm.shutdown() |
| self.check_backups() |
| |
| def test_transaction_failure(self): |
| '''Test: Verify backups made from a transaction that partially fails. |
| |
| Add a second drive with its own unique pattern, and add a bitmap to each |
| drive. Use blkdebug to interfere with the backup on just one drive and |
| attempt to create a coherent incremental backup across both drives. |
| |
        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
| ''' |
| self.do_transaction_failure_test() |
| |
| def test_transaction_failure_race(self): |
| '''Test: Verify that transactions with jobs that have no data to |
| transfer do not cause race conditions in the cancellation of the entire |
| transaction job group. |
| ''' |
| self.do_transaction_failure_test(race=True) |
| |
| |
| def test_sync_dirty_bitmap_missing(self): |
| self.assert_no_active_block_jobs() |
| self.files.append(self.err_img) |
| result = self.vm.qmp('drive-backup', device=self.drives[0]['id'], |
| sync='incremental', format=self.drives[0]['fmt'], |
| target=self.err_img) |
| self.assert_qmp(result, 'error/class', 'GenericError') |
| |
| |
| def test_sync_dirty_bitmap_not_found(self): |
| self.assert_no_active_block_jobs() |
| self.files.append(self.err_img) |
| result = self.vm.qmp('drive-backup', device=self.drives[0]['id'], |
| sync='incremental', bitmap='unknown', |
| format=self.drives[0]['fmt'], target=self.err_img) |
| self.assert_qmp(result, 'error/class', 'GenericError') |
| |
| |
| def test_sync_dirty_bitmap_bad_granularity(self): |
| ''' |
        Test: Attempt to add a bitmap with an improper granularity.
| |
| The granularity must always be a power of 2. |
| ''' |
| self.assert_no_active_block_jobs() |
| self.assertRaises(AssertionError, self.add_bitmap, |
| 'bitmap0', self.drives[0], |
| granularity=64000) |
| |
| def test_growing_before_backup(self): |
| ''' |
| Test: Add a bitmap, truncate the image, write past the old |
| end, do a backup. |
| |
| Incremental backup should not ignore dirty bits past the old |
| image end. |
| ''' |
| self.assert_no_active_block_jobs() |
| |
| self.create_anchor_backup() |
| |
| self.add_bitmap('bitmap0', self.drives[0]) |
| |
| res = self.vm.qmp('block_resize', device=self.drives[0]['id'], |
| size=(65 * 1048576)) |
| self.assert_qmp(res, 'return', {}) |
| |
| # Dirty the image past the old end |
| self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k') |
| |
| target = self.prepare_backup(size='65M') |
| self.create_incremental(target=target) |
| |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase): |
| '''Incremental backup tests that utilize a BlkDebug filter on drive0.''' |
| |
| def setUp(self): |
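        # Unlike the base class, do not attach the drive here: the tests
        # attach it themselves through a blkdebug filter via blockdev-add.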
| drive0 = self.add_node('drive0') |
| self.img_create(drive0['file'], drive0['fmt']) |
| self.write_default_pattern(drive0['file']) |
| self.vm.launch() |
| |
| def test_incremental_failure(self): |
| '''Test: Verify backups made after a failure are correct. |
| |
| Simulate a failure during an incremental backup block job, |
| emulate additional writes, then create another incremental backup |
| afterwards and verify that the backup created is correct. |
| ''' |
| |
| drive0 = self.drives[0] |
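        # Set up a blkdebug node that fails the first read after the first
        # flush (the same recipe as in do_transaction_failure_test).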
| result = self.vm.qmp('blockdev-add', |
| node_name=drive0['id'], |
| driver=drive0['fmt'], |
| file={ |
| 'driver': 'blkdebug', |
| 'image': { |
| 'driver': 'file', |
| 'filename': drive0['file'] |
| }, |
| 'set-state': [{ |
| 'event': 'flush_to_disk', |
| 'state': 1, |
| 'new_state': 2 |
| }], |
| 'inject-error': [{ |
| 'event': 'read_aio', |
| 'errno': 5, |
| 'state': 2, |
| 'immediately': False, |
| 'once': True |
| }], |
| } |
| ) |
| self.assert_qmp(result, 'return', {}) |
| |
| self.create_anchor_backup(drive0) |
| self.add_bitmap('bitmap0', drive0) |
        # Note: during a normal execution, assume that the VM resumes and
        # begins issuing IO requests here.
| |
| self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| |
| result = self.create_incremental(validate=False) |
| self.assertFalse(result) |
| self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512), |
| ('0x55', '8M', '352k'), |
| ('0x78', '15872k', '1M'))) |
| self.create_incremental() |
| self.vm.shutdown() |
| self.check_backups() |
| |
| def test_incremental_pause(self): |
| """ |
| Test an incremental backup that errors into a pause and is resumed. |
| """ |
| |
| drive0 = self.drives[0] |
| # NB: The blkdebug script here looks for a "flush, read" pattern. |
| # The flush occurs in hmp_io_writes, and the read during the block job. |
| result = self.vm.qmp('blockdev-add', |
| node_name=drive0['id'], |
| driver=drive0['fmt'], |
| file={ |
| 'driver': 'blkdebug', |
| 'image': { |
| 'driver': 'file', |
| 'filename': drive0['file'] |
| }, |
| 'set-state': [{ |
| 'event': 'flush_to_disk', |
| 'state': 1, |
| 'new_state': 2 |
| }], |
| 'inject-error': [{ |
| 'event': 'read_aio', |
| 'errno': 5, |
| 'state': 2, |
| 'immediately': False, |
| 'once': True |
| }], |
| }) |
| self.assert_qmp(result, 'return', {}) |
| self.create_anchor_backup(drive0) |
| bitmap = self.add_bitmap('bitmap0', drive0) |
| |
| # Emulate guest activity |
| self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| |
| # Bitmap Status Check |
| self.assertTrue(self.vm.check_bitmap_status( |
| drive0['id'], bitmap.name, { |
| 'count': 458752, |
| 'granularity': 65536, |
| 'status': 'active', |
| 'busy': False, |
| 'recording': True |
| })) |
| |
| # Start backup |
| parent, _ = bitmap.last_target() |
| target = self.prepare_backup(bitmap, parent) |
| res = self.vm.qmp('drive-backup', |
| job_id=bitmap.drive['id'], |
| device=bitmap.drive['id'], |
| sync='incremental', |
| bitmap=bitmap.name, |
| format=bitmap.drive['fmt'], |
| target=target, |
| mode='existing', |
| on_source_error='stop') |
| self.assert_qmp(res, 'return', {}) |
| |
| # Wait for the error |
| event = self.vm.event_wait(name="BLOCK_JOB_ERROR", |
| match={"data":{"device":bitmap.drive['id']}}) |
| self.assert_qmp(event, 'data', {'device': bitmap.drive['id'], |
| 'action': 'stop', |
| 'operation': 'read'}) |
| |
| # Bitmap Status Check |
| self.assertTrue(self.vm.check_bitmap_status( |
| drive0['id'], bitmap.name, { |
| 'count': 458752, |
| 'granularity': 65536, |
| 'status': 'frozen', |
| 'busy': True, |
| 'recording': True |
| })) |
| |
| # Resume and check incremental backup for consistency |
| res = self.vm.qmp('block-job-resume', device=bitmap.drive['id']) |
| self.assert_qmp(res, 'return', {}) |
| self.wait_qmp_backup(bitmap.drive['id']) |
| |
| # Bitmap Status Check |
| self.assertTrue(self.vm.check_bitmap_status( |
| drive0['id'], bitmap.name, { |
| 'count': 0, |
| 'granularity': 65536, |
| 'status': 'active', |
| 'busy': False, |
| 'recording': True |
| })) |
| |
| # Finalize / Cleanup |
| self.make_reference_backup(bitmap) |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| if __name__ == '__main__': |
| iotests.main(supported_fmts=['qcow2'], |
| supported_protocols=['file']) |