| #!/usr/bin/env python |
| # |
| # Tests for incremental drive-backup |
| # |
| # Copyright (C) 2015 John Snow for Red Hat, Inc. |
| # |
| # Based on 056. |
| # |
| # This program is free software; you can redistribute it and/or modify |
| # it under the terms of the GNU General Public License as published by |
| # the Free Software Foundation; either version 2 of the License, or |
| # (at your option) any later version. |
| # |
| # This program is distributed in the hope that it will be useful, |
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| # GNU General Public License for more details. |
| # |
| # You should have received a copy of the GNU General Public License |
| # along with this program. If not, see <http://www.gnu.org/licenses/>. |
| # |
| |
| import os |
| import iotests |
| |
| |
| def io_write_patterns(img, patterns): |
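    '''Write a series of (pattern, offset, size) tuples into an image.'''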
| for pattern in patterns: |
| iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img) |
| |
| |
| def try_remove(img): |
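    '''Remove a file, ignoring any OSError (e.g. if it does not exist).'''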
| try: |
| os.remove(img) |
| except OSError: |
| pass |
| |
| |
| def transaction_action(action, **kwargs): |
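    '''Build a single QMP transaction action of the given type, converting
    underscores in keyword argument names to hyphens.'''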
| return { |
| 'type': action, |
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
| } |
| |
| |
| def transaction_bitmap_clear(node, name, **kwargs): |
| return transaction_action('block-dirty-bitmap-clear', |
| node=node, name=name, **kwargs) |
| |
| |
| def transaction_drive_backup(device, target, **kwargs): |
| return transaction_action('drive-backup', device=device, target=target, |
| **kwargs) |
| |
| |
| class Bitmap: |
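    '''Bookkeeping for a single dirty bitmap and the chain of incremental
    backup targets (and full reference backups) created through it.'''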
| def __init__(self, name, drive): |
| self.name = name |
| self.drive = drive |
| self.num = 0 |
| self.backups = list() |
| |
| def base_target(self): |
| return (self.drive['backup'], None) |
| |
| def new_target(self, num=None): |
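        '''Allocate the next (target, reference) image pair for this bitmap.'''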
| if num is None: |
| num = self.num |
| self.num = num + 1 |
| base = os.path.join(iotests.test_dir, |
| "%s.%s." % (self.drive['id'], self.name)) |
| suff = "%i.%s" % (num, self.drive['fmt']) |
| target = base + "inc" + suff |
| reference = base + "ref" + suff |
| self.backups.append((target, reference)) |
| return (target, reference) |
| |
| def last_target(self): |
| if self.backups: |
| return self.backups[-1] |
| return self.base_target() |
| |
| def del_target(self): |
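        '''Delete and forget the most recent (target, reference) pair.'''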
| for image in self.backups.pop(): |
| try_remove(image) |
| self.num -= 1 |
| |
| def cleanup(self): |
| for backup in self.backups: |
| for image in backup: |
| try_remove(image) |
| |
| |
| class TestIncrementalBackupBase(iotests.QMPTestCase): |
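    '''Shared machinery for the incremental backup test cases below.'''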
| def __init__(self, *args): |
| super(TestIncrementalBackupBase, self).__init__(*args) |
| self.bitmaps = list() |
| self.files = list() |
| self.drives = list() |
| self.vm = iotests.VM() |
| self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt) |
| |
| |
| def setUp(self): |
        # Create a base image with a distinctive pattern
| drive0 = self.add_node('drive0') |
| self.img_create(drive0['file'], drive0['fmt']) |
| self.vm.add_drive(drive0['file']) |
| self.write_default_pattern(drive0['file']) |
| self.vm.launch() |
| |
| |
| def write_default_pattern(self, target): |
| io_write_patterns(target, (('0x41', 0, 512), |
| ('0xd5', '1M', '32k'), |
| ('0xdc', '32M', '124k'))) |
| |
| |
| def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None): |
| if path is None: |
| path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt)) |
| if backup is None: |
| backup = os.path.join(iotests.test_dir, |
| '%s.full.backup.%s' % (node_id, fmt)) |
| |
| self.drives.append({ |
| 'id': node_id, |
| 'file': path, |
| 'backup': backup, |
| 'fmt': fmt }) |
| return self.drives[-1] |
| |
| |
| def img_create(self, img, fmt=iotests.imgfmt, size='64M', |
| parent=None, parentFormat=None): |
| if parent: |
| if parentFormat is None: |
| parentFormat = fmt |
| iotests.qemu_img('create', '-f', fmt, img, size, |
| '-b', parent, '-F', parentFormat) |
| else: |
| iotests.qemu_img('create', '-f', fmt, img, size) |
| self.files.append(img) |
| |
| |
| def do_qmp_backup(self, error='Input/output error', **kwargs): |
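        '''Issue a drive-backup command and wait for the job to complete;
        return True on success, False if it failed with the given error.'''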
| res = self.vm.qmp('drive-backup', **kwargs) |
| self.assert_qmp(res, 'return', {}) |
| return self.wait_qmp_backup(kwargs['device'], error) |
| |
| |
| def wait_qmp_backup(self, device, error='Input/output error'): |
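        '''Wait for BLOCK_JOB_COMPLETED on the given device and return True
        if the job succeeded, False if it failed with the expected error.'''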
| event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED", |
| match={'data': {'device': device}}) |
| self.assertNotEqual(event, None) |
| |
| try: |
| failure = self.dictpath(event, 'data/error') |
| except AssertionError: |
| # Backup succeeded. |
| self.assert_qmp(event, 'data/offset', event['data']['len']) |
| return True |
| else: |
| # Backup failed. |
| self.assert_qmp(event, 'data/error', error) |
| return False |
| |
| |
| def wait_qmp_backup_cancelled(self, device): |
| event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED', |
| match={'data': {'device': device}}) |
| self.assertNotEqual(event, None) |
| |
| |
| def create_anchor_backup(self, drive=None): |
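        '''Create a full backup to anchor the incremental chain.'''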
| if drive is None: |
| drive = self.drives[-1] |
| res = self.do_qmp_backup(device=drive['id'], sync='full', |
| format=drive['fmt'], target=drive['backup']) |
| self.assertTrue(res) |
| self.files.append(drive['backup']) |
| return drive['backup'] |
| |
| |
| def make_reference_backup(self, bitmap=None): |
| if bitmap is None: |
| bitmap = self.bitmaps[-1] |
| _, reference = bitmap.last_target() |
| res = self.do_qmp_backup(device=bitmap.drive['id'], sync='full', |
| format=bitmap.drive['fmt'], target=reference) |
| self.assertTrue(res) |
| |
| |
| def add_bitmap(self, name, drive, **kwargs): |
| bitmap = Bitmap(name, drive) |
| self.bitmaps.append(bitmap) |
| result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'], |
| name=bitmap.name, **kwargs) |
| self.assert_qmp(result, 'return', {}) |
| return bitmap |
| |
| |
| def prepare_backup(self, bitmap=None, parent=None): |
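        '''Create a new, empty backup target backed by the previous backup.'''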
| if bitmap is None: |
| bitmap = self.bitmaps[-1] |
| if parent is None: |
| parent, _ = bitmap.last_target() |
| |
| target, _ = bitmap.new_target() |
| self.img_create(target, bitmap.drive['fmt'], parent=parent) |
| return target |
| |
| |
| def create_incremental(self, bitmap=None, parent=None, |
| parentFormat=None, validate=True): |
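        '''Run an incremental drive-backup into a freshly prepared target.
        On failure, delete the target; on success, record a reference backup.'''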
| if bitmap is None: |
| bitmap = self.bitmaps[-1] |
| if parent is None: |
| parent, _ = bitmap.last_target() |
| |
| target = self.prepare_backup(bitmap, parent) |
| res = self.do_qmp_backup(device=bitmap.drive['id'], |
| sync='incremental', bitmap=bitmap.name, |
| format=bitmap.drive['fmt'], target=target, |
| mode='existing') |
| if not res: |
            bitmap.del_target()
| self.assertFalse(validate) |
| else: |
| self.make_reference_backup(bitmap) |
| return res |
| |
| |
| def check_backups(self): |
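        '''Compare each incremental backup against its reference image, and
        the latest backup in each chain against the current drive image.'''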
| for bitmap in self.bitmaps: |
| for incremental, reference in bitmap.backups: |
| self.assertTrue(iotests.compare_images(incremental, reference)) |
| last = bitmap.last_target()[0] |
| self.assertTrue(iotests.compare_images(last, bitmap.drive['file'])) |
| |
| |
| def hmp_io_writes(self, drive, patterns): |
| for pattern in patterns: |
| self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern) |
| self.vm.hmp_qemu_io(drive, 'flush') |
| |
| |
| def do_incremental_simple(self, **kwargs): |
| self.create_anchor_backup() |
| self.add_bitmap('bitmap0', self.drives[0], **kwargs) |
| |
| # Sanity: Create a "hollow" incremental backup |
| self.create_incremental() |
| # Three writes: One complete overwrite, one new segment, |
| # and one partial overlap. |
| self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| self.create_incremental() |
| # Three more writes, one of each kind, like above |
| self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512), |
| ('0x55', '8M', '352k'), |
| ('0x78', '15872k', '1M'))) |
| self.create_incremental() |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| def tearDown(self): |
| self.vm.shutdown() |
| for bitmap in self.bitmaps: |
| bitmap.cleanup() |
| for filename in self.files: |
| try_remove(filename) |
| |
| |
| |
| class TestIncrementalBackup(TestIncrementalBackupBase): |
| def test_incremental_simple(self): |
| ''' |
| Test: Create and verify three incremental backups. |
| |
| Create a bitmap and a full backup before VM execution begins, |
| then create a series of three incremental backups "during execution," |
        i.e., after IO requests begin modifying the drive.
| ''' |
| return self.do_incremental_simple() |
| |
| |
| def test_small_granularity(self): |
| ''' |
| Test: Create and verify backups made with a small granularity bitmap. |
| |
| Perform the same test as test_incremental_simple, but with a granularity |
| of only 32KiB instead of the present default of 64KiB. |
| ''' |
| return self.do_incremental_simple(granularity=32768) |
| |
| |
| def test_large_granularity(self): |
| ''' |
| Test: Create and verify backups made with a large granularity bitmap. |
| |
| Perform the same test as test_incremental_simple, but with a granularity |
| of 128KiB instead of the present default of 64KiB. |
| ''' |
| return self.do_incremental_simple(granularity=131072) |
| |
| |
| def test_incremental_transaction(self): |
| '''Test: Verify backups made from transactionally created bitmaps. |
| |
| Create a bitmap "before" VM execution begins, then create a second |
| bitmap AFTER writes have already occurred. Use transactions to create |
| a full backup and synchronize both bitmaps to this backup. |
| Create an incremental backup through both bitmaps and verify that |
| both backups match the current drive0 image. |
| ''' |
| |
| drive0 = self.drives[0] |
| bitmap0 = self.add_bitmap('bitmap0', drive0) |
| self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| bitmap1 = self.add_bitmap('bitmap1', drive0) |
| |
| result = self.vm.qmp('transaction', actions=[ |
| transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name), |
| transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name), |
| transaction_drive_backup(drive0['id'], drive0['backup'], |
| sync='full', format=drive0['fmt']) |
| ]) |
| self.assert_qmp(result, 'return', {}) |
| self.wait_until_completed(drive0['id']) |
| self.files.append(drive0['backup']) |
| |
| self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512), |
| ('0x55', '8M', '352k'), |
| ('0x78', '15872k', '1M'))) |
| # Both bitmaps should be correctly in sync. |
| self.create_incremental(bitmap0) |
| self.create_incremental(bitmap1) |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| def test_transaction_failure(self): |
| '''Test: Verify backups made from a transaction that partially fails. |
| |
| Add a second drive with its own unique pattern, and add a bitmap to each |
| drive. Use blkdebug to interfere with the backup on just one drive and |
| attempt to create a coherent incremental backup across both drives. |
| |
        Verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        Verify that both incrementals are created successfully.
| ''' |
| |
        # Create a second drive with its own write pattern:
| drive1 = self.add_node('drive1') |
| self.img_create(drive1['file'], drive1['fmt']) |
| io_write_patterns(drive1['file'], (('0x14', 0, 512), |
| ('0x5d', '1M', '32k'), |
| ('0xcd', '32M', '124k'))) |
| |
| # Create a blkdebug interface to this img as 'drive1' |
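        # The flush_to_disk event advances the blkdebug state machine to
        # state 2, after which the first read_aio request fails once with
        # EIO (errno 5).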
| result = self.vm.qmp('blockdev-add', options={ |
| 'id': drive1['id'], |
| 'driver': drive1['fmt'], |
| 'file': { |
| 'driver': 'blkdebug', |
| 'image': { |
| 'driver': 'file', |
| 'filename': drive1['file'] |
| }, |
| 'set-state': [{ |
| 'event': 'flush_to_disk', |
| 'state': 1, |
| 'new_state': 2 |
| }], |
| 'inject-error': [{ |
| 'event': 'read_aio', |
| 'errno': 5, |
| 'state': 2, |
| 'immediately': False, |
| 'once': True |
| }], |
| } |
| }) |
| self.assert_qmp(result, 'return', {}) |
| |
| # Create bitmaps and full backups for both drives |
| drive0 = self.drives[0] |
| dr0bm0 = self.add_bitmap('bitmap0', drive0) |
| dr1bm0 = self.add_bitmap('bitmap0', drive1) |
| self.create_anchor_backup(drive0) |
| self.create_anchor_backup(drive1) |
| self.assert_no_active_block_jobs() |
| self.assertFalse(self.vm.get_qmp_events(wait=False)) |
| |
| # Emulate some writes |
| self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| self.hmp_io_writes(drive1['id'], (('0xba', 0, 512), |
| ('0xef', '16M', '256k'), |
| ('0x46', '32736k', '64k'))) |
| |
| # Create incremental backup targets |
| target0 = self.prepare_backup(dr0bm0) |
| target1 = self.prepare_backup(dr1bm0) |
| |
        # Ask for a new incremental backup for each drive,
| # expecting drive1's backup to fail: |
| transaction = [ |
| transaction_drive_backup(drive0['id'], target0, sync='incremental', |
| format=drive0['fmt'], mode='existing', |
| bitmap=dr0bm0.name), |
| transaction_drive_backup(drive1['id'], target1, sync='incremental', |
| format=drive1['fmt'], mode='existing', |
| bitmap=dr1bm0.name) |
| ] |
| result = self.vm.qmp('transaction', actions=transaction, |
                             properties={'completion-mode': 'grouped'})
| self.assert_qmp(result, 'return', {}) |
| |
| # Observe that drive0's backup is cancelled and drive1 completes with |
| # an error. |
| self.wait_qmp_backup_cancelled(drive0['id']) |
| self.assertFalse(self.wait_qmp_backup(drive1['id'])) |
| error = self.vm.event_wait('BLOCK_JOB_ERROR') |
| self.assert_qmp(error, 'data', {'device': drive1['id'], |
| 'action': 'report', |
| 'operation': 'read'}) |
| self.assertFalse(self.vm.get_qmp_events(wait=False)) |
| self.assert_no_active_block_jobs() |
| |
| # Delete drive0's successful target and eliminate our record of the |
| # unsuccessful drive1 target. Then re-run the same transaction. |
| dr0bm0.del_target() |
| dr1bm0.del_target() |
| target0 = self.prepare_backup(dr0bm0) |
| target1 = self.prepare_backup(dr1bm0) |
| |
| # Re-run the exact same transaction. |
| result = self.vm.qmp('transaction', actions=transaction, |
                             properties={'completion-mode': 'grouped'})
| self.assert_qmp(result, 'return', {}) |
| |
| # Both should complete successfully this time. |
| self.assertTrue(self.wait_qmp_backup(drive0['id'])) |
| self.assertTrue(self.wait_qmp_backup(drive1['id'])) |
| self.make_reference_backup(dr0bm0) |
| self.make_reference_backup(dr1bm0) |
| self.assertFalse(self.vm.get_qmp_events(wait=False)) |
| self.assert_no_active_block_jobs() |
| |
| # And the images should of course validate. |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| def test_sync_dirty_bitmap_missing(self): |
| self.assert_no_active_block_jobs() |
| self.files.append(self.err_img) |
| result = self.vm.qmp('drive-backup', device=self.drives[0]['id'], |
| sync='incremental', format=self.drives[0]['fmt'], |
| target=self.err_img) |
| self.assert_qmp(result, 'error/class', 'GenericError') |
| |
| |
| def test_sync_dirty_bitmap_not_found(self): |
| self.assert_no_active_block_jobs() |
| self.files.append(self.err_img) |
| result = self.vm.qmp('drive-backup', device=self.drives[0]['id'], |
| sync='incremental', bitmap='unknown', |
| format=self.drives[0]['fmt'], target=self.err_img) |
| self.assert_qmp(result, 'error/class', 'GenericError') |
| |
| |
| def test_sync_dirty_bitmap_bad_granularity(self): |
| ''' |
        Test: Verify that an improper granularity is rejected.
| |
| The granularity must always be a power of 2. |
| ''' |
| self.assert_no_active_block_jobs() |
| self.assertRaises(AssertionError, self.add_bitmap, |
| 'bitmap0', self.drives[0], |
| granularity=64000) |
| |
| |
| class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase): |
    '''Incremental backup tests that utilize a blkdebug filter on drive0.'''
| |
| def setUp(self): |
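        # Don't attach the drive here; each test attaches it through a
        # blkdebug node via blockdev-add.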
| drive0 = self.add_node('drive0') |
| self.img_create(drive0['file'], drive0['fmt']) |
| self.write_default_pattern(drive0['file']) |
| self.vm.launch() |
| |
| def test_incremental_failure(self): |
| '''Test: Verify backups made after a failure are correct. |
| |
| Simulate a failure during an incremental backup block job, |
| emulate additional writes, then create another incremental backup |
| afterwards and verify that the backup created is correct. |
| ''' |
| |
| drive0 = self.drives[0] |
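        # Attach drive0 through a blkdebug node that fails the first read_aio
        # request after the first flush, mirroring the drive1 setup in
        # test_transaction_failure above.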
| result = self.vm.qmp('blockdev-add', options={ |
| 'id': drive0['id'], |
| 'driver': drive0['fmt'], |
| 'file': { |
| 'driver': 'blkdebug', |
| 'image': { |
| 'driver': 'file', |
| 'filename': drive0['file'] |
| }, |
| 'set-state': [{ |
| 'event': 'flush_to_disk', |
| 'state': 1, |
| 'new_state': 2 |
| }], |
| 'inject-error': [{ |
| 'event': 'read_aio', |
| 'errno': 5, |
| 'state': 2, |
| 'immediately': False, |
| 'once': True |
| }], |
| } |
| }) |
| self.assert_qmp(result, 'return', {}) |
| |
| self.create_anchor_backup(drive0) |
| self.add_bitmap('bitmap0', drive0) |
        # Note: at this point during a normal execution, the VM would be
        # resumed and would begin issuing IO requests here.
| |
| self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), |
| ('0xfe', '16M', '256k'), |
| ('0x64', '32736k', '64k'))) |
| |
| result = self.create_incremental(validate=False) |
| self.assertFalse(result) |
| self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512), |
| ('0x55', '8M', '352k'), |
| ('0x78', '15872k', '1M'))) |
| self.create_incremental() |
| self.vm.shutdown() |
| self.check_backups() |
| |
| |
| if __name__ == '__main__': |
| iotests.main(supported_fmts=['qcow2']) |