| author | Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 2017-10-12 16:53:13 +0300 |
|---|---|---|
| committer | Jeff Cody <jcody@redhat.com> | 2017-12-18 10:54:13 -0500 |
| commit | 53f1c8794f2c1aea4d2888a3ac4e1b3b8b8b9777 | |
| tree | 4cfb4fa63ceef95fa1e2c0a2fd0cff276efe7fbb /block/backup.c | |
| parent | 085bd08e6f32f0d96885ff8e0fa2896c2fabed50 | |
backup: use copy_bitmap in incremental backup
We can use copy_bitmap instead of sync_bitmap. copy_bitmap is
initialized from sync_bitmap and is more informative: we will not try
to process data that is already in progress (being copied by the write
notifier).
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20171012135313.227864-6-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
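
Before reading the diff below, the shape of the new loop is worth spelling out: backup_run_incremental() now simply walks the set bits of the per-cluster copy_bitmap and retries each cluster until the copy succeeds or the error policy says to stop. The sketch below is a minimal standalone model of that pattern, not QEMU code: a plain uint64_t word stands in for the job's HBitmap, and toy_copy_cluster() is a hypothetical stand-in for backup_do_cow() plus backup_error_action().

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE 65536      /* toy value; the real job computes this */

/* Hypothetical stand-in for backup_do_cow(): 0 on success, <0 on error. */
static int toy_copy_cluster(int64_t cluster, bool *error_is_read)
{
    *error_is_read = false;
    printf("copying cluster %" PRId64 " at offset %" PRId64 "\n",
           cluster, cluster * (int64_t)CLUSTER_SIZE);
    return 0;
}

/* Walk every set bit (dirty cluster) of a 64-cluster copy bitmap. */
static int run_incremental(uint64_t copy_bitmap)
{
    bool error_is_read;
    int ret;

    for (int64_t cluster = 0; cluster < 64; cluster++) {
        if (!(copy_bitmap & (UINT64_C(1) << cluster))) {
            continue;           /* clean cluster, nothing to copy */
        }
        do {
            /* the real loop also yields and checks for cancellation here */
            ret = toy_copy_cluster(cluster, &error_is_read);
            if (ret < 0 && !error_is_read) {
                return ret;     /* model of BLOCK_ERROR_ACTION_REPORT */
            }
        } while (ret < 0);      /* otherwise retry the same cluster */
    }
    return 0;
}

int main(void)
{
    /* clusters 1, 3 and 7 are dirty in this toy bitmap */
    return run_incremental((UINT64_C(1) << 1) | (UINT64_C(1) << 3) |
                           (UINT64_C(1) << 7));
}
```

Compared with the old sync_bitmap iteration there is no granularity conversion left: copy_bitmap is kept at cluster granularity, so one set bit corresponds to exactly one backup_do_cow() call.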
Diffstat (limited to 'block/backup.c')
| -rw-r--r-- | block/backup.c | 55 |

1 file changed, 17 insertions(+), 38 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 8ee220076b..4a16a37229 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -362,49 +362,28 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 
 static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
+    int ret;
     bool error_is_read;
-    int ret = 0;
-    int clusters_per_iter;
-    uint32_t granularity;
-    int64_t offset;
     int64_t cluster;
-    int64_t end;
-    BdrvDirtyBitmapIter *dbi;
-
-    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
-    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
-    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
-
-    /* Find the next dirty sector(s) */
-    while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
-        cluster = offset / job->cluster_size;
-
-        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
-            do {
-                if (yield_and_check(job)) {
-                    goto out;
-                }
-                ret = backup_do_cow(job, cluster * job->cluster_size,
-                                    job->cluster_size, &error_is_read,
-                                    false);
-                if ((ret < 0) &&
-                    backup_error_action(job, error_is_read, -ret) ==
-                    BLOCK_ERROR_ACTION_REPORT) {
-                    goto out;
-                }
-            } while (ret < 0);
-        }
+    HBitmapIter hbi;
 
-        /* If the bitmap granularity is smaller than the backup granularity,
-         * we need to advance the iterator pointer to the next cluster. */
-        if (granularity < job->cluster_size) {
-            bdrv_set_dirty_iter(dbi, cluster * job->cluster_size);
-        }
+    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
+    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+        do {
+            if (yield_and_check(job)) {
+                return 0;
+            }
+            ret = backup_do_cow(job, cluster * job->cluster_size,
+                                job->cluster_size, &error_is_read, false);
+            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
+                           BLOCK_ERROR_ACTION_REPORT)
+            {
+                return ret;
+            }
+        } while (ret < 0);
     }
 
-out:
-    bdrv_dirty_iter_free(dbi);
-    return ret;
+    return 0;
 }
 
 /* init copy_bitmap from sync_bitmap */
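
The last context line above points at the helper that initializes copy_bitmap from sync_bitmap. Per the commit message, that shared bitmap is what makes it more informative than iterating sync_bitmap directly: bits are consumed as clusters get copied, whether by this sequential loop or by the copy-before-write notifier, so neither path re-processes data the other is already handling. The following standalone sketch only models that hand-off; claim_cluster(), notifier_copy() and loop_copy() are hypothetical names, and a single uint64_t word again stands in for the HBitmap.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy copy bitmap: clusters 2 and 5 still need to be copied. */
static uint64_t copy_bitmap = (UINT64_C(1) << 2) | (UINT64_C(1) << 5);

/* Clear the cluster's bit and report whether it was still pending.
 * (No locking shown; this is only a model.) */
static bool claim_cluster(int64_t cluster)
{
    uint64_t mask = UINT64_C(1) << cluster;
    bool was_pending = (copy_bitmap & mask) != 0;

    copy_bitmap &= ~mask;
    return was_pending;
}

/* Models the copy-before-write notifier: runs when the guest overwrites data. */
static void notifier_copy(int64_t cluster)
{
    if (claim_cluster(cluster)) {
        printf("notifier copied cluster %lld\n", (long long)cluster);
    }
}

/* Models the sequential loop in backup_run_incremental(). */
static void loop_copy(int64_t cluster)
{
    if (claim_cluster(cluster)) {
        printf("main loop copied cluster %lld\n", (long long)cluster);
    } else {
        printf("cluster %lld already handled, skipped\n", (long long)cluster);
    }
}

int main(void)
{
    notifier_copy(5);   /* a guest write hits cluster 5 before the loop does */
    loop_copy(2);
    loop_copy(5);       /* skipped: its bit was already cleared */
    return 0;
}
```

Running it shows cluster 5 being copied once by the notifier and then skipped by the loop, which is the double copy the commit message says this change avoids.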