/*
* Dirtyrate common functions
*
* Copyright (c) 2020 HUAWEI TECHNOLOGIES CO., LTD.
*
* Authors:
* Chuan Zheng <zhengchuan@huawei.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef QEMU_MIGRATION_DIRTYRATE_H
#define QEMU_MIGRATION_DIRTYRATE_H
/*
 * Number of pages sampled out of each gigabyte of a RAMBlock by default.
 * Sample 512 pages per GB as default.
 * TODO: Make it configurable.
 */
#define DIRTYRATE_DEFAULT_SAMPLE_PAGES 512
/*
 * Buffer length used to record a ramblock idstr
 * (see RamblockDirtyInfo.idstr below).
 */
#define RAMBLOCK_INFO_MAX_LEN 256
/*
 * Minimum RAMBlock size to sample, in megabytes.
 * NOTE(review): presumably blocks smaller than this are skipped by the
 * sampling code in the corresponding .c file — confirm against caller.
 */
#define MIN_RAMBLOCK_SIZE 128
/*
 * User-supplied parameters controlling one dirty-rate measurement:
 * how densely to sample and how long to wait between the two hash passes.
 */
struct DirtyRateConfig {
    uint64_t sample_pages_per_gigabytes; /* sample pages per GB */
    int64_t sample_period_seconds; /* time duration between two sampling rounds */
};
/*
 * Per-RAMBlock sampling state: which pages were sampled, their hashes,
 * and how many of them were found dirty on the second pass.
 */
struct RamblockDirtyInfo {
    char idstr[RAMBLOCK_INFO_MAX_LEN]; /* idstr for each ramblock */
    uint8_t *ramblock_addr; /* base host address of the ramblock we measure */
    uint64_t ramblock_pages; /* ramblock size in units of TARGET_PAGE_SIZE */
    uint64_t *sample_page_vfn; /* array of page offsets (relative to base) for sampled pages */
    uint64_t sample_pages_count; /* number of entries in sample_page_vfn/hash_result */
    uint64_t sample_dirty_count; /* count of sampled pages found dirty */
    uint32_t *hash_result; /* array of hash results for sampled pages, parallel to sample_page_vfn */
};
/*
 * Aggregated result of one measurement, accumulated across all sampled
 * RAMBlocks.
 */
struct DirtyRateStat {
    uint64_t total_dirty_samples; /* total dirty sampled pages across all blocks */
    uint64_t total_sample_count; /* total sampled pages across all blocks */
    uint64_t total_block_mem_MB; /* size of total sampled memory in MB */
    int64_t dirty_rate; /* computed dirty rate in MB/s */
    int64_t start_time; /* calculation start time in units of second */
    int64_t calc_time; /* time duration between the two sampling passes, in seconds */
};
/*
 * Thread entry point for the dirty-rate measurement.
 * NOTE(review): signature matches a pthread-style start routine; the
 * meaning of 'arg' and the return value are defined in the .c file —
 * confirm there before relying on them.
 */
void *get_dirtyrate_thread(void *arg);
#endif