Diffstat (limited to 'block')

 -rw-r--r--  block/block-backend.c   |  13
 -rw-r--r--  block/io.c              |  36
 -rw-r--r--  block/qapi.c            |   2
 -rw-r--r--  block/throttle-groups.c | 129

 4 files changed, 102 insertions, 78 deletions
diff --git a/block/block-backend.c b/block/block-backend.c
index 964a205d38..6880659665 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -107,8 +107,12 @@ BlockBackend *blk_new(Error **errp)
     blk = g_new0(BlockBackend, 1);
     blk->refcnt = 1;
+    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
+    qemu_co_queue_init(&blk->public.throttled_reqs[1]);
+
     notifier_list_init(&blk->remove_bs_notifiers);
     notifier_list_init(&blk->insert_bs_notifiers);
+
     QTAILQ_INSERT_TAIL(&block_backends, blk, link);
     return blk;
 }
@@ -437,7 +441,7 @@ void blk_remove_bs(BlockBackend *blk)
     notifier_list_notify(&blk->remove_bs_notifiers, blk);
     blk_update_root_state(blk);
-    if (blk->root->bs->throttle_state) {
+    if (blk->public.throttle_state) {
         bdrv_io_limits_disable(blk->root->bs);
     }
@@ -795,7 +799,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
 int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                           int count)
 {
-    BlockDriverState *bs = blk_bs(blk);
     int ret;
     ret = blk_check_byte_request(blk, offset, count);
@@ -803,9 +806,9 @@ int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
         return ret;
     }
-    bdrv_no_throttling_begin(bs);
+    bdrv_no_throttling_begin(blk_bs(blk));
     ret = blk_pread(blk, offset, buf, count);
-    bdrv_no_throttling_end(bs);
+    bdrv_no_throttling_end(blk_bs(blk));
     return ret;
 }
@@ -1524,7 +1527,7 @@ void blk_update_root_state(BlockBackend *blk)
         g_free(blk->root_state.throttle_group);
         throttle_group_unref(blk->root_state.throttle_state);
     }
-    if (blk->root->bs->throttle_state) {
+    if (blk->public.throttle_state) {
         const char *name = throttle_group_get_name(blk);
         blk->root_state.throttle_group = g_strdup(name);
         blk->root_state.throttle_state = throttle_group_incref(name);
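The block-backend.c hunks above rely on new throttling fields in the embedded blk->public structure. As orientation, a hedged mock-up of what this series adds to BlockBackendPublic, with stand-in types; the real definitions live in QEMU's block headers and differ in detail:

    /* Hypothetical sketch only: the throttling fields this series moves
     * from BlockDriverState into BlockBackendPublic. */
    typedef struct CoQueue { void *entries; } CoQueue;  /* coroutine wait queue */
    typedef struct ThrottleState ThrottleState;         /* shared, group-owned  */
    typedef struct ThrottleTimers { void *timers[2]; } ThrottleTimers;

    typedef struct BlockBackendPublic {
        CoQueue throttled_reqs[2];       /* [0] = reads, [1] = writes        */
        unsigned int pending_reqs[2];    /* requests queued, per direction   */
        ThrottleState *throttle_state;   /* NULL while no limits are set     */
        ThrottleTimers throttle_timers;  /* this member's timers             */
        unsigned int io_limits_disabled; /* nesting counter, see io.c hunks  */
    } BlockBackendPublic;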
diff --git a/block/io.c b/block/io.c
index f6fb86883a..bdbaa1c0da 100644
--- a/block/io.c
+++ b/block/io.c
@@ -55,20 +55,31 @@ void bdrv_set_io_limits(BlockDriverState *bs,
 void bdrv_no_throttling_begin(BlockDriverState *bs)
 {
-    if (bs->io_limits_disabled++ == 0) {
-        throttle_group_restart_bs(bs);
+    if (!bs->blk) {
+        return;
+    }
+
+    if (blk_get_public(bs->blk)->io_limits_disabled++ == 0) {
+        throttle_group_restart_blk(bs->blk);
     }
 }
 void bdrv_no_throttling_end(BlockDriverState *bs)
 {
-    assert(bs->io_limits_disabled);
-    --bs->io_limits_disabled;
+    BlockBackendPublic *blkp;
+
+    if (!bs->blk) {
+        return;
+    }
+
+    blkp = blk_get_public(bs->blk);
+    assert(blkp->io_limits_disabled);
+    --blkp->io_limits_disabled;
 }
 void bdrv_io_limits_disable(BlockDriverState *bs)
 {
-    assert(bs->throttle_state);
+    assert(blk_get_public(bs->blk)->throttle_state);
     bdrv_no_throttling_begin(bs);
     throttle_group_unregister_blk(bs->blk);
     bdrv_no_throttling_end(bs);
@@ -77,14 +88,16 @@ void bdrv_io_limits_disable(BlockDriverState *bs)
 /* should be called before bdrv_set_io_limits if a limit is set */
 void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
 {
-    assert(!bs->throttle_state);
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+
+    assert(!blkp->throttle_state);
     throttle_group_register_blk(bs->blk, group);
 }
 void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
 {
     /* this bs is not part of any group */
-    if (!bs->throttle_state) {
+    if (!blk_get_public(bs->blk)->throttle_state) {
         return;
     }
@@ -178,14 +191,15 @@ void bdrv_disable_copy_on_read(BlockDriverState *bs)
 bool bdrv_requests_pending(BlockDriverState *bs)
 {
     BdrvChild *child;
+    BlockBackendPublic *blkp = bs->blk ? blk_get_public(bs->blk) : NULL;
     if (!QLIST_EMPTY(&bs->tracked_requests)) {
         return true;
     }
-    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
+    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[0])) {
         return true;
     }
-    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
+    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[1])) {
         return true;
     }
@@ -1070,7 +1084,7 @@ int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
     }
     /* throttling disk I/O */
-    if (bs->throttle_state) {
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
         throttle_group_co_io_limits_intercept(bs, bytes, false);
     }
@@ -1431,7 +1445,7 @@ int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
     }
     /* throttling disk I/O */
-    if (bs->throttle_state) {
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
         throttle_group_co_io_limits_intercept(bs, bytes, true);
     }
diff --git a/block/qapi.c b/block/qapi.c
index a3e514d89b..1e4bb8a0a5 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -67,7 +67,7 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
     info->backing_file_depth = bdrv_get_backing_file_depth(bs);
     info->detect_zeroes = bs->detect_zeroes;
-    if (bs->throttle_state) {
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
         ThrottleConfig cfg;
         throttle_group_get_config(bs, &cfg);
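After these io.c changes, bdrv_no_throttling_begin()/end() keep a nesting counter in BlockBackendPublic, so the unthrottled path can be entered recursively and throttling only resumes when the outermost caller leaves. A self-contained sketch of the pattern, under the assumption of a single global counter where QEMU keeps one per backend:

    #include <assert.h>
    #include <stdio.h>

    static unsigned int io_limits_disabled; /* per-backend in the real code */

    static void no_throttling_begin(void)
    {
        /* Only the outermost caller restarts queued requests, mirroring
         * throttle_group_restart_blk() being invoked on the 0 -> 1 edge. */
        if (io_limits_disabled++ == 0) {
            printf("restart queued requests\n");
        }
    }

    static void no_throttling_end(void)
    {
        assert(io_limits_disabled);
        --io_limits_disabled;
    }

    int main(void)
    {
        no_throttling_begin();   /* prints once */
        no_throttling_begin();   /* nested call: no second restart */
        no_throttling_end();
        no_throttling_end();     /* counter back to zero; throttling resumes */
        return 0;
    }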
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index e50ccaaf7e..56dc311867 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -30,7 +30,7 @@
 #include "sysemu/qtest.h"
 /* The ThrottleGroup structure (with its ThrottleState) is shared
- * among different BlockDriverState and it's independent from
+ * among different BlockBackends and it's independent from
  * AioContext, so in order to use it from different threads it needs
  * its own locking.
  *
@@ -40,18 +40,18 @@
  * The whole ThrottleGroup structure is private and invisible to
  * outside users, that only use it through its ThrottleState.
  *
- * In addition to the ThrottleGroup structure, BlockDriverState has
+ * In addition to the ThrottleGroup structure, BlockBackendPublic has
  * fields that need to be accessed by other members of the group and
- * therefore also need to be protected by this lock. Once a BDS is
- * registered in a group those fields can be accessed by other threads
- * any time.
+ * therefore also need to be protected by this lock. Once a
+ * BlockBackend is registered in a group those fields can be accessed
+ * by other threads any time.
  *
  * Again, all this is handled internally and is mostly transparent to
  * the outside. The 'throttle_timers' field however has an additional
  * constraint because it may be temporarily invalid (see for example
  * bdrv_set_aio_context()). Therefore in this file a thread will
- * access some other BDS's timers only after verifying that that BDS
- * has throttled requests in the queue.
+ * access some other BlockBackend's timers only after verifying that
+ * that BlockBackend has throttled requests in the queue.
  */
 typedef struct ThrottleGroup {
     char *name; /* This is constant during the lifetime of the group */
@@ -141,8 +141,8 @@ void throttle_group_unref(ThrottleState *ts)
  */
 const char *throttle_group_get_name(BlockBackend *blk)
 {
-    ThrottleGroup *tg = container_of(blk_bs(blk)->throttle_state,
-                                     ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     return tg->name;
 }
@@ -156,10 +156,10 @@ const char *throttle_group_get_name(BlockBackend *blk)
  */
 static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
-    BlockBackendPublic *next = QLIST_NEXT(blk_get_public(blk), round_robin);
+    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);
     if (!next) {
         next = QLIST_FIRST(&tg->head);
     }
@@ -180,15 +180,15 @@ static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
  */
 static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
 {
-    ThrottleGroup *tg = container_of(blk_bs(blk)->throttle_state,
-                                     ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     BlockBackend *token, *start;
     start = token = tg->tokens[is_write];
     /* get next bs round in round robin style */
     token = throttle_group_next_blk(token);
-    while (token != start && !blk_bs(token)->pending_reqs[is_write]) {
+    while (token != start && !blkp->pending_reqs[is_write]) {
         token = throttle_group_next_blk(token);
     }
@@ -196,7 +196,7 @@ static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
      * then decide the token is the current bs because chances are
      * the current bs get the current request queued.
      */
-    if (token == start && !blk_bs(token)->pending_reqs[is_write]) {
+    if (token == start && !blkp->pending_reqs[is_write]) {
         token = blk;
     }
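next_throttle_token() performs a round-robin scan: starting at the member after the current token, it returns the first member with queued requests of the given type, falling back to the requesting backend itself. A standalone model of that scan, assuming a fixed index ring in place of the QLIST of BlockBackendPublic entries:

    #include <stdbool.h>
    #include <stdio.h>

    #define N_MEMBERS 3

    /* Hypothetical per-member counters, mirroring pending_reqs[] above. */
    static unsigned pending_reqs[N_MEMBERS][2];

    /* Walk the ring from the current token; return the first member with
     * queued requests of this type, else the requester itself. */
    static int next_token(int tokens[2], int self, bool is_write)
    {
        int start = tokens[is_write];
        int token = (start + 1) % N_MEMBERS;

        while (token != start && !pending_reqs[token][is_write]) {
            token = (token + 1) % N_MEMBERS;
        }
        if (token == start && !pending_reqs[token][is_write]) {
            token = self;   /* nobody is queued: the caller becomes the token */
        }
        return token;
    }

    int main(void)
    {
        int tokens[2] = {0, 0};
        pending_reqs[2][1] = 1;   /* member 2 has a write queued */
        printf("write token: %d\n", next_token(tokens, 1, true));  /* -> 2 */
        printf("read token:  %d\n", next_token(tokens, 1, false)); /* -> 1 */
        return 0;
    }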
@@ -215,12 +215,13 @@ static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
  */
 static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
 {
-    ThrottleState *ts = blk_bs(blk)->throttle_state;
-    ThrottleTimers *tt = &blk_bs(blk)->throttle_timers;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
+    ThrottleTimers *tt = &blkp->throttle_timers;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool must_wait;
-    if (blk_bs(blk)->io_limits_disabled) {
+    if (blkp->io_limits_disabled) {
         return false;
     }
@@ -249,14 +250,14 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
  */
 static void schedule_next_request(BlockBackend *blk, bool is_write)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     bool must_wait;
     BlockBackend *token;
     /* Check if there's any pending request to schedule next */
     token = next_throttle_token(blk, is_write);
-    if (!blk_bs(token)->pending_reqs[is_write]) {
+    if (!blkp->pending_reqs[is_write]) {
         return;
     }
@@ -265,12 +266,12 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
     /* If it doesn't have to wait, queue it for immediate execution */
     if (!must_wait) {
-        /* Give preference to requests from the current bs */
+        /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&bs->throttled_reqs[is_write])) {
+            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
             token = blk;
         } else {
-            ThrottleTimers *tt = &blk_bs(token)->throttle_timers;
+            ThrottleTimers *tt = &blkp->throttle_timers;
             int64_t now = qemu_clock_get_ns(tt->clock_type);
             timer_mod(tt->timers[is_write], now + 1);
             tg->any_timer_armed[is_write] = true;
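The two scheduling helpers above preserve a group-wide invariant: at most one timer per direction is armed across all members (tg->any_timer_armed), and a backend with io_limits_disabled set bypasses throttling entirely. A loose, untimed reduction of that decision, with a bare over_limit flag standing in where the real code consults the shared ThrottleState:

    #include <stdbool.h>

    typedef struct ThrottleGroupModel {
        bool any_timer_armed[2];   /* one flag per direction, group-wide */
    } ThrottleGroupModel;

    /* Returns true when the caller must queue itself and wait. */
    static bool schedule_timer(ThrottleGroupModel *tg, bool is_write,
                               unsigned io_limits_disabled, bool over_limit)
    {
        if (io_limits_disabled) {
            return false;                  /* bypass active: run immediately */
        }
        if (tg->any_timer_armed[is_write]) {
            return true;                   /* another member already waits   */
        }
        if (over_limit) {
            /* timer_mod() would arm this member's ThrottleTimers here */
            tg->any_timer_armed[is_write] = true;
        }
        return over_limit;
    }

    int main(void)
    {
        ThrottleGroupModel tg = { { false, false } };
        bool w1 = schedule_timer(&tg, true, 0, true);   /* arms the timer  */
        bool w2 = schedule_timer(&tg, true, 0, false);  /* must still wait */
        return (w1 && w2) ? 0 : 1;
    }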
@@ -294,37 +295,40 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
     bool must_wait;
     BlockBackend *token;
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackend *blk = bs->blk;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     /* First we check if this I/O has to be throttled. */
-    token = next_throttle_token(bs->blk, is_write);
+    token = next_throttle_token(blk, is_write);
     must_wait = throttle_group_schedule_timer(token, is_write);
     /* Wait if there's a timer set or queued requests of this type */
-    if (must_wait || bs->pending_reqs[is_write]) {
-        bs->pending_reqs[is_write]++;
+    if (must_wait || blkp->pending_reqs[is_write]) {
+        blkp->pending_reqs[is_write]++;
         qemu_mutex_unlock(&tg->lock);
-        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
+        qemu_co_queue_wait(&blkp->throttled_reqs[is_write]);
         qemu_mutex_lock(&tg->lock);
-        bs->pending_reqs[is_write]--;
+        blkp->pending_reqs[is_write]--;
     }
     /* The I/O will be executed, so do the accounting */
-    throttle_account(bs->throttle_state, is_write, bytes);
+    throttle_account(blkp->throttle_state, is_write, bytes);
     /* Schedule the next request */
-    schedule_next_request(bs->blk, is_write);
+    schedule_next_request(blk, is_write);
     qemu_mutex_unlock(&tg->lock);
 }
-void throttle_group_restart_bs(BlockDriverState *bs)
+void throttle_group_restart_blk(BlockBackend *blk)
 {
+    BlockBackendPublic *blkp = blk_get_public(blk);
     int i;
     for (i = 0; i < 2; i++) {
-        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
+        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
             ;
         }
     }
@@ -339,8 +343,9 @@ void throttle_group_restart_bs(BlockDriverState *bs)
  */
 void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
 {
-    ThrottleTimers *tt = &bs->throttle_timers;
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+    ThrottleTimers *tt = &blkp->throttle_timers;
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     /* throttle_config() cancels the timers */
@@ -353,8 +358,8 @@ void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
     throttle_config(ts, tt, cfg);
     qemu_mutex_unlock(&tg->lock);
-    qemu_co_enter_next(&bs->throttled_reqs[0]);
-    qemu_co_enter_next(&bs->throttled_reqs[1]);
+    qemu_co_enter_next(&blkp->throttled_reqs[0]);
+    qemu_co_enter_next(&blkp->throttled_reqs[1]);
 }
 /* Get the throttle configuration from a particular group. Similar to
@@ -366,7 +371,8 @@ void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
  */
 void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg)
 {
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     throttle_get_config(ts, cfg);
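In throttle_group_co_io_limits_intercept() a request queues itself for two distinct reasons: the group is over its limit (must_wait), or this backend already has requests of the same type waiting, which preserves per-backend FIFO ordering. A minimal, untimed model of just that predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* A request waits when a throttle timer is pending or when earlier
     * requests of the same type are already queued for this backend. */
    static bool must_queue(bool timer_pending, unsigned pending_reqs)
    {
        return timer_pending || pending_reqs > 0;
    }

    int main(void)
    {
        unsigned pending = 0;

        /* First request while over the limit: queued because of the timer. */
        if (must_queue(true, pending)) {
            pending++;
        }
        /* Second request while under the limit: still queued, behind req 1. */
        if (must_queue(false, pending)) {
            pending++;
        }
        printf("queued requests: %u\n", pending);   /* prints 2 */
        return 0;
    }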
@@ -376,12 +382,13 @@ void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg)
 /* ThrottleTimers callback. This wakes up a request that was waiting
  * because it had been throttled.
  *
- * @bs:        the BlockDriverState whose request had been throttled
+ * @blk:       the BlockBackend whose request had been throttled
  * @is_write:  the type of operation (read/write)
  */
-static void timer_cb(BlockDriverState *bs, bool is_write)
+static void timer_cb(BlockBackend *blk, bool is_write)
 {
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool empty_queue;
@@ -391,13 +398,13 @@ static void timer_cb(BlockDriverState *bs, bool is_write)
     qemu_mutex_unlock(&tg->lock);
     /* Run the request that was waiting for this timer */
-    empty_queue = !qemu_co_enter_next(&bs->throttled_reqs[is_write]);
+    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
     if (empty_queue) {
         qemu_mutex_lock(&tg->lock);
-        schedule_next_request(bs->blk, is_write);
+        schedule_next_request(blk, is_write);
         qemu_mutex_unlock(&tg->lock);
     }
 }
@@ -422,7 +429,7 @@ static void write_timer_cb(void *opaque)
 void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
 {
     int i;
-    BlockDriverState *bs = blk_bs(blk);
+    BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleState *ts = throttle_group_incref(groupname);
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     int clock_type = QEMU_CLOCK_REALTIME;
@@ -432,7 +439,7 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
         clock_type = QEMU_CLOCK_VIRTUAL;
     }
-    bs->throttle_state = ts;
+    blkp->throttle_state = ts;
     qemu_mutex_lock(&tg->lock);
     /* If the ThrottleGroup is new set this BlockBackend as the token */
     for (i = 0; i < 2; i++) {
         if (!tg->tokens[i]) {
             tg->tokens[i] = blk;
         }
     }
@@ -442,14 +449,14 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
-    QLIST_INSERT_HEAD(&tg->head, blk_get_public(blk), round_robin);
+    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);
-    throttle_timers_init(&bs->throttle_timers,
-                         bdrv_get_aio_context(bs),
+    throttle_timers_init(&blkp->throttle_timers,
+                         blk_get_aio_context(blk),
                          clock_type,
                          read_timer_cb,
                          write_timer_cb,
-                         bs);
+                         blk);
     qemu_mutex_unlock(&tg->lock);
 }
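timer_cb() wakes exactly one queued request and only falls back to schedule_next_request() when the queue turned out to be empty; otherwise the woken request schedules its successor itself when it finishes. A hypothetical reduction, with a plain counter standing in for the coroutine queue:

    #include <stdbool.h>
    #include <stdio.h>

    static bool wake_one(unsigned *queued)  /* stand-in for qemu_co_enter_next() */
    {
        if (*queued == 0) {
            return false;
        }
        (*queued)--;
        return true;
    }

    static void timer_fired(bool *any_timer_armed, unsigned *queued)
    {
        *any_timer_armed = false;           /* done under tg->lock in QEMU */
        if (!wake_one(queued)) {
            printf("queue empty: schedule_next_request()\n");
        }
    }

    int main(void)
    {
        bool armed = true;
        unsigned queued = 1;
        timer_fired(&armed, &queued);       /* wakes the queued request    */
        timer_fired(&armed, &queued);       /* nothing queued: reschedules */
        return 0;
    }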
@@ -466,19 +473,19 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
  */
 void throttle_group_unregister_blk(BlockBackend *blk)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     int i;
-    assert(bs->pending_reqs[0] == 0 && bs->pending_reqs[1] == 0);
-    assert(qemu_co_queue_empty(&bs->throttled_reqs[0]));
-    assert(qemu_co_queue_empty(&bs->throttled_reqs[1]));
+    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
+    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
+    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));
     qemu_mutex_lock(&tg->lock);
     for (i = 0; i < 2; i++) {
         if (tg->tokens[i] == blk) {
             BlockBackend *token = throttle_group_next_blk(blk);
-            /* Take care of the case where this is the last bs in the group */
+            /* Take care of the case where this is the last blk in the group */
             if (token == blk) {
                 token = NULL;
             }
@@ -486,13 +493,13 @@ void throttle_group_unregister_blk(BlockBackend *blk)
         }
     }
-    /* remove the current bs from the list */
-    QLIST_REMOVE(blk_get_public(blk), round_robin);
-    throttle_timers_destroy(&bs->throttle_timers);
+    /* remove the current blk from the list */
+    QLIST_REMOVE(blkp, round_robin);
+    throttle_timers_destroy(&blkp->throttle_timers);
     qemu_mutex_unlock(&tg->lock);
     throttle_group_unref(&tg->ts);
-    bs->throttle_state = NULL;
+    blkp->throttle_state = NULL;
 }
 static void throttle_groups_init(void)
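Finally, throttle_group_unregister_blk() must hand off any round-robin token the leaving member holds. A small standalone model of the handoff, assuming an index ring in place of the QLIST, with -1 playing the role of the NULL token:

    #include <stdio.h>

    #define N 3
    static int present[N] = {1, 1, 1};   /* hypothetical group membership */

    static int next_member(int i)        /* ring successor among present members */
    {
        int j = i;
        do {
            j = (j + 1) % N;
        } while (!present[j] && j != i);
        return j;
    }

    /* Mirror of the token handling above: if the leaver owns a token, hand
     * it to the next member, or drop it when the leaver was the last one. */
    static void unregister_member(int tokens[2], int leaver)
    {
        for (int i = 0; i < 2; i++) {
            if (tokens[i] == leaver) {
                int token = next_member(leaver);
                tokens[i] = (token == leaver) ? -1 : token;
            }
        }
        present[leaver] = 0;
    }

    int main(void)
    {
        int tokens[2] = {0, 0};
        unregister_member(tokens, 0);            /* tokens move to member 1 */
        printf("%d %d\n", tokens[0], tokens[1]); /* 1 1 */
        unregister_member(tokens, 1);            /* tokens move to member 2 */
        unregister_member(tokens, 2);            /* last member: tokens cleared */
        printf("%d %d\n", tokens[0], tokens[1]); /* -1 -1 */
        return 0;
    }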