qemu/block/throttle-groups.c
/*
 * QEMU block throttling group infrastructure
 *
 * Copyright (C) Nodalink, EURL. 2014
 * Copyright (C) Igalia, S.L. 2015
 *
 * Authors:
 *   Benoît Canet <benoit.canet@nodalink.com>
 *   Alberto Garcia <berto@igalia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/throttle-groups.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "sysemu/qtest.h"

/* The ThrottleGroup structure (with its ThrottleState) is shared
 * among different BlockBackends and it's independent of AioContext,
 * so in order to use it from different threads it needs its own
 * locking.
 *
 * This locking is however handled internally in this file, so it's
 * transparent to outside users.
 *
 * The whole ThrottleGroup structure is private and invisible to
 * outside users, who only use it through its ThrottleState.
 *
 * In addition to the ThrottleGroup structure, BlockBackendPublic has
 * fields that need to be accessed by other members of the group and
 * therefore also need to be protected by this lock. Once a
 * BlockBackend is registered in a group those fields can be accessed
 * by other threads any time.
 *
 * Again, all this is handled internally and is mostly transparent to
 * the outside. The 'throttle_timers' field however has an additional
 * constraint because it may be temporarily invalid (see for example
 * bdrv_set_aio_context()). Therefore in this file a thread will
 * access some other BlockBackend's timers only after verifying that
 * that BlockBackend has throttled requests in the queue.
 */
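
/* Typical lifecycle of a group member, shown here as a rough usage
 * sketch (the group name "group0" is just an example):
 *
 *     throttle_group_register_blk(blk, "group0");
 *     ...
 *     throttle_group_co_io_limits_intercept(blk, bytes, is_write);
 *     ...
 *     throttle_group_unregister_blk(blk);
 *
 * throttle_group_config() and throttle_group_get_config() may be
 * called at any point in between to update or read the group limits.
 */
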
typedef struct ThrottleGroup {
    char *name; /* This is constant during the lifetime of the group */

    QemuMutex lock; /* This lock protects the following four fields */
    ThrottleState ts;
    QLIST_HEAD(, BlockBackendPublic) head;
    BlockBackend *tokens[2];
    bool any_timer_armed[2];

    /* These two are protected by the global throttle_groups_lock */
    unsigned refcount;
    QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;

static QemuMutex throttle_groups_lock;
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
    QTAILQ_HEAD_INITIALIZER(throttle_groups);

/* Increments the reference count of a ThrottleGroup given its name.
 *
 * If no ThrottleGroup is found with the given name a new one is
 * created.
 *
 * @name: the name of the ThrottleGroup
 * @ret:  the ThrottleState member of the ThrottleGroup
 */
ThrottleState *throttle_group_incref(const char *name)
{
    ThrottleGroup *tg = NULL;
    ThrottleGroup *iter;

    qemu_mutex_lock(&throttle_groups_lock);

    /* Look for an existing group with that name */
    QTAILQ_FOREACH(iter, &throttle_groups, list) {
        if (!strcmp(name, iter->name)) {
            tg = iter;
            break;
        }
    }

    /* Create a new one if not found */
    if (!tg) {
        tg = g_new0(ThrottleGroup, 1);
        tg->name = g_strdup(name);
        qemu_mutex_init(&tg->lock);
        throttle_init(&tg->ts);
        QLIST_INIT(&tg->head);

        QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
    }

    tg->refcount++;

    qemu_mutex_unlock(&throttle_groups_lock);

    return &tg->ts;
}
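
/* Illustrative pairing of the refcounting API (a sketch, not code from
 * this file): a caller that resolves a group by name owns a reference
 * and must drop it with throttle_group_unref() when done:
 *
 *     ThrottleState *ts = throttle_group_incref("group0");
 *     ...
 *     throttle_group_unref(ts);
 */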

/* Decrease the reference count of a ThrottleGroup.
 *
 * When the reference count reaches zero the ThrottleGroup is
 * destroyed.
 *
 * @ts:  The ThrottleGroup to unref, given by its ThrottleState member
 */
void throttle_group_unref(ThrottleState *ts)
{
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);

    qemu_mutex_lock(&throttle_groups_lock);
    if (--tg->refcount == 0) {
        QTAILQ_REMOVE(&throttle_groups, tg, list);
        qemu_mutex_destroy(&tg->lock);
        g_free(tg->name);
        g_free(tg);
    }
    qemu_mutex_unlock(&throttle_groups_lock);
}

/* Get the name from a BlockBackend's ThrottleGroup. The name (and the pointer)
 * is guaranteed to remain constant during the lifetime of the group.
 *
 * @blk:  a BlockBackend that is a member of a throttling group
 * @ret:  the name of the group.
 */
const char *throttle_group_get_name(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    return tg->name;
}

/* Return the next BlockBackend in the round-robin sequence, simulating a
 * circular list.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the current BlockBackend
 * @ret: the next BlockBackend in the sequence
 */
static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);

    if (!next) {
        next = QLIST_FIRST(&tg->head);
    }

    return blk_by_public(next);
}

/*
 * Return whether a BlockBackend has pending requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk: the BlockBackend
 * @is_write:  the type of operation (read/write)
 * @ret:       whether the BlockBackend has pending requests.
 */
static inline bool blk_has_pending_reqs(BlockBackend *blk,
                                        bool is_write)
{
    const BlockBackendPublic *blkp = blk_get_public(blk);
    return blkp->pending_reqs[is_write];
}

/* Return the next BlockBackend in the round-robin sequence with pending I/O
 * requests.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:       the current BlockBackend
 * @is_write:  the type of operation (read/write)
 * @ret:       the next BlockBackend with pending requests, or blk if there is
 *             none.
 */
static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    BlockBackend *token, *start;

    start = token = tg->tokens[is_write];

    /* get the next BlockBackend in round-robin fashion */
    token = throttle_group_next_blk(token);
    while (token != start && !blk_has_pending_reqs(token, is_write)) {
        token = throttle_group_next_blk(token);
    }

    /* If no I/O is queued for scheduling on the next round-robin token
     * then make the token point to the caller's BlockBackend, since
     * chances are the current request will end up queued on it.
     */
    if (token == start && !blk_has_pending_reqs(token, is_write)) {
        token = blk;
    }

    /* Either we return the original BB, or one with pending requests */
    assert(token == blk || blk_has_pending_reqs(token, is_write));

    return token;
}
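
/* Worked example for the round-robin walk above (illustrative, with
 * hypothetical members A -> B -> C): if tg->tokens[is_write] is A and
 * only C has pending requests, the loop probes B, then C, and returns
 * C. If no member has pending requests the walk returns to A and the
 * caller's blk is handed back instead.
 */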

/* Check if the next I/O request for a BlockBackend needs to be throttled or
 * not. If there's no timer set in this group, set one and update the token
 * accordingly.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:        the current BlockBackend
 * @is_write:   the type of operation (read/write)
 * @ret:        whether the I/O request needs to be throttled or not
 */
static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool must_wait;

    if (blkp->io_limits_disabled) {
        return false;
    }

    /* Check if any of the timers in this group is already armed */
    if (tg->any_timer_armed[is_write]) {
        return true;
    }

    must_wait = throttle_schedule_timer(ts, tt, is_write);

    /* If a timer just got armed, set blk as the current token */
    if (must_wait) {
        tg->tokens[is_write] = blk;
        tg->any_timer_armed[is_write] = true;
    }

    return must_wait;
}

/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
 *
 * @blk:       the current BlockBackend
 * @is_write:  the type of operation (read/write)
 */
static void schedule_next_request(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool must_wait;
    BlockBackend *token;

    /* Check if there's any pending request to schedule next */
    token = next_throttle_token(blk, is_write);
    if (!blk_has_pending_reqs(token, is_write)) {
        return;
    }

    /* Set a timer for the request if it needs to be throttled */
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* If it doesn't have to wait, queue it for immediate execution */
    if (!must_wait) {
        /* Give preference to requests from the current blk */
        if (qemu_in_coroutine() &&
            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
            token = blk;
        } else {
            ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
            int64_t now = qemu_clock_get_ns(tt->clock_type);
            timer_mod(tt->timers[is_write], now + 1);
            tg->any_timer_armed[is_write] = true;
        }
        tg->tokens[is_write] = token;
    }
}

/* Check if an I/O request needs to be throttled, wait and set a timer
 * if necessary, and schedule the next request using a round robin
 * algorithm.
 *
 * @blk:       the current BlockBackend
 * @bytes:     the number of bytes for this I/O
 * @is_write:  the type of operation (read/write)
 */
void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
                                                        unsigned int bytes,
                                                        bool is_write)
{
    bool must_wait;
    BlockBackend *token;

    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);

    /* First we check if this I/O has to be throttled. */
    token = next_throttle_token(blk, is_write);
    must_wait = throttle_group_schedule_timer(token, is_write);

    /* Wait if there's a timer set or queued requests of this type */
    if (must_wait || blkp->pending_reqs[is_write]) {
        blkp->pending_reqs[is_write]++;
        qemu_mutex_unlock(&tg->lock);
        qemu_co_queue_wait(&blkp->throttled_reqs[is_write]);
        qemu_mutex_lock(&tg->lock);
        blkp->pending_reqs[is_write]--;
    }

    /* The I/O will be executed, so do the accounting */
    throttle_account(blkp->throttle_state, is_write, bytes);

    /* Schedule the next request */
    schedule_next_request(blk, is_write);

    qemu_mutex_unlock(&tg->lock);
}

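/* Restart all throttled requests queued on a BlockBackend, for both the
 * read and the write queue, re-entering each waiting coroutine until
 * both queues are empty.
 *
 * @blk: the BlockBackend whose throttled requests to restart
 */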
void throttle_group_restart_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    int i;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
            ;
        }
    }
}

/* Update the throttle configuration for a particular group. Similar
 * to throttle_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration to set
 */
void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleTimers *tt = &blkp->throttle_timers;
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    /* throttle_config() cancels the timers */
    if (timer_pending(tt->timers[0])) {
        tg->any_timer_armed[0] = false;
    }
    if (timer_pending(tt->timers[1])) {
        tg->any_timer_armed[1] = false;
    }
    throttle_config(ts, tt, cfg);
    qemu_mutex_unlock(&tg->lock);

    qemu_co_enter_next(&blkp->throttled_reqs[0]);
    qemu_co_enter_next(&blkp->throttled_reqs[1]);
}

/* Get the throttle configuration from a particular group. Similar to
 * throttle_get_config(), but guarantees atomicity within the
 * throttling group.
 *
 * @blk: a BlockBackend that is a member of the group
 * @cfg: the configuration will be written here
 */
void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    qemu_mutex_lock(&tg->lock);
    throttle_get_config(ts, cfg);
    qemu_mutex_unlock(&tg->lock);
}

/* ThrottleTimers callback. This wakes up a request that was waiting
 * because it had been throttled.
 *
 * @blk:       the BlockBackend whose request had been throttled
 * @is_write:  the type of operation (read/write)
 */
static void timer_cb(BlockBackend *blk, bool is_write)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    bool empty_queue;

    /* The timer has just been fired, so we can update the flag */
    qemu_mutex_lock(&tg->lock);
    tg->any_timer_armed[is_write] = false;
    qemu_mutex_unlock(&tg->lock);

    /* Run the request that was waiting for this timer */
    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
    if (empty_queue) {
        qemu_mutex_lock(&tg->lock);
        schedule_next_request(blk, is_write);
        qemu_mutex_unlock(&tg->lock);
    }
}

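/* Per-direction trampolines registered with throttle_timers_init()
 * below; the opaque pointer is the BlockBackend that owns the timers.
 */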
static void read_timer_cb(void *opaque)
{
    timer_cb(opaque, false);
}

static void write_timer_cb(void *opaque)
{
    timer_cb(opaque, true);
}

/* Register a BlockBackend in the throttling group, also initializing its
 * timers and updating its throttle_state pointer to point to it. If a
 * throttling group with that name does not exist yet, it will be created.
 *
 * @blk:       the BlockBackend to insert
 * @groupname: the name of the group
 */
void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
{
    int i;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleState *ts = throttle_group_incref(groupname);
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
    int clock_type = QEMU_CLOCK_REALTIME;

    if (qtest_enabled()) {
        /* For testing block IO throttling only */
        clock_type = QEMU_CLOCK_VIRTUAL;
    }

    blkp->throttle_state = ts;

    qemu_mutex_lock(&tg->lock);
    /* If the ThrottleGroup is new, set this BlockBackend as the token */
    for (i = 0; i < 2; i++) {
        if (!tg->tokens[i]) {
            tg->tokens[i] = blk;
        }
    }

    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);

    throttle_timers_init(&blkp->throttle_timers,
                         blk_get_aio_context(blk),
                         clock_type,
                         read_timer_cb,
                         write_timer_cb,
                         blk);

    qemu_mutex_unlock(&tg->lock);
}

/* Unregister a BlockBackend from its group, removing it from the list,
 * destroying the timers and setting the throttle_state pointer to NULL.
 *
 * The BlockBackend must not have pending throttled requests, so the caller has
 * to drain them first.
 *
 * The group will be destroyed if it's empty after this operation.
 *
 * @blk: the BlockBackend to remove
 */
void throttle_group_unregister_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    int i;

    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));

    qemu_mutex_lock(&tg->lock);
    for (i = 0; i < 2; i++) {
        if (tg->tokens[i] == blk) {
            BlockBackend *token = throttle_group_next_blk(blk);
            /* Take care of the case where this is the last blk in the group */
            if (token == blk) {
                token = NULL;
            }
            tg->tokens[i] = token;
        }
    }

    /* remove the current blk from the list */
    QLIST_REMOVE(blkp, round_robin);
    throttle_timers_destroy(&blkp->throttle_timers);
    qemu_mutex_unlock(&tg->lock);

    throttle_group_unref(&tg->ts);
    blkp->throttle_state = NULL;
}

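/* Initialize the mutex that protects the global list of throttle
 * groups; registered with block_init() below so it runs during block
 * module initialization.
 */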
static void throttle_groups_init(void)
{
    qemu_mutex_init(&throttle_groups_lock);
}

block_init(throttle_groups_init);