linux/block/blk-mq-tag.c
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

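/*
 * Check whether any regular (non-reserved) tag is free. A NULL @tags
 * means the queue doesn't use tags at all, so nothing can run out.
 */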
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
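        /*
         * The unlocked test_bit() avoids the atomic read-modify-write (and
         * the cache line contention it causes) when the bit is already set.
         */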
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up everyone potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
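        /*
         * Fair share is the depth divided (rounding up) among the active
         * queues, e.g. 128 tags across 3 active queues allows
         * ceil(128 / 3) = 43 per queue; the floor of 4 keeps very small
         * maps usable.
         */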
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
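        /*
         * A non-zero shallow_depth caps how deep this allocation may dip
         * into the bitmap, e.g. so an I/O scheduler can bound async depth.
         */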
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        unsigned int tag_offset;
        bool drop_ctx;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
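        /*
         * If the caller came in without a software queue context, any ctx
         * we acquire while sleeping below is ours to drop before returning.
         */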
        drop_ctx = data->ctx == NULL;
        do {
                struct sbitmap_queue *bt_prev;

                prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                if (data->ctx)
                        blk_mq_put_ctx(data->ctx);

                bt_prev = bt;
                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                finish_wait(&ws->wait, &wait);

                /*
                 * If the destination hw queue changed, issue a fake wakeup
                 * on the previous queue to compensate for the missed wakeup,
                 * so other allocations on the previous queue won't be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        if (drop_ctx && data->ctx)
                blk_mq_put_ctx(data->ctx);

        finish_wait(&ws->wait, &wait);

found_tag:
        return tag + tag_offset;
}

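/*
 * Return a tag to the map. Regular tags sit above the reserved range in
 * the global tag space, so strip that offset before clearing the bit in
 * the matching bitmap.
 */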
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

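/*
 * State bundle for bt_iter(): sbitmap_for_each_set() only forwards a
 * single opaque pointer to its callback.
 */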
struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

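/*
 * Like bt_iter_data, but for walking a whole tag set, where no hardware
 * context is at hand; the callback sees the request only.
 */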
struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq)
                iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

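/*
 * Let the driver reinitialize every statically allocated request in the
 * set through its reinit_request callback; a missing callback makes
 * this a successful no-op.
 */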
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
        int i, j, ret = 0;

        if (!set->ops->aux_ops || !set->ops->aux_ops->reinit_request)
                goto out;

        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];

                if (!tags)
                        continue;

                for (j = 0; j < tags->nr_tags; j++) {
                        if (!tags->static_rqs[j])
                                continue;

                        ret = set->ops->aux_ops->reinit_request(set->driver_data,
                                                tags->static_rqs[j]);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
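        /*
         * A negative shift lets sbitmap pick a suitable bits-per-word
         * granularity for the given depth.
         */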
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        tdepth -= tags->nr_reserved_tags;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
        }

        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function returns a tag with the hardware
 * context index in the upper bits and the per hardware queue tag in the
 * lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);