linux/block/blk-mq-debugfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
                           stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}

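/*
 * Poll statistics are tracked per request size bucket: q->poll_stat[] holds
 * read stats at even indexes and write stats at odd indexes, for sizes
 * starting at 512 bytes and doubling per bucket.
 */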
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
                seq_printf(m, "read  (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ",  1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket+1]);
                seq_puts(m, "\n");
        }
        return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_lock_irq(&q->requeue_lock);
        return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;

        return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
        __releases(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
        .start  = queue_requeue_list_start,
        .next   = queue_requeue_list_next,
        .stop   = queue_requeue_list_stop,
        .show   = blk_mq_debugfs_rq_show,
};

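/*
 * Print the set bits of @flags as '|'-separated flag names; bits without an
 * entry in @flag_name are printed as their bit number.
 */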
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                          const char *const *flag_name, int flag_name_count)
{
        bool sep = false;
        int i;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, "|");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        seq_printf(m, "%d\n", atomic_read(&q->pm_only));
        return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(WC),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                       ARRAY_SIZE(blk_queue_flag_name));
        seq_puts(m, "\n");
        return 0;
}

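/*
 * Writing "run", "start" or "kick" to the "state" attribute runs all hardware
 * queues, restarts stopped hardware queues or kicks the requeue list,
 * respectively.
 */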
static ssize_t queue_state_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        char opbuf[16] = { }, *op;

        /*
         * The "state" attribute is removed after blk_cleanup_queue() has called
         * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
         * triggering a use-after-free.
         */
        if (blk_queue_dead(q))
                return -ENOENT;

        if (count >= sizeof(opbuf)) {
                pr_err("%s: operation too long\n", __func__);
                goto inval;
        }

        if (copy_from_user(opbuf, buf, count))
                return -EFAULT;
        op = strstrip(opbuf);
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else if (strcmp(op, "kick") == 0) {
                blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
                pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

        return 0;
}

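/* Writing anything to "write_hints" zeroes the per-hint values reported above. */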
static ssize_t queue_write_hint_store(void *data, const char __user *buf,
                                      size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                q->write_hints[i] = 0;

        return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        { "poll_stat", 0400, queue_poll_stat_show },
        { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
        { "pm_only", 0600, queue_pm_only_show, NULL },
        { "state", 0600, queue_state_show, queue_state_write },
        { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
        { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
        { },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        blk_flags_show(m, hctx->state, hctx_state_name,
                       ARRAY_SIZE(hctx_state_name));
        seq_puts(m, "\n");
        return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_SHARED),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(WRITE_SAME),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(SCSI_IN),
        REQ_OP_NAME(SCSI_OUT),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(SYNC),
        CMD_FLAG_NAME(META),
        CMD_FLAG_NAME(PRIO),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(IDLE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(FUA),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOWAIT),
        CMD_FLAG_NAME(NOUNMAP),
        CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
        RQF_NAME(SORTED),
        RQF_NAME(STARTED),
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(FLUSH_SEQ),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
        RQF_NAME(PREEMPT),
        RQF_NAME(COPY_USER),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
        RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
        RQF_NAME(SPECIAL_PAYLOAD),
        RQF_NAME(ZONE_WRITE_LOCKED),
        RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
        [MQ_RQ_IDLE]            = "idle",
        [MQ_RQ_IN_FLIGHT]       = "in_flight",
        [MQ_RQ_COMPLETE]        = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
        if (WARN_ON_ONCE((unsigned int)rq_state >=
                         ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
}

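/*
 * Dump a single request: operation, command flags, request flags, blk-mq
 * state, driver tag and scheduler tag, plus any driver-specific information
 * provided by the ->show_rq() callback.
 */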
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

        seq_printf(m, "%p {.op=", rq);
        if (op < ARRAY_SIZE(op_name) && op_name[op])
                seq_printf(m, "%s", op_name[op]);
        else
                seq_printf(m, "%d", op);
        seq_puts(m, ", .cmd_flags=");
        blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
                       ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
        seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start  = hctx_dispatch_start,
        .next   = hctx_dispatch_next,
        .stop   = hctx_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
        struct seq_file         *m;
        struct blk_mq_hw_ctx    *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
        const struct show_busy_params *params = data;

        if (rq->mq_hctx == params->hctx)
                __blk_mq_debugfs_rq_show(params->m,
                                         list_entry_rq(&rq->queuelist));

        return true;
}

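/*
 * Show all requests from the tag set that are currently owned by this
 * hardware queue.
 */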
static int hctx_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct show_busy_params params = { .m = m, .hctx = hctx };

        blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
                                &params);

        return 0;
}

static const char *const hctx_types[] = {
        [HCTX_TYPE_DEFAULT]     = "default",
        [HCTX_TYPE_READ]        = "read",
        [HCTX_TYPE_POLL]        = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
        seq_printf(m, "%s\n", hctx_types[hctx->type]);
        return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

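/* Dump the counters and sbitmap queues of a struct blk_mq_tags. */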
static void blk_mq_debugfs_tags_show(struct seq_file *m,
                                     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}

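/*
 * q->sysfs_lock serializes these reads against changes to hctx->tags and
 * hctx->sched_tags, e.g. when the number of hardware queues is updated or the
 * I/O scheduler is changed.
 */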
static int hctx_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->queued = 0;
        return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                              loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->run = 0;
        return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%u\n", hctx->dispatch_busy);
        return 0;
}

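/*
 * Define seq_file operations that walk the per-CPU software queue
 * (blk_mq_ctx) request list of one hctx type while holding ctx->lock.
 */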
#define CTX_RQ_SEQ_OPS(name, type)                                      \
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
        __acquires(&ctx->lock)                                          \
{                                                                       \
        struct blk_mq_ctx *ctx = m->private;                            \
                                                                        \
        spin_lock(&ctx->lock);                                          \
        return seq_list_start(&ctx->rq_lists[type], *pos);              \
}                                                                       \
                                                                        \
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,     \
                                     loff_t *pos)                       \
{                                                                       \
        struct blk_mq_ctx *ctx = m->private;                            \
                                                                        \
        return seq_list_next(v, &ctx->rq_lists[type], pos);             \
}                                                                       \
                                                                        \
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)      \
        __releases(&ctx->lock)                                          \
{                                                                       \
        struct blk_mq_ctx *ctx = m->private;                            \
                                                                        \
        spin_unlock(&ctx->lock);                                        \
}                                                                       \
                                                                        \
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {     \
        .start  = ctx_##name##_rq_list_start,                           \
        .next   = ctx_##name##_rq_list_next,                            \
        .stop   = ctx_##name##_rq_list_stop,                            \
        .show   = blk_mq_debugfs_rq_show,                               \
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
        return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_merged = 0;
        return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
        return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

        return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

        /*
         * Attributes that only implement .seq_ops are read-only and 'attr' is
         * the same as 'data' in this case.
         */
        if (attr == data || !attr->write)
                return -EPERM;

        return attr->write(data, buf, count, ppos);
}

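/*
 * Attributes that provide .seq_ops are opened with seq_open() and get the
 * parent directory's private data as their seq_file private data; attributes
 * that provide .show are opened with single_open().
 */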
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
        struct seq_file *m;
        int ret;

        if (attr->seq_ops) {
                ret = seq_open(file, attr->seq_ops);
                if (!ret) {
                        m = file->private_data;
                        m->private = data;
                }
                return ret;
        }

        if (WARN_ON_ONCE(!attr->show))
                return -EPERM;

        return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;

        if (attr->show)
                return single_release(inode, file);
        else
                return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
        .open           = blk_mq_debugfs_open,
        .read           = seq_read,
        .write          = blk_mq_debugfs_write,
        .llseek         = seq_lseek,
        .release        = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
        {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
        {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
        {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
        {"queued", 0600, hctx_queued_show, hctx_queued_write},
        {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {"dispatch_busy", 0400, hctx_dispatch_busy_show},
        {"type", 0400, hctx_type_show},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
        {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
        {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
        {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
        {"merged", 0600, ctx_merged_show, ctx_merged_write},
        {"completed", 0600, ctx_completed_show, ctx_completed_write},
        {},
};

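/*
 * Create one debugfs file per attribute. @data is stored in the parent
 * directory's inode and each file's inode points at its attribute; this is
 * how blk_mq_debugfs_show() and blk_mq_debugfs_write() find both again.
 */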
static void debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
{
        if (IS_ERR_OR_NULL(parent))
                return;

        d_inode(parent)->i_private = data;

        for (; attr->name; attr++)
                debugfs_create_file(attr->name, attr->mode, parent,
                                    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);

        debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

        /*
         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
        if (q->elevator && !q->sched_debugfs_dir)
                blk_mq_debugfs_register_sched(q);

        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir)
                        blk_mq_debugfs_register_hctx(q, hctx);
                if (q->elevator && !hctx->sched_debugfs_dir)
                        blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        if (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;

                while (rqos) {
                        blk_mq_debugfs_register_rqos(rqos);
                        rqos = rqos->next;
                }
        }
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->sched_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *ctx)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

        debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                  struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;

        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

        debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

        hctx_for_each_ctx(hctx, ctx, i)
                blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        /*
         * If the parent directory has not been created yet, return; we will be
         * called again later on and the directory/files will be created then.
         */
        if (!q->debugfs_dir)
                return;

        if (!e->queue_debugfs_attrs)
                return;

        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

        debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
        debugfs_remove_recursive(rqos->debugfs_dir);
        rqos->debugfs_dir = NULL;
}

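/*
 * Register debugfs attributes for an rq_qos policy. The shared "rqos"
 * directory is created on first use; policies without debugfs attributes are
 * skipped.
 */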
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
        struct request_queue *q = rqos->q;
        const char *dir_name = rq_qos_id_to_name(rqos->id);

        if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
                return;

        if (!q->rqos_debugfs_dir)
                q->rqos_debugfs_dir = debugfs_create_dir("rqos",
                                                         q->debugfs_dir);

        rqos->debugfs_dir = debugfs_create_dir(dir_name,
                                               rqos->q->rqos_debugfs_dir);

        debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
        debugfs_remove_recursive(q->rqos_debugfs_dir);
        q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                        struct blk_mq_hw_ctx *hctx)
{
        struct elevator_type *e = q->elevator->type;

        if (!e->hctx_debugfs_attrs)
                return;

        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                                                     hctx->debugfs_dir);
        debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                             e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}