/* linux/block/blk-mq-sysfs.c */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

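/*
 * All kobjects below are embedded in their owning structures (ctx, hctx,
 * request_queue) and freed along with them, so the release hook has
 * nothing left to do beyond satisfying the kobject core.
 */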
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

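/*
 * One descriptor per sysfs file: the attribute plus typed show/store
 * callbacks for a software queue (ctx) or hardware queue (hctx).
 */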
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

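/*
 * Generic show/store wrappers. Each resolves the embedding context from
 * the kobject, takes q->sysfs_lock to serialize against queue teardown,
 * and fails with -ENOENT once the queue has been marked dying.
 */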
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

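/*
 * Per-ctx counters. The two-column files print the sync count first,
 * then the async count ([1] is bumped for sync requests, [0] for async).
 */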
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                                ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                                ctx->rq_completed[0]);
}

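/*
 * Print the pending requests on @list, one pointer per line, capped at
 * one page. If the next entry would overflow the page, emit "\t...\n"
 * instead, backing up over the previous entry if even that marker would
 * not fit.
 */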
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
        struct request *rq;
        int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist) {
                const int rq_len = 2 * sizeof(rq) + 2;

                /* if the output will be truncated */
                if (PAGE_SIZE - 1 < len + rq_len) {
                        /* back up over the last entry if '\t...\n' won't fit */
                        if (PAGE_SIZE - 1 < len + 5)
                                len -= rq_len;
                        len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                "\t%p\n", rq);
        }

        return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}

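/*
 * Poll statistics: how often polling was considered, actually invoked,
 * and found a completion. Writing anything to io_poll resets all three
 * counters.
 */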
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
                       hctx->poll_considered, hctx->poll_invoked,
                       hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
                                          const char *page, size_t size)
{
        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

        return size;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

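/*
 * Histogram of how many requests each dispatch run handled: row 0 counts
 * empty runs, the middle rows cover power-of-two buckets (1, 2, 4, ...),
 * and the final "+" row accumulates everything at or beyond the last
 * bucket boundary.
 */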
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
                                                hctx->dispatched[i]);
        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

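/* Print the CPUs served by this hardware queue as a comma-separated list. */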
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

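/*
 * Attribute tables. The ctx entries appear under mq/<n>/cpu<c>/, the hctx
 * entries directly under mq/<n>/. Everything is read-only except io_poll,
 * which also accepts writes to reset the poll counters.
 */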
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
        .store = blk_mq_hw_sysfs_poll_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

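/*
 * Remove the sysfs directory for one hctx together with its per-cpu ctx
 * entries. An hctx with no mapped ctxs was never added, so there is
 * nothing to delete for it.
 */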
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

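/*
 * Add one hctx, named after its hardware queue number, under the queue's
 * "mq" directory, then add a "cpu<c>" entry for every software ctx mapped
 * to it.
 */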
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_dev(dev, q);
        blk_mq_enable_hotplug();
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

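/*
 * Initialize (but do not add) the queue-level mq kobject and the per-cpu
 * ctx kobjects; kobject_add() happens later, at registration time.
 */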
static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

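/*
 * Build the whole mq sysfs hierarchy for a queue, visible as "mq" under
 * the disk device (e.g. /sys/block/<disk>/mq/<n>/cpu<c>). A reference on
 * the device kobject is taken here and dropped again on unregistration.
 * CPU hotplug is disabled across the update so the ctx mappings cannot
 * change while we walk them.
 */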
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_dev(dev, q);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

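/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() tear down and
 * re-create only the per-hctx entries. They are used when hardware queues
 * are remapped (e.g. across CPU hotplug) while the queue itself stays
 * registered, and are no-ops until the initial registration has finished.
 */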
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}