linux/block/blk-mq-sysfs.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

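/*
 * The mq kobjects are embedded in struct request_queue, blk_mq_hw_ctx and
 * blk_mq_ctx, whose lifetimes are managed by the core blk-mq code, so the
 * release callback has nothing of its own to free.
 */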
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

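/*
 * Attribute wrappers that pair a sysfs attribute with show/store handlers
 * typed for a software queue (blk_mq_ctx) or a hardware queue
 * (blk_mq_hw_ctx), so the generic sysfs_ops callbacks below can dispatch
 * to them.
 */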
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

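/*
 * The four sysfs_ops callbacks below all follow the same pattern: recover
 * the entry and the ctx/hctx from the attribute and kobject via
 * container_of(), then invoke the typed handler under q->sysfs_lock.
 * Attributes without a handler return -EIO; a dying queue returns -ENOENT.
 */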
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

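/*
 * Per-software-queue statistics. For "dispatched" and "completed" the two
 * columns are the sync and async request counts.
 */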
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                                ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                                ctx->rq_completed[0]);
}

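/*
 * Print the addresses of the requests on @list, at most one page worth.
 * Each entry needs 2 * sizeof(rq) + 2 bytes for "\t%p\n"; once the next
 * entry would overflow the page, emit "\t...\n" instead, backspacing over
 * the previous entry if even the ellipsis no longer fits.
 */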
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
        struct request *rq;
        int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist) {
                const int rq_len = 2 * sizeof(rq) + 2;

                /* if the output will be truncated */
                if (PAGE_SIZE - 1 < len + rq_len) {
                        /* backspacing if it can't hold '\t...\n' */
                        if (PAGE_SIZE - 1 < len + 5)
                                len -= rq_len;
                        len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                "\t%p\n", rq);
        }

        return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

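/*
 * Dispatch batch-size histogram for this hardware queue: each row is
 * labeled with the start of its bucket (0, then successive powers of
 * two); the counters themselves are bumped in blk-mq.c.
 */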
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
                unsigned long d = 1U << (i - 1);

                page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
        }

        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

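/*
 * Read-only attribute tables for the per-ctx and per-hctx sysfs
 * directories, wired up through the kobj_types below.
 */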
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

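/*
 * Register/unregister one hardware queue and its software queues: the
 * hctx kobject is added under q->mq_kobj named by queue number, and each
 * mapped ctx appears underneath it as cpu%u. Hardware queues with no
 * mapped ctx are skipped.
 */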
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

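/*
 * Tear down the whole mq sysfs hierarchy for @disk. CPU hotplug is
 * disabled across the walk so the ctx-to-hctx mapping cannot change
 * underneath us.
 */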
void blk_mq_unregister_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        blk_mq_disable_hotplug();

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&disk_to_dev(disk)->kobj);

        q->mq_sysfs_init_done = false;
        blk_mq_enable_hotplug();
}

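/*
 * Initialize the kobjects; they are only added to sysfs when the disk is
 * registered.
 */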
static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        queue_for_each_hw_ctx(q, hctx, i)
                kobject_init(&hctx->kobj, &blk_mq_hw_ktype);

        queue_for_each_ctx(q, ctx, i)
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}

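/*
 * Create the /sys/block/<disk>/mq hierarchy. A failure while registering
 * a hardware queue unwinds everything already registered via
 * blk_mq_unregister_disk().
 */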
int blk_mq_register_disk(struct gendisk *disk)
{
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                blk_mq_unregister_disk(disk);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

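/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() are used around
 * hardware queue remapping (e.g. on CPU hotplug); they only touch sysfs
 * if the initial registration from blk_mq_register_disk() completed.
 */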
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}