/* linux/block/blk-mq-sysfs.c */
   1#include <linux/kernel.h>
   2#include <linux/module.h>
   3#include <linux/backing-dev.h>
   4#include <linux/bio.h>
   5#include <linux/blkdev.h>
   6#include <linux/mm.h>
   7#include <linux/init.h>
   8#include <linux/slab.h>
   9#include <linux/workqueue.h>
  10#include <linux/smp.h>
  11
  12#include <linux/blk-mq.h>
  13#include "blk-mq.h"
  14#include "blk-mq-tag.h"
  15
/*
 * Empty kobject release.  The ctx/hctx/mq kobjects are embedded in
 * structures whose lifetime is managed by the request queue itself, so
 * there is nothing to free here; the callback only exists to silence
 * the kobject core's "no release" warning.
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
  19
/*
 * sysfs attribute for a software queue (blk_mq_ctx).  ->show/->store
 * are invoked via blk_mq_sysfs_show()/blk_mq_sysfs_store() with the
 * queue's sysfs_lock held.
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
  25
/*
 * sysfs attribute for a hardware queue (blk_mq_hw_ctx).  ->show/->store
 * are invoked via blk_mq_hw_sysfs_show()/blk_mq_hw_sysfs_store() with
 * the queue's sysfs_lock held.
 */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
  31
  32static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
  33                                 char *page)
  34{
  35        struct blk_mq_ctx_sysfs_entry *entry;
  36        struct blk_mq_ctx *ctx;
  37        struct request_queue *q;
  38        ssize_t res;
  39
  40        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  41        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  42        q = ctx->queue;
  43
  44        if (!entry->show)
  45                return -EIO;
  46
  47        res = -ENOENT;
  48        mutex_lock(&q->sysfs_lock);
  49        if (!blk_queue_dying(q))
  50                res = entry->show(ctx, page);
  51        mutex_unlock(&q->sysfs_lock);
  52        return res;
  53}
  54
  55static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
  56                                  const char *page, size_t length)
  57{
  58        struct blk_mq_ctx_sysfs_entry *entry;
  59        struct blk_mq_ctx *ctx;
  60        struct request_queue *q;
  61        ssize_t res;
  62
  63        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  64        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  65        q = ctx->queue;
  66
  67        if (!entry->store)
  68                return -EIO;
  69
  70        res = -ENOENT;
  71        mutex_lock(&q->sysfs_lock);
  72        if (!blk_queue_dying(q))
  73                res = entry->store(ctx, page, length);
  74        mutex_unlock(&q->sysfs_lock);
  75        return res;
  76}
  77
  78static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
  79                                    struct attribute *attr, char *page)
  80{
  81        struct blk_mq_hw_ctx_sysfs_entry *entry;
  82        struct blk_mq_hw_ctx *hctx;
  83        struct request_queue *q;
  84        ssize_t res;
  85
  86        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  87        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  88        q = hctx->queue;
  89
  90        if (!entry->show)
  91                return -EIO;
  92
  93        res = -ENOENT;
  94        mutex_lock(&q->sysfs_lock);
  95        if (!blk_queue_dying(q))
  96                res = entry->show(hctx, page);
  97        mutex_unlock(&q->sysfs_lock);
  98        return res;
  99}
 100
 101static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
 102                                     struct attribute *attr, const char *page,
 103                                     size_t length)
 104{
 105        struct blk_mq_hw_ctx_sysfs_entry *entry;
 106        struct blk_mq_hw_ctx *hctx;
 107        struct request_queue *q;
 108        ssize_t res;
 109
 110        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
 111        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
 112        q = hctx->queue;
 113
 114        if (!entry->store)
 115                return -EIO;
 116
 117        res = -ENOENT;
 118        mutex_lock(&q->sysfs_lock);
 119        if (!blk_queue_dying(q))
 120                res = entry->store(hctx, page, length);
 121        mutex_unlock(&q->sysfs_lock);
 122        return res;
 123}
 124
/* "dispatched" attribute: counters printed as "<from-hctx> <direct>". */
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
				ctx->rq_dispatched[0]);
}
 130
/* "merged" attribute: number of requests merged on this software queue. */
static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}
 135
/* "completed" attribute: counters printed as index [1] then [0],
 * matching the order used by blk_mq_sysfs_dispatched_show(). */
static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
				ctx->rq_completed[0]);
}
 141
 142static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
 143{
 144        char *start_page = page;
 145        struct request *rq;
 146
 147        page += sprintf(page, "%s:\n", msg);
 148
 149        list_for_each_entry(rq, list, queuelist)
 150                page += sprintf(page, "\t%p\n", rq);
 151
 152        return page - start_page;
 153}
 154
 155static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
 156{
 157        ssize_t ret;
 158
 159        spin_lock(&ctx->lock);
 160        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
 161        spin_unlock(&ctx->lock);
 162
 163        return ret;
 164}
 165
/* "queued" attribute: requests queued to this hardware queue. */
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}
 171
/* "run" attribute: number of times this hardware queue has been run. */
static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}
 176
 177static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
 178                                               char *page)
 179{
 180        char *start_page = page;
 181        int i;
 182
 183        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
 184
 185        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
 186                unsigned long d = 1U << (i - 1);
 187
 188                page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
 189        }
 190
 191        return page - start_page;
 192}
 193
 194static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
 195                                            char *page)
 196{
 197        ssize_t ret;
 198
 199        spin_lock(&hctx->lock);
 200        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
 201        spin_unlock(&hctx->lock);
 202
 203        return ret;
 204}
 205
 206static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
 207{
 208        ssize_t ret;
 209
 210        spin_lock(&hctx->lock);
 211        ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
 212        spin_unlock(&hctx->lock);
 213
 214        return ret;
 215}
 216
 217static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
 218                                         const char *page, size_t len)
 219{
 220        struct blk_mq_ctx *ctx;
 221        unsigned long ret;
 222        unsigned int i;
 223
 224        if (kstrtoul(page, 10, &ret)) {
 225                pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
 226                return -EINVAL;
 227        }
 228
 229        spin_lock(&hctx->lock);
 230        if (ret)
 231                hctx->flags |= BLK_MQ_F_SHOULD_IPI;
 232        else
 233                hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
 234        spin_unlock(&hctx->lock);
 235
 236        hctx_for_each_ctx(hctx, ctx, i)
 237                ctx->ipi_redirect = !!ret;
 238
 239        return len;
 240}
 241
/* "tags" attribute: delegate formatting to the tag allocator. */
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}
 246
/* Software-queue (blk_mq_ctx) attributes; all read-only. */
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

/* Default attributes installed on every ctx kobject. */
static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};
 271
/* Hardware-queue (blk_mq_hw_ctx) attributes; only "ipi_redirect" is
 * writable (root only). */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
	.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
	.show = blk_mq_hw_sysfs_ipi_show,
	.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};

/* Default attributes installed on every hctx kobject. */
static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_ipi.attr,
	&blk_mq_hw_sysfs_tags.attr,
	NULL,
};
 307
/* sysfs_ops routing attribute access to the dispatchers above. */
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

/* ktype for the per-queue "mq" directory (no attributes of its own). */
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

/* ktype for software-queue (ctx) kobjects. */
static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

/* ktype for hardware-queue (hctx) kobjects. */
static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};
 334
/*
 * Tear down the sysfs hierarchy created by blk_mq_register_disk().
 *
 * Children are removed before their parents, mirroring registration in
 * reverse: each ctx kobject, then its hctx, then the "mq" directory.
 * kobject_del() removes the sysfs entry; kobject_put() drops the
 * reference taken at kobject_init()/kobject_add() time.
 */
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx_for_each_ctx(hctx, ctx, j) {
			kobject_del(&ctx->kobj);
			kobject_put(&ctx->kobj);
		}
		kobject_del(&hctx->kobj);
		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	/* Drop the device reference taken via kobject_get() at register time. */
	kobject_put(&disk_to_dev(disk)->kobj);
}
 357
 358int blk_mq_register_disk(struct gendisk *disk)
 359{
 360        struct device *dev = disk_to_dev(disk);
 361        struct request_queue *q = disk->queue;
 362        struct blk_mq_hw_ctx *hctx;
 363        struct blk_mq_ctx *ctx;
 364        int ret, i, j;
 365
 366        kobject_init(&q->mq_kobj, &blk_mq_ktype);
 367
 368        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 369        if (ret < 0)
 370                return ret;
 371
 372        kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 373
 374        queue_for_each_hw_ctx(q, hctx, i) {
 375                kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
 376                ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
 377                if (ret)
 378                        break;
 379
 380                if (!hctx->nr_ctx)
 381                        continue;
 382
 383                hctx_for_each_ctx(hctx, ctx, j) {
 384                        kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
 385                        ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
 386                        if (ret)
 387                                break;
 388                }
 389        }
 390
 391        if (ret) {
 392                blk_mq_unregister_disk(disk);
 393                return ret;
 394        }
 395
 396        return 0;
 397}
 398