linux/block/blk-mq-sysfs.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

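/*
 * Release handler for the blk_mq_ctxs container kobject. It runs once the
 * last reference, including the ones held by the per-cpu ctx kobjects, has
 * been dropped, so it is safe to free the percpu array of software contexts
 * along with the container itself.
 */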
static void blk_mq_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

        free_percpu(ctxs->queue_ctx);
        kfree(ctxs);
}

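/*
 * Release handler for a per-cpu software context kobject. The ctx memory
 * lives inside the queue's percpu allocation, so nothing is freed here; we
 * only drop the container reference taken in blk_mq_sysfs_init().
 */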
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

        /* ctx->ctxs won't be released until all ctx are freed */
        kobject_put(&ctx->ctxs->kobj);
}

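/*
 * Release handler for a hardware context kobject: the final teardown point
 * for an hctx. Pending run work is cancelled first; for BLK_MQ_F_BLOCKING
 * queues the SRCU structure is cleaned up as well, before the flush queue,
 * ctx map, cpumask, ctxs array and the hctx itself are freed.
 */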
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);

        cancel_delayed_work_sync(&hctx->run_work);

        if (hctx->flags & BLK_MQ_F_BLOCKING)
                cleanup_srcu_struct(hctx->srcu);
        blk_free_flush_queue(hctx->fq);
        sbitmap_free(&hctx->ctx_map);
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
}

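/*
 * Attribute descriptors pairing a sysfs attribute with typed show/store
 * callbacks, one flavour for software (ctx) and one for hardware (hctx)
 * contexts. The generic sysfs_ops below dispatch to these.
 */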
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

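/*
 * Generic sysfs_ops entry points. Each recovers the typed entry and its
 * context via container_of(), takes q->sysfs_lock to serialize against
 * concurrent sysfs updates, and dispatches to the attribute's callback.
 * Attributes without a handler report -EIO, matching what sysfs returns
 * for missing methods.
 */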
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

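/*
 * Per-hctx attribute helpers: report the total and reserved tag counts of
 * the hardware context's tag set.
 */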
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
                                                     char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

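/*
 * Print the CPUs mapped to this hardware context as a comma-separated
 * list. One byte of the page is held back so the trailing newline always
 * fits, and the loop stops once the remaining space would be exceeded.
 */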
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        const size_t size = PAGE_SIZE - 1;
        unsigned int i, first = 1;
        int ret = 0, pos = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret = snprintf(pos + page, size - pos, "%u", i);
                else
                        ret = snprintf(pos + page, size - pos, ", %u", i);

                if (ret >= size - pos)
                        break;

                first = 0;
                pos += ret;
        }

        ret = snprintf(pos + page, size + 1 - pos, "\n");
        return pos + ret;
}

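/*
 * Read-only attributes exposed in each hctx directory. ATTRIBUTE_GROUPS()
 * generates default_hw_ctx_groups, which the hctx ktype installs as its
 * default_groups so the files are created when the kobject is added.
 */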
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
        .attr = {.name = "nr_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
        .attr = {.name = "nr_reserved_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = 0444 },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_nr_tags.attr,
        &blk_mq_hw_sysfs_nr_reserved_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

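/*
 * kobject types tying the sysfs ops and release handlers together for the
 * ctxs container, the per-cpu ctx objects and the hardware contexts.
 */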
static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_groups = default_hw_ctx_groups,
        .release        = blk_mq_hw_sysfs_release,
};

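/*
 * Remove a hardware context and its per-cpu ctx children from sysfs. An
 * hctx with no mapped software context was never registered, so there is
 * nothing to remove for it.
 */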
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

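/*
 * Add an hctx directory, named after its queue number, under the queue's
 * mq kobject, plus a "cpu%u" child for every software context mapped to
 * it. On failure the caller is responsible for unwinding.
 */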
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

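/*
 * Tear down the whole "mq" sysfs hierarchy for a queue: unregister every
 * hardware context, announce the removal, delete the mq kobject and drop
 * the device reference taken at registration time.
 */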
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        lockdep_assert_held(&q->sysfs_dir_lock);

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);
        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

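/* Initialize the embedded kobject of a newly allocated hardware context. */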
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

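/*
 * Drop the initialization references on the per-cpu ctx kobjects and on
 * the mq kobject. The actual freeing happens through the release handlers
 * above once the last reference is gone.
 */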
void blk_mq_sysfs_deinit(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_put(&ctx->kobj);
        }
        kobject_put(q->mq_kobj);
}

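/*
 * Initialize the mq kobject and every per-cpu ctx kobject. Each ctx pins
 * the container with kobject_get() so the ctxs structure outlives all of
 * its children; see blk_mq_ctx_sysfs_release().
 */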
void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);

                kobject_get(q->mq_kobj);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

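/*
 * Register the "mq" directory under the device and populate it with one
 * directory per hardware context. On a partial failure the previously
 * registered hctxs are unwound and the mq kobject is removed again.
 */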
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        WARN_ON_ONCE(!q->kobj.parent);
        lockdep_assert_held(&q->sysfs_dir_lock);

        ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        goto unreg;
        }

        q->mq_sysfs_init_done = true;

out:
        return ret;

unreg:
        while (--i >= 0)
                blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);
        kobject_put(&dev->kobj);
        return ret;
}

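/*
 * Remove just the hctx directories, leaving the mq kobject itself in
 * place so that blk_mq_sysfs_register() can rebuild them later, e.g.
 * when the set of hardware queues changes. A no-op until the initial
 * registration has completed.
 */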
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

unlock:
        mutex_unlock(&q->sysfs_dir_lock);
}

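/*
 * Re-create the hctx directories removed by blk_mq_sysfs_unregister().
 * Like the unregister side, this does nothing before the initial
 * registration via __blk_mq_register_dev() has completed.
 */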
int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

unlock:
        mutex_unlock(&q->sysfs_dir_lock);

        return ret;
}