linux/drivers/mtd/mtd_blkdevs.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        blk_cleanup_disk(dev->disk);
        blk_mq_free_tag_set(dev->tag_set);
        kfree(dev->tag_set);
        list_del(&dev->list);
        kfree(dev);
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        kref_put(&dev->ref, blktrans_dev_release);
}

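/*
 * Process a single request synchronously.  The block layer addresses the
 * device in 512-byte sectors, so shift the position and length into units
 * of the translation layer's block size: tr->blkshift is log2(tr->blksize),
 * e.g. with a 2048-byte blksize, blkshift is 11 and sector 8 (byte offset
 * 4096) maps to block 2.
 */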
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

        if (req_op(req) == REQ_OP_FLUSH) {
                if (tr->flush(dev))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        }

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return BLK_STS_IOERR;

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                if (tr->discard(dev, block, nsect))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        case REQ_OP_READ:
                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->readsect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));
                rq_flush_dcache_pages(req);
                return BLK_STS_OK;
        case REQ_OP_WRITE:
                if (!tr->writesect)
                        return BLK_STS_IOERR;

                rq_flush_dcache_pages(req);
                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->writesect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));
                return BLK_STS_OK;
        default:
                return BLK_STS_IOERR;
        }
}

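/**
 * mtd_blktrans_cease_background() - check whether background work must stop
 * @dev: the blktrans device whose ->background() callback is running
 *
 * Translation layers should poll this from the main loop of their
 * ->background() callback and return as soon as it reads true, so that
 * housekeeping does not starve queued I/O.
 */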
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
        struct request *rq;

        rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
        if (rq) {
                list_del_init(&rq->queuelist);
                blk_mq_start_request(rq);
                return rq;
        }

        return NULL;
}

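/*
 * Drain dev->rq_list.  Entered and exited with dev->queue_lock held, but
 * the lock is dropped around the sleeping paths -- the ->background()
 * callback and the actual I/O done under dev->lock -- as the
 * __releases()/__acquires() annotations document.
 */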
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
        __releases(&dev->queue_lock)
        __acquires(&dev->queue_lock)
{
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request *req = NULL;
        int background_done = 0;

        while (1) {
                blk_status_t res;

                dev->bg_stop = false;
                if (!req && !(req = mtd_next_request(dev))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(&dev->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(&dev->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        break;
                }

                spin_unlock_irq(&dev->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
                        __blk_mq_end_request(req, res);
                        req = NULL;
                }

                background_done = 0;
                spin_lock_irq(&dev->queue_lock);
        }
}

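/*
 * ->queue_rq() for the single hardware queue.  The tag set is allocated
 * with BLK_MQ_F_BLOCKING (see add_mtd_blktrans_dev() below), so sleeping
 * here is permitted: the request is queued on dev->rq_list and then
 * processed synchronously by mtd_blktrans_work().
 */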
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        struct mtd_blktrans_dev *dev;

        dev = hctx->queue->queuedata;
        if (!dev) {
                blk_mq_start_request(bd->rq);
                return BLK_STS_IOERR;
        }

        spin_lock_irq(&dev->queue_lock);
        list_add_tail(&bd->rq->queuelist, &dev->rq_list);
        mtd_blktrans_work(dev);
        spin_unlock_irq(&dev->queue_lock);

        return BLK_STS_OK;
}

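/*
 * Open/release bookkeeping is done under dev->lock: the module reference
 * and the underlying MTD device are taken on the first open only and
 * dropped on the last release, while every open pins the blktrans device
 * with a kref so it cannot be freed under a live file descriptor.
 */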
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
        int ret = 0;

        kref_get(&dev->ref);

        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;
        dev->file_mode = mode;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = disk->private_data;

        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        module_put(dev->tr->owner);

        if (dev->mtd) {
                if (dev->tr->release)
                        dev->tr->release(dev);
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
        int ret = -ENXIO;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
        mutex_unlock(&dev->lock);
        return ret;
}

static const struct block_device_operations mtd_block_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .getgeo         = blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
        .queue_rq       = mtd_queue_rq,
};

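/*
 * Register one translation-layer device with the block layer: pick a free
 * device number (or honour the requested one), allocate a single-queue
 * blk-mq tag set and a gendisk, then size, name and publish the disk.
 * With part_bits set, disks are named "%sa".."%sz", then "%saa".., which
 * caps devnum at 27 * 26 usable names; otherwise a plain decimal suffix
 * is used.  Must be called with mtd_table_mutex held.
 */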
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        lockdep_assert_held(&mtd_table_mutex);

        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /*
         * Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number.
         */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26))
                return ret;

        list_add_tail(&new->list, &tr->devs);
 added:

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        ret = -ENOMEM;
        new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
        if (!new->tag_set)
                goto out_list_del;

        ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
                        BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
        if (ret)
                goto out_kfree_tag_set;

        /* Create gendisk */
        gd = blk_mq_alloc_disk(new->tag_set, new);
        if (IS_ERR(gd)) {
                ret = PTR_ERR(gd);
                goto out_free_tag_set;
        }

        new->disk = gd;
        new->rq = new->disk->queue;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->minors = 1 << tr->part_bits;
        gd->fops = &mtd_block_ops;

        if (tr->part_bits) {
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        } else {
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);
        }

        set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        INIT_LIST_HEAD(&new->rq_list);

        if (tr->flush)
                blk_queue_write_cache(new->rq, true, false);

        blk_queue_logical_block_size(new->rq, tr->blksize);

        blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

        if (tr->discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
                new->rq->limits.discard_granularity = tr->blksize;
        }

        gd->queue = new->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        device_add_disk(&new->mtd->dev, gd, NULL);

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                        new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;

out_free_tag_set:
        blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
        kfree(new->tag_set);
out_list_del:
        list_del(&new->list);
        return ret;
}

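/*
 * Tear-down mirrors add_mtd_blktrans_dev(): del_gendisk() stops new opens,
 * clearing ->queuedata makes any in-flight mtd_queue_rq() fail with
 * BLK_STS_IOERR, and the freeze/quiesce cycle drains whatever was already
 * queued before the underlying MTD device is finally dropped.
 */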
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        lockdep_assert_held(&mtd_table_mutex);

        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                                old->disk_attributes);

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* freeze+quiesce queue to ensure all requests are flushed */
        blk_mq_freeze_queue(old->rq);
        blk_mq_quiesce_queue(old->rq);
        blk_mq_unquiesce_queue(old->rq);
        blk_mq_unfreeze_queue(old->rq);

        /*
         * If the device is currently open, tell the trans driver to close it,
         * then put the mtd device, and don't touch it again.
         */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

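/*
 * Register a translation layer.  Passing tr->major == 0 asks
 * register_blkdev() for a dynamically allocated major, which is then
 * written back into tr->major.  All MTD devices already present are
 * offered to the new layer via ->add_mtd() under mtd_table_mutex.
 */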
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /*
         * Register the notifier if/when the first device type is
         * registered, to prevent the link/init ordering from messing
         * us up.
         */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                return ret;
        }

        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);

        mutex_lock(&mtd_table_mutex);
        list_add(&tr->list, &blktrans_majors);
        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);
        mutex_unlock(&mtd_table_mutex);
        return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        mutex_unlock(&mtd_table_mutex);
        unregister_blkdev(tr->major, tr->name);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

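/*
 * Example (sketch, not compiled here): a minimal read-only translation
 * layer on top of this API.  The mytl_* names are hypothetical; see
 * mtdblock_ro.c for a real user of about this size.  Leaving ->writesect
 * unset makes add_mtd_blktrans_dev() mark the disk read-only.
 *
 *	static int mytl_readsect(struct mtd_blktrans_dev *dev,
 *				 unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		// 512-byte blocks, so block N starts at byte N << 9
 *		return mtd_read(dev->mtd, (loff_t)block << 9, 512,
 *				&retlen, buf) ? 1 : 0;
 *	}
 *
 *	static void mytl_add_mtd(struct mtd_blktrans_ops *tr,
 *				 struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->tr = tr;
 *		dev->devnum = -1;		// take the first free number
 *		dev->size = mtd->size >> 9;	// capacity in 512-byte blocks
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static void mytl_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops mytl_tr = {
 *		.name		= "mytl",
 *		.major		= 0,		// dynamic major
 *		.blksize	= 512,
 *		.readsect	= mytl_readsect,
 *		.add_mtd	= mytl_add_mtd,
 *		.remove_dev	= mytl_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The layer is then brought up with register_mtd_blktrans(&mytl_tr) from
 * module init and torn down with deregister_mtd_blktrans(&mytl_tr).
 */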
static void __exit mtd_blktrans_exit(void)
{
        /*
         * No race here -- if someone's currently in register_mtd_blktrans
         * we're screwed anyway.
         */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");