linux/drivers/mtd/mtd_blkdevs.c
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

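/*
 * Device lifetime is managed with a kref: blktrans_dev_get()/_put() take
 * blktrans_ref_mutex so that the final release (which tears down the
 * request queue and gendisk) cannot race with a concurrent lookup
 * through disk->private_data.
 */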
static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        dev->disk->private_data = NULL;
        blk_cleanup_queue(dev->rq);
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev;

        mutex_lock(&blktrans_ref_mutex);
        dev = disk->private_data;

        if (!dev)
                goto unlock;
        kref_get(&dev->ref);
unlock:
        mutex_unlock(&blktrans_ref_mutex);
        return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        mutex_lock(&blktrans_ref_mutex);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&blktrans_ref_mutex);
}

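/*
 * Translate one block-layer request into the translation layer's
 * sector-sized readsect/writesect calls.  Flush and discard requests
 * are forwarded to the tr ops; anything else that is not a filesystem
 * request fails with -EIO.
 */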
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
        buf = bio_data(req->bio);

        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;

        if (req->cmd_flags & REQ_FLUSH)
                return tr->flush(dev);

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;

        if (req->cmd_flags & REQ_DISCARD)
                return tr->discard(dev, block, nsect);

        switch (rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return -EIO;
                rq_flush_dcache_pages(req);
                return 0;
        case WRITE:
                if (!tr->writesect)
                        return -EIO;

                rq_flush_dcache_pages(req);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return -EIO;
                return 0;
        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return -EIO;
        }
}

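/*
 * mtd_blktrans_cease_background() - should the background work yield?
 *
 * Translation layers poll this from their ->background() handler; a
 * non-zero return means new activity is pending and background
 * processing should stop for now.
 */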
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

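/*
 * Workqueue handler: drain the request queue, dispatching each request
 * to do_blktrans_request() under dev->lock.  When the queue goes idle,
 * give the translation layer one shot of ->background() work per idle
 * period before returning.
 */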
static void mtd_blktrans_work(struct work_struct *work)
{
        struct mtd_blktrans_dev *dev =
                container_of(work, struct mtd_blktrans_dev, work);
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request_queue *rq = dev->rq;
        struct request *req = NULL;
        int background_done = 0;

        spin_lock_irq(rq->queue_lock);

        while (1) {
                int res;

                dev->bg_stop = false;
                if (!req && !(req = blk_fetch_request(rq))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(rq->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(rq->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        break;
                }

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                if (!__blk_end_request_cur(req, res))
                        req = NULL;

                background_done = 0;
        }

        if (req)
                __blk_end_request_all(req, -EIO);

        spin_unlock_irq(rq->queue_lock);
}

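/*
 * The block layer's request_fn, called with the queue lock held; real
 * work is deferred to the per-device workqueue.  If the device has gone
 * away (queuedata cleared by del_mtd_blktrans_dev()), fail everything.
 */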
static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_dev *dev;
        struct request *req = NULL;

        dev = rq->queuedata;

        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
                        __blk_end_request_all(req, -ENODEV);
        else
                queue_work(dev->wq, &dev->work);
}

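/*
 * First open takes a device reference, pins the translation layer
 * module and the underlying MTD device, and calls the tr ops ->open()
 * hook; subsequent opens only bump dev->open.
 */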
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = 0;

        if (!dev)
                return -ERESTARTSYS; /* FIXME: busy loop! -arnd */

        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        kref_get(&dev->ref);
        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;
        dev->file_mode = mode;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

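/*
 * Last close undoes blktrans_open(): drop the extra kref and the module
 * and MTD references, calling the tr ops ->release() hook if the MTD
 * device is still around.
 */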
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

        if (!dev)
                return;

        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        kref_put(&dev->ref, blktrans_dev_release);
        module_put(dev->tr->owner);

        if (dev->mtd) {
                if (dev->tr->release)
                        dev->tr->release(dev);
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                          unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        switch (cmd) {
        case BLKFLSBUF:
                ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
                break;
        default:
                ret = -ENOTTY;
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

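/* Block device operations handed to the block layer via the gendisk. */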
static const struct block_device_operations mtd_block_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};

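/*
 * Register one translation-layer device: pick a free devnum (or honour
 * the one requested in new->devnum), allocate the gendisk and request
 * queue, spin up the per-device workqueue and publish the disk.  The
 * caller must hold mtd_table_mutex.
 */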
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        mutex_lock(&blktrans_ref_mutex);
        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        mutex_unlock(&blktrans_ref_mutex);
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26)) {
                mutex_unlock(&blktrans_ref_mutex);
                goto error1;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_unlock(&blktrans_ref_mutex);

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        /* Create gendisk */
        ret = -ENOMEM;
        gd = alloc_disk(1 << tr->part_bits);

        if (!gd)
                goto error2;

        new->disk = gd;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_block_ops;

        if (tr->part_bits) {
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        } else {
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);
        }

        set_capacity(gd, (new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

        if (!new->rq)
                goto error3;

        if (tr->flush)
                blk_queue_flush(new->rq, REQ_FLUSH);

        new->rq->queuedata = new;
        blk_queue_logical_block_size(new->rq, tr->blksize);

        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);

        if (tr->discard) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
                new->rq->limits.max_discard_sectors = UINT_MAX;
        }

        gd->queue = new->rq;

        /* Create processing workqueue */
        new->wq = alloc_workqueue("%s%d", 0, 0,
                                  tr->name, new->mtd->index);
        if (!new->wq)
                goto error4;
        INIT_WORK(&new->work, mtd_blktrans_work);

        gd->driverfs_dev = &new->mtd->dev;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                        new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;
error4:
        blk_cleanup_queue(new->rq);
error3:
        put_disk(new->disk);
error2:
        list_del(&new->list);
error1:
        return ret;
}

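/*
 * Tear down a translation-layer device: unpublish the disk, flush the
 * workqueue, fail any requests still queued, and drop the registration
 * reference.  The caller must hold mtd_table_mutex.
 */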
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                                old->disk_attributes);

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Stop the workqueue. This will complete any pending request. */
        destroy_workqueue(old->wq);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        blk_start_queue(old->rq);
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* If the device is currently open, tell the trans driver to close it,
           then put the MTD device, and don't touch it again. */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}


static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

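/*
 * Usage sketch (illustrative only; the names myblk_* and "myblk" are
 * hypothetical, not part of this file): a translation layer fills in
 * struct mtd_blktrans_ops and registers it once, typically from module
 * init.  Its ->add_mtd() hook then calls add_mtd_blktrans_dev() for
 * each MTD device it wants to expose as a block device:
 *
 *	static struct mtd_blktrans_ops myblk_tr = {
 *		.name		= "myblk",
 *		.major		= 0,	// 0 => dynamic major
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= myblk_readsect,
 *		.writesect	= myblk_writesect,
 *		.add_mtd	= myblk_add_mtd,
 *		.remove_dev	= myblk_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init myblk_init(void)
 *	{
 *		return register_mtd_blktrans(&myblk_tr);
 *	}
 */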
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from tripping
           us up. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }

        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);

        mutex_unlock(&mtd_table_mutex);
        return 0;
}

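/*
 * Unregister a translation layer: remove all of its devices, then give
 * back the block major.  The BUG_ON() catches a tr ops ->remove_dev()
 * that failed to take its device off tr->devs.
 */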
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");