linux/drivers/mtd/mtd_blkdevs.c
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

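/*
 * Final kref release: runs once the last reference is dropped. Detaches
 * the gendisk from the device, tears down the request queue, unlinks the
 * device from its translation layer's list and frees it.
 */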
static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        dev->disk->private_data = NULL;
        blk_cleanup_queue(dev->rq);
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
}

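/*
 * Look up the translation device behind a gendisk and take a reference,
 * or return NULL if it has already been released.
 */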
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev;

        mutex_lock(&blktrans_ref_mutex);
        dev = disk->private_data;

        if (!dev)
                goto unlock;
        kref_get(&dev->ref);
unlock:
        mutex_unlock(&blktrans_ref_mutex);
        return dev;
}

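/* Drop a reference; the final put releases the device. */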
static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        mutex_lock(&blktrans_ref_mutex);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&blktrans_ref_mutex);
}

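/*
 * Service one segment of a request: convert the 512-byte sector range
 * into tr->blksize blocks and feed them one at a time to the translation
 * layer's readsect/writesect/discard hooks. Called with dev->lock held;
 * returns 0 on success or a negative errno.
 */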
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

        buf = req->buffer;

        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;

        if (req->cmd_flags & REQ_DISCARD)
                return tr->discard(dev, block, nsect);

        switch (rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return -EIO;
                rq_flush_dcache_pages(req);
                return 0;
        case WRITE:
                if (!tr->writesect)
                        return -EIO;

                rq_flush_dcache_pages(req);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return -EIO;
                return 0;
        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return -EIO;
        }
}

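/*
 * For translation layers (e.g. mtdswap) to poll from their background()
 * hook: returns true once new I/O has arrived and background work should
 * yield.
 */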
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

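/*
 * Workqueue handler: drain the request queue, taking dev->lock around
 * each translated request. When the queue goes idle, give the optional
 * background() hook one run per idle period; new I/O raising bg_stop
 * re-arms it.
 */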
static void mtd_blktrans_work(struct work_struct *work)
{
        struct mtd_blktrans_dev *dev =
                container_of(work, struct mtd_blktrans_dev, work);
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request_queue *rq = dev->rq;
        struct request *req = NULL;
        int background_done = 0;

        spin_lock_irq(rq->queue_lock);

        while (1) {
                int res;

                dev->bg_stop = false;
                if (!req && !(req = blk_fetch_request(rq))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(rq->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(rq->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        break;
                }

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                if (!__blk_end_request_cur(req, res))
                        req = NULL;

                background_done = 0;
        }

        if (req)
                __blk_end_request_all(req, -EIO);

        spin_unlock_irq(rq->queue_lock);
}

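/*
 * request_fn, called by the block layer with the queue lock held: punt
 * the real work to the per-device workqueue, or fail all requests with
 * -ENODEV if the device has already been removed.
 */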
static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_dev *dev;
        struct request *req = NULL;

        dev = rq->queuedata;

        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
                        __blk_end_request_all(req, -ENODEV);
        else {
                /* Tell a running background() pass that new I/O arrived;
                   without this, mtd_blktrans_cease_background() could
                   never report true. */
                dev->bg_stop = true;
                queue_work(dev->wq, &dev->work);
        }
}

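/*
 * The first open takes an extra kref plus references on the translation
 * layer module and the underlying MTD device; later opens just bump
 * dev->open.
 */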
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = 0;

        if (!dev)
                return -ERESTARTSYS; /* FIXME: busy loop! -arnd */

        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        kref_get(&dev->ref);
        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;
        dev->file_mode = mode;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

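/*
 * Drop the per-open state; the last close releases the references taken
 * by the first blktrans_open().
 */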
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

        if (!dev)
                return;

        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        kref_put(&dev->ref, blktrans_dev_release);
        module_put(dev->tr->owner);

        if (dev->mtd) {
                if (dev->tr->release)
                        dev->tr->release(dev);
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
}

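/* HDIO_GETGEO: ask the translation layer for a geometry, if it has one. */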
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

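/* Only BLKFLSBUF is supported, mapped to the translation layer's flush(). */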
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                          unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        switch (cmd) {
        case BLKFLSBUF:
                ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
                break;
        default:
                ret = -ENOTTY;
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static const struct block_device_operations mtd_block_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};

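/*
 * Add one device for a translation layer: assign a devnum (the first
 * free one if new->devnum is -1), create and name the gendisk, set up
 * the request queue and the processing workqueue, then publish the
 * disk. The caller must hold mtd_table_mutex; the mutex_trylock() below
 * is a cheap assertion of that.
 */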
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        mutex_lock(&blktrans_ref_mutex);
        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        mutex_unlock(&blktrans_ref_mutex);
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26)) {
                mutex_unlock(&blktrans_ref_mutex);
                goto error1;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_unlock(&blktrans_ref_mutex);

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        /* Create gendisk */
        ret = -ENOMEM;
        gd = alloc_disk(1 << tr->part_bits);

        if (!gd)
                goto error2;

        new->disk = gd;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_block_ops;

        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);

        set_capacity(gd, (new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

        if (!new->rq)
                goto error3;

        new->rq->queuedata = new;
        blk_queue_logical_block_size(new->rq, tr->blksize);

        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);

        if (tr->discard) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
                new->rq->limits.max_discard_sectors = UINT_MAX;
        }

        gd->queue = new->rq;

        /* Create processing workqueue */
        new->wq = alloc_workqueue("%s%d", 0, 0,
                                  tr->name, new->mtd->index);
        if (!new->wq)
                goto error4;
        INIT_WORK(&new->work, mtd_blktrans_work);

        gd->driverfs_dev = &new->mtd->dev;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                         new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;
error4:
        blk_cleanup_queue(new->rq);
error3:
        put_disk(new->disk);
error2:
        list_del(&new->list);
error1:
        return ret;
}

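/*
 * Tear a device down in the reverse order of add_mtd_blktrans_dev():
 * unpublish the disk so no new requests arrive, drain the workqueue,
 * fail whatever is left on the queue (queuedata is NULL, so
 * mtd_blktrans_request() returns -ENODEV), and release the MTD device
 * if the disk is still held open.
 */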
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                   old->disk_attributes);

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Stop the workqueue. This will flush any pending requests. */
        destroy_workqueue(old->wq);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        blk_start_queue(old->rq);
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* If the device is currently open, tell the trans driver to close it,
           then put the MTD device, and don't touch it again. */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}

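/* MTD device removal notifier: drop every translation device built on it. */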
static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

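/*
 * Register a translation layer (mtdblock, ftl, ...): grab its block
 * major (a tr->major of 0 means register_blkdev() assigns one, which is
 * written back), hook it into blktrans_majors and offer it every MTD
 * device already present.
 */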
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }

        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);

        mutex_unlock(&mtd_table_mutex);
        return 0;
}

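/*
 * Unregister a translation layer: remove all of its devices, then give
 * the block major back.
 */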
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");