linux/drivers/mtd/mtd_blkdevs.c
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

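/*
 * Final kref release, called with blktrans_ref_mutex held once the last
 * reference is dropped: detach the gendisk, tear down the request queue
 * and free the device.
 */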
static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        dev->disk->private_data = NULL;
        blk_cleanup_queue(dev->rq);
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
}

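/*
 * Map a gendisk back to its mtd_blktrans_dev and take a reference on it.
 * Returns NULL if the device has already gone away (private_data is
 * cleared in blktrans_dev_release()).
 */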
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev;

        mutex_lock(&blktrans_ref_mutex);
        dev = disk->private_data;

        if (!dev)
                goto unlock;
        kref_get(&dev->ref);
unlock:
        mutex_unlock(&blktrans_ref_mutex);
        return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        mutex_lock(&blktrans_ref_mutex);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&blktrans_ref_mutex);
}


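/*
 * Service one request segment by translating it into readsect/writesect/
 * discard/flush calls on the translation layer. blk_rq_pos() counts in
 * 512-byte sectors, so "<< 9 >> tr->blkshift" converts the start position
 * into blocks of tr->blksize bytes. Runs with dev->lock held and the
 * queue lock dropped.
 */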
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
        buf = bio_data(req->bio);

        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;

        if (req_op(req) == REQ_OP_FLUSH)
                return tr->flush(dev);

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;

        if (req_op(req) == REQ_OP_DISCARD)
                return tr->discard(dev, block, nsect);

        if (rq_data_dir(req) == READ) {
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return -EIO;
                rq_flush_dcache_pages(req);
                return 0;
        } else {
                if (!tr->writesect)
                        return -EIO;

                rq_flush_dcache_pages(req);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return -EIO;
                return 0;
        }
}

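/*
 * Polled by a translation layer's ->background() callback: returns
 * nonzero once a new request has arrived and the background work should
 * yield the device.
 */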
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

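/*
 * Worker that drains the request queue. The queue lock is dropped and
 * dev->lock taken around each call into the translation layer; when the
 * queue goes idle, ->background() gets one chance to run per idle period.
 */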
static void mtd_blktrans_work(struct work_struct *work)
{
        struct mtd_blktrans_dev *dev =
                container_of(work, struct mtd_blktrans_dev, work);
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request_queue *rq = dev->rq;
        struct request *req = NULL;
        int background_done = 0;

        spin_lock_irq(rq->queue_lock);

        while (1) {
                int res;

                dev->bg_stop = false;
                if (!req && !(req = blk_fetch_request(rq))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(rq->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(rq->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        break;
                }

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                if (!__blk_end_request_cur(req, res))
                        req = NULL;

                background_done = 0;
        }

        spin_unlock_irq(rq->queue_lock);
}

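/*
 * request_fn, called with the queue lock held. The real work is deferred
 * to the workqueue; once the device is gone (queuedata cleared) requests
 * are failed immediately with -ENODEV.
 */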
static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_dev *dev;
        struct request *req = NULL;

        dev = rq->queuedata;

        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
                        __blk_end_request_all(req, -ENODEV);
        else {
                /* Tell any in-flight background operation to cease
                   (see mtd_blktrans_cease_background()), then kick the
                   worker. Without this, bg_stop would never become true
                   and the background op could not be interrupted. */
                dev->bg_stop = true;
                queue_work(dev->wq, &dev->work);
        }
}

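/*
 * Open path. Lock ordering is mtd_table_mutex then dev->lock; the first
 * opener takes a ref on the device, the trans driver module and the
 * underlying MTD device, while later opens just bump dev->open.
 */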
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = 0;

        if (!dev)
                return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

        mutex_lock(&mtd_table_mutex);
        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        kref_get(&dev->ref);
        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;
        dev->file_mode = mode;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        mutex_unlock(&mtd_table_mutex);
        blktrans_dev_put(dev);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&dev->lock);
        mutex_unlock(&mtd_table_mutex);
        blktrans_dev_put(dev);
        return ret;
}

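/*
 * Release path, the mirror image of blktrans_open(): the last close
 * drops the references taken on first open.
 */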
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

        if (!dev)
                return;

        mutex_lock(&mtd_table_mutex);
        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        kref_put(&dev->ref, blktrans_dev_release);
        module_put(dev->tr->owner);

        if (dev->mtd) {
                if (dev->tr->release)
                        dev->tr->release(dev);
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        mutex_unlock(&mtd_table_mutex);
        blktrans_dev_put(dev);
}

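/* Forward HDIO_GETGEO to the translation layer, if it implements it. */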
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

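/* Only BLKFLSBUF is handled here; everything else gets -ENOTTY. */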
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        switch (cmd) {
        case BLKFLSBUF:
                ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
                break;
        default:
                ret = -ENOTTY;
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static const struct block_device_operations mtd_block_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};

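/*
 * Register one translation-layer device: pick (or validate) a devnum,
 * allocate the gendisk and request queue, spin up the per-device
 * workqueue, and publish the disk.
 */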
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

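        /* The caller must hold mtd_table_mutex; the trylock below is
           only an assertion of that. */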
        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        mutex_lock(&blktrans_ref_mutex);
        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        mutex_unlock(&blktrans_ref_mutex);
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26)) {
                mutex_unlock(&blktrans_ref_mutex);
                goto error1;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_unlock(&blktrans_ref_mutex);

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        /* Create gendisk */
        ret = -ENOMEM;
        gd = alloc_disk(1 << tr->part_bits);

        if (!gd)
                goto error2;

        new->disk = gd;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_block_ops;

        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);

        set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

        if (!new->rq)
                goto error3;

        if (tr->flush)
                blk_queue_write_cache(new->rq, true, false);

        new->rq->queuedata = new;
        blk_queue_logical_block_size(new->rq, tr->blksize);

        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);

        if (tr->discard) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
        }

        gd->queue = new->rq;

        /* Create processing workqueue */
        new->wq = alloc_workqueue("%s%d", 0, 0,
                                  tr->name, new->mtd->index);
        if (!new->wq)
                goto error4;
        INIT_WORK(&new->work, mtd_blktrans_work);

        if (new->readonly)
                set_disk_ro(gd, 1);

        device_add_disk(&new->mtd->dev, gd);

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                        new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;
error4:
        blk_cleanup_queue(new->rq);
error3:
        put_disk(new->disk);
error2:
        list_del(&new->list);
error1:
        return ret;
}

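/*
 * Tear down a translation-layer device: unpublish the disk, flush the
 * worker, fail anything still queued, then close and detach the MTD.
 */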
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                                old->disk_attributes);

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Stop the workqueue. This runs any request still pending. */
        destroy_workqueue(old->wq);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        blk_start_queue(old->rq);
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* If the device is currently open, tell the trans driver to close
           it, then put the mtd device, and don't touch it again */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}

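/*
 * MTD core notifier callbacks: fan each MTD add/remove event out to
 * every registered translation layer.
 */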
static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

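/*
 * A translation layer registers itself once and is then handed every MTD
 * device, present and future, via ->add_mtd(). A minimal sketch of a
 * caller, modeled loosely on mtdblock_ro — the "flashblk" names and the
 * readsect/writesect/remove_dev helpers are hypothetical, and error
 * handling is elided:
 *
 *	static void flashblk_add_mtd(struct mtd_blktrans_ops *tr,
 *				     struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;	(capacity in 512-byte blocks)
 *		dev->tr = tr;
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops flashblk_tr = {
 *		.name		= "flashblk",
 *		.major		= 0,		(0 = dynamic major)
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= flashblk_readsect,
 *		.writesect	= flashblk_writesect,
 *		.add_mtd	= flashblk_add_mtd,
 *		.remove_dev	= flashblk_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * and then, from the module's init: register_mtd_blktrans(&flashblk_tr);
 */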
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from messing
           us up. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }

        /* With major == 0, register_blkdev() returns the major it chose */
        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);

        mutex_unlock(&mtd_table_mutex);
        return 0;
}

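/*
 * Unregister a translation layer: remove all of its devices, then drop
 * the major.
 */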
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");