linux/drivers/mtd/mtdblock.c
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
#include <linux/major.h>


struct mtdblk_dev {
        struct mtd_blktrans_dev mbd;
        int count;                      /* number of concurrent opens */
        struct mutex cache_mutex;       /* held while writing the cache back to flash */
        unsigned char *cache_data;      /* one erase block, allocated lazily on first write */
        unsigned long cache_offset;     /* device offset of the cached erase block */
        unsigned int cache_size;        /* erase block size, or 0 when caching is disabled */
        enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};

/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request.  To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to, until a different sector is required.
 */
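
/*
 * Illustrative example (hypothetical sizes, not tied to any particular
 * chip): with a 64 KiB erase block, a 512-byte block write at device
 * offset 0x12200 lands in the erase block starting at sect_start =
 * 0x10000, at offset = 0x2200.  The whole 64 KiB block is read into
 * cache_data, the 512 bytes are copied in at that offset, and the cache
 * is marked dirty.  Only when a different erase block is touched, or on
 * flush/release, is the cached block erased and written back in one go
 * by erase_write().
 */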

static void erase_callback(struct erase_info *done)
{
        wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
        wake_up(wait_q);
}

static int erase_write(struct mtd_info *mtd, unsigned long pos,
                       int len, const char *buf)
{
        struct erase_info erase;
        DECLARE_WAITQUEUE(wait, current);
        wait_queue_head_t wait_q;
        size_t retlen;
        int ret;

        /*
         * First, let's erase the flash block.
         */

        init_waitqueue_head(&wait_q);
        erase.mtd = mtd;
        erase.callback = erase_callback;
        erase.addr = pos;
        erase.len = len;
        erase.priv = (u_long)&wait_q;

        /*
         * Register on the wait queue before issuing the erase so that the
         * wake_up() from erase_callback() cannot be missed, even if the
         * erase completes immediately.
         */
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&wait_q, &wait);

        ret = mtd_erase(mtd, &erase);
        if (ret) {
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&wait_q, &wait);
                printk(KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
                                    "on \"%s\" failed\n",
                       pos, len, mtd->name);
                return ret;
        }

        schedule();  /* Wait for erase to finish. */
        remove_wait_queue(&wait_q, &wait);

        /*
         * Next, write the data to flash.
         */

        ret = mtd_write(mtd, pos, len, &retlen, buf);
        if (ret)
                return ret;
        if (retlen != len)
                return -EIO;
        return 0;
}


static int write_cached_data(struct mtdblk_dev *mtdblk)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        int ret;

        if (mtdblk->cache_state != STATE_DIRTY)
                return 0;

        pr_debug("mtdblock: writing cached data for \"%s\" "
                        "at 0x%lx, size 0x%x\n", mtd->name,
                        mtdblk->cache_offset, mtdblk->cache_size);

        ret = erase_write(mtd, mtdblk->cache_offset,
                          mtdblk->cache_size, mtdblk->cache_data);
        if (ret)
                return ret;

        /*
         * Here we could arguably set the cache state to STATE_CLEAN.
         * However this could lead to inconsistency since we will not
         * be notified if this content is altered on the flash by other
         * means.  Let's declare it empty and leave buffering tasks to
         * the buffer cache instead.
         */
        mtdblk->cache_state = STATE_EMPTY;
        return 0;
}


static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos,
                           int len, const char *buf)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        unsigned int sect_size = mtdblk->cache_size;
        size_t retlen;
        int ret;

        pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
                mtd->name, pos, len);

        if (!sect_size)
                return mtd_write(mtd, pos, len, &retlen, buf);

        while (len > 0) {
                unsigned long sect_start = (pos/sect_size)*sect_size;
                unsigned int offset = pos - sect_start;
                unsigned int size = sect_size - offset;
                if (size > len)
                        size = len;

                if (size == sect_size) {
                        /*
                         * We are covering a whole sector.  Thus there is no
                         * need to bother with the cache while it may still be
                         * useful for other partial writes.
                         */
                        ret = erase_write(mtd, pos, size, buf);
                        if (ret)
                                return ret;
                } else {
                        /* Partial sector: need to use the cache */

                        if (mtdblk->cache_state == STATE_DIRTY &&
                            mtdblk->cache_offset != sect_start) {
                                ret = write_cached_data(mtdblk);
                                if (ret)
                                        return ret;
                        }

                        if (mtdblk->cache_state == STATE_EMPTY ||
                            mtdblk->cache_offset != sect_start) {
                                /* fill the cache with the current sector */
                                mtdblk->cache_state = STATE_EMPTY;
                                ret = mtd_read(mtd, sect_start, sect_size,
                                               &retlen, mtdblk->cache_data);
                                if (ret)
                                        return ret;
                                if (retlen != sect_size)
                                        return -EIO;

                                mtdblk->cache_offset = sect_start;
                                mtdblk->cache_size = sect_size;
                                mtdblk->cache_state = STATE_CLEAN;
                        }

                        /* write data to our local cache */
                        memcpy(mtdblk->cache_data + offset, buf, size);
                        mtdblk->cache_state = STATE_DIRTY;
                }

                buf += size;
                pos += size;
                len -= size;
        }

        return 0;
}


static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos,
                          int len, char *buf)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        unsigned int sect_size = mtdblk->cache_size;
        size_t retlen;
        int ret;

        pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
                        mtd->name, pos, len);

        if (!sect_size)
                return mtd_read(mtd, pos, len, &retlen, buf);

        while (len > 0) {
                unsigned long sect_start = (pos/sect_size)*sect_size;
                unsigned int offset = pos - sect_start;
                unsigned int size = sect_size - offset;
                if (size > len)
                        size = len;

                /*
                 * Check whether the requested data is already cached.  Read
                 * from our internal cache if it contains what we want;
                 * otherwise read the data directly from flash.
                 */
                if (mtdblk->cache_state != STATE_EMPTY &&
                    mtdblk->cache_offset == sect_start) {
                        memcpy(buf, mtdblk->cache_data + offset, size);
                } else {
                        ret = mtd_read(mtd, pos, size, &retlen, buf);
                        if (ret)
                                return ret;
                        if (retlen != size)
                                return -EIO;
                }

                buf += size;
                pos += size;
                len -= size;
        }

        return 0;
}

static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
        return do_cached_read(mtdblk, block<<9, 512, buf);
}

static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
        if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
                mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
                if (!mtdblk->cache_data)
                        return -EINTR;
                /* -EINTR is not really correct, but it is the best match
                 * documented in man 2 write for all cases.  We could also
                 * return -EAGAIN sometimes, but why bother?
                 */
        }
        return do_cached_write(mtdblk, block<<9, 512, buf);
}
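
/*
 * Note: the block translation layer hands us 512-byte sectors (matching
 * .blksize in mtdblock_tr below), so block<<9 converts a sector number
 * into a byte offset on the MTD device for do_cached_read/write().
 */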

static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
        struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

        pr_debug("mtdblock_open\n");

        if (mtdblk->count) {
                mtdblk->count++;
                return 0;
        }

        /* OK, it's not open. Create cache info for it */
        mtdblk->count = 1;
        mutex_init(&mtdblk->cache_mutex);
        mtdblk->cache_state = STATE_EMPTY;
        if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
                mtdblk->cache_size = mbd->mtd->erasesize;
                mtdblk->cache_data = NULL;
        }

        pr_debug("ok\n");

        return 0;
}

static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
        struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

        pr_debug("mtdblock_release\n");

        mutex_lock(&mtdblk->cache_mutex);
        write_cached_data(mtdblk);
        mutex_unlock(&mtdblk->cache_mutex);

        if (!--mtdblk->count) {
                /*
                 * It was the last usage. Free the cache, but only sync if
                 * opened for writing.
                 */
                if (mbd->file_mode & FMODE_WRITE)
                        mtd_sync(mbd->mtd);
                vfree(mtdblk->cache_data);
        }

        pr_debug("ok\n");
}

static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

        mutex_lock(&mtdblk->cache_mutex);
        write_cached_data(mtdblk);
        mutex_unlock(&mtdblk->cache_mutex);
        mtd_sync(dev->mtd);
        return 0;
}

static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
        struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return;

        dev->mbd.mtd = mtd;
        dev->mbd.devnum = mtd->index;

        dev->mbd.size = mtd->size >> 9;
        dev->mbd.tr = tr;

        if (!(mtd->flags & MTD_WRITEABLE))
                dev->mbd.readonly = 1;

        if (add_mtd_blktrans_dev(&dev->mbd))
                kfree(dev);
}

static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
        del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops mtdblock_tr = {
        .name           = "mtdblock",
        .major          = MTD_BLOCK_MAJOR,
        .part_bits      = 0,
        .blksize        = 512,
        .open           = mtdblock_open,
        .flush          = mtdblock_flush,
        .release        = mtdblock_release,
        .readsect       = mtdblock_readsect,
        .writesect      = mtdblock_writesect,
        .add_mtd        = mtdblock_add_mtd,
        .remove_dev     = mtdblock_remove_dev,
        .owner          = THIS_MODULE,
};
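
/*
 * Rough sketch of how this ties together (an informal summary of the
 * blktrans usage above, not a normative description of that layer):
 * register_mtd_blktrans() registers mtdblock_tr, the translation layer
 * then calls mtdblock_add_mtd() for each MTD device, which creates a
 * block device of mtd->size >> 9 sectors (/dev/mtdblockN, with no extra
 * partition minor bits since part_bits is 0), and each 512-byte request
 * on it is routed to mtdblock_readsect()/mtdblock_writesect().
 */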

static int __init init_mtdblock(void)
{
        return register_mtd_blktrans(&mtdblock_tr);
}

static void __exit cleanup_mtdblock(void)
{
        deregister_mtd_blktrans(&mtdblock_tr);
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
