uboot/drivers/mtd/mtdpart.c
/*
 * Simple MTD partitioning layer
 *
 * (C) 2000 Nicolas Pitre <nico@cam.org>
 *
 * This code is GPL
 *
 *      02-21-2002      Thomas Gleixner <gleixner@autronix.de>
 *                      added support for read_oob, write_oob
 */

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compat.h>

/* Our partition linked list */
struct list_head mtd_partitions;

/* Our partition node structure */
struct mtd_part {
        struct mtd_info mtd;
        struct mtd_info *master;
        uint64_t offset;
        int index;
        struct list_head list;
        int registered;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.  The cast is only valid
 * because the embedded mtd_info is the first member of struct mtd_part.
 */
#define PART(x)  ((struct mtd_part *)(x))


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
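/*
 * Illustrative example (the numbers are hypothetical): for a partition that
 * begins at master offset 0x100000, a part_read() with from = 0x2000 is
 * issued to the master as a read at 0x102000, i.e. at from + part->offset,
 * after the length has been clipped to the partition size.
 */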

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        struct mtd_ecc_stats stats;
        int res;

        /* Snapshot the master's ECC stats so the delta caused by this read
         * can be credited to this partition's statistics below. */
        stats = part->master->ecc_stats;

        /* Clip the request to the partition boundary */
        if (from >= mtd->size)
                len = 0;
        else if (from + len > mtd->size)
                len = mtd->size - from;
        res = part->master->read(part->master, from + part->offset,
                                   len, retlen, buf);
        if (unlikely(res)) {
                if (res == -EUCLEAN)
                        mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
                if (res == -EBADMSG)
                        mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
        }
        return res;
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
                struct mtd_oob_ops *ops)
{
        struct mtd_part *part = PART(mtd);
        int res;

        if (from >= mtd->size)
                return -EINVAL;
        if (ops->datbuf && from + ops->len > mtd->size)
                return -EINVAL;
        res = part->master->read_oob(part->master, from + part->offset, ops);

        if (unlikely(res)) {
                if (res == -EUCLEAN)
                        mtd->ecc_stats.corrected++;
                if (res == -EBADMSG)
                        mtd->ecc_stats.failed++;
        }
        return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->read_user_prot_reg(part->master, from,
                                        len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
                struct otp_info *buf, size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->read_fact_prot_reg(part->master, from,
                                        len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
                size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
                size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (to >= mtd->size)
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
        return part->master->write(part->master, to + part->offset,
                                    len, retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (to >= mtd->size)
                len = 0;
        else if (to + len > mtd->size)
                len = mtd->size - to;
        return part->master->panic_write(part->master, to + part->offset,
                                    len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
                struct mtd_oob_ops *ops)
{
        struct mtd_part *part = PART(mtd);

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if (to >= mtd->size)
                return -EINVAL;
        if (ops->datbuf && to + ops->len > mtd->size)
                return -EINVAL;
        return part->master->write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->write_user_prot_reg(part->master, from,
                                        len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->lock_user_prot_reg(part->master, from, len);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_part *part = PART(mtd);
        int ret;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (instr->addr >= mtd->size)
                return -EINVAL;
        instr->addr += part->offset;
        ret = part->master->erase(part->master, instr);
        if (ret) {
                if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                        instr->fail_addr -= part->offset;
                instr->addr -= part->offset;
        }
        return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
        if (instr->mtd->erase == part_erase) {
                struct mtd_part *part = PART(instr->mtd);

                if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                        instr->fail_addr -= part->offset;
                instr->addr -= part->offset;
        }
        if (instr->callback)
                instr->callback(instr);
}

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
        return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_part *part = PART(mtd);
        if ((len + ofs) > mtd->size)
                return -EINVAL;
        return part->master->unlock(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        part->master->sync(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
        return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        int res;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (ofs >= mtd->size)
                return -EINVAL;
        ofs += part->offset;
        res = part->master->block_markbad(part->master, ofs);
        if (!res)
                mtd->ecc_stats.badblocks++;
        return res;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
        struct mtd_part *slave, *next;

        list_for_each_entry_safe(slave, next, &mtd_partitions, list)
                if (slave->master == master) {
                        list_del(&slave->list);
                        if (slave->registered)
                                del_mtd_device(&slave->mtd);
                        kfree(slave);
                }

        return 0;
}

static struct mtd_part *add_one_partition(struct mtd_info *master,
                const struct mtd_partition *part, int partno,
                uint64_t cur_offset)
{
        struct mtd_part *slave;

        /* allocate the partition structure */
        slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        if (!slave) {
                printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
                        master->name);
                del_mtd_partitions(master);
                return NULL;
        }
        list_add(&slave->list, &mtd_partitions);

        /* set up the MTD object for this partition */
        slave->mtd.type = master->type;
        slave->mtd.flags = master->flags & ~part->mask_flags;
        slave->mtd.size = part->size;
        slave->mtd.writesize = master->writesize;
        slave->mtd.oobsize = master->oobsize;
        slave->mtd.oobavail = master->oobavail;
        slave->mtd.subpage_sft = master->subpage_sft;

        slave->mtd.name = part->name;
        slave->mtd.owner = master->owner;

        slave->mtd.read = part_read;
        slave->mtd.write = part_write;

        if (master->panic_write)
                slave->mtd.panic_write = part_panic_write;

        if (master->read_oob)
                slave->mtd.read_oob = part_read_oob;
        if (master->write_oob)
                slave->mtd.write_oob = part_write_oob;
        if (master->read_user_prot_reg)
                slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
        if (master->read_fact_prot_reg)
                slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
        if (master->write_user_prot_reg)
                slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
        if (master->lock_user_prot_reg)
                slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
        if (master->get_user_prot_info)
                slave->mtd.get_user_prot_info = part_get_user_prot_info;
        if (master->get_fact_prot_info)
                slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
        if (master->sync)
                slave->mtd.sync = part_sync;
        if (master->lock)
                slave->mtd.lock = part_lock;
        if (master->unlock)
                slave->mtd.unlock = part_unlock;
        if (master->block_isbad)
                slave->mtd.block_isbad = part_block_isbad;
        if (master->block_markbad)
                slave->mtd.block_markbad = part_block_markbad;
        slave->mtd.erase = part_erase;
        slave->master = master;
        slave->offset = part->offset;
        slave->index = partno;

        if (slave->offset == MTDPART_OFS_APPEND)
                slave->offset = cur_offset;
        if (slave->offset == MTDPART_OFS_NXTBLK) {
                slave->offset = cur_offset;
                if (mtd_mod_by_eb(cur_offset, master) != 0) {
                        /* Round up to next erasesize */
                        slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
                        printk(KERN_NOTICE "Moving partition %d: "
                               "0x%012llx -> 0x%012llx\n", partno,
                               (unsigned long long)cur_offset, (unsigned long long)slave->offset);
                }
        }
        if (slave->mtd.size == MTDPART_SIZ_FULL)
                slave->mtd.size = master->size - slave->offset;

        printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
                (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

        /* let's do some sanity checks */
        if (slave->offset >= master->size) {
                /* let's register it anyway to preserve ordering */
                slave->offset = 0;
                slave->mtd.size = 0;
                printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
                        part->name);
                goto out_register;
        }
        if (slave->offset + slave->mtd.size > master->size) {
                slave->mtd.size = master->size - slave->offset;
                printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
                        part->name, master->name, (unsigned long long)slave->mtd.size);
        }
        if (master->numeraseregions > 1) {
                /* Deal with variable erase size stuff */
                int i, max = master->numeraseregions;
                u64 end = slave->offset + slave->mtd.size;
                struct mtd_erase_region_info *regions = master->eraseregions;

                /* Find the first erase region which is part of this
                 * partition. */
                for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
                        ;
                /* The loop searched for the region _behind_ the first one */
                i--;

                /* Pick biggest erasesize */
                for (; i < max && regions[i].offset < end; i++) {
                        if (slave->mtd.erasesize < regions[i].erasesize) {
                                slave->mtd.erasesize = regions[i].erasesize;
                        }
                }
                BUG_ON(slave->mtd.erasesize == 0);
        } else {
                /* Single erase size */
                slave->mtd.erasesize = master->erasesize;
        }

        if ((slave->mtd.flags & MTD_WRITEABLE) &&
            mtd_mod_by_eb(slave->offset, &slave->mtd)) {
                /* Doesn't start on a boundary of major erase size */
                /* FIXME: Let it be writable if it is on a boundary of
                 * _minor_ erase size though */
                slave->mtd.flags &= ~MTD_WRITEABLE;
                printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
                        part->name);
        }
        if ((slave->mtd.flags & MTD_WRITEABLE) &&
            mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
                slave->mtd.flags &= ~MTD_WRITEABLE;
                printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
                        part->name);
        }

        slave->mtd.ecclayout = master->ecclayout;
        if (master->block_isbad) {
                uint64_t offs = 0;

                while (offs < slave->mtd.size) {
                        if (master->block_isbad(master,
                                                offs + slave->offset))
                                slave->mtd.ecc_stats.badblocks++;
                        offs += slave->mtd.erasesize;
                }
        }

out_register:
        if (part->mtdp) {
                /* store the object pointer (caller may or may not register it) */
                *part->mtdp = &slave->mtd;
                slave->registered = 0;
        } else {
                /* register our partition */
                add_mtd_device(&slave->mtd);
                slave->registered = 1;
        }
        return slave;
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */
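/*
 * Minimal usage sketch (the partition names and sizes are hypothetical and
 * shown only to illustrate the calling convention):
 *
 *      static struct mtd_partition board_parts[] = {
 *              { .name = "u-boot", .offset = 0,                  .size = 0x80000 },
 *              { .name = "env",    .offset = MTDPART_OFS_APPEND, .size = 0x20000 },
 *              { .name = "rootfs", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL },
 *      };
 *
 *      add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 *
 * MTDPART_OFS_APPEND places a partition directly after the previous one,
 * MTDPART_OFS_NXTBLK also rounds that offset up to the next erase block, and
 * MTDPART_SIZ_FULL extends the partition to the end of the master device
 * (see add_one_partition() above for how these values are resolved).
 */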

int add_mtd_partitions(struct mtd_info *master,
                       const struct mtd_partition *parts,
                       int nbparts)
{
        struct mtd_part *slave;
        uint64_t cur_offset = 0;
        int i;

        /*
         * Need to init the list here, since LIST_INIT() does not
         * work on platforms where relocation has problems (like MIPS
         * & PPC).
         */
        if (mtd_partitions.next == NULL)
                INIT_LIST_HEAD(&mtd_partitions);

        printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

        for (i = 0; i < nbparts; i++) {
                slave = add_one_partition(master, parts + i, i, cur_offset);
                if (!slave)
                        return -ENOMEM;
                cur_offset = slave->offset + slave->mtd.size;
        }

        return 0;
}
