linux/drivers/mtd/mtdconcat.c
/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
        struct mtd_info mtd;
        int num_subdev;
        struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)    \
        ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
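
/*
 * Illustration: SIZEOF_STRUCT_MTD_CONCAT(2) is the size of one struct
 * mtd_concat immediately followed by two struct mtd_info pointers;
 * mtd_concat_create() below aims ->subdev at that trailing array via
 * "concat->subdev = (struct mtd_info **)(concat + 1)".
 */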

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))
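
/*
 * Note: the cast is only valid because 'mtd' is the first member of
 * struct mtd_concat, making this equivalent to a zero-offset container_of().
 */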

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
            size_t * retlen, u_char * buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int ret = 0, err;
        int i;

        *retlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, retsize;

                if (from >= subdev->size) {
                        /* Not destined for this subdev */
                        size = 0;
                        from -= subdev->size;
                        continue;
                }
                if (from + len > subdev->size)
                        /* First part goes into this subdev */
                        size = subdev->size - from;
                else
                        /* Entire transaction goes into this subdev */
                        size = len;

                err = subdev->read(subdev, from, size, &retsize, buf);

                /* Save information about bitflips! */
                if (unlikely(err)) {
                        if (err == -EBADMSG) {
                                mtd->ecc_stats.failed++;
                                ret = err;
                        } else if (err == -EUCLEAN) {
                                mtd->ecc_stats.corrected++;
                                /* Do not overwrite -EBADMSG !! */
                                if (!ret)
                                        ret = err;
                        } else
                                return err;
                }

                *retlen += retsize;
                len -= size;
                if (len == 0)
                        return ret;

                buf += size;
                from = 0;
        }
        return -EINVAL;
}
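
/*
 * Worked example of the translation above (hypothetical geometry): with
 * two 1 MiB subdevices, concat_read(mtd, 0xff000, 0x2000, ...) first reads
 * size = 0x100000 - 0xff000 = 0x1000 bytes from subdev 0, then resets
 * 'from' to 0 and reads the remaining 0x1000 bytes from the start of
 * subdev 1, accumulating *retlen across both calls.
 */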

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
             size_t * retlen, const u_char * buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int err = -EINVAL;
        int i;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        *retlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, retsize;

                if (to >= subdev->size) {
                        size = 0;
                        to -= subdev->size;
                        continue;
                }
                if (to + len > subdev->size)
                        size = subdev->size - to;
                else
                        size = len;

                if (!(subdev->flags & MTD_WRITEABLE))
                        err = -EROFS;
                else
                        err = subdev->write(subdev, to, size, &retsize, buf);

                if (err)
                        break;

                *retlen += retsize;
                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                buf += size;
                to = 0;
        }
        return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
                unsigned long count, loff_t to, size_t * retlen)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct kvec *vecs_copy;
        unsigned long entry_low, entry_high;
        size_t total_len = 0;
        int i;
        int err = -EINVAL;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        *retlen = 0;

        /* Calculate total length of data */
        for (i = 0; i < count; i++)
                total_len += vecs[i].iov_len;

        /* Do not allow write past end of device */
        if ((to + total_len) > mtd->size)
                return -EINVAL;

        /* Check alignment */
        if (mtd->writesize > 1) {
                uint64_t __to = to;
                if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
                        return -EINVAL;
        }

        /* make a copy of vecs */
        vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
        if (!vecs_copy)
                return -ENOMEM;

        entry_low = 0;
        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, wsize, retsize, old_iov_len;

                if (to >= subdev->size) {
                        to -= subdev->size;
                        continue;
                }

                size = min_t(uint64_t, total_len, subdev->size - to);
                wsize = size; /* store for future use */

                entry_high = entry_low;
                while (entry_high < count) {
                        if (size <= vecs_copy[entry_high].iov_len)
                                break;
                        size -= vecs_copy[entry_high++].iov_len;
                }

                old_iov_len = vecs_copy[entry_high].iov_len;
                vecs_copy[entry_high].iov_len = size;

                if (!(subdev->flags & MTD_WRITEABLE))
                        err = -EROFS;
                else
                        err = subdev->writev(subdev, &vecs_copy[entry_low],
                                entry_high - entry_low + 1, to, &retsize);

                vecs_copy[entry_high].iov_len = old_iov_len - size;
                vecs_copy[entry_high].iov_base += size;

                entry_low = entry_high;

                if (err)
                        break;

                *retlen += retsize;
                total_len -= wsize;

                if (total_len == 0)
                        break;

                err = -EINVAL;
                to = 0;
        }

        kfree(vecs_copy);
        return err;
}
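
/*
 * Illustration of the kvec surgery above (hypothetical numbers): suppose
 * two 2 KiB vectors straddle a subdevice boundary that falls 3 KiB in.
 * The straddling vector's iov_len is temporarily clipped to 1 KiB for the
 * first subdev->writev() call, then its iov_base/iov_len are advanced
 * past the written part so the remaining 1 KiB is written to the next
 * subdevice at offset 0.
 */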

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_oob_ops devops = *ops;
        int i, err, ret = 0;

        ops->retlen = ops->oobretlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (from >= subdev->size) {
                        from -= subdev->size;
                        continue;
                }

                /* partial read ? */
                if (from + devops.len > subdev->size)
                        devops.len = subdev->size - from;

                err = subdev->read_oob(subdev, from, &devops);
                ops->retlen += devops.retlen;
                ops->oobretlen += devops.oobretlen;

                /* Save information about bitflips! */
                if (unlikely(err)) {
                        if (err == -EBADMSG) {
                                mtd->ecc_stats.failed++;
                                ret = err;
                        } else if (err == -EUCLEAN) {
                                mtd->ecc_stats.corrected++;
                                /* Do not overwrite -EBADMSG !! */
                                if (!ret)
                                        ret = err;
                        } else
                                return err;
                }

                if (devops.datbuf) {
                        devops.len = ops->len - ops->retlen;
                        if (!devops.len)
                                return ret;
                        devops.datbuf += devops.retlen;
                }
                if (devops.oobbuf) {
                        devops.ooblen = ops->ooblen - ops->oobretlen;
                        if (!devops.ooblen)
                                return ret;
                        /* advance by this subdevice's OOB count,
                           not the running total */
                        devops.oobbuf += devops.oobretlen;
                }

                from = 0;
        }
        return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_oob_ops devops = *ops;
        int i, err;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        /* Reset both counters; oobretlen is read below,
           just as in concat_read_oob() */
        ops->retlen = ops->oobretlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (to >= subdev->size) {
                        to -= subdev->size;
                        continue;
                }

                /* partial write ? */
                if (to + devops.len > subdev->size)
                        devops.len = subdev->size - to;

                err = subdev->write_oob(subdev, to, &devops);
                ops->retlen += devops.retlen;
                ops->oobretlen += devops.oobretlen;
                if (err)
                        return err;

                if (devops.datbuf) {
                        devops.len = ops->len - ops->retlen;
                        if (!devops.len)
                                return 0;
                        devops.datbuf += devops.retlen;
                }
                if (devops.oobbuf) {
                        devops.ooblen = ops->ooblen - ops->oobretlen;
                        if (!devops.ooblen)
                                return 0;
                        devops.oobbuf += devops.oobretlen;
                }
                to = 0;
        }
        return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
        wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
        int err;
        wait_queue_head_t waitq;
        DECLARE_WAITQUEUE(wait, current);

        /*
         * This code was stol^H^H^H^Hinspired by mtdchar.c
         */
        init_waitqueue_head(&waitq);

        erase->mtd = mtd;
        erase->callback = concat_erase_callback;
        erase->priv = (unsigned long) &waitq;

        /*
         * FIXME: Allow INTERRUPTIBLE. Which means
         * not having the wait_queue head on the stack.
         */
        err = mtd->erase(mtd, erase);
        if (!err) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&waitq, &wait);
                if (erase->state != MTD_ERASE_DONE
                    && erase->state != MTD_ERASE_FAILED)
                        schedule();
                remove_wait_queue(&waitq, &wait);
                set_current_state(TASK_RUNNING);

                err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
        }
        return err;
}
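
/*
 * Note: the erase is driven synchronously here. mtd->erase() starts the
 * request and concat_erase_callback() wakes the on-stack waitqueue once
 * the subdevice sets erase->state to MTD_ERASE_DONE or MTD_ERASE_FAILED,
 * mirroring the wait logic in mtdchar.c.
 */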

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_info *subdev;
        int i, err;
        uint64_t length, offset = 0;
        struct erase_info *erase;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if (instr->addr > concat->mtd.size)
                return -EINVAL;

        if (instr->len + instr->addr > concat->mtd.size)
                return -EINVAL;

        /*
         * Check for proper erase block alignment of the to-be-erased area.
         * It is easier to do this based on the super device's erase
         * region info rather than looking at each particular sub-device
         * in turn.
         */
        if (!concat->mtd.numeraseregions) {
                /* the easy case: device has uniform erase block size */
                if (instr->addr & (concat->mtd.erasesize - 1))
                        return -EINVAL;
                if (instr->len & (concat->mtd.erasesize - 1))
                        return -EINVAL;
        } else {
                /* device has variable erase size */
                struct mtd_erase_region_info *erase_regions =
                    concat->mtd.eraseregions;

                /*
                 * Find the erase region where the to-be-erased area begins:
                 */
                for (i = 0; i < concat->mtd.numeraseregions &&
                     instr->addr >= erase_regions[i].offset; i++) ;
                --i;

                /*
                 * Now erase_regions[i] is the region in which the
                 * to-be-erased area begins. Verify that the starting
                 * offset is aligned to this region's erase size:
                 */
                if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
                        return -EINVAL;

                /*
                 * now find the erase region where the to-be-erased area ends:
                 */
                for (; i < concat->mtd.numeraseregions &&
                     (instr->addr + instr->len) >= erase_regions[i].offset;
                     ++i) ;
                --i;
                /*
                 * check if the ending offset is aligned to this region's erase size
                 */
                if (i < 0 || ((instr->addr + instr->len) &
                                        (erase_regions[i].erasesize - 1)))
                        return -EINVAL;
        }

        instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

        /* make a local copy of instr to avoid modifying the caller's struct */
        erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

        if (!erase)
                return -ENOMEM;

        *erase = *instr;
        length = instr->len;

        /*
         * find the subdevice where the to-be-erased area begins, adjust
         * starting offset to be relative to the subdevice start
         */
        for (i = 0; i < concat->num_subdev; i++) {
                subdev = concat->subdev[i];
                if (subdev->size <= erase->addr) {
                        erase->addr -= subdev->size;
                        offset += subdev->size;
                } else {
                        break;
                }
        }

        /* must never happen since size limit has been verified above */
        BUG_ON(i >= concat->num_subdev);

        /* now do the erase: */
        err = 0;
        for (; length > 0; i++) {
                /* loop for all subdevices affected by this request */
                subdev = concat->subdev[i];     /* get current subdevice */

                /* limit length to subdevice's size: */
                if (erase->addr + length > subdev->size)
                        erase->len = subdev->size - erase->addr;
                else
                        erase->len = length;

                if (!(subdev->flags & MTD_WRITEABLE)) {
                        err = -EROFS;
                        break;
                }
                length -= erase->len;
                if ((err = concat_dev_erase(subdev, erase))) {
                        /* sanity check: should never happen since
                         * block alignment has been checked above */
                        BUG_ON(err == -EINVAL);
                        if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                                instr->fail_addr = erase->fail_addr + offset;
                        break;
                }
                /*
                 * erase->addr specifies the offset of the area to be
                 * erased *within the current subdevice*. It can be
                 * non-zero only the first time through this loop, i.e.
                 * for the first subdevice where blocks need to be erased.
                 * All the following erases must begin at the start of the
                 * current subdevice, i.e. at offset zero.
                 */
                erase->addr = 0;
                offset += subdev->size;
        }
        instr->state = erase->state;
        kfree(erase);
        if (err)
                return err;

        if (instr->callback)
                instr->callback(instr);
        return 0;
}
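
/*
 * Example (hypothetical geometry): with two 1 MiB subdevices and a
 * 128 KiB erasesize, erasing 256 KiB starting 128 KiB before the end of
 * subdev 0 issues two concat_dev_erase() calls: 128 KiB at the translated
 * offset within subdev 0, then 128 KiB at offset 0 of subdev 1. On
 * failure, 'offset' translates erase->fail_addr back into the concat
 * device's address space.
 */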

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                uint64_t size;

                if (ofs >= subdev->size) {
                        size = 0;
                        ofs -= subdev->size;
                        continue;
                }
                if (ofs + len > subdev->size)
                        size = subdev->size - ofs;
                else
                        size = len;

                if (subdev->lock) {
                        err = subdev->lock(subdev, ofs, size);
                        if (err)
                                break;
                } else
                        err = -EOPNOTSUPP;

                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                ofs = 0;
        }

        return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = 0;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                uint64_t size;

                if (ofs >= subdev->size) {
                        size = 0;
                        ofs -= subdev->size;
                        continue;
                }
                if (ofs + len > subdev->size)
                        size = subdev->size - ofs;
                else
                        size = len;

                if (subdev->unlock) {
                        err = subdev->unlock(subdev, ofs, size);
                        if (err)
                                break;
                } else
                        err = -EOPNOTSUPP;

                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                ofs = 0;
        }

        return err;
}

static void concat_sync(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                subdev->sync(subdev);
        }
}

static int concat_suspend(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, rc = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                if ((rc = subdev->suspend(subdev)) < 0)
                        return rc;
        }
        return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                subdev->resume(subdev);
        }
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, res = 0;

        if (!concat->subdev[0]->block_isbad)
                return res;

        if (ofs > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (ofs >= subdev->size) {
                        ofs -= subdev->size;
                        continue;
                }

                res = subdev->block_isbad(subdev, ofs);
                break;
        }

        return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;

        if (!concat->subdev[0]->block_markbad)
                return 0;

        if (ofs > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (ofs >= subdev->size) {
                        ofs -= subdev->size;
                        continue;
                }

                err = subdev->block_markbad(subdev, ofs);
                if (!err)
                        mtd->ecc_stats.badblocks++;
                break;
        }

        return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
                                              unsigned long len,
                                              unsigned long offset,
                                              unsigned long flags)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (offset >= subdev->size) {
                        offset -= subdev->size;
                        continue;
                }

                /* we've found the subdev over which the mapping will reside */
                if (offset + len > subdev->size)
                        return (unsigned long) -EINVAL;

                if (subdev->get_unmapped_area)
                        return subdev->get_unmapped_area(subdev, len, offset,
                                                         flags);

                break;
        }

        return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success (NULL upon failure). This function does
 * _not_ register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],   /* subdevices to concatenate */
                                   int num_devs,        /* number of subdevices      */
                                   const char *name)
{                               /* name for the new device   */
        int i;
        size_t size;
        struct mtd_concat *concat;
        uint32_t max_erasesize, curr_erasesize;
        int num_erase_region;

        printk(KERN_NOTICE "Concatenating MTD devices:\n");
        for (i = 0; i < num_devs; i++)
                printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
        printk(KERN_NOTICE "into device \"%s\"\n", name);

        /* allocate the device structure */
        size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
        concat = kzalloc(size, GFP_KERNEL);
        if (!concat) {
                printk
                    ("memory allocation error while creating concatenated device \"%s\"\n",
                     name);
                return NULL;
        }
        concat->subdev = (struct mtd_info **) (concat + 1);

        /*
         * Set up the new "super" device's MTD object structure, check for
         * incompatibilities between the subdevices.
         */
        concat->mtd.type = subdev[0]->type;
        concat->mtd.flags = subdev[0]->flags;
        concat->mtd.size = subdev[0]->size;
        concat->mtd.erasesize = subdev[0]->erasesize;
        concat->mtd.writesize = subdev[0]->writesize;
        concat->mtd.writebufsize = subdev[0]->writebufsize;
        concat->mtd.subpage_sft = subdev[0]->subpage_sft;
        concat->mtd.oobsize = subdev[0]->oobsize;
        concat->mtd.oobavail = subdev[0]->oobavail;
        if (subdev[0]->writev)
                concat->mtd.writev = concat_writev;
        if (subdev[0]->read_oob)
                concat->mtd.read_oob = concat_read_oob;
        if (subdev[0]->write_oob)
                concat->mtd.write_oob = concat_write_oob;
        if (subdev[0]->block_isbad)
                concat->mtd.block_isbad = concat_block_isbad;
        if (subdev[0]->block_markbad)
                concat->mtd.block_markbad = concat_block_markbad;

        concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

        concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

        concat->subdev[0] = subdev[0];

        for (i = 1; i < num_devs; i++) {
                if (concat->mtd.type != subdev[i]->type) {
                        kfree(concat);
                        printk("Incompatible device type on \"%s\"\n",
                               subdev[i]->name);
                        return NULL;
                }
                if (concat->mtd.flags != subdev[i]->flags) {
                        /*
                         * Expect all flags except MTD_WRITEABLE to be
                         * equal on all subdevices.
                         */
                        if ((concat->mtd.flags ^ subdev[i]->
                             flags) & ~MTD_WRITEABLE) {
                                kfree(concat);
                                printk("Incompatible device flags on \"%s\"\n",
                                       subdev[i]->name);
                                return NULL;
                        } else
                                /* if writeable attribute differs,
                                   make super device writeable */
                                concat->mtd.flags |=
                                    subdev[i]->flags & MTD_WRITEABLE;
                }

                /* only permit direct mapping if the BDIs are all the same
                 * - copy-mapping is still permitted
                 */
                if (concat->mtd.backing_dev_info !=
                    subdev[i]->backing_dev_info)
                        concat->mtd.backing_dev_info =
                                &default_backing_dev_info;

                concat->mtd.size += subdev[i]->size;
                concat->mtd.ecc_stats.badblocks +=
                        subdev[i]->ecc_stats.badblocks;
                if (concat->mtd.writesize   !=  subdev[i]->writesize ||
                    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
                    concat->mtd.oobsize    !=  subdev[i]->oobsize ||
                    !concat->mtd.read_oob  != !subdev[i]->read_oob ||
                    !concat->mtd.write_oob != !subdev[i]->write_oob) {
                        kfree(concat);
                        printk("Incompatible OOB or ECC data on \"%s\"\n",
                               subdev[i]->name);
                        return NULL;
                }
                concat->subdev[i] = subdev[i];

        }

        concat->mtd.ecclayout = subdev[0]->ecclayout;

        concat->num_subdev = num_devs;
        concat->mtd.name = name;

        concat->mtd.erase = concat_erase;
        concat->mtd.read = concat_read;
        concat->mtd.write = concat_write;
        concat->mtd.sync = concat_sync;
        concat->mtd.lock = concat_lock;
        concat->mtd.unlock = concat_unlock;
        concat->mtd.suspend = concat_suspend;
        concat->mtd.resume = concat_resume;
        concat->mtd.get_unmapped_area = concat_get_unmapped_area;

        /*
         * Combine the erase block size info of the subdevices:
         *
         * first, walk the map of the new device and see how
         * many changes in erase size we have
         */
        max_erasesize = curr_erasesize = subdev[0]->erasesize;
        num_erase_region = 1;
        for (i = 0; i < num_devs; i++) {
                if (subdev[i]->numeraseregions == 0) {
                        /* current subdevice has uniform erase size */
                        if (subdev[i]->erasesize != curr_erasesize) {
                                /* if it differs from the last subdevice's erase size, count it */
                                ++num_erase_region;
                                curr_erasesize = subdev[i]->erasesize;
                                if (curr_erasesize > max_erasesize)
                                        max_erasesize = curr_erasesize;
                        }
                } else {
                        /* current subdevice has variable erase size */
                        int j;
                        for (j = 0; j < subdev[i]->numeraseregions; j++) {

                                /* walk the list of erase regions, count any changes */
                                if (subdev[i]->eraseregions[j].erasesize !=
                                    curr_erasesize) {
                                        ++num_erase_region;
                                        curr_erasesize =
                                            subdev[i]->eraseregions[j].
                                            erasesize;
                                        if (curr_erasesize > max_erasesize)
                                                max_erasesize = curr_erasesize;
                                }
                        }
                }
        }

        if (num_erase_region == 1) {
                /*
                 * All subdevices have the same uniform erase size.
                 * This is easy:
                 */
                concat->mtd.erasesize = curr_erasesize;
                concat->mtd.numeraseregions = 0;
        } else {
                uint64_t tmp64;

                /*
                 * erase block size varies across the subdevices: allocate
                 * space to store the data describing the variable erase regions
                 */
                struct mtd_erase_region_info *erase_region_p;
                uint64_t begin, position;

                concat->mtd.erasesize = max_erasesize;
                concat->mtd.numeraseregions = num_erase_region;
                concat->mtd.eraseregions = erase_region_p =
                    kmalloc(num_erase_region *
                            sizeof (struct mtd_erase_region_info), GFP_KERNEL);
                if (!erase_region_p) {
                        kfree(concat);
                        printk
                            ("memory allocation error while creating erase region list"
                             " for device \"%s\"\n", name);
                        return NULL;
                }

                /*
                 * walk the map of the new device once more and fill
                 * in erase region info:
                 */
                curr_erasesize = subdev[0]->erasesize;
                begin = position = 0;
                for (i = 0; i < num_devs; i++) {
                        if (subdev[i]->numeraseregions == 0) {
                                /* current subdevice has uniform erase size */
                                if (subdev[i]->erasesize != curr_erasesize) {
                                        /*
                                         *  fill in an mtd_erase_region_info structure for the area
                                         *  we have walked so far:
                                         */
                                        erase_region_p->offset = begin;
                                        erase_region_p->erasesize =
                                            curr_erasesize;
                                        tmp64 = position - begin;
                                        do_div(tmp64, curr_erasesize);
                                        erase_region_p->numblocks = tmp64;
                                        begin = position;

                                        curr_erasesize = subdev[i]->erasesize;
                                        ++erase_region_p;
                                }
                                position += subdev[i]->size;
                        } else {
                                /* current subdevice has variable erase size */
                                int j;
                                for (j = 0; j < subdev[i]->numeraseregions; j++) {
                                        /* walk the list of erase regions, count any changes */
                                        if (subdev[i]->eraseregions[j].
                                            erasesize != curr_erasesize) {
                                                erase_region_p->offset = begin;
                                                erase_region_p->erasesize =
                                                    curr_erasesize;
                                                tmp64 = position - begin;
                                                do_div(tmp64, curr_erasesize);
                                                erase_region_p->numblocks = tmp64;
                                                begin = position;

                                                curr_erasesize =
                                                    subdev[i]->eraseregions[j].
                                                    erasesize;
                                                ++erase_region_p;
                                        }
                                        position +=
                                            subdev[i]->eraseregions[j].
                                            numblocks * (uint64_t)curr_erasesize;
                                }
                        }
                }
                /* Now write the final entry */
                erase_region_p->offset = begin;
                erase_region_p->erasesize = curr_erasesize;
                tmp64 = position - begin;
                do_div(tmp64, curr_erasesize);
                erase_region_p->numblocks = tmp64;
        }

        return &concat->mtd;
}
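
/*
 * Usage sketch (not from in-tree code; the two source devices and the
 * device name are hypothetical). A board driver that has already probed
 * its chips might glue them together and register the result:
 *
 *      struct mtd_info *parts[2] = { flash0_mtd, flash1_mtd };
 *      struct mtd_info *merged;
 *
 *      merged = mtd_concat_create(parts, 2, "board-flash");
 *      if (!merged)
 *              return -ENXIO;
 *      err = add_mtd_device(merged);
 *
 * Teardown reverses the steps:
 *
 *      del_mtd_device(merged);
 *      mtd_concat_destroy(merged);
 */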

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        if (concat->mtd.numeraseregions)
                kfree(concat->mtd.eraseregions);
        kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");