/*
 * MTD device concatenation layer
 *
 * Copyright (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>
#include <asm/div64.h>
#else
#include <div64.h>
#include <linux/compat.h>
#endif

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <ubi_uboot.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * How to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
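
/*
 * Illustrative layout (not part of the driver): for num_subdev == 2 this
 * is how mtd_concat_create() below uses the macro, placing the pointer
 * array immediately after the structure in a single allocation, so one
 * kfree() releases both:
 *
 *	size = SIZEOF_STRUCT_MTD_CONCAT(2);
 *	concat = kzalloc(size, GFP_KERNEL);
 *	concat->subdev = (struct mtd_info **)(concat + 1);
 */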

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro;
 * this works because the embedded mtd object is the first member,
 * so both pointers refer to the same address.
 */
#define CONCAT(x)	((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass it on to the subdevice.
 */
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}
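
/*
 * Worked example (illustrative only): with two 1 MiB subdevices, a read
 * of 0x3000 bytes at offset 0xFF000 first takes 0x1000 bytes from the
 * end of subdevice 0 (size = subdev->size - from), then, with from reset
 * to 0 and buf advanced, takes the remaining 0x2000 bytes from the start
 * of subdevice 1.
 */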

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

#ifndef __UBOOT__
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/*
	 * Check alignment: do_div() divides __to in place and returns
	 * the remainder, which is all we care about here.
	 */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size;	/* store for later use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
#endif

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			/* advance by this pass's count, as for datbuf */
			devops.oobbuf += devops.oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		/* accumulate data and OOB counts separately */
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	/* Under U-Boot there is no waiter to wake */
#ifndef __UBOOT__
	wake_up((wait_queue_head_t *) instr->priv);
#endif
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * The simplest way to handle the (possibly asynchronous) erase
	 * is to wait on a wait queue until the operation completes.
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++)
			;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i)
			;
		--i;

		/*
		 * check if the ending offset is aligned to this region's
		 * erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}

		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}
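
/*
 * Illustrative alignment check (not part of the driver): with a uniform
 * 128 KiB erase block, erasesize - 1 == 0x1FFFF, so an erase request at
 * addr == 0x20000 passes the mask test above while addr == 0x21000 fails
 * it, because 0x21000 & 0x1FFFF == 0x1000 != 0.
 */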

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

#ifndef __UBOOT__
static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}
#endif

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * Try to support NOMMU mmaps on concatenated devices;
 * we do not support subdev-spanning mappings, as we cannot
 * guarantee that they would work.
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
#ifndef __UBOOT__
				   const char *name)	/* name for the new device */
#else
				   char *name)
#endif
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	debug("Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	debug("into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
#ifndef __UBOOT__
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
#endif
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

#ifndef __UBOOT__
	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
#endif

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

#ifndef __UBOOT__
		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
			    &default_backing_dev_info;
#endif

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
#ifndef __UBOOT__
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
#endif
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's
				 * erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count
				 * any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}
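
	/*
	 * Worked example (illustrative only): concatenating two 8 MiB
	 * chips with uniform 64 KiB and 128 KiB erase blocks yields
	 * num_erase_region == 2; the code below then emits one
	 * mtd_erase_region_info per run of equal erase sizes, i.e.
	 * 128 blocks of 64 KiB followed by 64 blocks of 128 KiB.
	 */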

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase
		 * regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof(struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info
					 * structure for the area we have
					 * walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions,
					 * record any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}
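
/*
 * Minimal usage sketch (illustrative only): a board or platform driver
 * might combine two already-registered chips into one device and then
 * register the result itself, since mtd_concat_create() deliberately
 * does not. The names "flash0" and "flash1" and the lookup via
 * get_mtd_device_nm() are assumptions for this example; error handling
 * is omitted for brevity.
 *
 *	struct mtd_info *parts[2];
 *	struct mtd_info *merged;
 *
 *	parts[0] = get_mtd_device_nm("flash0");
 *	parts[1] = get_mtd_device_nm("flash1");
 *	merged = mtd_concat_create(parts, 2, "flash-concat");
 *	if (merged)
 *		add_mtd_device(merged);
 *
 * On teardown, unregister first, then free:
 *
 *	del_mtd_device(merged);
 *	mtd_concat_destroy(merged);
 */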

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");