#include <linux/crc32.h>
#include "ubi.h"
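
/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */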
23size_t ubi_calc_fm_size(struct ubi_device *ubi)
24{
25 size_t size;
26
	size = sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
34 return roundup(size, ubi->leb_size);
35}
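
/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */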
46static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
47{
48 struct ubi_vid_hdr *new;
49
50 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
51 if (!new)
52 goto out;
53
54 new->vol_type = UBI_VID_DYNAMIC;
55 new->vol_id = cpu_to_be32(vol_id);
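
	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */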
60 new->compat = UBI_COMPAT_DELETE;
61
62out:
63 return new;
64}
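
/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */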
76static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
77 int pnum, int ec, int scrub)
78{
79 struct ubi_ainf_peb *aeb;
80
81 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
82 if (!aeb)
83 return -ENOMEM;
84
85 aeb->pnum = pnum;
86 aeb->ec = ec;
87 aeb->lnum = -1;
88 aeb->scrub = scrub;
89 aeb->copy_flag = aeb->sqnum = 0;
90
91 ai->ec_sum += aeb->ec;
92 ai->ec_count++;
93
94 if (ai->max_ec < aeb->ec)
95 ai->max_ec = aeb->ec;
96
97 if (ai->min_ec > aeb->ec)
98 ai->min_ec = aeb->ec;
99
100 list_add_tail(&aeb->u.list, list);
101
102 return 0;
103}
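
/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */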
117static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
118 int used_ebs, int data_pad, u8 vol_type,
119 int last_eb_bytes)
120{
121 struct ubi_ainf_volume *av;
122 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
123
	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
133
134 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
135 if (!av)
136 goto out;
137
138 av->highest_lnum = av->leb_count = 0;
139 av->vol_id = vol_id;
140 av->used_ebs = used_ebs;
141 av->data_pad = data_pad;
142 av->last_data_size = last_eb_bytes;
143 av->compat = 0;
144 av->vol_type = vol_type;
145 av->root = RB_ROOT;
146
147 dbg_bld("found volume (ID %i)", vol_id);
148
149 rb_link_node(&av->rb, parent, p);
150 rb_insert_color(&av->rb, &ai->volumes);
151
152out:
153 return av;
154}
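
/**
 * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned AEB
 * @av: target attach volume
 */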
163static void assign_aeb_to_av(struct ubi_attach_info *ai,
164 struct ubi_ainf_peb *aeb,
165 struct ubi_ainf_volume *av)
166{
167 struct ubi_ainf_peb *tmp_aeb;
168 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
169
170 p = &av->root.rb_node;
171 while (*p) {
172 parent = *p;
173
174 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
175 if (aeb->lnum != tmp_aeb->lnum) {
176 if (aeb->lnum < tmp_aeb->lnum)
177 p = &(*p)->rb_left;
178 else
179 p = &(*p)->rb_right;
180
181 continue;
182 } else
183 break;
184 }
185
186 list_del(&aeb->u.list);
187 av->leb_count++;
188
189 rb_link_node(&aeb->u.rb, parent, p);
190 rb_insert_color(&aeb->u.rb, &av->root);
191}
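
/**
 * update_vol - inserts or updates an LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */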
203static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
204 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
205 struct ubi_ainf_peb *new_aeb)
206{
207 struct rb_node **p = &av->root.rb_node, *parent = NULL;
208 struct ubi_ainf_peb *aeb, *victim;
209 int cmp_res;
210
211 while (*p) {
212 parent = *p;
213 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
214
215 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
216 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
217 p = &(*p)->rb_left;
218 else
219 p = &(*p)->rb_right;
220
221 continue;
222 }
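
		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */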
228 if (aeb->pnum == new_aeb->pnum) {
229 ubi_assert(aeb->lnum == new_aeb->lnum);
230 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
231
232 return 0;
233 }
234
235 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
236 if (cmp_res < 0)
237 return cmp_res;
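
		/* new_aeb is newer */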
240 if (cmp_res & 1) {
241 victim = kmem_cache_alloc(ai->aeb_slab_cache,
242 GFP_KERNEL);
243 if (!victim)
244 return -ENOMEM;
245
246 victim->ec = aeb->ec;
247 victim->pnum = aeb->pnum;
248 list_add_tail(&victim->u.list, &ai->erase);
249
250 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);
253
254 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
255 av->vol_id, aeb->lnum, new_aeb->pnum);
256
257 aeb->ec = new_aeb->ec;
258 aeb->pnum = new_aeb->pnum;
259 aeb->copy_flag = new_vh->copy_flag;
260 aeb->scrub = new_aeb->scrub;
261 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
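
		/* new_aeb is older */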
264 } else {
265 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
266 av->vol_id, aeb->lnum, new_aeb->pnum);
267 list_add_tail(&new_aeb->u.list, &ai->erase);
268 }
269
270 return 0;
271 }
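
	/* This LEB is new, let's add it to the volume */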
274 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
275 av->highest_lnum = be32_to_cpu(new_vh->lnum);
276 av->last_data_size = be32_to_cpu(new_vh->data_size);
277 }
278
279 if (av->vol_type == UBI_STATIC_VOLUME)
280 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
281
282 av->leb_count++;
283
284 rb_link_node(&new_aeb->u.rb, parent, p);
285 rb_insert_color(&new_aeb->u.rb, &av->root);
286
287 return 0;
288}
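
/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */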
299static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
300 struct ubi_vid_hdr *new_vh,
301 struct ubi_ainf_peb *new_aeb)
302{
303 struct ubi_ainf_volume *av, *tmp_av = NULL;
304 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
305 int found = 0;
306
307 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
308 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
309 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
310
311 return 0;
312 }
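
	/* Find the volume this LEB belongs to */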
315 while (*p) {
316 parent = *p;
317 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
318
319 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
320 p = &(*p)->rb_left;
321 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
322 p = &(*p)->rb_right;
323 else {
324 found = 1;
325 break;
326 }
327 }
328
329 if (found)
330 av = tmp_av;
331 else {
332 ubi_err("orphaned volume in fastmap pool!");
333 return UBI_BAD_FASTMAP;
334 }
335
336 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
337
338 return update_vol(ubi, ai, av, new_vh, new_aeb);
339}
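
/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: the PEB to be unmapped
 */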
349static void unmap_peb(struct ubi_attach_info *ai, int pnum)
350{
351 struct ubi_ainf_volume *av;
352 struct rb_node *node, *node2;
353 struct ubi_ainf_peb *aeb;
354
355 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
356 av = rb_entry(node, struct ubi_ainf_volume, rb);
357
358 for (node2 = rb_first(&av->root); node2;
359 node2 = rb_next(node2)) {
360 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
361 if (aeb->pnum == pnum) {
362 rb_erase(&aeb->u.rb, &av->root);
363 kmem_cache_free(ai->aeb_slab_cache, aeb);
364 return;
365 }
366 }
367 }
368}
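
/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @eba_orphans: list of PEBs which need to be scanned
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */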
383static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
384 int *pebs, int pool_size, unsigned long long *max_sqnum,
385 struct list_head *eba_orphans, struct list_head *free)
386{
387 struct ubi_vid_hdr *vh;
388 struct ubi_ec_hdr *ech;
389 struct ubi_ainf_peb *new_aeb, *tmp_aeb;
390 int i, pnum, err, found_orphan, ret = 0;
391
392 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
393 if (!ech)
394 return -ENOMEM;
395
396 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
397 if (!vh) {
398 kfree(ech);
399 return -ENOMEM;
400 }
401
402 dbg_bld("scanning fastmap pool: size = %i", pool_size);
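
	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */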
408 for (i = 0; i < pool_size; i++) {
409 int scrub = 0;
410
411 pnum = be32_to_cpu(pebs[i]);
412
413 if (ubi_io_is_bad(ubi, pnum)) {
414 ubi_err("bad PEB in fastmap pool!");
415 ret = UBI_BAD_FASTMAP;
416 goto out;
417 }
418
		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err("unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
			ubi_err("bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}
434
435 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
436 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
437 unsigned long long ec = be64_to_cpu(ech->ec);
438 unmap_peb(ai, pnum);
439 dbg_bld("Adding PEB to free: %i", pnum);
440 if (err == UBI_IO_FF_BITFLIPS)
441 add_aeb(ai, free, pnum, ec, 1);
442 else
443 add_aeb(ai, free, pnum, ec, 0);
444 continue;
445 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
446 dbg_bld("Found non empty PEB:%i in pool", pnum);
447
448 if (err == UBI_IO_BITFLIPS)
449 scrub = 1;
450
451 found_orphan = 0;
452 list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
453 if (tmp_aeb->pnum == pnum) {
454 found_orphan = 1;
455 break;
456 }
457 }
			if (found_orphan) {
				list_del(&tmp_aeb->u.list);
				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
			}
462
463 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
464 GFP_KERNEL);
465 if (!new_aeb) {
466 ret = -ENOMEM;
467 goto out;
468 }
469
470 new_aeb->ec = be64_to_cpu(ech->ec);
471 new_aeb->pnum = pnum;
472 new_aeb->lnum = be32_to_cpu(vh->lnum);
473 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
474 new_aeb->copy_flag = vh->copy_flag;
475 new_aeb->scrub = scrub;
476
477 if (*max_sqnum < new_aeb->sqnum)
478 *max_sqnum = new_aeb->sqnum;
479
480 err = process_pool_aeb(ubi, ai, vh, new_aeb);
481 if (err) {
482 ret = err > 0 ? UBI_BAD_FASTMAP : err;
483 goto out;
484 }
485 } else {
486
			ubi_err("fastmap pool contains damaged PEBs!");
488 ret = err > 0 ? UBI_BAD_FASTMAP : err;
489 goto out;
490 }
491
492 }
493
494out:
495 ubi_free_vid_hdr(ubi, vh);
496 kfree(ech);
497 return ret;
498}
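
/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */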
504static int count_fastmap_pebs(struct ubi_attach_info *ai)
505{
506 struct ubi_ainf_peb *aeb;
507 struct ubi_ainf_volume *av;
508 struct rb_node *rb1, *rb2;
509 int n = 0;
510
511 list_for_each_entry(aeb, &ai->erase, u.list)
512 n++;
513
514 list_for_each_entry(aeb, &ai->free, u.list)
515 n++;
516
517 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
518 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
519 n++;
520
521 return n;
522}
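
/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */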
533static int ubi_attach_fastmap(struct ubi_device *ubi,
534 struct ubi_attach_info *ai,
535 struct ubi_fastmap_layout *fm)
536{
537 struct list_head used, eba_orphans, free;
538 struct ubi_ainf_volume *av;
539 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
540 struct ubi_ec_hdr *ech;
541 struct ubi_fm_sb *fmsb;
542 struct ubi_fm_hdr *fmhdr;
543 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
544 struct ubi_fm_ec *fmec;
545 struct ubi_fm_volhdr *fmvhdr;
546 struct ubi_fm_eba *fm_eba;
547 int ret, i, j, pool_size, wl_pool_size;
548 size_t fm_pos = 0, fm_size = ubi->fm_size;
549 unsigned long long max_sqnum = 0;
550 void *fm_raw = ubi->fm_buf;
551
552 INIT_LIST_HEAD(&used);
553 INIT_LIST_HEAD(&free);
554 INIT_LIST_HEAD(&eba_orphans);
555 INIT_LIST_HEAD(&ai->corr);
556 INIT_LIST_HEAD(&ai->free);
557 INIT_LIST_HEAD(&ai->erase);
558 INIT_LIST_HEAD(&ai->alien);
559 ai->volumes = RB_ROOT;
560 ai->min_ec = UBI_MAX_ERASECOUNTER;
561
562 ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
563 sizeof(struct ubi_ainf_peb),
564 0, 0, NULL);
565 if (!ai->aeb_slab_cache) {
566 ret = -ENOMEM;
567 goto fail;
568 }
569
570 fmsb = (struct ubi_fm_sb *)(fm_raw);
571 ai->max_sqnum = fmsb->sqnum;
572 fm_pos += sizeof(struct ubi_fm_sb);
573 if (fm_pos >= fm_size)
574 goto fail_bad;
575
576 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
577 fm_pos += sizeof(*fmhdr);
578 if (fm_pos >= fm_size)
579 goto fail_bad;
580
581 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
582 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
583 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
584 goto fail_bad;
585 }
586
587 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
588 fm_pos += sizeof(*fmpl1);
589 if (fm_pos >= fm_size)
590 goto fail_bad;
591 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
592 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
593 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
594 goto fail_bad;
595 }
596
597 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
598 fm_pos += sizeof(*fmpl2);
599 if (fm_pos >= fm_size)
600 goto fail_bad;
601 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
602 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
603 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
604 goto fail_bad;
605 }
606
607 pool_size = be16_to_cpu(fmpl1->size);
608 wl_pool_size = be16_to_cpu(fmpl2->size);
609 fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
610 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
611
612 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
613 ubi_err("bad pool size: %i", pool_size);
614 goto fail_bad;
615 }
616
617 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
618 ubi_err("bad WL pool size: %i", wl_pool_size);
619 goto fail_bad;
620 }
621
622
623 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
624 fm->max_pool_size < 0) {
625 ubi_err("bad maximal pool size: %i", fm->max_pool_size);
626 goto fail_bad;
627 }
628
629 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
630 fm->max_wl_pool_size < 0) {
631 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
632 goto fail_bad;
633 }
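
	/* read EC values from free list */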
636 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
637 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
638 fm_pos += sizeof(*fmec);
639 if (fm_pos >= fm_size)
640 goto fail_bad;
641
642 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
643 be32_to_cpu(fmec->ec), 0);
644 }
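
	/* read EC values from used list */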
647 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
648 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
649 fm_pos += sizeof(*fmec);
650 if (fm_pos >= fm_size)
651 goto fail_bad;
652
653 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
654 be32_to_cpu(fmec->ec), 0);
655 }
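
	/* read EC values from scrub list */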
658 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
659 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
660 fm_pos += sizeof(*fmec);
661 if (fm_pos >= fm_size)
662 goto fail_bad;
663
664 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
665 be32_to_cpu(fmec->ec), 1);
666 }
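
	/* read EC values from erase list */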
669 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
670 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
671 fm_pos += sizeof(*fmec);
672 if (fm_pos >= fm_size)
673 goto fail_bad;
674
675 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
676 be32_to_cpu(fmec->ec), 1);
677 }
678
679 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
680 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
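
	/* Iterate over all volumes and read their EBA table */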
683 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
684 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
685 fm_pos += sizeof(*fmvhdr);
686 if (fm_pos >= fm_size)
687 goto fail_bad;
688
689 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
690 ubi_err("bad fastmap vol header magic: 0x%x, " \
691 "expected: 0x%x",
692 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
693 goto fail_bad;
694 }
695
696 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
697 be32_to_cpu(fmvhdr->used_ebs),
698 be32_to_cpu(fmvhdr->data_pad),
699 fmvhdr->vol_type,
700 be32_to_cpu(fmvhdr->last_eb_bytes));
701
702 if (!av)
703 goto fail_bad;
704
705 ai->vols_found++;
706 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
707 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
708
709 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
710 fm_pos += sizeof(*fm_eba);
711 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
712 if (fm_pos >= fm_size)
713 goto fail_bad;
714
715 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
716 ubi_err("bad fastmap EBA header magic: 0x%x, " \
717 "expected: 0x%x",
718 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
719 goto fail_bad;
720 }
721
722 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
723 int pnum = be32_to_cpu(fm_eba->pnum[j]);
724
725 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
726 continue;
727
728 aeb = NULL;
729 list_for_each_entry(tmp_aeb, &used, u.list) {
730 if (tmp_aeb->pnum == pnum)
731 aeb = tmp_aeb;
732 }
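
			/* This can happen if a PEB is already in an EBA known
			 * by this fastmap but the PEB itself is not in the used
			 * list.
			 * In this case the PEB can be within the fastmap pool
			 * or while writing the fastmap it was in the protection
			 * queue.
			 */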
741 if (!aeb) {
742 aeb = kmem_cache_alloc(ai->aeb_slab_cache,
743 GFP_KERNEL);
744 if (!aeb) {
745 ret = -ENOMEM;
746
747 goto fail;
748 }
749
750 aeb->lnum = j;
751 aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
752 aeb->ec = -1;
753 aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
754 list_add_tail(&aeb->u.list, &eba_orphans);
755 continue;
756 }
757
758 aeb->lnum = j;
759
760 if (av->highest_lnum <= aeb->lnum)
761 av->highest_lnum = aeb->lnum;
762
763 assign_aeb_to_av(ai, aeb, av);
764
765 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
766 aeb->pnum, aeb->lnum, av->vol_id);
767 }
768
769 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
770 if (!ech) {
771 ret = -ENOMEM;
772 goto fail;
773 }
774
775 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
776 u.list) {
777 int err;
778
779 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
780 ubi_err("bad PEB in fastmap EBA orphan list");
781 ret = UBI_BAD_FASTMAP;
782 kfree(ech);
783 goto fail;
784 }
785
786 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
787 if (err && err != UBI_IO_BITFLIPS) {
788 ubi_err("unable to read EC header! PEB:%i " \
789 "err:%i", tmp_aeb->pnum, err);
790 ret = err > 0 ? UBI_BAD_FASTMAP : err;
791 kfree(ech);
792
793 goto fail;
794 } else if (err == UBI_IO_BITFLIPS)
795 tmp_aeb->scrub = 1;
796
797 tmp_aeb->ec = be64_to_cpu(ech->ec);
798 assign_aeb_to_av(ai, tmp_aeb, av);
799 }
800
801 kfree(ech);
802 }
803
804 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
805 &eba_orphans, &free);
806 if (ret)
807 goto fail;
808
809 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
810 &eba_orphans, &free);
811 if (ret)
812 goto fail;
813
814 if (max_sqnum > ai->max_sqnum)
815 ai->max_sqnum = max_sqnum;
816
817 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
818 list_move_tail(&tmp_aeb->u.list, &ai->free);
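
	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */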
826 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
827 ai->bad_peb_count - fm->used_blocks))
828 goto fail_bad;
829
830 return 0;
831
832fail_bad:
833 ret = UBI_BAD_FASTMAP;
834fail:
835 return ret;
836}
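
/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */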
848int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
849 int fm_anchor)
850{
851 struct ubi_fm_sb *fmsb, *fmsb2;
852 struct ubi_vid_hdr *vh;
853 struct ubi_ec_hdr *ech;
854 struct ubi_fastmap_layout *fm;
855 int i, used_blocks, pnum, ret = 0;
856 size_t fm_size;
857 __be32 crc, tmp_crc;
858 unsigned long long sqnum = 0;
859
860 mutex_lock(&ubi->fm_mutex);
861 memset(ubi->fm_buf, 0, ubi->fm_size);
862
863 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
864 if (!fmsb) {
865 ret = -ENOMEM;
866 goto out;
867 }
868
869 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
870 if (!fm) {
871 ret = -ENOMEM;
872 kfree(fmsb);
873 goto out;
874 }
875
876 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
877 if (ret && ret != UBI_IO_BITFLIPS)
878 goto free_fm_sb;
879 else if (ret == UBI_IO_BITFLIPS)
880 fm->to_be_tortured[0] = 1;
881
882 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
883 ubi_err("bad super block magic: 0x%x, expected: 0x%x",
884 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
885 ret = UBI_BAD_FASTMAP;
886 goto free_fm_sb;
887 }
888
889 if (fmsb->version != UBI_FM_FMT_VERSION) {
890 ubi_err("bad fastmap version: %i, expected: %i",
891 fmsb->version, UBI_FM_FMT_VERSION);
892 ret = UBI_BAD_FASTMAP;
893 goto free_fm_sb;
894 }
895
896 used_blocks = be32_to_cpu(fmsb->used_blocks);
897 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
898 ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
899 ret = UBI_BAD_FASTMAP;
900 goto free_fm_sb;
901 }
902
903 fm_size = ubi->leb_size * used_blocks;
904 if (fm_size != ubi->fm_size) {
905 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
906 ubi->fm_size);
907 ret = UBI_BAD_FASTMAP;
908 goto free_fm_sb;
909 }
910
911 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
912 if (!ech) {
913 ret = -ENOMEM;
914 goto free_fm_sb;
915 }
916
917 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
918 if (!vh) {
919 ret = -ENOMEM;
920 goto free_hdr;
921 }
922
923 for (i = 0; i < used_blocks; i++) {
924 pnum = be32_to_cpu(fmsb->block_loc[i]);
925
926 if (ubi_io_is_bad(ubi, pnum)) {
927 ret = UBI_BAD_FASTMAP;
928 goto free_hdr;
929 }
930
931 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
932 if (ret && ret != UBI_IO_BITFLIPS) {
933 ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
934 i, pnum);
935 if (ret > 0)
936 ret = UBI_BAD_FASTMAP;
937 goto free_hdr;
938 } else if (ret == UBI_IO_BITFLIPS)
939 fm->to_be_tortured[i] = 1;
940
941 if (!ubi->image_seq)
942 ubi->image_seq = be32_to_cpu(ech->image_seq);
943
944 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
945 ret = UBI_BAD_FASTMAP;
946 goto free_hdr;
947 }
948
949 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
950 if (ret && ret != UBI_IO_BITFLIPS) {
951 ubi_err("unable to read fastmap block# %i (PEB: %i)",
952 i, pnum);
953 goto free_hdr;
954 }
955
956 if (i == 0) {
957 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
958 ubi_err("bad fastmap anchor vol_id: 0x%x," \
959 " expected: 0x%x",
960 be32_to_cpu(vh->vol_id),
961 UBI_FM_SB_VOLUME_ID);
962 ret = UBI_BAD_FASTMAP;
963 goto free_hdr;
964 }
965 } else {
966 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
967 ubi_err("bad fastmap data vol_id: 0x%x," \
968 " expected: 0x%x",
969 be32_to_cpu(vh->vol_id),
970 UBI_FM_DATA_VOLUME_ID);
971 ret = UBI_BAD_FASTMAP;
972 goto free_hdr;
973 }
974 }
975
976 if (sqnum < be64_to_cpu(vh->sqnum))
977 sqnum = be64_to_cpu(vh->sqnum);
978
979 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
980 ubi->leb_start, ubi->leb_size);
981 if (ret && ret != UBI_IO_BITFLIPS) {
982 ubi_err("unable to read fastmap block# %i (PEB: %i, " \
983 "err: %i)", i, pnum, ret);
984 goto free_hdr;
985 }
986 }
987
988 kfree(fmsb);
989 fmsb = NULL;
990
991 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
992 tmp_crc = be32_to_cpu(fmsb2->data_crc);
993 fmsb2->data_crc = 0;
994 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
995 if (crc != tmp_crc) {
996 ubi_err("fastmap data CRC is invalid");
997 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
998 ret = UBI_BAD_FASTMAP;
999 goto free_hdr;
1000 }
1001
1002 fmsb2->sqnum = sqnum;
1003
1004 fm->used_blocks = used_blocks;
1005
1006 ret = ubi_attach_fastmap(ubi, ai, fm);
1007 if (ret) {
1008 if (ret > 0)
1009 ret = UBI_BAD_FASTMAP;
1010 goto free_hdr;
1011 }
1012
1013 for (i = 0; i < used_blocks; i++) {
1014 struct ubi_wl_entry *e;
1015
1016 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1017 if (!e) {
1018 while (i--)
1019 kfree(fm->e[i]);
1020
1021 ret = -ENOMEM;
1022 goto free_hdr;
1023 }
1024
1025 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1026 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1027 fm->e[i] = e;
1028 }
1029
1030 ubi->fm = fm;
1031 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1032 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1033 ubi_msg("attached by fastmap");
1034 ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
1035 ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
1036 ubi->fm_disabled = 0;
1037
1038 ubi_free_vid_hdr(ubi, vh);
1039 kfree(ech);
1040out:
1041 mutex_unlock(&ubi->fm_mutex);
1042 if (ret == UBI_BAD_FASTMAP)
1043 ubi_err("Attach by fastmap failed, doing a full scan!");
1044 return ret;
1045
1046free_hdr:
1047 ubi_free_vid_hdr(ubi, vh);
1048 kfree(ech);
1049free_fm_sb:
1050 kfree(fmsb);
1051 kfree(fm);
1052 goto out;
1053}
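
/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */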
1062static int ubi_write_fastmap(struct ubi_device *ubi,
1063 struct ubi_fastmap_layout *new_fm)
1064{
1065 size_t fm_pos = 0;
1066 void *fm_raw;
1067 struct ubi_fm_sb *fmsb;
1068 struct ubi_fm_hdr *fmh;
1069 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1070 struct ubi_fm_ec *fec;
1071 struct ubi_fm_volhdr *fvh;
1072 struct ubi_fm_eba *feba;
1073 struct rb_node *node;
1074 struct ubi_wl_entry *wl_e;
1075 struct ubi_volume *vol;
1076 struct ubi_vid_hdr *avhdr, *dvhdr;
1077 struct ubi_work *ubi_wrk;
1078 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1079 int scrub_peb_count, erase_peb_count;
1080
1081 fm_raw = ubi->fm_buf;
1082 memset(ubi->fm_buf, 0, ubi->fm_size);
1083
1084 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1085 if (!avhdr) {
1086 ret = -ENOMEM;
1087 goto out;
1088 }
1089
1090 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1091 if (!dvhdr) {
1092 ret = -ENOMEM;
1093 goto out_kfree;
1094 }
1095
1096 spin_lock(&ubi->volumes_lock);
1097 spin_lock(&ubi->wl_lock);
1098
1099 fmsb = (struct ubi_fm_sb *)fm_raw;
1100 fm_pos += sizeof(*fmsb);
1101 ubi_assert(fm_pos <= ubi->fm_size);
1102
1103 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1104 fm_pos += sizeof(*fmh);
1105 ubi_assert(fm_pos <= ubi->fm_size);
1106
1107 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1108 fmsb->version = UBI_FM_FMT_VERSION;
1109 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1110
1111 fmsb->sqnum = 0;
1112
1113 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1114 free_peb_count = 0;
1115 used_peb_count = 0;
1116 scrub_peb_count = 0;
1117 erase_peb_count = 0;
1118 vol_count = 0;
1119
1120 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1121 fm_pos += sizeof(*fmpl1);
1122 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1123 fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1124 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1125
1126 for (i = 0; i < ubi->fm_pool.size; i++)
1127 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1128
1129 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1130 fm_pos += sizeof(*fmpl2);
1131 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1132 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1133 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1134
1135 for (i = 0; i < ubi->fm_wl_pool.size; i++)
1136 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1137
1138 for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1139 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1140 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1141
1142 fec->pnum = cpu_to_be32(wl_e->pnum);
1143 fec->ec = cpu_to_be32(wl_e->ec);
1144
1145 free_peb_count++;
1146 fm_pos += sizeof(*fec);
1147 ubi_assert(fm_pos <= ubi->fm_size);
1148 }
1149 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1150
1151 for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1152 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1153 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1154
1155 fec->pnum = cpu_to_be32(wl_e->pnum);
1156 fec->ec = cpu_to_be32(wl_e->ec);
1157
1158 used_peb_count++;
1159 fm_pos += sizeof(*fec);
1160 ubi_assert(fm_pos <= ubi->fm_size);
1161 }
1162 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1163
1164 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1165 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1166 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1167
1168 fec->pnum = cpu_to_be32(wl_e->pnum);
1169 fec->ec = cpu_to_be32(wl_e->ec);
1170
1171 scrub_peb_count++;
1172 fm_pos += sizeof(*fec);
1173 ubi_assert(fm_pos <= ubi->fm_size);
1174 }
1175 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1176
1177
1178 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1179 if (ubi_is_erase_work(ubi_wrk)) {
1180 wl_e = ubi_wrk->e;
1181 ubi_assert(wl_e);
1182
1183 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1184
1185 fec->pnum = cpu_to_be32(wl_e->pnum);
1186 fec->ec = cpu_to_be32(wl_e->ec);
1187
1188 erase_peb_count++;
1189 fm_pos += sizeof(*fec);
1190 ubi_assert(fm_pos <= ubi->fm_size);
1191 }
1192 }
1193 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1194
1195 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1196 vol = ubi->volumes[i];
1197
1198 if (!vol)
1199 continue;
1200
1201 vol_count++;
1202
1203 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1204 fm_pos += sizeof(*fvh);
1205 ubi_assert(fm_pos <= ubi->fm_size);
1206
1207 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1208 fvh->vol_id = cpu_to_be32(vol->vol_id);
1209 fvh->vol_type = vol->vol_type;
1210 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1211 fvh->data_pad = cpu_to_be32(vol->data_pad);
1212 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1213
1214 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1215 vol->vol_type == UBI_STATIC_VOLUME);
1216
1217 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1218 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1219 ubi_assert(fm_pos <= ubi->fm_size);
1220
1221 for (j = 0; j < vol->reserved_pebs; j++)
1222 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1223
1224 feba->reserved_pebs = cpu_to_be32(j);
1225 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1226 }
1227 fmh->vol_count = cpu_to_be32(vol_count);
1228 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1229
1230 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1231 avhdr->lnum = 0;
1232
1233 spin_unlock(&ubi->wl_lock);
1234 spin_unlock(&ubi->volumes_lock);
1235
1236 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1237 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1238 if (ret) {
1239 ubi_err("unable to write vid_hdr to fastmap SB!");
1240 goto out_kfree;
1241 }
1242
1243 for (i = 0; i < new_fm->used_blocks; i++) {
1244 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1245 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1246 }
1247
1248 fmsb->data_crc = 0;
1249 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1250 ubi->fm_size));
1251
1252 for (i = 1; i < new_fm->used_blocks; i++) {
1253 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1254 dvhdr->lnum = cpu_to_be32(i);
1255 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1256 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1257 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1258 if (ret) {
1259 ubi_err("unable to write vid_hdr to PEB %i!",
1260 new_fm->e[i]->pnum);
1261 goto out_kfree;
1262 }
1263 }
1264
1265 for (i = 0; i < new_fm->used_blocks; i++) {
1266 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1267 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1268 if (ret) {
1269 ubi_err("unable to write fastmap to PEB %i!",
1270 new_fm->e[i]->pnum);
1271 goto out_kfree;
1272 }
1273 }
1274
1275 ubi_assert(new_fm);
1276 ubi->fm = new_fm;
1277
1278 dbg_bld("fastmap written!");
1279
1280out_kfree:
1281 ubi_free_vid_hdr(ubi, avhdr);
1282 ubi_free_vid_hdr(ubi, dvhdr);
1283out:
1284 return ret;
1285}
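
/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */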
1294static int erase_block(struct ubi_device *ubi, int pnum)
1295{
1296 int ret;
1297 struct ubi_ec_hdr *ec_hdr;
1298 long long ec;
1299
1300 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1301 if (!ec_hdr)
1302 return -ENOMEM;
1303
1304 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1305 if (ret < 0)
1306 goto out;
1307 else if (ret && ret != UBI_IO_BITFLIPS) {
1308 ret = -EINVAL;
1309 goto out;
1310 }
1311
1312 ret = ubi_io_sync_erase(ubi, pnum, 0);
1313 if (ret < 0)
1314 goto out;
1315
1316 ec = be64_to_cpu(ec_hdr->ec);
1317 ec += ret;
1318 if (ec > UBI_MAX_ERASECOUNTER) {
1319 ret = -EINVAL;
1320 goto out;
1321 }
1322
1323 ec_hdr->ec = cpu_to_be64(ec);
1324 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1325 if (ret < 0)
1326 goto out;
1327
1328 ret = ec;
1329out:
1330 kfree(ec_hdr);
1331 return ret;
1332}
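
/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */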
1341static int invalidate_fastmap(struct ubi_device *ubi,
1342 struct ubi_fastmap_layout *fm)
1343{
1344 int ret, i;
1345 struct ubi_vid_hdr *vh;
1346
1347 ret = erase_block(ubi, fm->e[0]->pnum);
1348 if (ret < 0)
1349 return ret;
1350
1351 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1352 if (!vh)
1353 return -ENOMEM;
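
	/* Deleting the current fastmap SB is not enough, an old SB may exist,
	 * so write a (corrupted) SB such that fastmap will find it and fall
	 * back to scanning mode in any case.
	 */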
1358 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1359 ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
1360
1361 for (i = 0; i < fm->used_blocks; i++)
1362 ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
1363
1364 return ret;
1365}
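
/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */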
1374int ubi_update_fastmap(struct ubi_device *ubi)
1375{
1376 int ret, i;
1377 struct ubi_fastmap_layout *new_fm, *old_fm;
1378 struct ubi_wl_entry *tmp_e;
1379
1380 mutex_lock(&ubi->fm_mutex);
1381
1382 ubi_refill_pools(ubi);
1383
1384 if (ubi->ro_mode || ubi->fm_disabled) {
1385 mutex_unlock(&ubi->fm_mutex);
1386 return 0;
1387 }
1388
1389 ret = ubi_ensure_anchor_pebs(ubi);
1390 if (ret) {
1391 mutex_unlock(&ubi->fm_mutex);
1392 return ret;
1393 }
1394
1395 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1396 if (!new_fm) {
1397 mutex_unlock(&ubi->fm_mutex);
1398 return -ENOMEM;
1399 }
1400
1401 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1402
1403 for (i = 0; i < new_fm->used_blocks; i++) {
1404 new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1405 if (!new_fm->e[i]) {
1406 while (i--)
1407 kfree(new_fm->e[i]);
1408
1409 kfree(new_fm);
1410 mutex_unlock(&ubi->fm_mutex);
1411 return -ENOMEM;
1412 }
1413 }
1414
1415 old_fm = ubi->fm;
1416 ubi->fm = NULL;
1417
1418 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1419 ubi_err("fastmap too large");
1420 ret = -ENOSPC;
1421 goto err;
1422 }
1423
1424 for (i = 1; i < new_fm->used_blocks; i++) {
1425 spin_lock(&ubi->wl_lock);
1426 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1427 spin_unlock(&ubi->wl_lock);
1428
1429 if (!tmp_e && !old_fm) {
1430 int j;
1431 ubi_err("could not get any free erase block");
1432
1433 for (j = 1; j < i; j++)
1434 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1435
1436 ret = -ENOSPC;
1437 goto err;
1438 } else if (!tmp_e && old_fm) {
1439 ret = erase_block(ubi, old_fm->e[i]->pnum);
1440 if (ret < 0) {
1441 int j;
1442
1443 for (j = 1; j < i; j++)
1444 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1445 j, 0);
1446
1447 ubi_err("could not erase old fastmap PEB");
1448 goto err;
1449 }
1450
1451 new_fm->e[i]->pnum = old_fm->e[i]->pnum;
1452 new_fm->e[i]->ec = old_fm->e[i]->ec;
1453 } else {
1454 new_fm->e[i]->pnum = tmp_e->pnum;
1455 new_fm->e[i]->ec = tmp_e->ec;
1456
1457 if (old_fm)
1458 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1459 old_fm->to_be_tortured[i]);
1460 }
1461 }
1462
1463 spin_lock(&ubi->wl_lock);
1464 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1465 spin_unlock(&ubi->wl_lock);
1466
1467 if (old_fm) {
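		/* no fresh anchor PEB was found, reuse the old one */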
1469 if (!tmp_e) {
1470 ret = erase_block(ubi, old_fm->e[0]->pnum);
1471 if (ret < 0) {
1472 int i;
1473 ubi_err("could not erase old anchor PEB");
1474
1475 for (i = 1; i < new_fm->used_blocks; i++)
1476 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1477 i, 0);
1478 goto err;
1479 }
1480
1481 new_fm->e[0]->pnum = old_fm->e[0]->pnum;
1482 new_fm->e[0]->ec = ret;
1483 } else {
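			/* we've got a new anchor PEB, return the old one */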
1485 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1486 old_fm->to_be_tortured[0]);
1487
1488 new_fm->e[0]->pnum = tmp_e->pnum;
1489 new_fm->e[0]->ec = tmp_e->ec;
1490 }
1491 } else {
1492 if (!tmp_e) {
1493 int i;
1494 ubi_err("could not find any anchor PEB");
1495
1496 for (i = 1; i < new_fm->used_blocks; i++)
1497 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1498
1499 ret = -ENOSPC;
1500 goto err;
1501 }
1502
1503 new_fm->e[0]->pnum = tmp_e->pnum;
1504 new_fm->e[0]->ec = tmp_e->ec;
1505 }
1506
1507 down_write(&ubi->work_sem);
1508 down_write(&ubi->fm_sem);
1509 ret = ubi_write_fastmap(ubi, new_fm);
1510 up_write(&ubi->fm_sem);
1511 up_write(&ubi->work_sem);
1512
1513 if (ret)
1514 goto err;
1515
1516out_unlock:
1517 mutex_unlock(&ubi->fm_mutex);
1518 kfree(old_fm);
1519 return ret;
1520
1521err:
1522 kfree(new_fm);
1523
1524 ubi_warn("Unable to write new fastmap, err=%i", ret);
1525
1526 ret = 0;
1527 if (old_fm) {
1528 ret = invalidate_fastmap(ubi, old_fm);
1529 if (ret < 0)
			ubi_err("Unable to invalidate current fastmap!");
1531 else if (ret)
1532 ret = 0;
1533 }
1534 goto out_unlock;
1535}