// SPDX-License-Identifier: GPL-2.0+
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements commit-related functionality of the LEB properties
 * subsystem.
 */

#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/crc16.h>
#include <linux/slab.h>
#include <linux/random.h>
#else
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/err.h>
#include "crc16.h"
#endif
#include "ubifs.h"

#ifndef __UBOOT__
static int dbg_populate_lsave(struct ubifs_info *c);
#endif

/**
 * first_dirty_cnode - find first dirty cnode.
 * @nnode: nnode at which to start
 *
 * This function returns the first dirty cnode to commit, found by descending
 * from @nnode.
 */
static struct ubifs_cnode *first_dirty_cnode(struct ubifs_nnode *nnode)
{
	ubifs_assert(nnode);
	while (1) {
		int i, cont = 0;

		for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
			struct ubifs_cnode *cnode;

			cnode = nnode->nbranch[i].cnode;
			if (cnode &&
			    test_bit(DIRTY_CNODE, &cnode->flags)) {
				if (cnode->level == 0)
					return cnode;
				nnode = (struct ubifs_nnode *)cnode;
				cont = 1;
				break;
			}
		}
		if (!cont)
			return (struct ubifs_cnode *)nnode;
	}
}

/**
 * next_dirty_cnode - find next dirty cnode.
 * @cnode: cnode from which to begin searching
 *
 * This function returns the next dirty cnode or %NULL if there is not one.
 */
static struct ubifs_cnode *next_dirty_cnode(struct ubifs_cnode *cnode)
{
	struct ubifs_nnode *nnode;
	int i;

	ubifs_assert(cnode);
	nnode = cnode->parent;
	if (!nnode)
		return NULL;
	for (i = cnode->iip + 1; i < UBIFS_LPT_FANOUT; i++) {
		cnode = nnode->nbranch[i].cnode;
		if (cnode && test_bit(DIRTY_CNODE, &cnode->flags)) {
			if (cnode->level == 0)
				return cnode; /* cnode is a pnode */
			/* cnode is a nnode */
			return first_dirty_cnode((struct ubifs_nnode *)cnode);
		}
	}
	return (struct ubifs_cnode *)nnode;
}

/**
 * get_cnodes_to_commit - create list of dirty cnodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of cnodes to commit.
 */
static int get_cnodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_cnode *cnode, *cnext;
	int cnt = 0;

	if (!c->nroot)
		return 0;

	if (!test_bit(DIRTY_CNODE, &c->nroot->flags))
		return 0;

	c->lpt_cnext = first_dirty_cnode(c->nroot);
	cnode = c->lpt_cnext;
	if (!cnode)
		return 0;
	cnt += 1;
	while (1) {
		ubifs_assert(!test_bit(COW_CNODE, &cnode->flags));
		__set_bit(COW_CNODE, &cnode->flags);
		cnext = next_dirty_cnode(cnode);
		if (!cnext) {
			cnode->cnext = c->lpt_cnext;
			break;
		}
		cnode->cnext = cnext;
		cnode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d cnodes", cnt);
	dbg_lp("committing %d cnodes", cnt);
	ubifs_assert(cnt == c->dirty_nn_cnt + c->dirty_pn_cnt);
	return cnt;
}

/**
 * upd_ltab - update LPT LEB properties.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @free: amount of free space
 * @dirty: amount of dirty space to add
 */
static void upd_ltab(struct ubifs_info *c, int lnum, int free, int dirty)
{
	dbg_lp("LEB %d free %d dirty %d to %d +%d",
	       lnum, c->ltab[lnum - c->lpt_first].free,
	       c->ltab[lnum - c->lpt_first].dirty, free, dirty);
	ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
	c->ltab[lnum - c->lpt_first].free = free;
	c->ltab[lnum - c->lpt_first].dirty += dirty;
}

/**
 * alloc_lpt_leb - allocate an LPT LEB that is empty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number is passed and returned here
 *
 * This function finds the next empty LEB in the ltab starting from @lnum. If
 * an empty LEB is found it is returned in @lnum and the function returns %0.
 * Otherwise the function returns -ENOSPC. Note however, that LPT is designed
 * never to run out of space.
 */
static int alloc_lpt_leb(struct ubifs_info *c, int *lnum)
{
	int i, n;

	n = *lnum - c->lpt_first + 1;
	for (i = n; i < c->lpt_lebs; i++) {
		if (c->ltab[i].tgc || c->ltab[i].cmt)
			continue;
		if (c->ltab[i].free == c->leb_size) {
			c->ltab[i].cmt = 1;
			*lnum = i + c->lpt_first;
			return 0;
		}
	}

	for (i = 0; i < n; i++) {
		if (c->ltab[i].tgc || c->ltab[i].cmt)
			continue;
		if (c->ltab[i].free == c->leb_size) {
			c->ltab[i].cmt = 1;
			*lnum = i + c->lpt_first;
			return 0;
		}
	}
	return -ENOSPC;
}

/**
 * layout_cnodes - layout cnodes for commit.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_cnodes(struct ubifs_info *c)
{
	int lnum, offs, len, alen, done_lsave, done_ltab, err;
	struct ubifs_cnode *cnode;

	err = dbg_chk_lpt_sz(c, 0, 0);
	if (err)
		return err;
	cnode = c->lpt_cnext;
	if (!cnode)
		return 0;
	lnum = c->nhead_lnum;
	offs = c->nhead_offs;
	/* Try to place lsave and ltab nicely */
	done_lsave = !c->big_lpt;
	done_ltab = 0;
	if (!done_lsave && offs + c->lsave_sz <= c->leb_size) {
		done_lsave = 1;
		c->lsave_lnum = lnum;
		c->lsave_offs = offs;
		offs += c->lsave_sz;
		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
	}

	if (offs + c->ltab_sz <= c->leb_size) {
		done_ltab = 1;
		c->ltab_lnum = lnum;
		c->ltab_offs = offs;
		offs += c->ltab_sz;
		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
	}

	do {
		if (cnode->level) {
			len = c->nnode_sz;
			c->dirty_nn_cnt -= 1;
		} else {
			len = c->pnode_sz;
			c->dirty_pn_cnt -= 1;
		}
		while (offs + len > c->leb_size) {
			alen = ALIGN(offs, c->min_io_size);
			upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
			dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
			err = alloc_lpt_leb(c, &lnum);
			if (err)
				goto no_space;
			offs = 0;
			ubifs_assert(lnum >= c->lpt_first &&
				     lnum <= c->lpt_last);
			/* Try to place lsave and ltab nicely */
			if (!done_lsave) {
				done_lsave = 1;
				c->lsave_lnum = lnum;
				c->lsave_offs = offs;
				offs += c->lsave_sz;
				dbg_chk_lpt_sz(c, 1, c->lsave_sz);
				continue;
			}
			if (!done_ltab) {
				done_ltab = 1;
				c->ltab_lnum = lnum;
				c->ltab_offs = offs;
				offs += c->ltab_sz;
				dbg_chk_lpt_sz(c, 1, c->ltab_sz);
				continue;
			}
			break;
		}
		if (cnode->parent) {
			cnode->parent->nbranch[cnode->iip].lnum = lnum;
			cnode->parent->nbranch[cnode->iip].offs = offs;
		} else {
			c->lpt_lnum = lnum;
			c->lpt_offs = offs;
		}
		offs += len;
		dbg_chk_lpt_sz(c, 1, len);
		cnode = cnode->cnext;
	} while (cnode && cnode != c->lpt_cnext);

	/* Make sure to place LPT's save table */
	if (!done_lsave) {
		if (offs + c->lsave_sz > c->leb_size) {
			alen = ALIGN(offs, c->min_io_size);
			upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
			dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
			err = alloc_lpt_leb(c, &lnum);
			if (err)
				goto no_space;
			offs = 0;
			ubifs_assert(lnum >= c->lpt_first &&
				     lnum <= c->lpt_last);
		}
		done_lsave = 1;
		c->lsave_lnum = lnum;
		c->lsave_offs = offs;
		offs += c->lsave_sz;
		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
	}

	/* Make sure to place LPT's own lprops table */
	if (!done_ltab) {
		if (offs + c->ltab_sz > c->leb_size) {
			alen = ALIGN(offs, c->min_io_size);
			upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
			dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
			err = alloc_lpt_leb(c, &lnum);
			if (err)
				goto no_space;
			offs = 0;
			ubifs_assert(lnum >= c->lpt_first &&
				     lnum <= c->lpt_last);
		}
		c->ltab_lnum = lnum;
		c->ltab_offs = offs;
		offs += c->ltab_sz;
		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
	}

	alen = ALIGN(offs, c->min_io_size);
	upd_ltab(c, lnum, c->leb_size - alen, alen - offs);
	dbg_chk_lpt_sz(c, 4, alen - offs);
	err = dbg_chk_lpt_sz(c, 3, alen);
	if (err)
		return err;
	return 0;

no_space:
	ubifs_err(c, "LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
		  lnum, offs, len, done_ltab, done_lsave);
	ubifs_dump_lpt_info(c);
	ubifs_dump_lpt_lebs(c);
	dump_stack();
	return err;
}

#ifndef __UBOOT__
/**
 * realloc_lpt_leb - allocate an LPT LEB that is empty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number is passed and returned here
 *
 * This function duplicates exactly the results of the function alloc_lpt_leb.
 * It is used during end commit to reallocate the same LEB numbers that were
 * allocated by alloc_lpt_leb during start commit.
 *
 * This function finds the next LEB that was allocated by the alloc_lpt_leb
 * function starting from @lnum. If a LEB is found it is returned in @lnum and
 * the function returns %0. Otherwise the function returns -ENOSPC.
 * Note however, that LPT is designed never to run out of space.
 */
static int realloc_lpt_leb(struct ubifs_info *c, int *lnum)
{
	int i, n;

	n = *lnum - c->lpt_first + 1;
	for (i = n; i < c->lpt_lebs; i++)
		if (c->ltab[i].cmt) {
			c->ltab[i].cmt = 0;
			*lnum = i + c->lpt_first;
			return 0;
		}

	for (i = 0; i < n; i++)
		if (c->ltab[i].cmt) {
			c->ltab[i].cmt = 0;
			*lnum = i + c->lpt_first;
			return 0;
		}
	return -ENOSPC;
}

/**
 * write_cnodes - write cnodes for commit.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int write_cnodes(struct ubifs_info *c)
{
	int lnum, offs, len, from, err, wlen, alen, done_ltab, done_lsave;
	struct ubifs_cnode *cnode;
	void *buf = c->lpt_buf;

	cnode = c->lpt_cnext;
	if (!cnode)
		return 0;
	lnum = c->nhead_lnum;
	offs = c->nhead_offs;
	from = offs;
	/* Ensure empty LEB is unmapped */
	if (offs == 0) {
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	}
	/* Try to place lsave and ltab nicely */
	done_lsave = !c->big_lpt;
	done_ltab = 0;
	if (!done_lsave && offs + c->lsave_sz <= c->leb_size) {
		done_lsave = 1;
		ubifs_pack_lsave(c, buf + offs, c->lsave);
		offs += c->lsave_sz;
		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
	}

	if (offs + c->ltab_sz <= c->leb_size) {
		done_ltab = 1;
		ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
		offs += c->ltab_sz;
		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
	}

	/* Loop for each cnode */
	do {
		if (cnode->level)
			len = c->nnode_sz;
		else
			len = c->pnode_sz;
		while (offs + len > c->leb_size) {
			wlen = offs - from;
			if (wlen) {
				alen = ALIGN(wlen, c->min_io_size);
				memset(buf + offs, 0xff, alen - wlen);
				err = ubifs_leb_write(c, lnum, buf + from, from,
						      alen);
				if (err)
					return err;
			}
			dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
			err = realloc_lpt_leb(c, &lnum);
			if (err)
				goto no_space;
			offs = from = 0;
			ubifs_assert(lnum >= c->lpt_first &&
				     lnum <= c->lpt_last);
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
			/* Try to place lsave and ltab nicely */
			if (!done_lsave) {
				done_lsave = 1;
				ubifs_pack_lsave(c, buf + offs, c->lsave);
				offs += c->lsave_sz;
				dbg_chk_lpt_sz(c, 1, c->lsave_sz);
				continue;
			}
			if (!done_ltab) {
				done_ltab = 1;
				ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
				offs += c->ltab_sz;
				dbg_chk_lpt_sz(c, 1, c->ltab_sz);
				continue;
			}
			break;
		}
		if (cnode->level)
			ubifs_pack_nnode(c, buf + offs,
					 (struct ubifs_nnode *)cnode);
		else
			ubifs_pack_pnode(c, buf + offs,
					 (struct ubifs_pnode *)cnode);
		/*
		 * The reason for the barriers is the same as in case of TNC.
		 * See comment in 'write_index()'. 'dirty_cow_nnode()' and
		 * 'dirty_cow_pnode()' are the functions for which this is
		 * important.
		 */
		clear_bit(DIRTY_CNODE, &cnode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_CNODE, &cnode->flags);
		smp_mb__after_atomic();
		offs += len;
		dbg_chk_lpt_sz(c, 1, len);
		cnode = cnode->cnext;
	} while (cnode && cnode != c->lpt_cnext);

	/* Make sure to place LPT's save table */
	if (!done_lsave) {
		if (offs + c->lsave_sz > c->leb_size) {
			wlen = offs - from;
			alen = ALIGN(wlen, c->min_io_size);
			memset(buf + offs, 0xff, alen - wlen);
			err = ubifs_leb_write(c, lnum, buf + from, from, alen);
			if (err)
				return err;
			dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
			err = realloc_lpt_leb(c, &lnum);
			if (err)
				goto no_space;
			offs = from = 0;
			ubifs_assert(lnum >= c->lpt_first &&
				     lnum <= c->lpt_last);
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
		}
		done_lsave = 1;
		ubifs_pack_lsave(c, buf + offs, c->lsave);
		offs += c->lsave_sz;
		dbg_chk_lpt_sz(c, 1, c->lsave_sz);
	}

	/* Make sure to place LPT's own lprops table */
	if (!done_ltab) {
		if (offs + c->ltab_sz > c->leb_size) {
			wlen = offs - from;
			alen = ALIGN(wlen, c->min_io_size);
			memset(buf + offs, 0xff, alen - wlen);
			err = ubifs_leb_write(c, lnum, buf + from, from, alen);
			if (err)
				return err;
			dbg_chk_lpt_sz(c, 2, c->leb_size - offs);
			err = realloc_lpt_leb(c, &lnum);
			if (err)
				goto no_space;
			offs = from = 0;
			ubifs_assert(lnum >= c->lpt_first &&
				     lnum <= c->lpt_last);
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
		}
		ubifs_pack_ltab(c, buf + offs, c->ltab_cmt);
		offs += c->ltab_sz;
		dbg_chk_lpt_sz(c, 1, c->ltab_sz);
	}

	/* Write remaining data in buffer */
	wlen = offs - from;
	alen = ALIGN(wlen, c->min_io_size);
	memset(buf + offs, 0xff, alen - wlen);
	err = ubifs_leb_write(c, lnum, buf + from, from, alen);
	if (err)
		return err;

	dbg_chk_lpt_sz(c, 4, alen - wlen);
	err = dbg_chk_lpt_sz(c, 3, ALIGN(offs, c->min_io_size));
	if (err)
		return err;

	c->nhead_lnum = lnum;
	c->nhead_offs = ALIGN(offs, c->min_io_size);

	dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs);
	dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs);
	dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
	if (c->big_lpt)
		dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);

	return 0;

no_space:
	ubifs_err(c, "LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
		  lnum, offs, len, done_ltab, done_lsave);
	ubifs_dump_lpt_info(c);
	ubifs_dump_lpt_lebs(c);
	dump_stack();
	return err;
}
#endif

/**
 * next_pnode_to_dirty - find next pnode to dirty.
 * @c: UBIFS file-system description object
 * @pnode: pnode
 *
 * This function returns the next pnode to dirty or %NULL if there are no more
 * pnodes. Note that pnodes that have never been written (lnum == 0) are
 * skipped.
 */
static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
					       struct ubifs_pnode *pnode)
{
	struct ubifs_nnode *nnode;
	int iip;

	/* Try to go right */
	nnode = pnode->parent;
	for (iip = pnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) {
		if (nnode->nbranch[iip].lnum)
			return ubifs_get_pnode(c, nnode, iip);
	}

	/* Go up while can't go right */
	do {
		iip = nnode->iip + 1;
		nnode = nnode->parent;
		if (!nnode)
			return NULL;
		for (; iip < UBIFS_LPT_FANOUT; iip++) {
			if (nnode->nbranch[iip].lnum)
				break;
		}
	} while (iip >= UBIFS_LPT_FANOUT);

	/* Go right */
	nnode = ubifs_get_nnode(c, nnode, iip);
	if (IS_ERR(nnode))
		return (void *)nnode;

	/* Go down to level 1 */
	while (nnode->level > 1) {
		for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) {
			if (nnode->nbranch[iip].lnum)
				break;
		}
		if (iip >= UBIFS_LPT_FANOUT) {
			/*
			 * Should not happen, but we need to keep going anyway
			 * rather than looping forever on an inconsistent LPT.
			 */
			iip = 0;
		}
		nnode = ubifs_get_nnode(c, nnode, iip);
		if (IS_ERR(nnode))
			return (void *)nnode;
	}

	for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++)
		if (nnode->nbranch[iip].lnum)
			break;
	if (iip >= UBIFS_LPT_FANOUT)
		/* Should not happen, but keep going anyway */
		iip = 0;
	return ubifs_get_pnode(c, nnode, iip);
}

/**
 * pnode_lookup - lookup a pnode in the LPT.
 * @c: UBIFS file-system description object
 * @i: pnode number (0 to (main_lebs - 1) / UBIFS_LPT_FANOUT)
 *
 * This function returns a pointer to the pnode on success or a negative
 * error code on failure.
 */
static struct ubifs_pnode *pnode_lookup(struct ubifs_info *c, int i)
{
	int err, h, iip, shft;
	struct ubifs_nnode *nnode;

	if (!c->nroot) {
		err = ubifs_read_nnode(c, NULL, 0);
		if (err)
			return ERR_PTR(err);
	}
	i <<= UBIFS_LPT_FANOUT_SHIFT;
	nnode = c->nroot;
	shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
	for (h = 1; h < c->lpt_hght; h++) {
		iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
		shft -= UBIFS_LPT_FANOUT_SHIFT;
		nnode = ubifs_get_nnode(c, nnode, iip);
		if (IS_ERR(nnode))
			return ERR_CAST(nnode);
	}
	iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
	return ubifs_get_pnode(c, nnode, iip);
}

/**
 * add_pnode_dirt - add dirty space to LPT LEB properties.
 * @c: UBIFS file-system description object
 * @pnode: pnode for which to add dirt
 */
static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode)
{
	ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum,
			   c->pnode_sz);
}

/**
 * do_make_pnode_dirty - mark a pnode dirty.
 * @c: UBIFS file-system description object
 * @pnode: pnode to mark dirty
 */
static void do_make_pnode_dirty(struct ubifs_info *c, struct ubifs_pnode *pnode)
{
	/* Assumes cnext list is empty i.e. not called during commit */
	if (!test_and_set_bit(DIRTY_CNODE, &pnode->flags)) {
		struct ubifs_nnode *nnode;

		c->dirty_pn_cnt += 1;
		add_pnode_dirt(c, pnode);
		/* Mark parent and ancestors dirty too */
		nnode = pnode->parent;
		while (nnode) {
			if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
				c->dirty_nn_cnt += 1;
				ubifs_add_nnode_dirt(c, nnode);
				nnode = nnode->parent;
			} else
				break;
		}
	}
}

/**
 * make_tree_dirty - mark the entire LEB properties tree dirty.
 * @c: UBIFS file-system description object
 *
 * This function is used by the "small" LPT model to cause the entire LEB
 * properties tree to be written. The "small" LPT model does not use LPT
 * garbage collection because it is more efficient to write the entire tree
 * (because it is small).
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int make_tree_dirty(struct ubifs_info *c)
{
	struct ubifs_pnode *pnode;

	pnode = pnode_lookup(c, 0);
	if (IS_ERR(pnode))
		return PTR_ERR(pnode);

	while (pnode) {
		do_make_pnode_dirty(c, pnode);
		pnode = next_pnode_to_dirty(c, pnode);
		if (IS_ERR(pnode))
			return PTR_ERR(pnode);
	}
	return 0;
}

/**
 * need_write_all - determine if the LPT area is running out of free space.
 * @c: UBIFS file-system description object
 *
 * This function returns %1 if the LPT area is running out of free space and %0
 * if it is not.
 */
static int need_write_all(struct ubifs_info *c)
{
	long long free = 0;
	int i;

	for (i = 0; i < c->lpt_lebs; i++) {
		if (i + c->lpt_first == c->nhead_lnum)
			free += c->leb_size - c->nhead_offs;
		else if (c->ltab[i].free == c->leb_size)
			free += c->leb_size;
		else if (c->ltab[i].free + c->ltab[i].dirty == c->leb_size)
			free += c->leb_size;
	}
	/* Less than twice the size left */
	if (free <= c->lpt_sz * 2)
		return 1;
	return 0;
}

/**
 * lpt_tgc_start - start trivial garbage collection of LPT LEBs.
 * @c: UBIFS file-system description object
 *
 * LPT trivial garbage collection is where a LPT LEB contains only dirty and
 * free space and so may be reused as soon as the next commit is completed.
 * This function is called during start commit to mark LPT LEBs for trivial GC.
 */
static void lpt_tgc_start(struct ubifs_info *c)
{
	int i;

	for (i = 0; i < c->lpt_lebs; i++) {
		if (i + c->lpt_first == c->nhead_lnum)
			continue;
		if (c->ltab[i].dirty > 0 &&
		    c->ltab[i].free + c->ltab[i].dirty == c->leb_size) {
			c->ltab[i].tgc = 1;
			c->ltab[i].free = c->leb_size;
			c->ltab[i].dirty = 0;
			dbg_lp("LEB %d", i + c->lpt_first);
		}
	}
}

/**
 * lpt_tgc_end - end trivial garbage collection of LPT LEBs.
 * @c: UBIFS file-system description object
 *
 * LPT trivial garbage collection is where a LPT LEB contains only dirty and
 * free space and so may be reused as soon as the next commit is completed.
 * This function is called after the commit is completed (master node has been
 * written) and un-maps LPT LEBs that were marked for trivial GC.
 */
static int lpt_tgc_end(struct ubifs_info *c)
{
	int i, err;

	for (i = 0; i < c->lpt_lebs; i++)
		if (c->ltab[i].tgc) {
			err = ubifs_leb_unmap(c, i + c->lpt_first);
			if (err)
				return err;
			c->ltab[i].tgc = 0;
			dbg_lp("LEB %d", i + c->lpt_first);
		}
	return 0;
}

/**
 * populate_lsave - fill the lsave array with important LEB numbers.
 * @c: the UBIFS file-system description object
 *
 * This function is only called for the "big" model. It records a small number
 * of LEB numbers of important LEBs. Important LEBs are ones that are (from
 * most important to least important): empty, freeable, freeable index, dirty
 * index, dirty or free. Upon mount, we read this list of LEB numbers and bring
 * the pnodes they belong to into memory. That will stop us from having to
 * scan the LPT straight away. For the "small" model we assume that scanning
 * the LPT is no big deal.
 */
static void populate_lsave(struct ubifs_info *c)
{
	struct ubifs_lprops *lprops;
	struct ubifs_lpt_heap *heap;
	int i, cnt = 0;

	ubifs_assert(c->big_lpt);
	if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) {
		c->lpt_drty_flgs |= LSAVE_DIRTY;
		ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz);
	}

#ifndef __UBOOT__
	if (dbg_populate_lsave(c))
		return;
#endif

	list_for_each_entry(lprops, &c->empty_list, list) {
		c->lsave[cnt++] = lprops->lnum;
		if (cnt >= c->lsave_cnt)
			return;
	}
	list_for_each_entry(lprops, &c->freeable_list, list) {
		c->lsave[cnt++] = lprops->lnum;
		if (cnt >= c->lsave_cnt)
			return;
	}
	list_for_each_entry(lprops, &c->frdi_idx_list, list) {
		c->lsave[cnt++] = lprops->lnum;
		if (cnt >= c->lsave_cnt)
			return;
	}
	heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
	for (i = 0; i < heap->cnt; i++) {
		c->lsave[cnt++] = heap->arr[i]->lnum;
		if (cnt >= c->lsave_cnt)
			return;
	}
	heap = &c->lpt_heap[LPROPS_DIRTY - 1];
	for (i = 0; i < heap->cnt; i++) {
		c->lsave[cnt++] = heap->arr[i]->lnum;
		if (cnt >= c->lsave_cnt)
			return;
	}
	heap = &c->lpt_heap[LPROPS_FREE - 1];
	for (i = 0; i < heap->cnt; i++) {
		c->lsave[cnt++] = heap->arr[i]->lnum;
		if (cnt >= c->lsave_cnt)
			return;
	}
	/* Fill it up completely */
	while (cnt < c->lsave_cnt)
		c->lsave[cnt++] = c->main_first;
}

/**
 * nnode_lookup - lookup a nnode in the LPT.
 * @c: UBIFS file-system description object
 * @i: nnode number
 *
 * This function returns a pointer to the nnode on success or a negative
 * error code on failure.
 */
static struct ubifs_nnode *nnode_lookup(struct ubifs_info *c, int i)
{
	int err, iip;
	struct ubifs_nnode *nnode;

	if (!c->nroot) {
		err = ubifs_read_nnode(c, NULL, 0);
		if (err)
			return ERR_PTR(err);
	}
	nnode = c->nroot;
	while (1) {
		iip = i & (UBIFS_LPT_FANOUT - 1);
		i >>= UBIFS_LPT_FANOUT_SHIFT;
		if (!i)
			break;
		nnode = ubifs_get_nnode(c, nnode, iip);
		if (IS_ERR(nnode))
			return nnode;
	}
	return nnode;
}

/**
 * make_nnode_dirty - find a nnode and, if found, make it dirty.
 * @c: UBIFS file-system description object
 * @node_num: nnode number of nnode to make dirty
 * @lnum: LEB number where nnode was written
 * @offs: offset where nnode was written
 *
 * This function is used by LPT garbage collection. LPT garbage collection is
 * used only for the "big" LPT model (c->big_lpt == 1). Garbage collection
 * simply involves marking all the nodes in the LEB being garbage-collected as
 * dirty. The dirty nodes are written next commit, after which the LEB is free
 * to be reused.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int make_nnode_dirty(struct ubifs_info *c, int node_num, int lnum,
			    int offs)
{
	struct ubifs_nnode *nnode;

	nnode = nnode_lookup(c, node_num);
	if (IS_ERR(nnode))
		return PTR_ERR(nnode);
	if (nnode->parent) {
		struct ubifs_nbranch *branch;

		branch = &nnode->parent->nbranch[nnode->iip];
		if (branch->lnum != lnum || branch->offs != offs)
			return 0; /* nnode is obsolete */
	} else if (c->lpt_lnum != lnum || c->lpt_offs != offs)
		return 0; /* nnode is obsolete */

	if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
		c->dirty_nn_cnt += 1;
		ubifs_add_nnode_dirt(c, nnode);
		/* Mark parent and ancestors dirty too */
		nnode = nnode->parent;
		while (nnode) {
			if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
				c->dirty_nn_cnt += 1;
				ubifs_add_nnode_dirt(c, nnode);
				nnode = nnode->parent;
			} else
				break;
		}
	}
	return 0;
}

/**
 * make_pnode_dirty - find a pnode and, if found, make it dirty.
 * @c: UBIFS file-system description object
 * @node_num: pnode number of pnode to make dirty
 * @lnum: LEB number where pnode was written
 * @offs: offset where pnode was written
 *
 * This function is used by LPT garbage collection. LPT garbage collection is
 * used only for the "big" LPT model (c->big_lpt == 1). Garbage collection
 * simply involves marking all the nodes in the LEB being garbage-collected as
 * dirty. The dirty nodes are written next commit, after which the LEB is free
 * to be reused.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int make_pnode_dirty(struct ubifs_info *c, int node_num, int lnum,
			    int offs)
{
	struct ubifs_pnode *pnode;
	struct ubifs_nbranch *branch;

	pnode = pnode_lookup(c, node_num);
	if (IS_ERR(pnode))
		return PTR_ERR(pnode);
	branch = &pnode->parent->nbranch[pnode->iip];
	if (branch->lnum != lnum || branch->offs != offs)
		return 0;
	do_make_pnode_dirty(c, pnode);
	return 0;
}

/**
 * make_ltab_dirty - make ltab node dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where ltab node was written
 * @offs: offset where ltab node was written
 *
 * This function is used by LPT garbage collection. LPT garbage collection is
 * used only for the "big" LPT model (c->big_lpt == 1). Garbage collection
 * simply involves marking all the nodes in the LEB being garbage-collected as
 * dirty. The dirty nodes are written next commit, after which the LEB is free
 * to be reused.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int make_ltab_dirty(struct ubifs_info *c, int lnum, int offs)
{
	if (lnum != c->ltab_lnum || offs != c->ltab_offs)
		return 0; /* This ltab node is obsolete */
	if (!(c->lpt_drty_flgs & LTAB_DIRTY)) {
		c->lpt_drty_flgs |= LTAB_DIRTY;
		ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz);
	}
	return 0;
}

/**
 * make_lsave_dirty - make lsave node dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB number where lsave node was written
 * @offs: offset where lsave node was written
 *
 * This function is used by LPT garbage collection. LPT garbage collection is
 * used only for the "big" LPT model (c->big_lpt == 1). Garbage collection
 * simply involves marking all the nodes in the LEB being garbage-collected as
 * dirty. The dirty nodes are written next commit, after which the LEB is free
 * to be reused.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int make_lsave_dirty(struct ubifs_info *c, int lnum, int offs)
{
	if (lnum != c->lsave_lnum || offs != c->lsave_offs)
		return 0; /* This lsave node is obsolete */
	if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) {
		c->lpt_drty_flgs |= LSAVE_DIRTY;
		ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz);
	}
	return 0;
}

/**
 * make_node_dirty - make node dirty.
 * @c: UBIFS file-system description object
 * @node_type: LPT node type
 * @node_num: node number
 * @lnum: LEB number where node was written
 * @offs: offset where node was written
 *
 * This function is used by LPT garbage collection. LPT garbage collection is
 * used only for the "big" LPT model (c->big_lpt == 1). Garbage collection
 * simply involves marking all the nodes in the LEB being garbage-collected as
 * dirty. The dirty nodes are written next commit, after which the LEB is free
 * to be reused.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int make_node_dirty(struct ubifs_info *c, int node_type, int node_num,
			   int lnum, int offs)
{
	switch (node_type) {
	case UBIFS_LPT_NNODE:
		return make_nnode_dirty(c, node_num, lnum, offs);
	case UBIFS_LPT_PNODE:
		return make_pnode_dirty(c, node_num, lnum, offs);
	case UBIFS_LPT_LTAB:
		return make_ltab_dirty(c, lnum, offs);
	case UBIFS_LPT_LSAVE:
		return make_lsave_dirty(c, lnum, offs);
	}
	return -EINVAL;
}

/**
 * get_lpt_node_len - return the length of a node based on its type.
 * @c: UBIFS file-system description object
 * @node_type: LPT node type
 */
static int get_lpt_node_len(const struct ubifs_info *c, int node_type)
{
	switch (node_type) {
	case UBIFS_LPT_NNODE:
		return c->nnode_sz;
	case UBIFS_LPT_PNODE:
		return c->pnode_sz;
	case UBIFS_LPT_LTAB:
		return c->ltab_sz;
	case UBIFS_LPT_LSAVE:
		return c->lsave_sz;
	}
	return 0;
}

/**
 * get_pad_len - return the length of padding in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer
 * @len: length of buffer
 */
static int get_pad_len(const struct ubifs_info *c, uint8_t *buf, int len)
{
	int offs, pad_len;

	if (c->min_io_size == 1)
		return 0;
	offs = c->leb_size - len;
	pad_len = ALIGN(offs, c->min_io_size) - offs;
	return pad_len;
}

/**
 * get_lpt_node_type - return type (and node number) of a node in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer
 * @node_num: node number is returned here
 */
static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf,
			     int *node_num)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int pos = 0, node_type;

	node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS);
	*node_num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
	return node_type;
}

/**
 * is_a_node - determine if a buffer contains a node.
 * @c: UBIFS file-system description object
 * @buf: buffer
 * @len: length of buffer
 *
 * This function returns %1 if the buffer contains a node or %0 if it does not.
 */
static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int pos = 0, node_type, node_len;
	uint16_t crc, calc_crc;

	if (len < UBIFS_LPT_CRC_BYTES + (UBIFS_LPT_TYPE_BITS + 7) / 8)
		return 0;
	node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS);
	if (node_type == UBIFS_LPT_NOT_A_NODE)
		return 0;
	node_len = get_lpt_node_len(c, node_type);
	if (!node_len || node_len > len)
		return 0;
	pos = 0;
	addr = buf;
	crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS);
	calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
			 node_len - UBIFS_LPT_CRC_BYTES);
	if (crc != calc_crc)
		return 0;
	return 1;
}

/**
 * lpt_gc_lnum - garbage collect a LPT LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to garbage collect
 *
 * LPT garbage collection is used only for the "big" LPT model
 * (c->big_lpt == 1). Garbage collection simply involves marking all the nodes
 * in the LEB being garbage-collected as dirty. The dirty nodes are written
 * next commit, after which the LEB is free to be reused.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int lpt_gc_lnum(struct ubifs_info *c, int lnum)
{
	int err, len = c->leb_size, node_type, node_num, node_len, offs;
	void *buf = c->lpt_buf;

	dbg_lp("LEB %d", lnum);

	err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
	if (err)
		return err;

	while (1) {
		if (!is_a_node(c, buf, len)) {
			int pad_len;

			pad_len = get_pad_len(c, buf, len);
			if (pad_len) {
				buf += pad_len;
				len -= pad_len;
				continue;
			}
			return 0;
		}
		node_type = get_lpt_node_type(c, buf, &node_num);
		node_len = get_lpt_node_len(c, node_type);
		offs = c->leb_size - len;
		ubifs_assert(node_len != 0);
		mutex_lock(&c->lp_mutex);
		err = make_node_dirty(c, node_type, node_num, lnum, offs);
		mutex_unlock(&c->lp_mutex);
		if (err)
			return err;
		buf += node_len;
		len -= node_len;
	}
	return 0;
}

/**
 * lpt_gc - LPT garbage collection.
 * @c: UBIFS file-system description object
 *
 * Select a LPT LEB for LPT garbage collection and call 'lpt_gc_lnum()'.
 * Returns %0 on success and a negative error code on failure.
 */
static int lpt_gc(struct ubifs_info *c)
{
	int i, lnum = -1, dirty = 0;

	mutex_lock(&c->lp_mutex);
	for (i = 0; i < c->lpt_lebs; i++) {
		ubifs_assert(!c->ltab[i].tgc);
		if (i + c->lpt_first == c->nhead_lnum ||
		    c->ltab[i].free + c->ltab[i].dirty == c->leb_size)
			continue;
		if (c->ltab[i].dirty > dirty) {
			dirty = c->ltab[i].dirty;
			lnum = i + c->lpt_first;
		}
	}
	mutex_unlock(&c->lp_mutex);
	if (lnum == -1)
		return -ENOSPC;
	return lpt_gc_lnum(c, lnum);
}

/**
 * ubifs_lpt_start_commit - UBIFS commit starts.
 * @c: the UBIFS file-system description object
 *
 * This function has to be called when UBIFS starts the commit operation.
 * This function "freezes" all currently dirty LEB properties and does not
 * change them anymore. Further changes are saved and tracked separately
 * because they are not part of this commit. This function returns zero in case
 * of success and a negative error code in case of failure.
 */
int ubifs_lpt_start_commit(struct ubifs_info *c)
{
	int err, cnt;

	dbg_lp("");

	mutex_lock(&c->lp_mutex);
	err = dbg_chk_lpt_free_spc(c);
	if (err)
		goto out;
	err = dbg_check_ltab(c);
	if (err)
		goto out;

	if (c->check_lpt_free) {
		/*
		 * We ensure there is enough free space in
		 * ubifs_lpt_post_commit() by marking nodes dirty. That
		 * information is lost when we unmount, so we also need
		 * to check free space once after mounting also.
		 */
		c->check_lpt_free = 0;
		while (need_write_all(c)) {
			mutex_unlock(&c->lp_mutex);
			err = lpt_gc(c);
			if (err)
				return err;
			mutex_lock(&c->lp_mutex);
		}
	}

	lpt_tgc_start(c);

	if (!c->dirty_pn_cnt) {
		dbg_cmt("no cnodes to commit");
		err = 0;
		goto out;
	}

	if (!c->big_lpt && need_write_all(c)) {
		/* If needed, write everything */
		err = make_tree_dirty(c);
		if (err)
			goto out;
		lpt_tgc_start(c);
	}

	if (c->big_lpt)
		populate_lsave(c);

	cnt = get_cnodes_to_commit(c);
	ubifs_assert(cnt != 0);

	err = layout_cnodes(c);
	if (err)
		goto out;

	/* Copy the LPT's own lprops for end commit to write */
	memcpy(c->ltab_cmt, c->ltab,
	       sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
	c->lpt_drty_flgs &= ~(LTAB_DIRTY | LSAVE_DIRTY);

out:
	mutex_unlock(&c->lp_mutex);
	return err;
}

/**
 * free_obsolete_cnodes - free obsolete cnodes for commit end.
 * @c: UBIFS file-system description object
 */
static void free_obsolete_cnodes(struct ubifs_info *c)
{
	struct ubifs_cnode *cnode, *cnext;

	cnext = c->lpt_cnext;
	if (!cnext)
		return;
	do {
		cnode = cnext;
		cnext = cnode->cnext;
		if (test_bit(OBSOLETE_CNODE, &cnode->flags))
			kfree(cnode);
		else
			cnode->cnext = NULL;
	} while (cnext != c->lpt_cnext);
	c->lpt_cnext = NULL;
}

#ifndef __UBOOT__
/**
 * ubifs_lpt_end_commit - finish the commit operation.
 * @c: the UBIFS file-system description object
 *
 * This function has to be called when the commit operation finishes. It
 * flushes the changes which were "frozen" by 'ubifs_lpt_start_commit()' to
 * the media. Returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_lpt_end_commit(struct ubifs_info *c)
{
	int err;

	dbg_lp("");

	if (!c->lpt_cnext)
		return 0;

	err = write_cnodes(c);
	if (err)
		return err;

	mutex_lock(&c->lp_mutex);
	free_obsolete_cnodes(c);
	mutex_unlock(&c->lp_mutex);

	return 0;
}
#endif

/**
 * ubifs_lpt_post_commit - post commit LPT trivial GC and LPT GC.
 * @c: UBIFS file-system description object
 *
 * LPT trivial GC is completed after a commit. Also LPT GC is done after a
 * commit for the "big" LPT model.
 */
int ubifs_lpt_post_commit(struct ubifs_info *c)
{
	int err;

	mutex_lock(&c->lp_mutex);
	err = lpt_tgc_end(c);
	if (err)
		goto out;
	if (c->big_lpt)
		while (need_write_all(c)) {
			mutex_unlock(&c->lp_mutex);
			err = lpt_gc(c);
			if (err)
				return err;
			mutex_lock(&c->lp_mutex);
		}
out:
	mutex_unlock(&c->lp_mutex);
	return err;
}

/**
 * first_nnode - find the first nnode in memory.
 * @c: UBIFS file-system description object
 * @hght: height of tree where nnode found is returned here
 *
 * This function returns a pointer to the nnode found or %NULL if no nnode is
 * found. This function is a helper to 'ubifs_lpt_free()'.
 */
static struct ubifs_nnode *first_nnode(struct ubifs_info *c, int *hght)
{
	struct ubifs_nnode *nnode;
	int h, i, found;

	nnode = c->nroot;
	*hght = 0;
	if (!nnode)
		return NULL;
	for (h = 1; h < c->lpt_hght; h++) {
		found = 0;
		for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
			if (nnode->nbranch[i].nnode) {
				found = 1;
				nnode = nnode->nbranch[i].nnode;
				*hght = h;
				break;
			}
		}
		if (!found)
			break;
	}
	return nnode;
}

/**
 * next_nnode - find the next nnode in memory.
 * @c: UBIFS file-system description object
 * @nnode: nnode from which to start
 * @hght: height of tree where nnode is, is passed and returned here
 *
 * This function returns a pointer to the nnode found or %NULL if no nnode is
 * found. This function is a helper to 'ubifs_lpt_free()'.
 */
static struct ubifs_nnode *next_nnode(struct ubifs_info *c,
				      struct ubifs_nnode *nnode, int *hght)
{
	struct ubifs_nnode *parent;
	int iip, h, i, found;

	parent = nnode->parent;
	if (!parent)
		return NULL;
	if (nnode->iip == UBIFS_LPT_FANOUT - 1) {
		*hght -= 1;
		return parent;
	}
	for (iip = nnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) {
		nnode = parent->nbranch[iip].nnode;
		if (nnode)
			break;
	}
	if (!nnode) {
		*hght -= 1;
		return parent;
	}
	for (h = *hght + 1; h < c->lpt_hght; h++) {
		found = 0;
		for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
			if (nnode->nbranch[i].nnode) {
				found = 1;
				nnode = nnode->nbranch[i].nnode;
				*hght = h;
				break;
			}
		}
		if (!found)
			break;
	}
	return nnode;
}

/**
 * ubifs_lpt_free - free resources owned by the LPT.
 * @c: UBIFS file-system description object
 * @wr_only: free only resources used for writing
 */
void ubifs_lpt_free(struct ubifs_info *c, int wr_only)
{
	struct ubifs_nnode *nnode;
	int i, hght;

	/* Free write-only things first */

	free_obsolete_cnodes(c); /* Leftover from a failed commit */

	vfree(c->ltab_cmt);
	c->ltab_cmt = NULL;
	vfree(c->lpt_buf);
	c->lpt_buf = NULL;
	kfree(c->lsave);
	c->lsave = NULL;

	if (wr_only)
		return;

	/* Now free the rest */

	nnode = first_nnode(c, &hght);
	while (nnode) {
		for (i = 0; i < UBIFS_LPT_FANOUT; i++)
			kfree(nnode->nbranch[i].nnode);
		nnode = next_nnode(c, nnode, &hght);
	}
	for (i = 0; i < LPROPS_HEAP_CNT; i++)
		kfree(c->lpt_heap[i].arr);
	kfree(c->dirty_idx.arr);
	kfree(c->nroot);
	vfree(c->ltab);
	kfree(c->lpt_nod_buf);
}

#ifndef __UBOOT__
/*
 * Everything below is related to debugging.
 */

/**
 * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes.
 * @buf: buffer
 * @len: buffer length
 */
static int dbg_is_all_ff(uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xff)
			return 0;
	return 1;
}

/**
 * dbg_is_nnode_dirty - determine if a nnode is dirty.
 * @c: the UBIFS file-system description object
 * @lnum: LEB number where nnode was written
 * @offs: offset where nnode was written
 */
static int dbg_is_nnode_dirty(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_nnode *nnode;
	int hght;

	/* Entire tree is in memory so first_nnode / next_nnode are OK */
	nnode = first_nnode(c, &hght);
	for (; nnode; nnode = next_nnode(c, nnode, &hght)) {
		struct ubifs_nbranch *branch;

		cond_resched();
		if (nnode->parent) {
			branch = &nnode->parent->nbranch[nnode->iip];
			if (branch->lnum != lnum || branch->offs != offs)
				continue;
			if (test_bit(DIRTY_CNODE, &nnode->flags))
				return 1;
			return 0;
		} else {
			if (c->lpt_lnum != lnum || c->lpt_offs != offs)
				continue;
			if (test_bit(DIRTY_CNODE, &nnode->flags))
				return 1;
			return 0;
		}
	}
	return 1;
}

/**
 * dbg_is_pnode_dirty - determine if a pnode is dirty.
 * @c: the UBIFS file-system description object
 * @lnum: LEB number where pnode was written
 * @offs: offset where pnode was written
 */
static int dbg_is_pnode_dirty(struct ubifs_info *c, int lnum, int offs)
{
	int i, cnt;

	cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT);
	for (i = 0; i < cnt; i++) {
		struct ubifs_pnode *pnode;
		struct ubifs_nbranch *branch;

		cond_resched();
		pnode = pnode_lookup(c, i);
		if (IS_ERR(pnode))
			return PTR_ERR(pnode);
		branch = &pnode->parent->nbranch[pnode->iip];
		if (branch->lnum != lnum || branch->offs != offs)
			continue;
		if (test_bit(DIRTY_CNODE, &pnode->flags))
			return 1;
		return 0;
	}
	return 1;
}

/**
 * dbg_is_ltab_dirty - determine if a ltab node is dirty.
 * @c: the UBIFS file-system description object
 * @lnum: LEB number where ltab node was written
 * @offs: offset where ltab node was written
 */
static int dbg_is_ltab_dirty(struct ubifs_info *c, int lnum, int offs)
{
	if (lnum != c->ltab_lnum || offs != c->ltab_offs)
		return 1;
	return (c->lpt_drty_flgs & LTAB_DIRTY) != 0;
}

/**
 * dbg_is_lsave_dirty - determine if a lsave node is dirty.
 * @c: the UBIFS file-system description object
 * @lnum: LEB number where lsave node was written
 * @offs: offset where lsave node was written
 */
static int dbg_is_lsave_dirty(struct ubifs_info *c, int lnum, int offs)
{
	if (lnum != c->lsave_lnum || offs != c->lsave_offs)
		return 1;
	return (c->lpt_drty_flgs & LSAVE_DIRTY) != 0;
}

/**
 * dbg_is_node_dirty - determine if a node is dirty.
 * @c: the UBIFS file-system description object
 * @node_type: node type
 * @lnum: LEB number where node was written
 * @offs: offset where node was written
 */
static int dbg_is_node_dirty(struct ubifs_info *c, int node_type, int lnum,
			     int offs)
{
	switch (node_type) {
	case UBIFS_LPT_NNODE:
		return dbg_is_nnode_dirty(c, lnum, offs);
	case UBIFS_LPT_PNODE:
		return dbg_is_pnode_dirty(c, lnum, offs);
	case UBIFS_LPT_LTAB:
		return dbg_is_ltab_dirty(c, lnum, offs);
	case UBIFS_LPT_LSAVE:
		return dbg_is_lsave_dirty(c, lnum, offs);
	}
	return 1;
}

/**
 * dbg_check_ltab_lnum - check the ltab for a LPT LEB number.
 * @c: the UBIFS file-system description object
 * @lnum: LEB number where node was written
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
{
	int err, len = c->leb_size, dirty = 0, node_type, node_num, node_len;
	int ret;
	void *buf, *p;

	if (!dbg_is_chk_lprops(c))
		return 0;

	buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		ubifs_err(c, "cannot allocate memory for ltab checking");
		return 0;
	}

	dbg_lp("LEB %d", lnum);

	err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
	if (err)
		goto out;

	while (1) {
		if (!is_a_node(c, p, len)) {
			int i, pad_len;

			pad_len = get_pad_len(c, p, len);
			if (pad_len) {
				p += pad_len;
				len -= pad_len;
				dirty += pad_len;
				continue;
			}
			if (!dbg_is_all_ff(p, len)) {
				ubifs_err(c, "invalid empty space in LEB %d at %d",
					  lnum, c->leb_size - len);
				err = -EINVAL;
			}
			i = lnum - c->lpt_first;
			if (len != c->ltab[i].free) {
				ubifs_err(c, "invalid free space in LEB %d (free %d, expected %d)",
					  lnum, len, c->ltab[i].free);
				err = -EINVAL;
			}
			if (dirty != c->ltab[i].dirty) {
				ubifs_err(c, "invalid dirty space in LEB %d (dirty %d, expected %d)",
					  lnum, dirty, c->ltab[i].dirty);
				err = -EINVAL;
			}
			goto out;
		}
		node_type = get_lpt_node_type(c, p, &node_num);
		node_len = get_lpt_node_len(c, node_type);
		ret = dbg_is_node_dirty(c, node_type, lnum, c->leb_size - len);
		if (ret == 1)
			dirty += node_len;
		p += node_len;
		len -= node_len;
	}

	err = 0;
out:
	vfree(buf);
	return err;
}

/**
 * dbg_check_ltab - check the free and dirty space in the ltab.
 * @c: the UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_check_ltab(struct ubifs_info *c)
{
	int lnum, err, i, cnt;

	if (!dbg_is_chk_lprops(c))
		return 0;

	/* Bring the entire tree into memory */
	cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT);
	for (i = 0; i < cnt; i++) {
		struct ubifs_pnode *pnode;

		pnode = pnode_lookup(c, i);
		if (IS_ERR(pnode))
			return PTR_ERR(pnode);
		cond_resched();
	}

	/* Check nodes */
	err = dbg_check_lpt_nodes(c, (struct ubifs_cnode *)c->nroot, 0, 0);
	if (err)
		return err;

	/* Check each LEB */
	for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
		err = dbg_check_ltab_lnum(c, lnum);
		if (err) {
			ubifs_err(c, "failed at LEB %d", lnum);
			return err;
		}
	}

	dbg_lp("succeeded");
	return 0;
}

/**
 * dbg_chk_lpt_free_spc - check LPT free space is enough to write entire LPT.
 * @c: the UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_chk_lpt_free_spc(struct ubifs_info *c)
{
	long long free = 0;
	int i;

	if (!dbg_is_chk_lprops(c))
		return 0;

	for (i = 0; i < c->lpt_lebs; i++) {
		if (c->ltab[i].tgc || c->ltab[i].cmt)
			continue;
		if (i + c->lpt_first == c->nhead_lnum)
			free += c->leb_size - c->nhead_offs;
		else if (c->ltab[i].free == c->leb_size)
			free += c->leb_size;
	}
	if (free < c->lpt_sz) {
		ubifs_err(c, "LPT space error: free %lld lpt_sz %lld",
			  free, c->lpt_sz);
		ubifs_dump_lpt_info(c);
		ubifs_dump_lpt_lebs(c);
		dump_stack();
		return -EINVAL;
	}
	return 0;
}

/**
 * dbg_chk_lpt_sz - check LPT does not write more than LPT size.
 * @c: the UBIFS file-system description object
 * @action: what to do
 * @len: length written
 *
 * This function returns %0 on success and a negative error code on failure.
 * The @action argument may be one of:
 *   o %0 - LPT debugging checking starts, initialize debugging variables;
 *   o %1 - wrote an LPT node, increase LPT size by @len bytes;
 *   o %2 - switched to a different LEB and wasted @len bytes;
 *   o %3 - check that we've written the right number of bytes;
 *   o %4 - wasted @len bytes.
 */
int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
{
	struct ubifs_debug_info *d = c->dbg;
	long long chk_lpt_sz, lpt_sz;
	int err = 0;

	if (!dbg_is_chk_lprops(c))
		return 0;

	switch (action) {
	case 0:
		d->chk_lpt_sz = 0;
		d->chk_lpt_sz2 = 0;
		d->chk_lpt_lebs = 0;
		d->chk_lpt_wastage = 0;
		if (c->dirty_pn_cnt > c->pnode_cnt) {
			ubifs_err(c, "dirty pnodes %d exceed max %d",
				  c->dirty_pn_cnt, c->pnode_cnt);
			err = -EINVAL;
		}
		if (c->dirty_nn_cnt > c->nnode_cnt) {
			ubifs_err(c, "dirty nnodes %d exceed max %d",
				  c->dirty_nn_cnt, c->nnode_cnt);
			err = -EINVAL;
		}
		return err;
	case 1:
		d->chk_lpt_sz += len;
		return 0;
	case 2:
		d->chk_lpt_sz += len;
		d->chk_lpt_wastage += len;
		d->chk_lpt_lebs += 1;
		return 0;
	case 3:
		chk_lpt_sz = c->leb_size;
		chk_lpt_sz *= d->chk_lpt_lebs;
		chk_lpt_sz += len - c->nhead_offs;
		if (d->chk_lpt_sz != chk_lpt_sz) {
			ubifs_err(c, "LPT wrote %lld but space used was %lld",
				  d->chk_lpt_sz, chk_lpt_sz);
			err = -EINVAL;
		}
		if (d->chk_lpt_sz > c->lpt_sz) {
			ubifs_err(c, "LPT wrote %lld but lpt_sz is %lld",
				  d->chk_lpt_sz, c->lpt_sz);
			err = -EINVAL;
		}
		if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) {
			ubifs_err(c, "LPT layout size %lld but wrote %lld",
				  d->chk_lpt_sz, d->chk_lpt_sz2);
			err = -EINVAL;
		}
		if (d->chk_lpt_sz2 && d->new_nhead_offs != len) {
			ubifs_err(c, "LPT new nhead offs: expected %d was %d",
				  d->new_nhead_offs, len);
			err = -EINVAL;
		}
		lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
		lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;
		lpt_sz += c->ltab_sz;
		if (c->big_lpt)
			lpt_sz += c->lsave_sz;
		if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) {
			ubifs_err(c, "LPT chk_lpt_sz %lld + waste %lld exceeds %lld",
				  d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz);
			err = -EINVAL;
		}
		if (err) {
			ubifs_dump_lpt_info(c);
			ubifs_dump_lpt_lebs(c);
			dump_stack();
		}
		d->chk_lpt_sz2 = d->chk_lpt_sz;
		d->chk_lpt_sz = 0;
		d->chk_lpt_wastage = 0;
		d->chk_lpt_lebs = 0;
		d->new_nhead_offs = len;
		return err;
	case 4:
		d->chk_lpt_sz += len;
		d->chk_lpt_wastage += len;
		return 0;
	default:
		return -EINVAL;
	}
}

/**
 * dump_lpt_leb - dump an LPT LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to dump
 *
 * This function dumps an LEB from LPT area. Nodes in this area are very
 * different to nodes in the main area (e.g., they do not have common headers,
 * they do not have 8-byte alignments, etc), so we have a separate function to
 * dump LPT areas of the flash from the main area of the flash.
 */
static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
{
	int err, len = c->leb_size, node_type, node_num, node_len, offs;
	void *buf, *p;

	pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
	buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		ubifs_err(c, "cannot allocate memory to dump LPT");
		return;
	}

	err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
	if (err)
		goto out;

	while (1) {
		offs = c->leb_size - len;
		if (!is_a_node(c, p, len)) {
			int pad_len;

			pad_len = get_pad_len(c, p, len);
			if (pad_len) {
				pr_err("LEB %d:%d, pad %d bytes\n",
				       lnum, offs, pad_len);
				p += pad_len;
				len -= pad_len;
				continue;
			}
			if (len)
				pr_err("LEB %d:%d, free %d bytes\n",
				       lnum, offs, len);
			break;
		}

		node_type = get_lpt_node_type(c, p, &node_num);
		switch (node_type) {
		case UBIFS_LPT_PNODE:
		{
			node_len = c->pnode_sz;
			if (c->big_lpt)
				pr_err("LEB %d:%d, pnode num %d\n",
				       lnum, offs, node_num);
			else
				pr_err("LEB %d:%d, pnode\n", lnum, offs);
			break;
		}
		case UBIFS_LPT_NNODE:
		{
			int i;
			struct ubifs_nnode nnode;

			node_len = c->nnode_sz;
			if (c->big_lpt)
				pr_err("LEB %d:%d, nnode num %d, ",
				       lnum, offs, node_num);
			else
				pr_err("LEB %d:%d, nnode, ",
				       lnum, offs);
			err = ubifs_unpack_nnode(c, p, &nnode);
			if (err) {
				pr_err("failed to unpack_node, error %d\n",
				       err);
				break;
			}
			for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
				pr_cont("%d:%d", nnode.nbranch[i].lnum,
					nnode.nbranch[i].offs);
				if (i != UBIFS_LPT_FANOUT - 1)
					pr_cont(", ");
			}
			pr_cont("\n");
			break;
		}
		case UBIFS_LPT_LTAB:
			node_len = c->ltab_sz;
			pr_err("LEB %d:%d, ltab\n", lnum, offs);
			break;
		case UBIFS_LPT_LSAVE:
			node_len = c->lsave_sz;
			pr_err("LEB %d:%d, lsave len\n", lnum, offs);
			break;
		default:
			ubifs_err(c, "LPT node type %d not recognized", node_type);
			goto out;
		}

		p += node_len;
		len -= node_len;
	}

	pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
out:
	vfree(buf);
	return;
}

/**
 * ubifs_dump_lpt_lebs - dump LPT lebs.
 * @c: UBIFS file-system description object
 *
 * This function dumps all LPT LEBs. The caller has to make sure the LPT is
 * locked.
 */
void ubifs_dump_lpt_lebs(const struct ubifs_info *c)
{
	int i;

	pr_err("(pid %d) start dumping all LPT LEBs\n", current->pid);
	for (i = 0; i < c->lpt_lebs; i++)
		dump_lpt_leb(c, i + c->lpt_first);
	pr_err("(pid %d) finish dumping all LPT LEBs\n", current->pid);
}

/**
 * dbg_populate_lsave - debugging version of 'populate_lsave()'
 * @c: UBIFS file-system description object
 *
 * This is a debugging version for 'populate_lsave()' which populates lsave
 * with random LEBs instead of useful LEBs, which is good for test coverage.
 * Returns zero if lsave has not been populated (this debugging feature is
 * disabled) and non-zero if lsave has been populated.
 */
static int dbg_populate_lsave(struct ubifs_info *c)
{
	struct ubifs_lprops *lprops;
	struct ubifs_lpt_heap *heap;
	int i;

	if (!dbg_is_chk_gen(c))
		return 0;
	if (prandom_u32() & 3)
		return 0;

	for (i = 0; i < c->lsave_cnt; i++)
		c->lsave[i] = c->main_first;

	list_for_each_entry(lprops, &c->empty_list, list)
		c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
	list_for_each_entry(lprops, &c->freeable_list, list)
		c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
	list_for_each_entry(lprops, &c->frdi_idx_list, list)
		c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;

	heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
	for (i = 0; i < heap->cnt; i++)
		c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
	heap = &c->lpt_heap[LPROPS_DIRTY - 1];
	for (i = 0; i < heap->cnt; i++)
		c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
	heap = &c->lpt_heap[LPROPS_FREE - 1];
	for (i = 0; i < heap->cnt; i++)
		c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;

	return 1;
}
#endif