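// SPDX-License-Identifier: GPL-2.0
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of UBIFS journal implementation and contains various
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any data but refers to buds. The log is a part of
 * the journal.
 */
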
#ifdef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/err.h>
#endif
#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);
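
/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in
 * case of success and %NULL if there is no bud with this LEB number.
 */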
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}
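
/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is not one.
 */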
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}
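
/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */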
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h > t)
		return c->log_bytes - h + t;
	else if (h != t)
		return t - h;
	else if (c->lhead_lnum != c->ltail_lnum)
		return 0;
	else
		return c->log_bytes;
}
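
/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */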
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && c->ro_mount);
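
	/*
	 * Note, although this is a new bud, we account the whole bud space
	 * now, before any data has been written to it. The replay process
	 * will have to read and scan the entire bud anyway, so accounting it
	 * up front keeps the journal size, and hence mount time, bounded.
	 */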
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}
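
/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds it to the buds tree. It also makes sure that log size does not
 * exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if commit is required, and a negative error code in case of
 * failure.
 */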
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}
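
	/* Make sure we have enough space in the log */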
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}
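
	/*
	 * Make sure the amount of space taken by buds will not exceed the
	 * 'c->max_bud_bytes' limit. The buds are what mount-time journal
	 * replay has to read and scan, so bounding the total bud space
	 * bounds mount time. Note, just like 'ubifs_add_bud()', we account
	 * the whole unwritten rest of the bud LEB ('c->leb_size - offs').
	 * If the limit would be exceeded, ask for a commit instead, which
	 * makes the committed buds removable from the journal.
	 */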
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}
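
	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */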
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
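		/* Must ensure next log LEB has been unmapped */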
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
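		/*
		 * Before writing the LEB reference node we must make sure
		 * that the bud LEB is mapped, because otherwise, after an
		 * unclean reboot, the reference node could end up referring
		 * an unmapped LEB containing garbage. Mapping it here makes
		 * sure recovery sees a valid (if empty) bud.
		 */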
		err = ubifs_leb_map(c, bud->lnum);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}
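
/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds as well as old log eraseblocks which do not
 * refer to any buds any more.
 */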
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
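			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */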
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				wbuf->offs - bud->start, c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				c->leb_size - bud->start, c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
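			/*
			 * If the commit does not finish, the recovery will
			 * need to replay the journal, in which case the old
			 * buds need to be unchanged. Do not release them
			 * until post commit, i.e. do not allow them to be
			 * garbage collected.
			 */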
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}
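
/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number here
 *
 * The commit operation starts with writing a "commit start" node to the log
 * and reference nodes for all journal heads, which will define the new
 * journal after the commit has been finished. The commit is started by this
 * function and ended by 'ubifs_log_end_commit()'. This function returns zero
 * in case of success and a negative error code in case of failure.
 */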
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);
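
	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log at this point. And we do
	 * not lock the write-buffers either, because nobody else can write to
	 * the file-system at this phase.
	 */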
	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);
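
	/* Switch to the next log LEB */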
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}
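
	/* Must ensure next LEB has been unmapped */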
	err = ubifs_leb_unmap(c, c->lhead_lnum);
	if (err)
		goto out;

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);
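
	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */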
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}
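
/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation was finished. It moves
 * the log tail to the new position and updates the master node so that it
 * stores the new log tail LEB number. Returns zero in case of success and a
 * negative error code in case of failure.
 */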
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;
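
	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. It is only the short "commit start" phase
	 * when writers are blocked.
	 */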
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
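	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */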
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);
	if (err)
		goto out;

	err = ubifs_write_master(c);

out:
	mutex_unlock(&c->log_mutex);
	return err;
}
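
/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after commit is completed, because they must be unchanged
 * if recovery is needed.
 *
 * Unmap log LEBs only after commit is completed, because they may be needed
 * for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */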
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}
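
/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */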
struct done_ref {
	struct rb_node rb;
	int lnum;
};
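
/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, and a
 * negative error code in case of failure.
 */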
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}
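
/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */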
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct done_ref *dr, *n;

	rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
		kfree(dr);
}
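
/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */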
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}
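
/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could cause the log to be full, but at least 1 LEB
 * is needed for commit. This function rewrites the reference nodes in the log
 * omitting duplicates, and CS nodes of failed commits, so that the log
 * shrinks and a commit becomes possible again.
 *
 * This function returns %0 on success and a negative error code on failure.
 */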
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err(c, "log is too full");
		return -EINVAL;
	}
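	/* Unmap remaining LEBs */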
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}
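
/**
 * dbg_check_bud_bytes - make sure bud bytes calculation is all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL
 * in case of failure.
 */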
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!dbg_is_chk_gen(c))
		return 0;

	spin_lock(&c->buds_lock);
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}