/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of the UBIFS journal implementation and contains
 * various functions which manipulate the log. The log is a fixed area on the
 * flash which does not contain any data but refers to buds. The log is a part
 * of the journal.
 */
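
/*
 * In short: the log is a sequence of commit start (CS) nodes and reference
 * (REF) nodes. Each REF node points to a "bud" - a journal LEB which holds
 * the actual journal data. At mount time the log is scanned to find the buds
 * and the journal is replayed from them, so the total amount of space
 * referred to by the log ('c->bud_bytes') is limited in order to bound mount
 * time; when the limit is approached, a commit is requested.
 */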

#ifdef __UBOOT__
#include <linux/err.h>
#endif
#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns the bud description object in
 * case of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is not one.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}
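
/*
 * The log occupies a fixed range of LEBs which is used as a circular buffer:
 * the head (where new nodes are appended) chases the tail (the oldest log LEB
 * which is still needed). In the helper below, 'h' and 't' are the absolute
 * byte positions of the head and the tail within the log area. When the head
 * is ahead of the tail, the empty space wraps around the end of the log; when
 * both byte positions coincide, the log is either completely empty or
 * completely full, which is distinguished by comparing the head and tail LEB
 * numbers.
 */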

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h > t)
		return c->log_bytes - h + t;
	else if (h != t)
		return t - h;
	else if (c->lhead_lnum != c->ltail_lnum)
		return 0;
	else
		return c->log_bytes;
}
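
/*
 * Note: ubifs_add_bud() is used both during normal operation (from
 * ubifs_add_bud_to_log(), with the log mutex held) and during journal replay.
 * In the replay case on a read-only mount the journal heads have not been
 * created yet ('c->jheads' is %NULL), so the bud is only inserted into the
 * buds tree and not onto a journal head list.
 */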

/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud, we account this space now, before
	 * any data has been written to it, because this is about guaranteeing
	 * fixed mount time, and this bud will anyway be read and scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}
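
/*
 * Before adding a bud, ubifs_add_bud_to_log() below reserves space for the
 * reference node it is about to write ('c->ref_node_alsz') and keeps
 * 'c->min_log_bytes' of the log free so that the next commit is always able
 * to write its own nodes. If either the log or the bud space limit would be
 * exceeded, a commit is requested and %-EAGAIN is returned so that the caller
 * may retry after the commit has freed up space.
 */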

/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds it to the buds tree. It also makes sure that the log size does not
 * exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if commit is required, and a negative error code in case of
 * failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All @c->bud_bytes updates take
	 * place with both locks held.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start a background commit. Note, it
	 * is OK to read 'c->cmt_state' without the spinlock because integer
	 * reads are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing a LEB reference which refers to an empty LEB
		 * to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk referring to a LEB with garbage in case
		 * of an unclean reboot: the target LEB might have been
		 * unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}
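
/*
 * During commit, remove_buds() below walks all buds and decides which ones
 * are still needed. The space it accumulates in 'c->cmt_bud_bytes' is the
 * amount of bud space that the commit takes care of and which therefore no
 * longer needs to be replayed; it is subtracted from 'c->bud_bytes' in
 * ubifs_log_end_commit(). Buds which a journal head write-buffer still points
 * to are kept (only their start offset is advanced), because the head may
 * keep writing to them after the commit.
 */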

/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove the
 * buds which are pointed to by journal heads.
 */
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				wbuf->offs - bud->start, c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				c->leb_size - bud->start, c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will
			 * need to replay the journal, in which case the old
			 * buds need to be unchanged. Do not release them
			 * until post commit, i.e. do not allow them to be
			 * garbage collected.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing a "commit start" node to the log
 * and reference nodes for all journal heads which will define the new journal
 * after the commit has been finished. The commit start and reference nodes
 * are written in one go to the nearest empty log LEB (hence, when the commit
 * is finished UBIFS may safely unmap all the previous log LEBs). This
 * function returns zero in case of success and a negative error code in case
 * of failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock the
	 * write-buffers because nobody can write to the file-system during
	 * this phase.
	 */

	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		ubifs_assert(c->lhead_lnum != c->ltail_lnum);
		c->lhead_offs = 0;
	}

	/* Must ensure next LEB has been unmapped */
	err = ubifs_leb_unmap(c, c->lhead_lnum);
	if (err)
		goto out;

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the
	 * log for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}

/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves
 * the log tail to the new position and updates the master node so that it
 * stores the new log tail LEB number. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. It is only the short "commit start" phase
	 * during which writers are blocked.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);
	if (err)
		goto out;

	err = ubifs_write_master(c);

out:
	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after the commit is completed, because they must be
 * unchanged if recovery is needed.
 *
 * Unmap unused log LEBs only after the commit is completed, because they may
 * be needed for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */
struct done_ref {
	struct rb_node rb;
	int lnum;
};

/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, and a
 * negative error code in case of failure.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}

/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct done_ref *dr, *n;

	rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
		kfree(dr);
}
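
/*
 * add_node() below packs nodes into the consolidation buffer. When the next
 * node does not fit into the current LEB, the buffer is padded up to the
 * minimal I/O unit, written out with ubifs_leb_change(), and packing resumes
 * at offset 0 of the next log LEB.
 */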

/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}
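
/*
 * Log consolidation below handles the case when the log has become full, for
 * example because of repeated commit failures: the log is re-scanned from the
 * tail, the first commit start node and a single reference node per bud LEB
 * (de-duplicated via the "done" rb-tree) are copied into a scratch buffer,
 * written back compactly starting at the old tail, and the log LEBs freed by
 * this are unmapped. If consolidation cannot free at least one LEB, the log
 * is genuinely too full and an error is returned.
 */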

/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failures of commit can cause the log to be full, but at least one
 * LEB is needed for commit, so it is necessary to consolidate the log. This
 * function rewrites the log so that it contains only a single commit start
 * node and one reference node per referenced bud LEB, and then unmaps the log
 * LEBs which are freed by this.
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err(c, "log is too full");
		return -EINVAL;
	}
	/* Unmap remaining LEBs */
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}

/**
 * dbg_check_bud_bytes - make sure the bud bytes calculation is correct.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL
 * in case of failure.
 */
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!dbg_is_chk_gen(c))
		return 0;

	spin_lock(&c->buds_lock);
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}