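/*
 * This file is part of the UBIFS journal implementation and contains
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any file-system data; it only holds commit start
 * nodes and reference nodes which point to the journal buds.
 */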
#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);
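
/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns the bud description object in
 * case of success and %NULL if there is no bud with this LEB number.
 */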
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
        struct rb_node *p;
        struct ubifs_bud *bud;

        spin_lock(&c->buds_lock);
        p = c->buds.rb_node;
        while (p) {
                bud = rb_entry(p, struct ubifs_bud, rb);
                if (lnum < bud->lnum)
                        p = p->rb_left;
                else if (lnum > bud->lnum)
                        p = p->rb_right;
                else {
                        spin_unlock(&c->buds_lock);
                        return bud;
                }
        }
        spin_unlock(&c->buds_lock);
        return NULL;
}
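
/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the write-buffer for @lnum or %NULL if there is
 * not one.
 */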
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
        struct rb_node *p;
        struct ubifs_bud *bud;
        int jhead;

        if (!c->jheads)
                return NULL;

        spin_lock(&c->buds_lock);
        p = c->buds.rb_node;
        while (p) {
                bud = rb_entry(p, struct ubifs_bud, rb);
                if (lnum < bud->lnum)
                        p = p->rb_left;
                else if (lnum > bud->lnum)
                        p = p->rb_right;
                else {
                        jhead = bud->jhead;
                        spin_unlock(&c->buds_lock);
                        return &c->jheads[jhead].wbuf;
                }
        }
        spin_unlock(&c->buds_lock);
        return NULL;
}
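
/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */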
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
        long long h, t;

        h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
        t = (long long)c->ltail_lnum * c->leb_size;

        if (h > t)
                return c->log_bytes - h + t;
        else if (h != t)
                return t - h;
        else if (c->lhead_lnum != c->ltail_lnum)
                return 0;
        else
                return c->log_bytes;
}
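
/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */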
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
        struct rb_node **p, *parent = NULL;
        struct ubifs_bud *b;
        struct ubifs_jhead *jhead;

        spin_lock(&c->buds_lock);
        p = &c->buds.rb_node;
        while (*p) {
                parent = *p;
                b = rb_entry(parent, struct ubifs_bud, rb);
                ubifs_assert(c, bud->lnum != b->lnum);
                if (bud->lnum < b->lnum)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&bud->rb, parent, p);
        rb_insert_color(&bud->rb, &c->buds);
        if (c->jheads) {
                jhead = &c->jheads[bud->jhead];
                list_add_tail(&bud->list, &jhead->buds_list);
        } else
                ubifs_assert(c, c->replaying && c->ro_mount);

        /*
         * Note, although this is a new bud, we account its space now, before
         * any data has been written to it, because this bud will anyway have
         * to be read and scanned on the next mount, and accounting it early
         * keeps the mount-time guarantee.
         */
        c->bud_bytes += c->leb_size - bud->start;

        dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
                bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
        spin_unlock(&c->buds_lock);
}
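
/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds it to the buds tree. It also makes sure that the log size does not
 * exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if a commit is required, and a negative error code in case of
 * failure.
 */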
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
        int err;
        struct ubifs_bud *bud;
        struct ubifs_ref_node *ref;

        bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
        if (!bud)
                return -ENOMEM;
        ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
        if (!ref) {
                kfree(bud);
                return -ENOMEM;
        }

        mutex_lock(&c->log_mutex);
        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        if (c->ro_error) {
                err = -EROFS;
                goto out_unlock;
        }

        /* Make sure we have enough space in the log */
        if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
                dbg_log("not enough log space - %lld, required %d",
                        empty_log_bytes(c), c->min_log_bytes);
                ubifs_commit_required(c);
                err = -EAGAIN;
                goto out_unlock;
        }

        /*
         * Make sure the amount of space in buds will not exceed the
         * 'c->max_bud_bytes' limit, because we want to guarantee mount time.
         *
         * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
         * because we are holding @c->log_mutex - all changes of @c->bud_bytes
         * take place when both @c->log_mutex and @c->buds_lock are held.
         */
        if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
                dbg_log("bud bytes %lld (%lld max), require commit",
                        c->bud_bytes, c->max_bud_bytes);
                ubifs_commit_required(c);
                err = -EAGAIN;
                goto out_unlock;
        }

        /*
         * If the journal is full enough - start a background commit. Note, it
         * is OK to read 'c->cmt_state' without the spinlock because we do not
         * care if we mis-read it - the commit will simply start a bit later.
         */
        if (c->bud_bytes >= c->bg_bud_bytes &&
            c->cmt_state == COMMIT_RESTING) {
                dbg_log("bud bytes %lld (%lld max), initiate BG commit",
                        c->bud_bytes, c->max_bud_bytes);
                ubifs_request_bg_commit(c);
        }

        bud->lnum = lnum;
        bud->start = offs;
        bud->jhead = jhead;
        bud->log_hash = NULL;

        ref->ch.node_type = UBIFS_REF_NODE;
        ref->lnum = cpu_to_le32(bud->lnum);
        ref->offs = cpu_to_le32(bud->start);
        ref->jhead = cpu_to_le32(jhead);

        if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
                c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
                ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
                c->lhead_offs = 0;
        }

        if (c->lhead_offs == 0) {
                /* Must ensure next log LEB has been unmapped */
                err = ubifs_leb_unmap(c, c->lhead_lnum);
                if (err)
                        goto out_unlock;
        }

        if (bud->start == 0) {
                /*
                 * Before writing a reference node which refers to an empty
                 * LEB, make sure that LEB is mapped. Otherwise, after an
                 * unclean reboot the log could refer to an LEB containing
                 * garbage, because the target LEB might have been unmapped
                 * but not yet physically erased.
                 */
                err = ubifs_leb_map(c, bud->lnum);
                if (err)
                        goto out_unlock;
        }

        dbg_log("write ref LEB %d:%d",
                c->lhead_lnum, c->lhead_offs);
        err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
                               c->lhead_offs);
        if (err)
                goto out_unlock;

        err = ubifs_shash_update(c, c->log_hash, ref, UBIFS_REF_NODE_SZ);
        if (err)
                goto out_unlock;

        err = ubifs_shash_copy_state(c, c->log_hash, c->jheads[jhead].log_hash);
        if (err)
                goto out_unlock;

        c->lhead_offs += c->ref_node_alsz;

        ubifs_add_bud(c, bud);

        mutex_unlock(&c->log_mutex);
        kfree(ref);
        return 0;

out_unlock:
        mutex_unlock(&c->log_mutex);
        kfree(ref);
        kfree(bud);
        return err;
}
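
/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove
 * buds which are pointed to by journal heads.
 */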
static void remove_buds(struct ubifs_info *c)
{
        struct rb_node *p;

        ubifs_assert(c, list_empty(&c->old_buds));
        c->cmt_bud_bytes = 0;
        spin_lock(&c->buds_lock);
        p = rb_first(&c->buds);
        while (p) {
                struct rb_node *p1 = p;
                struct ubifs_bud *bud;
                struct ubifs_wbuf *wbuf;

                p = rb_next(p);
                bud = rb_entry(p1, struct ubifs_bud, rb);
                wbuf = &c->jheads[bud->jhead].wbuf;

                if (wbuf->lnum == bud->lnum) {
                        /*
                         * Do not remove buds which are pointed to by journal
                         * heads (non-closed buds).
                         */
                        c->cmt_bud_bytes += wbuf->offs - bud->start;
                        dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
                                bud->lnum, bud->start, dbg_jhead(bud->jhead),
                                wbuf->offs - bud->start, c->cmt_bud_bytes);
                        bud->start = wbuf->offs;
                } else {
                        c->cmt_bud_bytes += c->leb_size - bud->start;
                        dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
                                bud->lnum, bud->start, dbg_jhead(bud->jhead),
                                c->leb_size - bud->start, c->cmt_bud_bytes);
                        rb_erase(p1, &c->buds);
                        /*
                         * If the commit does not finish, recovery will need to
                         * replay the journal, in which case the old buds are
                         * still needed. So the bud is only moved to the
                         * @c->old_buds list here; its LEB is returned after
                         * the commit ends, in 'ubifs_log_post_commit()'.
                         */
                        list_move(&bud->list, &c->old_buds);
                }
        }
        spin_unlock(&c->buds_lock);
}
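
/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number here
 *
 * The commit operation starts with writing a "commit start" node to the log,
 * followed by reference nodes for all journal heads which will define the new
 * journal after the commit has finished. The commit start and reference nodes
 * are written in one go to the nearest empty log LEB, so when the commit is
 * finished UBIFS may safely unmap all previous log LEBs. Returns zero in case
 * of success and a negative error code in case of failure.
 */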
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
        void *buf;
        struct ubifs_cs_node *cs;
        struct ubifs_ref_node *ref;
        int err, i, max_len, len;

        err = dbg_check_bud_bytes(c);
        if (err)
                return err;

        max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
        max_len = ALIGN(max_len, c->min_io_size);
        buf = cs = kmalloc(max_len, GFP_NOFS);
        if (!buf)
                return -ENOMEM;

        cs->ch.node_type = UBIFS_CS_NODE;
        cs->cmt_no = cpu_to_le64(c->cmt_no);
        ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

        err = ubifs_shash_init(c, c->log_hash);
        if (err)
                goto out;

        err = ubifs_shash_update(c, c->log_hash, cs, UBIFS_CS_NODE_SZ);
        if (err < 0)
                goto out;

        /*
         * Note, we do not take 'c->log_mutex' because this is the commit
         * start phase and we are using the log exclusively. The write-buffers
         * are not locked either, because nobody can write to the file-system
         * at this point.
         */
        len = UBIFS_CS_NODE_SZ;
        for (i = 0; i < c->jhead_cnt; i++) {
                int lnum = c->jheads[i].wbuf.lnum;
                int offs = c->jheads[i].wbuf.offs;

                if (lnum == -1 || offs == c->leb_size)
                        continue;

                dbg_log("add ref to LEB %d:%d for jhead %s",
                        lnum, offs, dbg_jhead(i));
                ref = buf + len;
                ref->ch.node_type = UBIFS_REF_NODE;
                ref->lnum = cpu_to_le32(lnum);
                ref->offs = cpu_to_le32(offs);
                ref->jhead = cpu_to_le32(i);

                ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
                len += UBIFS_REF_NODE_SZ;

                err = ubifs_shash_update(c, c->log_hash, ref,
                                         UBIFS_REF_NODE_SZ);
                if (err)
                        goto out;
                err = ubifs_shash_copy_state(c, c->log_hash,
                                             c->jheads[i].log_hash);
                if (err)
                        goto out;
        }

        ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

        /* Switch to the next log LEB */
        if (c->lhead_offs) {
                c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
                ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
                c->lhead_offs = 0;
        }

        /* Must ensure next LEB has been unmapped */
        err = ubifs_leb_unmap(c, c->lhead_lnum);
        if (err)
                goto out;

        len = ALIGN(len, c->min_io_size);
        dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
        err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
        if (err)
                goto out;

        *ltail_lnum = c->lhead_lnum;

        c->lhead_offs += len;
        ubifs_assert(c, c->lhead_offs < c->leb_size);

        remove_buds(c);

        /*
         * We have started the commit, so users may now use the rest of the
         * log for new writes.
         */
        c->min_log_bytes = 0;

out:
        kfree(buf);
        return err;
}
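
/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves
 * the log tail to its new position and writes the master node so that it
 * stores the new log tail LEB number. Returns zero in case of success and a
 * negative error code in case of failure.
 */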
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
        int err;

        /*
         * At this phase we have to take 'c->log_mutex' because UBIFS allows
         * file-system writes during commit; only the short "commit start"
         * phase blocks writers.
         */
        mutex_lock(&c->log_mutex);

        dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
                c->ltail_lnum, ltail_lnum);

        c->ltail_lnum = ltail_lnum;
        /*
         * The commit is finished and from now on it must be guaranteed that
         * there is always enough space for the next commit.
         */
        c->min_log_bytes = c->leb_size;

        spin_lock(&c->buds_lock);
        c->bud_bytes -= c->cmt_bud_bytes;
        spin_unlock(&c->buds_lock);

        err = dbg_check_bud_bytes(c);
        if (err)
                goto out;

        err = ubifs_write_master(c);

out:
        mutex_unlock(&c->log_mutex);
        return err;
}
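
/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after the commit is completed, because they must remain
 * unchanged if recovery is needed.
 *
 * Unmap log LEBs only after the commit is completed, because they may be
 * needed for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */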
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
        int lnum, err = 0;

        while (!list_empty(&c->old_buds)) {
                struct ubifs_bud *bud;

                bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
                err = ubifs_return_leb(c, bud->lnum);
                if (err)
                        return err;
                list_del(&bud->list);
                kfree(bud->log_hash);
                kfree(bud);
        }
        mutex_lock(&c->log_mutex);
        for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
             lnum = ubifs_next_log_lnum(c, lnum)) {
                dbg_log("unmap log LEB %d", lnum);
                err = ubifs_leb_unmap(c, lnum);
                if (err)
                        goto out;
        }
out:
        mutex_unlock(&c->log_mutex);
        return err;
}
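
/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */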
struct done_ref {
        struct rb_node rb;
        int lnum;
};
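
/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, and a
 * negative error code in case of failure.
 */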
static int done_already(struct rb_root *done_tree, int lnum)
{
        struct rb_node **p = &done_tree->rb_node, *parent = NULL;
        struct done_ref *dr;

        while (*p) {
                parent = *p;
                dr = rb_entry(parent, struct done_ref, rb);
                if (lnum < dr->lnum)
                        p = &(*p)->rb_left;
                else if (lnum > dr->lnum)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }

        dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
        if (!dr)
                return -ENOMEM;

        dr->lnum = lnum;

        rb_link_node(&dr->rb, parent, p);
        rb_insert_color(&dr->rb, done_tree);

        return 0;
}
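
/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */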
static void destroy_done_tree(struct rb_root *done_tree)
{
        struct done_ref *dr, *n;

        rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
                kfree(dr);
}
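
/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to which to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */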
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
                    void *node)
{
        struct ubifs_ch *ch = node;
        int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

        if (len > remains) {
                int sz = ALIGN(*offs, c->min_io_size), err;

                ubifs_pad(c, buf + *offs, sz - *offs);
                err = ubifs_leb_change(c, *lnum, buf, sz);
                if (err)
                        return err;
                *lnum = ubifs_next_log_lnum(c, *lnum);
                *offs = 0;
        }
        memcpy(buf + *offs, node, len);
        *offs += ALIGN(len, 8);
        return 0;
}
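
/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits can leave the log full, while at least one LEB is
 * needed for the next commit. This function rewrites the reference nodes in
 * the log, omitting duplicates and stale commit start nodes, and leaving no
 * gaps.
 *
 * This function returns %0 on success and a negative error code on failure.
 */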
int ubifs_consolidate_log(struct ubifs_info *c)
{
        struct ubifs_scan_leb *sleb;
        struct ubifs_scan_node *snod;
        struct rb_root done_tree = RB_ROOT;
        int lnum, err, first = 1, write_lnum, offs = 0;
        void *buf;

        dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
                  c->lhead_lnum);
        buf = vmalloc(c->leb_size);
        if (!buf)
                return -ENOMEM;
        lnum = c->ltail_lnum;
        write_lnum = lnum;
        while (1) {
                sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
                if (IS_ERR(sleb)) {
                        err = PTR_ERR(sleb);
                        goto out_free;
                }
                list_for_each_entry(snod, &sleb->nodes, list) {
                        switch (snod->type) {
                        case UBIFS_REF_NODE: {
                                struct ubifs_ref_node *ref = snod->node;
                                int ref_lnum = le32_to_cpu(ref->lnum);

                                err = done_already(&done_tree, ref_lnum);
                                if (err < 0)
                                        goto out_scan;
                                if (err != 1) {
                                        err = add_node(c, buf, &write_lnum,
                                                       &offs, snod->node);
                                        if (err)
                                                goto out_scan;
                                }
                                break;
                        }
                        case UBIFS_CS_NODE:
                                if (!first)
                                        break;
                                err = add_node(c, buf, &write_lnum, &offs,
                                               snod->node);
                                if (err)
                                        goto out_scan;
                                first = 0;
                                break;
                        }
                }
                ubifs_scan_destroy(sleb);
                if (lnum == c->lhead_lnum)
                        break;
                lnum = ubifs_next_log_lnum(c, lnum);
        }
        if (offs) {
                int sz = ALIGN(offs, c->min_io_size);

                ubifs_pad(c, buf + offs, sz - offs);
                err = ubifs_leb_change(c, write_lnum, buf, sz);
                if (err)
                        goto out_free;
                offs = ALIGN(offs, c->min_io_size);
        }
        destroy_done_tree(&done_tree);
        vfree(buf);
        if (write_lnum == c->lhead_lnum) {
                ubifs_err(c, "log is too full");
                return -EINVAL;
        }
        /* Unmap the remaining log LEBs */
        lnum = write_lnum;
        do {
                lnum = ubifs_next_log_lnum(c, lnum);
                err = ubifs_leb_unmap(c, lnum);
                if (err)
                        return err;
        } while (lnum != c->lhead_lnum);
        c->lhead_lnum = write_lnum;
        c->lhead_offs = offs;
        dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
        return 0;

out_scan:
        ubifs_scan_destroy(sleb);
out_free:
        destroy_done_tree(&done_tree);
        vfree(buf);
        return err;
}
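
/**
 * dbg_check_bud_bytes - make sure bud bytes calculation is all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL
 * in case of failure.
 */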
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
        int i, err = 0;
        struct ubifs_bud *bud;
        long long bud_bytes = 0;

        if (!dbg_is_chk_gen(c))
                return 0;

        spin_lock(&c->buds_lock);
        for (i = 0; i < c->jhead_cnt; i++)
                list_for_each_entry(bud, &c->jheads[i].buds_list, list)
                        bud_bytes += c->leb_size - bud->start;

        if (c->bud_bytes != bud_bytes) {
                ubifs_err(c, "bad bud_bytes %lld, calculated %lld",
                          c->bud_bytes, bud_bytes);
                err = -EINVAL;
        }
        spin_unlock(&c->buds_lock);

        return err;
}