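/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of UBIFS journal implementation and contains various
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any data but refers to buds. The log is a part of
 * the journal.
 */
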
#include "ubifs.h"

#ifdef CONFIG_UBIFS_FS_DEBUG
static int dbg_check_bud_bytes(struct ubifs_info *c);
#else
#define dbg_check_bud_bytes(c) 0
#endif

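/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in case
 * of success and %NULL if there is no bud with this LEB number.
 */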
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

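/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is not one.
 */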
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

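/**
 * next_log_lnum - switch to the next log LEB.
 * @c: UBIFS file-system description object
 * @lnum: current log LEB
 */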
static inline int next_log_lnum(const struct ubifs_info *c, int lnum)
{
	lnum += 1;
	if (lnum > c->log_last)
		lnum = UBIFS_LOG_LNUM;

	return lnum;
}

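/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */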
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h >= t)
		return c->log_bytes - h + t;
	else
		return t - h;
}

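/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */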
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && (c->vfs_sb->s_flags & MS_RDONLY));

	/*
	 * Note, although this is a new bud, we anyway account this space now,
	 * before any data has been written to it, because this is about to
	 * guarantee fixed mount time, and this bud will anyway be read and
	 * scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}

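/**
 * ubifs_create_buds_lists - create journal head buds lists for remount rw.
 * @c: UBIFS file-system description object
 */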
void ubifs_create_buds_lists(struct ubifs_info *c)
{
	struct rb_node *p;

	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct ubifs_bud *bud = rb_entry(p, struct ubifs_bud, rb);
		struct ubifs_jhead *jhead = &c->jheads[bud->jhead];

		list_add_tail(&bud->list, &jhead->buds_list);
		p = rb_next(p);
	}
	spin_unlock(&c->buds_lock);
}

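/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds it to the buds tree. It also makes sure that log size does not
 * exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if commit is required, and a negative error code in case of
 * failure.
 */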
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);

	if (c->ro_media) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time of
	 * about one second. This check pessimistically assumes that the whole
	 * rest of the bud LEB ('c->leb_size - offs') will be used.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers an empty LEB
		 * to the log, we have to make sure it is mapped, because
		 * otherwise we'd risk to refer an LEB with garbage in case of
		 * an unclean reboot, because the target LEB might have been
		 * unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs, UBI_SHORTTERM);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	if (err != -EAGAIN)
		ubifs_ro_mode(c, err);
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}

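/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds, as well as the buds which are going to be
 * fully used, from the 'c->buds' tree and moves them to the 'c->old_buds'
 * list, from where they are released once the commit has completed.
 */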
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), wbuf->offs - bud->start,
				c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), c->leb_size - bud->start,
				c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will
			 * need to replay the journal, in which case the old
			 * buds must be unchanged. Do not release them until
			 * the commit is finished - see
			 * 'ubifs_log_post_commit()'.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}

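/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing "commit start" node to the log and
 * reference nodes for all journal heads which will define new journal after
 * the commit has been finished. The commit start and reference nodes are
 * written in one go to the nearest empty log LEB (hence, when commit is
 * finished UBIFS may safely unmap all the previous log LEBs). This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */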
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */

	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len, UBI_SHORTTERM);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}

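/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation was finished. It moves
 * the log tail to its new position and releases the space in the buds which
 * was freed during the commit. Returns zero in case of success and a
 * negative error code in case of failure.
 */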
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. It is only the short "commit start" phase
	 * during which nobody can write to the file-system.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);

	mutex_unlock(&c->log_mutex);
	return err;
}

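/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after commit is completed, because they must be unchanged
 * if recovery is needed.
 *
 * Unmap log LEBs only after commit is completed, because they may be needed
 * for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */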
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}

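/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */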
struct done_ref {
	struct rb_node rb;
	int lnum;
};

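/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not,
 * otherwise a negative error code is returned.
 */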
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}

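/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */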
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct rb_node *this = done_tree->rb_node;
	struct done_ref *dr;

	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		dr = rb_entry(this, struct done_ref, rb);
		this = rb_parent(this);
		if (this) {
			if (this->rb_left == &dr->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(dr);
	}
}

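/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */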
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			return err;
		*lnum = next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}

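/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could cause the log to be full, but at least 1 LEB
 * is needed for commit. This function rewrites the reference nodes in the log
 * omitting duplicates, and failed CS nodes, and leaving no gaps.
 *
 * This function returns %0 on success and a negative error code on failure.
 */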
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = next_log_lnum(c, lnum);
	}
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err("log is too full");
		return -EINVAL;
	}
	/* Unmap remaining LEBs */
	lnum = write_lnum;
	do {
		lnum = next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}

#ifdef CONFIG_UBIFS_FS_DEBUG

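/**
 * dbg_check_bud_bytes - make sure bud bytes calculation are all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL
 * in case of failure.
 */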
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	spin_lock(&c->buds_lock);
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err("bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}

#endif