/*
 * This file is a part of the UBIFS journal implementation and contains
 * various functions which manipulate the log. The log is a fixed area on
 * the flash which does not contain any data but refers to buds. The log is
 * a part of the journal.
 */

#include "ubifs.h"

#ifdef CONFIG_UBIFS_FS_DEBUG
static int dbg_check_bud_bytes(struct ubifs_info *c);
#else
#define dbg_check_bud_bytes(c) 0
#endif

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns the bud LEB descriptor in
 * case of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the write-buffer for LEB @lnum, or %NULL if there
 * is none.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h >= t)
		return c->log_bytes - h + t;
	else
		return t - h;
}
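
/*
 * A worked example of the arithmetic above (illustrative numbers only, not
 * taken from any real geometry): with a 64KiB LEB and a 4-LEB log
 * (c->log_bytes == 256KiB), a head at LEB 3 offset 8KiB gives h == 200KiB.
 * If the tail is at LEB 5 (t == 320KiB), then h < t and the empty space is
 * t - h == 120KiB. If the head has wrapped past the tail, say h == 384KiB
 * with t == 320KiB, the empty space is c->log_bytes - h + t == 192KiB,
 * i.e. everything except the h - t == 64KiB in use between the tail and
 * the head.
 */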

/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud and no data has been written to it
	 * yet, we account its space straight away, because this space will
	 * anyway have to be read and scanned at mount time, and we want to
	 * guarantee a bounded mount time.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the
 * log, and adds the bud to the tree of buds. It also makes sure that the
 * total amount of bud space does not exceed the 'c->max_bud_bytes' limit.
 * Returns zero in case of success, %-EAGAIN if a commit is required, and a
 * negative error code in case of failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee a bounded
	 * mount time.
	 *
	 * It is not necessary to hold @c->buds_lock when reading
	 * @c->bud_bytes here, because all changes to @c->bud_bytes take
	 * place while both @c->log_mutex and @c->buds_lock are held, and we
	 * are holding @c->log_mutex.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough, start a background commit. Note, it
	 * is OK to read 'c->cmt_state' without a spinlock because integer
	 * reads are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing a reference node which refers to an empty
		 * LEB, make sure that LEB is mapped. Otherwise, in case of an
		 * unclean reboot, the reference could point to an LEB
		 * containing garbage, because the target LEB might have been
		 * unmapped but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs, UBI_SHORTTERM);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}
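
/*
 * A note on the %-EAGAIN contract above (a sketch of how callers are
 * expected to react, not a definitive reference): the journal code which
 * reserves space treats %-EAGAIN from ubifs_add_bud_to_log() as "run a
 * commit, then retry", since both failure cases are resolved by committing:
 * a commit empties the log and shrinks the total amount of bud space.
 */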

/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes all committed buds from the tree of buds and moves
 * them to the list of old buds. Buds which are pointed to by journal heads
 * are not removed; their start offset is moved up to the current
 * write-buffer offset instead.
 */
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), wbuf->offs - bud->start,
				c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
				"cmt_bud_bytes %lld", bud->lnum, bud->start,
				dbg_jhead(bud->jhead), c->leb_size - bud->start,
				c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, recovery will need
			 * to replay the journal, in which case the old buds
			 * will be needed. So do not free them here; move
			 * them to the list of old buds instead.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing a "commit start" node to the
 * log, along with reference nodes for all journal heads, which define the
 * new journal after the commit has finished. The commit start and reference
 * nodes are written in one go to the nearest empty log LEB (hence, when the
 * commit is finished, UBIFS may safely unmap all the previous log LEBs).
 * This function returns zero in case of success and a negative error code
 * in case of failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit
	 * start phase and we are exclusively using the log. And we do not
	 * lock the write-buffers because nobody can write to the file-system
	 * during this phase.
	 */
	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len, UBI_SHORTTERM);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit, and now users may use the rest of the
	 * log for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}
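
/*
 * Illustrative layout of the commit start write above (hypothetical
 * numbers, for orientation only): with two journal heads and a 2048-byte
 * min_io_size, the buffer holds the commit start node (UBIFS_CS_NODE_SZ
 * bytes), followed by up to two reference nodes (UBIFS_REF_NODE_SZ bytes
 * each), followed by padding up to the next min_io_size boundary. The whole
 * buffer goes to offset 0 of an empty log LEB in a single ubifs_leb_write()
 * call, so the new log tail is self-contained.
 */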

/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves
 * the log tail to the new position and releases the log space which was
 * occupied by the buds that have just been committed. Returns zero in case
 * of success and a negative error code in case of failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows
	 * FS writes during commit. Writers are blocked only during the short
	 * "commit start" phase.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished, and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);

	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after the commit is completed, because they must be
 * unchanged if recovery is needed.
 *
 * Unmap log LEBs only after the commit is completed, because they may be
 * needed for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}
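
/*
 * Note on ordering, as used by the commit path (sketched here for
 * orientation rather than as a definitive contract): ubifs_log_start_commit()
 * is called first, with writers blocked; after the commit itself and the new
 * master node have been written, ubifs_log_end_commit() publishes the new
 * log tail; finally, ubifs_log_post_commit() returns the old bud LEBs and
 * unmaps the old log LEBs, which must not happen earlier because recovery
 * may still need them.
 */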

/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */
struct done_ref {
	struct rb_node rb;
	int lnum;
};

/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, and
 * a negative error code in case of failure.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}

/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct rb_node *this = done_tree->rb_node;
	struct done_ref *dr;

	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		dr = rb_entry(this, struct done_ref, rb);
		this = rb_parent(this);
		if (this) {
			if (this->rb_left == &dr->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(dr);
	}
}

/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}
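
/*
 * For orientation, an illustrative (made-up) packing sequence: with a
 * 128-byte min_io_size, adding a node of length 100 at *offs == 0 copies it
 * to offset 0 and advances *offs to ALIGN(100, 8) == 104; a further node of
 * length 60 still fits and lands at offset 104. Only when a node does not
 * fit in the remainder of the LEB is the buffer padded up to the
 * min_io_size boundary, written out with ubifs_leb_change(), and packing
 * restarted at offset 0 of the next log LEB.
 */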

/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could cause the log to be full, but at least 1
 * LEB is needed for commit, so this function rewrites the reference nodes
 * in the log, keeping only the first commit start node and one reference
 * node per referenced LEB, and frees the log LEBs which then contain only
 * obsolete nodes.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz, UBI_SHORTTERM);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err("log is too full");
		return -EINVAL;
	}
	/* Unmap remaining LEBs */
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}

#ifdef CONFIG_UBIFS_FS_DEBUG

/**
 * dbg_check_bud_bytes - make sure bud bytes calculations are all right.
 * @c: UBIFS file-system description object
 *
 * This function makes sure that the recorded amount of flash space used by
 * buds ('c->bud_bytes') is correct. Returns zero in case of success and
 * %-EINVAL in case of failure.
 */
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!dbg_is_chk_gen(c))
		return 0;

	spin_lock(&c->buds_lock);
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err("bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}

#endif /* CONFIG_UBIFS_FS_DEBUG */