/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/* This file implements TNC functions for committing */

#include <linux/random.h>
#include "ubifs.h"

/**
 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place new index node
 * @znode: znode from which to make new index node
 * @lnum: LEB number where new index node will be written
 * @offs: offset where new index node will be written
 * @len: length of new index node
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	u8 hash[UBIFS_HASH_ARR_SZ];
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
		if (!zbr->lnum || !zbr->len) {
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);

			return -EINVAL;
		}
	}
	ubifs_prepare_node(c, idx, len, 0);
	ubifs_node_calc_hash(c, idx, hash);

	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

	err = insert_old_idx_znode(c, znode);

	/* Update the parent */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
		ubifs_copy_hash(c, hash, zbr->hash);
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
		ubifs_copy_hash(c, hash, c->zroot.hash);
	}
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(c, ubifs_zn_dirty(znode));
	ubifs_assert(c, ubifs_zn_cow(znode));

	/*
	 * Note, unlike 'write_index()' we do not add memory barriers here
	 * because this function is called with @c->tnc_mutex locked.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}
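
/*
 * For illustration only: the on-flash layout built above is the standard
 * UBIFS index node,
 *
 *	| ubifs_ch | child_cnt | level | branch 0 | branch 1 | ... |
 *
 * where each branch carries a key plus the child's LEB number, offset and
 * length (and, with authentication enabled, the child's hash).
 */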

/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap appears in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adds dirty space to this
 *
 * This function returns the number of index nodes written into the gap.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	ubifs_assert(c, (gap_start & 7) == 0);
	ubifs_assert(c, (gap_end & 7) == 0);
	ubifs_assert(c, gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(c, alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
	if (gap_end == c->leb_size) {
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}
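
/*
 * Illustrative example (the node size is made up; real sizes depend on the
 * key format and fanout): for a gap spanning bytes [1024, 2048) and 160-byte
 * index nodes, 'fill_gap()' places nodes at offsets 1024, 1184, 1344, ...
 * while a whole node still fits, then pads whatever remains and accounts the
 * padding as dirt.
 */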

/**
 * find_old_idx - find an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Returns %1 if found and %0 otherwise.
 */
static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *o;
	struct rb_node *p;

	p = c->old_idx.rb_node;
	while (p) {
		o = rb_entry(p, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = p->rb_left;
		else if (lnum > o->lnum)
			p = p->rb_right;
		else if (offs < o->offs)
			p = p->rb_left;
		else if (offs > o->offs)
			p = p->rb_right;
		else
			return 1;
	}
	return 0;
}
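
/*
 * The @c->old_idx RB-tree is keyed lexicographically by (lnum, offs), which
 * is why the lookup above compares LEB numbers first and offsets second.
 */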

/**
 * is_idx_node_in_use - determine if an index node is in use.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * If @key / @lnum / @offs identify an index node that was not part of the old
 * index, then this function returns %0 (obsolete).  Else if the index node was
 * part of the old index but is now dirty %1 is returned, else if it is clean
 * %2 is returned. A negative error code is returned on failure.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int ret;

	ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
	if (ret < 0)
		return ret; /* Error code */
	if (ret == 0)
		if (find_old_idx(c, lnum, offs))
			return 1;
	return ret;
}

/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number here
 *
 * This function lays out new index nodes for dirty znodes using the
 * in-the-gaps method of TNC commit.  It merely puts the next znode into the
 * next gap, making no attempt to maximise the number of znodes that fit.
 *
 * This function returns the number of index nodes written into the gaps, or a
 * negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/*
		 * There also may be dirt in the index head that could be
		 * filled, however we do not check there at present.
		 */
		return lnum; /* Error code */
	*p = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB.  We use the generic scan routine because we know
	 * the LEB contains only index nodes, and the gaps between them can be
	 * derived from the scanned nodes' positions and lengths.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
			return in_use; /* Error code */
		}
		if (in_use) {
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The obsolete index nodes form gaps that can be
			 * overwritten.  This gap has ended because we have
			 * found an index node that is still in use
			 * i.e. not obsolete
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
				return written; /* Error code */
			}
			tot_written += written;
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill gap */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0)
		return written; /* Error code */
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * We must have snatched this LEB from the idx_gc list
			 * so we need to update the free and dirty space.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err)
		return err;
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}
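
/*
 * 'ubifs_leb_change()' above rewrites the LEB with an atomic LEB change, so
 * after a power cut either the old contents or the fully written new contents
 * are seen, never a half-written mixture.  That is what makes overwriting an
 * index LEB in-place safe.
 */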

/**
 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns the number of empty LEBs needed to commit @cnt znodes
 * to the current index head.  The number is not exact and may be more than
 * needed.
 */
static int get_leb_cnt(struct ubifs_info *c, int cnt)
{
	int d;

	/* Assume maximum index node size (i.e. overestimate space needed) */
	cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
	if (cnt < 0)
		cnt = 0;
	d = c->leb_size / c->max_idx_node_sz;
	return DIV_ROUND_UP(cnt, d);
}
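
/*
 * Worked example with illustrative geometry: for a 128 KiB LEB, an index head
 * at offset 4096 and a 512-byte maximum index node size, committing 1000
 * znodes first discounts the (131072 - 4096) / 512 = 248 nodes that fit in
 * the current index head, then needs DIV_ROUND_UP(752, 256) = 3 empty LEBs.
 */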

/**
 * layout_in_gaps - in-the-gaps method of committing TNC.
 * @c: UBIFS file-system description object
 * @cnt: number of dirty znodes to commit
 *
 * This function lays out new index nodes for dirty znodes using the
 * in-the-gaps method of TNC commit.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
	int err, leb_needed_cnt, written, *p;

	dbg_gc("%d znodes to write", cnt);

	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
				    GFP_NOFS);
	if (!c->gap_lebs)
		return -ENOMEM;

	p = c->gap_lebs;
	do {
		ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
		written = layout_leb_in_gaps(c, p);
		if (written < 0) {
			err = written;
			if (err != -ENOSPC) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return err;
			}
			if (!dbg_is_chk_index(c)) {
				/*
				 * Do not print scary warnings if the debugging
				 * option which forces in-the-gaps is enabled.
				 */
				ubifs_warn(c, "out of space");
				ubifs_dump_budg(c, &c->bi);
				ubifs_dump_lprops(c);
			}
			/* Try to commit anyway */
			break;
		}
		p++;
		cnt -= written;
		leb_needed_cnt = get_leb_cnt(c, cnt);
		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
		       leb_needed_cnt, c->ileb_cnt);
	} while (leb_needed_cnt > c->ileb_cnt);

	*p = -1;
	return 0;
}

/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * This function lays out new index nodes for dirty znodes using empty LEBs.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1;

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode count
		 * but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}
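
/*
 * Note that 'layout_in_empty_space()' only assigns positions - nothing is
 * written out here.  'write_index()' later repeats the same traversal and
 * buffer arithmetic, which is why it can verify each znode's recorded
 * (lnum, offs, len) and the new index head position against its own values.
 */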

/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Calculate and update the positions of index nodes to commit.  If an
 * insufficient number of empty LEBs were allocated, then index nodes are
 * placed into the gaps created by obsolete index nodes in non-empty index
 * LEBs.  For this purpose, an obsolete index node is one that was not in the
 * index as at the end of the last commit.  To write "in-the-gaps" requires
 * that those index LEBs are updated atomically in-place.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	int err;

	if (no_space) {
		err = layout_in_gaps(c, cnt);
		if (err)
			return err;
	}
	err = layout_in_empty_space(c);
	return err;
}

/**
 * find_first_dirty - find first dirty znode.
 * @znode: znode to begin searching from
 */
static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
{
	int i, cont;

	if (!znode)
		return NULL;

	while (1) {
		if (znode->level == 0) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
		cont = 0;
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
				znode = zbr->znode;
				cont = 1;
				break;
			}
		}
		if (!cont) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
	}
}
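
/*
 * Together, 'find_first_dirty()' and 'find_next_dirty()' (below) enumerate
 * the dirty part of the TNC in post-order: a znode is always visited after
 * all of its dirty children, so parents can record their children's new
 * on-flash positions.
 */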

/**
 * find_next_dirty - find next dirty znode.
 * @znode: znode to begin searching from
 */
static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
{
	int n = znode->iip + 1;

	znode = znode->parent;
	if (!znode)
		return NULL;
	for (; n < znode->child_cnt; n++) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		if (zbr->znode && ubifs_zn_dirty(zbr->znode))
			return find_first_dirty(zbr->znode);
	}
	return znode;
}

/**
 * get_znodes_to_commit - create list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of znodes to commit.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
	while (1) {
		ubifs_assert(c, !ubifs_zn_cow(znode));
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
		cnext = find_next_dirty(znode);
		if (!cnext) {
			znode->cnext = c->cnext;
			break;
		}
		znode->cparent = znode->parent;
		znode->ciip = znode->iip;
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}
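
/*
 * Once a znode is on the commit list its %COW_ZNODE flag is set, so TNC
 * updates that run concurrently with the commit copy such znodes on write
 * (see 'dirty_cow_znode()') instead of modifying them in place.
 */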

/**
 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
 * empty LEBs.  %0 is returned on success, otherwise a negative error code
 * is returned.
 */
static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
{
	int i, leb_cnt, lnum;

	c->ileb_cnt = 0;
	c->ileb_nxt = 0;
	leb_cnt = get_leb_cnt(c, cnt);
	dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
	if (!leb_cnt)
		return 0;
	c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
	if (!c->ilebs)
		return -ENOMEM;
	for (i = 0; i < leb_cnt; i++) {
		lnum = ubifs_find_free_leb_for_idx(c);
		if (lnum < 0)
			return lnum;
		c->ilebs[c->ileb_cnt++] = lnum;
		dbg_cmt("LEB %d", lnum);
	}
	if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
		return -ENOSPC;
	return 0;
}
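
/*
 * The 'dbg_is_chk_index()' check above deliberately fails the allocation with
 * probability 1/8 (whenever the low three bits of 'prandom_u32()' are zero),
 * so the in-the-gaps commit path gets exercised when index debugging is on.
 */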

/**
 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
 * @c: UBIFS file-system description object
 *
 * It is possible that we allocate more empty LEBs for the commit than we
 * need.  This function frees the surplus.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_unused_idx_lebs(struct ubifs_info *c)
{
	int i, err = 0, lnum, er;

	for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
		lnum = c->ilebs[i];
		dbg_cmt("LEB %d", lnum);
		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
					 LPROPS_INDEX | LPROPS_TAKEN, 0);
		if (!err)
			err = er;
	}
	return err;
}

/**
 * free_idx_lebs - free unused LEBs after commit end.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_idx_lebs(struct ubifs_info *c)
{
	int err;

	err = free_unused_idx_lebs(c);
	kfree(c->ilebs);
	c->ilebs = NULL;
	return err;
}

/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * This function prepares the list of indexing nodes to commit and lays out
 * their positions on flash.  If there is not enough free space it uses the
 * in-the-gaps commit method.  Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1;
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although we have not finished committing yet, update size of the
	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
	 * budget.  It is OK to do this now, because we've reserved all the
	 * space which is needed to commit the index, and it is safe for the
	 * budgeting subsystem to assume the index is already committed,
	 * even though it is not.
	 */
	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * write_index - write index nodes.
 * @c: UBIFS file-system description object
 *
 * This function writes the index nodes whose positions were laid out in the
 * 'layout_in_empty_space()' function.
 */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head so that index nodes and
	 * other types of nodes are never mixed in the same erase block.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Allocate commit buffer */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1;
	}

	while (1) {
		u8 hash[UBIFS_HASH_ARR_SZ];

		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
			if (!zbr->lnum || !zbr->len) {
				ubifs_err(c, "bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);

				return -EINVAL;
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);
		ubifs_node_calc_hash(c, idx, hash);

		mutex_lock(&c->tnc_mutex);

		if (znode->cparent)
			ubifs_copy_hash(c, hash,
					znode->cparent->zbranch[znode->ciip].hash);

		if (znode->parent) {
			if (!ubifs_zn_obsolete(znode))
				ubifs_copy_hash(c, hash,
					znode->parent->zbranch[znode->iip].hash);
		} else {
			ubifs_copy_hash(c, hash, c->zroot.hash);
		}

		mutex_unlock(&c->tnc_mutex);

		/* Determine the index node position */
		if (lnum == -1) {
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err(c, "inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(c, ubifs_zn_dirty(znode));
		ubifs_assert(c, ubifs_zn_cow(znode));

		/*
		 * It is important that other threads should see %DIRTY_ZNODE
		 * flag cleared before %COW_ZNODE.  Specifically, it matters in
		 * the 'dirty_cow_znode()' function.  This is the reason for the
		 * first barrier.  The second barrier ensures the flag updates
		 * are visible before any subsequent writes.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/*
		 * We have marked the znode as clean but have not updated the
		 * @c->clean_zn_cnt counter.  If this znode becomes dirty again
		 * before 'free_obsolete_znodes()' is called, then
		 * @c->clean_zn_cnt will be decremented before it gets
		 * incremented (resulting in 2 decrements for the same znode).
		 * This means that @c->clean_zn_cnt may become negative for a
		 * while.
		 *
		 * Q: why can we not increment @c->clean_zn_cnt here?
		 * A: because we do not have the @c->tnc_mutex locked, and the
		 *    following code would be racy and buggy:
		 *
		 *    if (!ubifs_zn_obsolete(znode)) {
		 *            atomic_long_inc(&c->clean_zn_cnt);
		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
		 *    }
		 *
		 *    Thus, we just delay the @c->clean_zn_cnt update until we
		 *    have the mutex locked.
		 */

		/* Do not access znode from this point on */

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			if (avail > 0)
				continue;
			else
				blen = buf_len;
		} else {
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err(c, "inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}

/**
 * free_obsolete_znodes - free obsolete znodes.
 * @c: UBIFS file-system description object
 *
 * At the end of the commit, obsolete znodes are freed.
 */
static void free_obsolete_znodes(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;

	cnext = c->cnext;
	do {
		znode = cnext;
		cnext = znode->cnext;
		if (ubifs_zn_obsolete(znode))
			kfree(znode);
		else {
			znode->cnext = NULL;
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		}
	} while (cnext != c->cnext);
}

/**
 * return_gap_lebs - return LEBs used by the in-the-gaps method.
 * @c: UBIFS file-system description object
 *
 * This function clears the "taken" flag for the LEBs which were used by the
 * in-the-gaps method.
 */
static int return_gap_lebs(struct ubifs_info *c)
{
	int *p, err;

	if (!c->gap_lebs)
		return 0;

	dbg_cmt("");
	for (p = c->gap_lebs; *p != -1; p++) {
		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
					  LPROPS_TAKEN, 0);
		if (err)
			return err;
	}

	kfree(c->gap_lebs);
	c->gap_lebs = NULL;
	return 0;
}

/**
 * ubifs_tnc_end_commit - update the TNC for commit end.
 * @c: UBIFS file-system description object
 *
 * Write the dirty znodes.
 */
int ubifs_tnc_end_commit(struct ubifs_info *c)
{
	int err;

	if (!c->cnext)
		return 0;

	err = return_gap_lebs(c);
	if (err)
		return err;

	err = write_index(c);
	if (err)
		return err;

	mutex_lock(&c->tnc_mutex);

	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);

	free_obsolete_znodes(c);

	c->cnext = NULL;
	kfree(c->ilebs);
	c->ilebs = NULL;

	mutex_unlock(&c->tnc_mutex);

	return 0;
}