1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/random.h>
26#include "ubifs.h"
27
28
29
30
31
32
33
34
35
36
/**
 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place index node
 * @znode: znode from which to make index node
 * @lnum: LEB number where index node is to be written
 * @offs: offset where index node is to be written
 * @len: length of index node
 *
 * Serializes @znode into @idx, records the new on-flash position in the
 * znode and its parent zbranch (or the zroot), and clears the znode's
 * dirty/COW state.  Returns the result of insert_old_idx_znode(), i.e. %0
 * on success and a negative error code on failure.
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		if (!zbr->lnum || !zbr->len) {
			/*
			 * A branch referring to LEB 0 or with zero length is
			 * corrupt - dump the znodes to aid debugging, but
			 * carry on writing (no error is returned here).
			 */
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);
		}
	}
	ubifs_prepare_node(c, idx, len, 0);

	/* Remember where this znode now lives on the media */
	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

	err = insert_old_idx_znode(c, znode);

	/* Update the parent's reference to this znode (or the index root) */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
	}
	/* Account the index size with 8-byte alignment, as stored on flash */
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(ubifs_zn_dirty(znode));
	ubifs_assert(ubifs_zn_cow(znode));

	/*
	 * NOTE(review): the non-atomic __clear_bit() is used here, unlike the
	 * clear_bit() + barriers in write_index() - presumably safe because
	 * the gaps method runs under commit exclusion; confirm before reuse.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}
100
101
102
103
104
105
106
107
108
109
110
/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap lies in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adder in which to return the number of bytes padded in the gap
 *
 * Writes as many pending index nodes (from @c->enext) as fit into the gap
 * [@gap_start, @gap_end) of @c->ileb_buf, then pads the remainder.  Returns
 * the number of index nodes written, or a negative error code on failure.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	/* Gaps are always 8-byte aligned (on-flash node alignment) */
	ubifs_assert((gap_start & 7) == 0);
	ubifs_assert((gap_end & 7) == 0);
	ubifs_assert(gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			/* Advance to the next znode to be committed */
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL; /* list wrapped - all done */
			written += 1;
		} else
			break; /* next index node does not fit in this gap */
	}
	if (gap_end == c->leb_size) {
		/*
		 * Gap runs to the end of the LEB: only pad up to the minimum
		 * I/O unit, and shorten the amount of the LEB used.
		 */
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad the whole remainder of the gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}
159
160
161
162
163
164
165
166
167
168static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
169{
170 struct ubifs_old_idx *o;
171 struct rb_node *p;
172
173 p = c->old_idx.rb_node;
174 while (p) {
175 o = rb_entry(p, struct ubifs_old_idx, rb);
176 if (lnum < o->lnum)
177 p = p->rb_left;
178 else if (lnum > o->lnum)
179 p = p->rb_right;
180 else if (offs < o->offs)
181 p = p->rb_left;
182 else if (offs > o->offs)
183 p = p->rb_right;
184 else
185 return 1;
186 }
187 return 0;
188}
189
190
191
192
193
194
195
196
197
198
199
200
201
202
/**
 * is_idx_node_in_use - determine if an index node is in use.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * Checks the TNC first, then falls back to the old index tree.  Returns a
 * negative error code on failure, %0 if the node is unused, and a positive
 * value (from the TNC check, or %1 from the old index) if it is in use.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int in_tnc = is_idx_node_in_tnc(c, key, level, lnum, offs);

	if (in_tnc < 0)
		return in_tnc; /* Error code */
	if (in_tnc == 0 && find_old_idx(c, lnum, offs))
		return 1;
	return in_tnc;
}
216
217
218
219
220
221
222
223
224
225
226
227
228
/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number here
 *
 * Finds a dirty index LEB, scans it, and fills the gaps between its in-use
 * index nodes with pending index nodes from @c->enext.  Returns the number
 * of index nodes written into gaps, or a negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/* propagate error (including -ENOSPC: no dirty LEB found) */
		return lnum;
	*p = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB.  The scan buffer is ileb_buf, which is also the
	 * buffer the new nodes are assembled in - in-use nodes stay where
	 * they are and only the gaps between them are (re)filled.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
			return in_use;
		}
		if (in_use) {
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The node is kept; the gap ends at its offset.
			 * NOTE(review): dirt is accounted only for in_use==1;
			 * presumably other positive values come from the old
			 * index - confirm against is_idx_node_in_tnc().
			 */
			gap_end = snod->offs;
			/* Try to fill gap with pending index nodes */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
				return written;	/* Error code */
			}
			tot_written += written;
			/* Next gap begins after this in-use node (8-aligned) */
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill the last gap, up to the end of the LEB */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0)
		return written;
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * The LEB was added to the jhead because it was
			 * empty, but nothing was written - release it by
			 * updating its properties anyway.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err)
		return err;
	/* Atomically rewrite the LEB with the newly filled gaps */
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}
329
330
331
332
333
334
335
336
337
338
339static int get_leb_cnt(struct ubifs_info *c, int cnt)
340{
341 int d;
342
343
344 cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
345 if (cnt < 0)
346 cnt = 0;
347 d = c->leb_size / c->max_idx_node_sz;
348 return DIV_ROUND_UP(cnt, d);
349}
350
351
352
353
354
355
356
357
358
359
360
361static int layout_in_gaps(struct ubifs_info *c, int cnt)
362{
363 int err, leb_needed_cnt, written, *p;
364
365 dbg_gc("%d znodes to write", cnt);
366
367 c->gap_lebs = kmalloc(sizeof(int) * (c->lst.idx_lebs + 1), GFP_NOFS);
368 if (!c->gap_lebs)
369 return -ENOMEM;
370
371 p = c->gap_lebs;
372 do {
373 ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
374 written = layout_leb_in_gaps(c, p);
375 if (written < 0) {
376 err = written;
377 if (err != -ENOSPC) {
378 kfree(c->gap_lebs);
379 c->gap_lebs = NULL;
380 return err;
381 }
382 if (!dbg_is_chk_index(c)) {
383
384
385
386
387 ubifs_warn(c, "out of space");
388 ubifs_dump_budg(c, &c->bi);
389 ubifs_dump_lprops(c);
390 }
391
392 break;
393 }
394 p++;
395 cnt -= written;
396 leb_needed_cnt = get_leb_cnt(c, cnt);
397 dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
398 leb_needed_cnt, c->ileb_cnt);
399 } while (leb_needed_cnt > c->ileb_cnt);
400
401 *p = -1;
402 return 0;
403}
404
405
406
407
408
409
410
411
412
/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * Assigns an on-flash position (LEB, offset, length) to every znode in the
 * commit list starting at @c->enext, simulating the write pattern that
 * write_index() will later perform, and updates lprops accordingly.  The
 * nodes themselves are not written here.  Returns %0 on success and a
 * negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/* Start at the index head position */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Simulated write-buffer size: one max-fanout index node, rounded
	 * up to the minimum I/O unit */
	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1; /* index head LEB is full - need a fresh LEB */

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			/* Take the next pre-allocated empty index LEB */
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		/* Record the position this znode will be written at */
		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent (or the index root) to point here */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode
		 * count but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/* Peek at the next znode to size the next write */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0; /* list wrapped - this is the last one */
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/* Next node fits in the same simulated buffer - keep going */
		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len; /* full buffer would be flushed */
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				/* LEB exhausted: account free/dirty space */
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			/* Carry over any bytes not yet "flushed" */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	/* Record the expected new index head for write_index() to verify */
	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}
538
539
540
541
542
543
544
545
546
547
548
549
550
551
/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * If there were not enough empty LEBs, znodes are first squeezed into the
 * gaps of dirty index LEBs; the remainder is laid out in empty space.
 * Returns %0 on success and a negative error code on failure.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	if (no_space) {
		int err = layout_in_gaps(c, cnt);

		if (err)
			return err;
	}
	return layout_in_empty_space(c);
}
564
565
566
567
568
569static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
570{
571 int i, cont;
572
573 if (!znode)
574 return NULL;
575
576 while (1) {
577 if (znode->level == 0) {
578 if (ubifs_zn_dirty(znode))
579 return znode;
580 return NULL;
581 }
582 cont = 0;
583 for (i = 0; i < znode->child_cnt; i++) {
584 struct ubifs_zbranch *zbr = &znode->zbranch[i];
585
586 if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
587 znode = zbr->znode;
588 cont = 1;
589 break;
590 }
591 }
592 if (!cont) {
593 if (ubifs_zn_dirty(znode))
594 return znode;
595 return NULL;
596 }
597 }
598}
599
600
601
602
603
604static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
605{
606 int n = znode->iip + 1;
607
608 znode = znode->parent;
609 if (!znode)
610 return NULL;
611 for (; n < znode->child_cnt; n++) {
612 struct ubifs_zbranch *zbr = &znode->zbranch[n];
613
614 if (zbr->znode && ubifs_zn_dirty(zbr->znode))
615 return find_first_dirty(zbr->znode);
616 }
617 return znode;
618}
619
620
621
622
623
624
625
626static int get_znodes_to_commit(struct ubifs_info *c)
627{
628 struct ubifs_znode *znode, *cnext;
629 int cnt = 0;
630
631 c->cnext = find_first_dirty(c->zroot.znode);
632 znode = c->enext = c->cnext;
633 if (!znode) {
634 dbg_cmt("no znodes to commit");
635 return 0;
636 }
637 cnt += 1;
638 while (1) {
639 ubifs_assert(!ubifs_zn_cow(znode));
640 __set_bit(COW_ZNODE, &znode->flags);
641 znode->alt = 0;
642 cnext = find_next_dirty(znode);
643 if (!cnext) {
644 znode->cnext = c->cnext;
645 break;
646 }
647 znode->cnext = cnext;
648 znode = cnext;
649 cnt += 1;
650 }
651 dbg_cmt("committing %d znodes", cnt);
652 ubifs_assert(cnt == atomic_long_read(&c->dirty_zn_cnt));
653 return cnt;
654}
655
656
657
658
659
660
661
662
663
664
665static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
666{
667 int i, leb_cnt, lnum;
668
669 c->ileb_cnt = 0;
670 c->ileb_nxt = 0;
671 leb_cnt = get_leb_cnt(c, cnt);
672 dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
673 if (!leb_cnt)
674 return 0;
675 c->ilebs = kmalloc(leb_cnt * sizeof(int), GFP_NOFS);
676 if (!c->ilebs)
677 return -ENOMEM;
678 for (i = 0; i < leb_cnt; i++) {
679 lnum = ubifs_find_free_leb_for_idx(c);
680 if (lnum < 0)
681 return lnum;
682 c->ilebs[c->ileb_cnt++] = lnum;
683 dbg_cmt("LEB %d", lnum);
684 }
685 if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
686 return -ENOSPC;
687 return 0;
688}
689
690
691
692
693
694
695
696
697
698
699static int free_unused_idx_lebs(struct ubifs_info *c)
700{
701 int i, err = 0, lnum, er;
702
703 for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
704 lnum = c->ilebs[i];
705 dbg_cmt("LEB %d", lnum);
706 er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
707 LPROPS_INDEX | LPROPS_TAKEN, 0);
708 if (!err)
709 err = er;
710 }
711 return err;
712}
713
714
715
716
717
718
719
720static int free_idx_lebs(struct ubifs_info *c)
721{
722 int err;
723
724 err = free_unused_idx_lebs(c);
725 kfree(c->ilebs);
726 c->ilebs = NULL;
727 return err;
728}
729
730
731
732
733
734
735
736
737
738
739
/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * Collects the dirty znodes, allocates empty LEBs (falling back to the
 * in-the-gaps method on -ENOSPC), lays out the new index, and updates the
 * budgeting state.  Returns %0 on success and a negative error code on
 * failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1; /* fall back to in-the-gaps method */
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		/* Every dirty znode must now have a commit position */
		ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	/* Hand the new index root position back to the caller */
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although we have not finished committing yet, update size of the
	 * committed index ('old_idx_sz') and zero out the index growth budget
	 * ('uncommitted_idx') here: from the budgeting point of view the new
	 * index size (calc_idx_sz) is the committed one from now on.
	 */
	ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
798
799
800
801
802
803
804
805
/**
 * write_index - write index nodes.
 * @c: UBIFS file-system description object
 *
 * Serializes and writes out every znode on the commit list, replicating the
 * positions that layout_in_empty_space() assigned (any mismatch is reported
 * as -EINVAL).  Uses @c->cbuf as a write buffer.  Returns %0 on success and
 * a negative error code on failure.
 */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head so that index nodes and
	 * other types of nodes are never mixed in the same erase block.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Allocate commit buffer */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		/* Index head LEB is full - finalize its lprops and switch */
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1;
	}

	while (1) {
		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			if (!zbr->lnum || !zbr->len) {
				/* Corrupt branch - dump for debugging but
				 * keep writing */
				ubifs_err(c, "bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);

		/* Determine the index node position */
		if (lnum == -1) {
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		/* Must match the layout computed by layout_in_empty_space() */
		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err(c, "inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(ubifs_zn_dirty(znode));
		ubifs_assert(ubifs_zn_cow(znode));

		/*
		 * It is important that the other thread observes DIRTY_ZNODE
		 * bit change before COW_ZNODE - hence the atomic clear_bit()
		 * calls with memory barriers between them.  Once COW_ZNODE is
		 * cleared, other threads may modify the znode again, so this
		 * function must not touch it afterwards.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/*
		 * NOTE(review): 'znode' must not be dereferenced from here on
		 * (only the values saved above, e.g. 'cnext', are used) - it
		 * may be being modified concurrently.
		 */

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/*
		 * Determine the length of the next index node, so it is known
		 * whether the current buffer contents need flushing.
		 */
		if (cnext == c->cnext)
			next_len = 0; /* circular list wrapped - last znode */
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			if (avail > 0)
				continue; /* buffer not yet full */
			else
				blen = buf_len; /* flush a full buffer */
		} else {
			/* Last node in this LEB (or overall): pad and flush */
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				/* Next node will not fit - switch LEBs */
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			/* Carry over the unflushed tail of the buffer */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	/* Cross-check the final write position against the layout pass */
	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err(c, "inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}
985
986
987
988
989
990
991
992static void free_obsolete_znodes(struct ubifs_info *c)
993{
994 struct ubifs_znode *znode, *cnext;
995
996 cnext = c->cnext;
997 do {
998 znode = cnext;
999 cnext = znode->cnext;
1000 if (ubifs_zn_obsolete(znode))
1001 kfree(znode);
1002 else {
1003 znode->cnext = NULL;
1004 atomic_long_inc(&c->clean_zn_cnt);
1005 atomic_long_inc(&ubifs_clean_zn_cnt);
1006 }
1007 } while (cnext != c->cnext);
1008}
1009
1010
1011
1012
1013
1014
1015
1016
1017static int return_gap_lebs(struct ubifs_info *c)
1018{
1019 int *p, err;
1020
1021 if (!c->gap_lebs)
1022 return 0;
1023
1024 dbg_cmt("");
1025 for (p = c->gap_lebs; *p != -1; p++) {
1026 err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
1027 LPROPS_TAKEN, 0);
1028 if (err)
1029 return err;
1030 }
1031
1032 kfree(c->gap_lebs);
1033 c->gap_lebs = NULL;
1034 return 0;
1035}
1036
1037
1038
1039
1040
1041
1042
/**
 * ubifs_tnc_end_commit - update the TNC for commit end.
 * @c: UBIFS file-system description object
 *
 * Writes the new index out, returns the LEBs used by the in-the-gaps
 * method, and cleans up the commit list.  Returns %0 on success and a
 * negative error code on failure.
 */
int ubifs_tnc_end_commit(struct ubifs_info *c)
{
	int err;

	/* Nothing to do if no commit was started */
	if (!c->cnext)
		return 0;

	err = return_gap_lebs(c);
	if (err)
		return err;

	/* Write the index before taking tnc_mutex */
	err = write_index(c);
	if (err)
		return err;

	mutex_lock(&c->tnc_mutex);

	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);

	free_obsolete_znodes(c);

	/* Commit finished - reset the commit list and reserved LEBs */
	c->cnext = NULL;
	kfree(c->ilebs);
	c->ilebs = NULL;

	mutex_unlock(&c->tnc_mutex);

	return 0;
}
1072