1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/crc32.h>
34#include <linux/slab.h>
35#include "ubifs.h"
36
37
38
39
40
41
42
43
44
45
46
/*
 * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
 * @NAME_LESS: name corresponding to the first argument is less than second
 * @NAME_MATCHES: names match
 * @NAME_GREATER: name corresponding to the first argument is greater than
 *                second
 * @NOT_ON_MEDIA: node referred by zbranch does not exist on the media
 */
enum {
	NAME_LESS = 0,
	NAME_MATCHES = 1,
	NAME_GREATER = 2,
	NOT_ON_MEDIA = 3,
};
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/**
 * insert_old_idx - record an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Returns %0 on success, and a negative error code on failure.
 *
 * For recovery, there must always be a complete intact version of the index
 * on flash at all times - the "old index", i.e. the index as at the time of
 * the last successful commit. Obsoleted index nodes must not be erased until
 * the next successful commit, so positions of such nodes are recorded in the
 * @c->old_idx RB-tree, keyed by (@lnum, @offs). That is what this function
 * does.
 */
static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *old_idx, *o;
	struct rb_node **p, *parent = NULL;

	old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
	if (unlikely(!old_idx))
		return -ENOMEM;
	old_idx->lnum = lnum;
	old_idx->offs = offs;

	/* Standard RB-tree insertion, ordered by LEB number then offset */
	p = &c->old_idx.rb_node;
	while (*p) {
		parent = *p;
		o = rb_entry(parent, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = &(*p)->rb_left;
		else if (lnum > o->lnum)
			p = &(*p)->rb_right;
		else if (offs < o->offs)
			p = &(*p)->rb_left;
		else if (offs > o->offs)
			p = &(*p)->rb_right;
		else {
			/* This position is already recorded - not fatal */
			ubifs_err("old idx added twice!");
			kfree(old_idx);
			return 0;
		}
	}
	rb_link_node(&old_idx->rb, parent, p);
	rb_insert_color(&old_idx->rb, &c->old_idx);
	return 0;
}
110
111
112
113
114
115
116
117
/**
 * insert_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * The on-flash position of @znode is taken from the zbranch that refers to it
 * (or from @c->zroot if @znode is the root). A zero length means the znode
 * was never written to the media, in which case there is nothing to record.
 *
 * Returns %0 on success, and a negative error code on failure.
 */
int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
{
	if (znode->parent) {
		struct ubifs_zbranch *zbr;

		zbr = &znode->parent->zbranch[znode->iip];
		if (zbr->len)
			return insert_old_idx(c, zbr->lnum, zbr->offs);
	} else
		if (c->zroot.len)
			return insert_old_idx(c, c->zroot.lnum,
					      c->zroot.offs);
	return 0;
}
132
133
134
135
136
137
138
139
/**
 * ins_clr_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Like 'insert_old_idx_znode()', but additionally clears the recorded
 * on-flash position (lnum/offs/len) in the referring zbranch (or @c->zroot),
 * so the old location will not be accounted or recorded again.
 *
 * Returns %0 on success, and a negative error code on failure.
 */
static int ins_clr_old_idx_znode(struct ubifs_info *c,
				 struct ubifs_znode *znode)
{
	int err;

	if (znode->parent) {
		struct ubifs_zbranch *zbr;

		zbr = &znode->parent->zbranch[znode->iip];
		if (zbr->len) {
			err = insert_old_idx(c, zbr->lnum, zbr->offs);
			if (err)
				return err;
			zbr->lnum = 0;
			zbr->offs = 0;
			zbr->len = 0;
		}
	} else
		if (c->zroot.len) {
			err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
			if (err)
				return err;
			c->zroot.lnum = 0;
			c->zroot.offs = 0;
			c->zroot.len = 0;
		}
	return 0;
}
168
169
170
171
172
173
174
175
176
177
178
/**
 * destroy_old_idx - destroy the old_idx RB-tree.
 * @c: UBIFS file-system description object
 *
 * During commit start, the old_idx RB-tree is used to avoid overwriting index
 * nodes that were in the index last commit but have since been deleted. This
 * function destroys the RB-tree, freeing every entry.
 */
void destroy_old_idx(struct ubifs_info *c)
{
	struct rb_node *this = c->old_idx.rb_node;
	struct ubifs_old_idx *old_idx;

	/*
	 * Iterative post-order traversal: descend to a leaf, free it, and
	 * detach it from its parent so the parent's other child (or the
	 * parent itself) is processed next. No recursion, no rebalancing.
	 */
	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		old_idx = rb_entry(this, struct ubifs_old_idx, rb);
		this = rb_parent(this);
		if (this) {
			/* Unlink the freed child to avoid revisiting it */
			if (this->rb_left == &old_idx->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(old_idx);
	}
	c->old_idx = RB_ROOT;
}
204
205
206
207
208
209
210
211
/**
 * copy_znode - copy a dirty znode.
 * @c: UBIFS file-system description object
 * @znode: znode to copy
 *
 * A dirty znode being committed may not be changed, so it is copied. The copy
 * is marked dirty (and not copy-on-write), the original is marked obsolete,
 * and all in-memory children are re-parented to the copy.
 *
 * Returns the copy on success, or an ERR_PTR() on allocation failure.
 */
static struct ubifs_znode *copy_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	struct ubifs_znode *zn;

	zn = kmalloc(c->max_znode_sz, GFP_NOFS);
	if (unlikely(!zn))
		return ERR_PTR(-ENOMEM);

	memcpy(zn, znode, c->max_znode_sz);
	zn->cnext = NULL;
	__set_bit(DIRTY_ZNODE, &zn->flags);
	__clear_bit(COW_ZNODE, &zn->flags);

	ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
	__set_bit(OBSOLETE_ZNODE, &znode->flags);

	if (znode->level != 0) {
		int i;
		const int n = zn->child_cnt;

		/* The children now have a new parent */
		for (i = 0; i < n; i++) {
			struct ubifs_zbranch *zbr = &zn->zbranch[i];

			if (zbr->znode)
				zbr->znode->parent = zn;
		}
	}

	atomic_long_inc(&c->dirty_zn_cnt);
	return zn;
}
245
246
247
248
249
250
251
252
253
254static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
255{
256 c->calc_idx_sz -= ALIGN(dirt, 8);
257 return ubifs_add_dirt(c, lnum, dirt);
258}
259
260
261
262
263
264
265
266
/**
 * dirty_cow_znode - ensure a znode is not being committed.
 * @c: UBIFS file-system description object
 * @zbr: branch of znode to check
 *
 * If the znode is not involved in the ongoing commit, it is simply marked
 * dirty (accounting its old on-flash location as dirt). If it is being
 * committed (COW_ZNODE set), a copy is made and the branch is re-pointed at
 * the copy, recording the old index location in the old-index RB-tree.
 *
 * Returns dirtied znode on success or a negative error code on failure.
 */
static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
					   struct ubifs_zbranch *zbr)
{
	struct ubifs_znode *znode = zbr->znode;
	struct ubifs_znode *zn;
	int err;

	if (!test_bit(COW_ZNODE, &znode->flags)) {
		/* znode is not being committed */
		if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
			/* First time it becomes dirty - update accounting */
			atomic_long_inc(&c->dirty_zn_cnt);
			atomic_long_dec(&c->clean_zn_cnt);
			atomic_long_dec(&ubifs_clean_zn_cnt);
			err = add_idx_dirt(c, zbr->lnum, zbr->len);
			if (unlikely(err))
				return ERR_PTR(err);
		}
		return znode;
	}

	zn = copy_znode(c, znode);
	if (IS_ERR(zn))
		return zn;

	if (zbr->len) {
		/* Preserve the old index node location until next commit */
		err = insert_old_idx(c, zbr->lnum, zbr->offs);
		if (unlikely(err))
			return ERR_PTR(err);
		err = add_idx_dirt(c, zbr->lnum, zbr->len);
	} else
		err = 0;

	/* Point the branch at the copy; the old flash location is gone */
	zbr->znode = zn;
	zbr->lnum = 0;
	zbr->offs = 0;
	zbr->len = 0;

	if (unlikely(err))
		return ERR_PTR(err);
	return zn;
}
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
/**
 * lnc_add - add a leaf node to the leaf node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * The leaf node cache (LNC) saves re-reading the same directory entry node
 * over and over again (it is used for hashed keys only - see the assertion).
 *
 * Note, this function does not attach @node itself to the LNC - it allocates
 * a copy and caches that instead, because @node was allocated outside the TNC
 * subsystem and will be used by the caller after @c->tnc_mutex is released,
 * whereas the cached copy may be freed at any time.
 *
 * Returns %0 - a cache-insertion failure is not an error, only validation
 * failures return a negative error code.
 */
static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
		   const void *node)
{
	int err;
	void *lnc_node;
	const struct ubifs_dent_node *dent = node;

	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);
	ubifs_assert(is_hash_key(c, &zbr->key));

	err = ubifs_validate_entry(c, dent);
	if (err) {
		dbg_dump_stack();
		dbg_dump_node(c, dent);
		return err;
	}

	lnc_node = kmalloc(zbr->len, GFP_NOFS);
	if (!lnc_node)
		/* We don't have to have the cache, so no error */
		return 0;

	memcpy(lnc_node, node, zbr->len);
	zbr->leaf = lnc_node;
	return 0;
}
356
357
358
359
360
361
362
363
364
365
366static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
367 void *node)
368{
369 int err;
370
371 ubifs_assert(!zbr->leaf);
372 ubifs_assert(zbr->len != 0);
373
374 err = ubifs_validate_entry(c, node);
375 if (err) {
376 dbg_dump_stack();
377 dbg_dump_node(c, node);
378 return err;
379 }
380
381 zbr->leaf = node;
382 return 0;
383}
384
385
386
387
388
389
390static void lnc_free(struct ubifs_zbranch *zbr)
391{
392 if (!zbr->leaf)
393 return;
394 kfree(zbr->leaf);
395 zbr->leaf = NULL;
396}
397
398
399
400
401
402
403
404
405
406
407
408
/**
 * tnc_read_node_nm - read a "hashed" leaf node.
 * @c: UBIFS file-system description object
 * @zbr: key and position of the node
 * @node: node is returned here
 *
 * This function reads a "hashed" node defined by @zbr from the leaf node
 * cache (in which case the node is copied from there) or from the media. A
 * freshly read node is then added to the cache.
 *
 * Returns %0 in case of success or a negative error code in case of failure.
 */
static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(is_hash_key(c, &zbr->key));

	if (zbr->leaf) {
		/* The node is cached - just copy it out */
		ubifs_assert(zbr->len != 0);
		memcpy(node, zbr->leaf, zbr->len);
		return 0;
	}

	err = ubifs_tnc_read_node(c, zbr, node);
	if (err)
		return err;

	/* Add the node to the leaf node cache */
	err = lnc_add(c, zbr, node);
	return err;
}
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
/**
 * try_read_node - read a node on flash and check if it is valid.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: LEB number of node to read
 * @offs: offset of node to read
 *
 * This function tries to read a node of known type and length, checks it and
 * stores it in @buf. It does the same job as 'ubifs_read_node()' except that
 * it does not require that there is actually a node present: %1 is returned
 * if a valid node is present and %0 if not. A negative error code is returned
 * for I/O errors.
 *
 * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc
 * is true (controlled by a mount option) - but while mounting or re-mounting
 * to R/W mode (@c->mounting or @c->remounting_rw) the CRC is always checked,
 * because journal nodes read during replay/recovery may be corrupted.
 */
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
			 int len, int lnum, int offs)
{
	int err, node_len;
	struct ubifs_ch *ch = buf;
	uint32_t crc, node_crc;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	if (err) {
		ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
			  type, lnum, offs, err);
		return err;
	}

	/* Cheap sanity checks before the (expensive) CRC check */
	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
		return 0;

	if (ch->node_type != type)
		return 0;

	node_len = le32_to_cpu(ch->len);
	if (node_len != len)
		return 0;

	if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting &&
	    !c->remounting_rw)
		return 1;

	/* CRC covers everything after the common header's CRC field */
	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc)
		return 0;

	return 1;
}
493
494
495
496
497
498
499
500
501
502
503
/**
 * fallible_read_node - try to read a leaf node.
 * @c: UBIFS file-system description object
 * @key: key of node to read
 * @zbr: position of node
 * @node: node returned
 *
 * This function tries to read a node and returns %1 if the node is read, %0
 * if the node is not present, and a negative error code in the case of error.
 * A branch pointing to a node that is not present ("dangling") is possible
 * during journal replay.
 */
static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
			      struct ubifs_zbranch *zbr, void *node)
{
	int ret;

	dbg_tnc("LEB %d:%d, key %s", zbr->lnum, zbr->offs, DBGKEY(key));

	ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
			    zbr->offs);
	if (ret == 1) {
		union ubifs_key node_key;
		struct ubifs_dent_node *dent = node;

		/* All nodes have the key in the same place */
		key_read(c, &dent->key, &node_key);
		if (keys_cmp(c, key, &node_key) != 0)
			ret = 0;
	}
	if (ret == 0 && c->replaying)
		dbg_mnt("dangling branch LEB %d:%d len %d, key %s",
			zbr->lnum, zbr->offs, zbr->len, DBGKEY(key));
	return ret;
}
527
528
529
530
531
532
533
534
535
536
537
538
/**
 * matches_name - determine if a direntry or xattr entry matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This function checks if the entry referred by @zbr matches name @nm.
 * Returns %NAME_MATCHES if it does, %NAME_LESS if the name referred by @zbr
 * is less than @nm, and %NAME_GREATER if it is greater. In case of failure, a
 * negative error code is returned.
 */
static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = ubifs_tnc_read_node(c, zbr, dent);
		if (err)
			goto out_free;

		/* Add the node to the leaf node cache (takes ownership) */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		/* Common prefix is equal - the shorter name is "less" */
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}
580
581
582
583
584
585
586
587
588
589static struct ubifs_znode *get_znode(struct ubifs_info *c,
590 struct ubifs_znode *znode, int n)
591{
592 struct ubifs_zbranch *zbr;
593
594 zbr = &znode->zbranch[n];
595 if (zbr->znode)
596 znode = zbr->znode;
597 else
598 znode = ubifs_load_znode(c, zbr, znode, n);
599 return znode;
600}
601
602
603
604
605
606
607
608
609
610
/**
 * tnc_next - find next TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the next TNC entry is found, %-ENOENT if there
 * is no next entry, or a negative error code otherwise.
 */
static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	/* Simple case: next branch is in the same znode */
	nn += 1;
	if (nn < znode->child_cnt) {
		*n = nn;
		return 0;
	}
	/* Walk up until an ancestor has a next sibling, then descend left */
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip + 1;
		znode = zp;
		if (nn < znode->child_cnt) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* Descend to the leftmost level-0 znode */
			while (znode->level != 0) {
				znode = get_znode(c, znode, 0);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = 0;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}
646
647
648
649
650
651
652
653
654
655
/**
 * tnc_prev - find previous TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the previous TNC entry is found, %-ENOENT if
 * there is no previous entry, or a negative error code otherwise.
 */
static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	/* Simple case: previous branch is in the same znode */
	if (nn > 0) {
		*n = nn - 1;
		return 0;
	}
	/* Walk up until an ancestor has a left sibling, then descend right */
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip - 1;
		znode = zp;
		if (nn >= 0) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* Descend to the rightmost level-0 znode */
			while (znode->level != 0) {
				nn = znode->child_cnt - 1;
				znode = get_znode(c, znode, nn);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = znode->child_cnt - 1;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
/**
 * resolve_collision - resolve a collision.
 * @c: UBIFS file-system description object
 * @key: key of a directory or extended attribute entry
 * @zn: znode is returned here
 * @n: zbranch number is passed and returned here
 *
 * @nm: name of the entry
 *
 * This function is called for "hashed" keys to make sure that the found key
 * really corresponds to the looked up node (directory or extended attribute
 * entry). It returns %1 and sets @zn and @n if the collision is resolved.
 * %0 is returned if @nm is not found and @zn and @n are set to the previous
 * entry, i.e. to the entry after which @nm could follow if it were in TNC.
 * This means that @n may be set to %-1 if the leftmost key in @zn is the
 * previous one. A negative error code is returned on failures.
 */
static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
			     struct ubifs_znode **zn, int *n,
			     const struct qstr *nm)
{
	int err;

	err = matches_name(c, &(*zn)->zbranch[*n], nm);
	if (unlikely(err < 0))
		return err;
	if (err == NAME_MATCHES)
		return 1;

	if (err == NAME_GREATER) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				return 0;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/*
				 * We have stepped onto a different key, so
				 * @nm is not present and the current position
				 * is the "previous entry" the caller expects.
				 *
				 * However, if we stopped at the very last
				 * branch of this znode, the entries colliding
				 * with @key begin in the NEXT level-0 znode.
				 * Callers (e.g. insertion) require @zn/@n to
				 * designate the znode that actually contains
				 * the colliding key sequence, so step forward
				 * and express the position as "before the
				 * first branch" (*n == -1) of that znode.
				 *
				 * 'tnc_next()' cannot legitimately fail here:
				 * we just came from that direction, hence the
				 * assertion and the -ENOENT -> -EINVAL
				 * conversion below.
				 */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				return 0;
			}
			err = matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_LESS)
				return 0;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_GREATER);
		}
	} else {
		int nn = *n;
		struct ubifs_znode *znode = *zn;

		/* Look right */
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				return 0;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				return 0;
			err = matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				return 0;
			/* Still at or before @nm - advance reported position */
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_LESS);
		}
	}
}
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
/**
 * fallible_matches_name - determine if a dent matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This is a "fallible" version of 'matches_name()': the directory entry node
 * referred to by @zbr may legitimately not exist on the media (which happens
 * during journal replay), in which case %NOT_ON_MEDIA is returned. Otherwise
 * the return values are as for 'matches_name()': %NAME_MATCHES, %NAME_LESS,
 * %NAME_GREATER, or a negative error code.
 */
static int fallible_matches_name(struct ubifs_info *c,
				 struct ubifs_zbranch *zbr,
				 const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = fallible_read_node(c, &zbr->key, zbr, dent);
		if (err < 0)
			goto out_free;
		if (err == 0) {
			/* The node was not present */
			err = NOT_ON_MEDIA;
			goto out_free;
		}
		ubifs_assert(err == 1);

		/* Add the node to the leaf node cache (takes ownership) */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		/* Common prefix is equal - the shorter name is "less" */
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
/**
 * fallible_resolve_collision - resolve a collision even if nodes are missing.
 * @c: UBIFS file-system description object
 * @key: key
 * @zn: znode is returned here
 * @n: branch number is passed and returned here
 * @nm: name of directory entry
 * @adding: indicates caller is adding a key to the TNC
 *
 * This is a "fallible" version of 'resolve_collision()' for use during
 * journal replay, when branches may point at nodes that are not on the media
 * ("dangling" branches). Return values are as for 'resolve_collision()',
 * except that when not adding and only a dangling match was found, @zn and @n
 * are set to the dangling branch and %1 is returned (so the replay code can
 * remove it).
 */
static int fallible_resolve_collision(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      const struct qstr *nm, int adding)
{
	struct ubifs_znode *o_znode = NULL, *znode = *zn;
	int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;

	cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
	if (unlikely(cmp < 0))
		return cmp;
	if (cmp == NAME_MATCHES)
		return 1;
	if (cmp == NOT_ON_MEDIA) {
		o_znode = znode;
		o_n = nn;
		/*
		 * We are unlucky and hit a dangling branch straight away.
		 * Now we do not really know where to go to find the needed
		 * branch - to the left or to the right. Well, let's try left,
		 * and then right as well (@unsure forces both scans).
		 */
		unsure = 1;
	} else if (!adding)
		unsure = 1; /* Remove a dangling branch wherever it is */

	if (cmp == NAME_GREATER || unsure) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				break;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/* See comments in 'resolve_collision()' */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				break;
			}
			err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = *zn;
				o_n = *n;
				continue;
			}
			if (!adding)
				continue;
			if (err == NAME_LESS)
				break;
			else
				unsure = 0;
		}
	}

	if (cmp == NAME_LESS || unsure) {
		/* Look right, restarting from the original position */
		*zn = znode;
		*n = nn;
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				break;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				break;
			err = fallible_matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				break;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = znode;
				o_n = nn;
			}
		}
	}

	/* Never match a dangling branch when adding */
	if (adding || !o_znode)
		return 0;

	dbg_mnt("dangling match LEB %d:%d len %d %s",
		o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
		o_znode->zbranch[o_n].len, DBGKEY(key));
	*zn = o_znode;
	*n = o_n;
	return 1;
}
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
1016{
1017 if (zbr->lnum == lnum && zbr->offs == offs)
1018 return 1;
1019 else
1020 return 0;
1021}
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
/**
 * resolve_collision_directly - resolve a collision directly.
 * @c: UBIFS file-system description object
 * @key: key of directory entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @lnum: LEB number of dent node to match
 * @offs: offset of dent node to match
 *
 * This function is used for "hashed" keys to make sure the found directory or
 * extended attribute entry node is what was looked for. It is used when the
 * flash address of the right node is known (@lnum:@offs), which makes
 * collision resolution much easier - there is no need to read entries and
 * match full names. This function returns %1 and sets @zn and @n if the
 * collision is resolved, %0 if @lnum:@offs is not found and @zn and @n are
 * set to the previous directory entry. Otherwise a negative error code is
 * returned.
 */
static int resolve_collision_directly(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      int lnum, int offs)
{
	struct ubifs_znode *znode;
	int nn, err;

	znode = *zn;
	nn = *n;
	if (matches_position(&znode->zbranch[nn], lnum, offs))
		return 1;

	/* Look left */
	while (1) {
		err = tnc_prev(c, &znode, &nn);
		if (err == -ENOENT)
			break;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			break;
		if (matches_position(&znode->zbranch[nn], lnum, offs)) {
			*zn = znode;
			*n = nn;
			return 1;
		}
	}

	/* Look right */
	znode = *zn;
	nn = *n;
	while (1) {
		err = tnc_next(c, &znode, &nn);
		if (err == -ENOENT)
			return 0;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			return 0;
		/* Keep @zn/@n at the last entry with a matching key */
		*zn = znode;
		*n = nn;
		if (matches_position(&znode->zbranch[nn], lnum, offs))
			return 1;
	}
}
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
/**
 * dirty_cow_bottom_up - dirty a znode and its ancestors.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * If we do not dirty the ancestors first, the copy-on-write could create a
 * dirtied child whose parent is still clean/committed. So this function
 * first records (bottom-up) the path of slot numbers from @znode towards the
 * root, stopping early at an already-dirty ancestor not involved in the
 * commit, then walks that path top-down applying 'dirty_cow_znode()'.
 *
 * Returns the (possibly copied) dirtied @znode, or an ERR_PTR() on failure.
 */
static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
					       struct ubifs_znode *znode)
{
	struct ubifs_znode *zp;
	int *path = c->bottom_up_buf, p = 0;

	ubifs_assert(c->zroot.znode);
	ubifs_assert(znode);
	/* The static path buffer may be too small for a tall tree */
	if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
		kfree(c->bottom_up_buf);
		c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
					   GFP_NOFS);
		if (!c->bottom_up_buf)
			return ERR_PTR(-ENOMEM);
		path = c->bottom_up_buf;
	}
	if (c->zroot.znode->level) {
		/* Go up until parent is dirty (and not being committed) */
		while (1) {
			int n;

			zp = znode->parent;
			if (!zp)
				break;
			n = znode->iip;
			ubifs_assert(p < c->zroot.znode->level);
			path[p++] = n;
			if (!zp->cnext && ubifs_zn_dirty(znode))
				break;
			znode = zp;
		}
	}

	/* Come back down, dirtying as we go */
	while (1) {
		struct ubifs_zbranch *zbr;

		zp = znode->parent;
		if (zp) {
			ubifs_assert(path[p - 1] >= 0);
			ubifs_assert(path[p - 1] < zp->child_cnt);
			zbr = &zp->zbranch[path[--p]];
			znode = dirty_cow_znode(c, zbr);
		} else {
			ubifs_assert(znode == c->zroot.znode);
			znode = dirty_cow_znode(c, &c->zroot);
		}
		if (IS_ERR(znode) || !p)
			break;
		ubifs_assert(path[p - 1] >= 0);
		ubifs_assert(path[p - 1] < znode->child_cnt);
		znode = znode->zbranch[path[p - 1]].znode;
	}

	return znode;
}
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
/**
 * ubifs_lookup_level0 - search for zero-level znode.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and searches for the zero-level znode
 * which refers to key @key. The found zero-level znode is returned in @zn.
 * There are 3 cases:
 *   o exact match, i.e. the found zero-level znode contains key @key, then %1
 *     is returned and slot number of the matched branch is stored in @n;
 *   o not exact match, which means that zero-level znode does not contain
 *     @key, then %0 is returned and slot number of the closest branch is
 *     stored in @n;
 *   o @key is so small it is even less than the lowest key of the leftmost
 *     zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, then this
 * function reads the corresponding indexing nodes and inserts them to TNC. In
 * case of failure, a negative error code is returned.
 */
int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
			struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnc("search key %s", DBGKEY(key));
	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	/* @time is used by the TNC shrinker for LRU-style eviction */
	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = zbr->znode;
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * Here is a tricky place. We have not found the key, and it is a
	 * "hashed" key, which may collide. The rest of the code deals with
	 * situations like this:
	 *
	 *                  | 3 | 5 |
	 *                  /       \
	 *          | 3 | 5 |      | 6 | 7 | (x)
	 *
	 * If we are looking for key "5", we may reach the node marked "(x)",
	 * where the search slot is -1 even though "5" exists in the znode to
	 * the left. This is possible because a child znode's lowest key may
	 * be equal to the key stored in the NEXT branch of the parent - a
	 * state that arises when a znode containing a sequence of colliding
	 * keys is split and the leftmost of the duplicates is later removed
	 * (parent keys are not corrected downwards on removal, because the
	 * garbage collector finds znodes by the key recorded in the on-flash
	 * index node, and changing it would break that lookup).
	 *
	 * So for a hashed key with *n == -1 we must also look at the entry
	 * immediately to the left, and return it if it carries the same key.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
/**
 * lookup_level0_dirty - search for zero-level znode dirtying.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and searches for the zero-level znode
 * which refers to key @key. The found zero-level znode is returned in @zn.
 * There are 3 cases:
 *   o exact match, i.e. the found zero-level znode contains key @key, then %1
 *     is returned and slot number of the matched branch is stored in @n;
 *   o not exact match, which means that zero-level znode does not contain
 *     @key, then %0 is returned and slot number of the closest branch is
 *     stored in @n;
 *   o @key is so small it is even less than the lowest key of the leftmost
 *     zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Additionally all znodes in the path from the root to the located zero-level
 * znode are marked as dirty (via copy-on-write if need be).
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, then this
 * function reads the corresponding indexing nodes and inserts them to TNC. In
 * case of failure, a negative error code is returned.
 */
static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
			       struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnc("search and dirty key %s", DBGKEY(key));

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode = dirty_cow_znode(c, &c->zroot);
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
		znode = dirty_cow_znode(c, zbr);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * See the huge comment at the same place in
	 * 'ubifs_lookup_level0()' - a colliding hashed key may be one
	 * entry to the left.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}

	/* tnc_prev() may have left the dirtied path - re-dirty if needed */
	if (znode->cnext || !ubifs_zn_dirty(znode)) {
		znode = dirty_cow_bottom_up(c, znode);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @gc_seq1: garbage collection sequence number seen before the lockless read
 *
 * This function determines if @lnum may have been garbage collected since
 * sequence number @gc_seq1. If it may have been, then %1 is returned,
 * otherwise %0 is returned. Called lockless, so it is conservative.
 */
static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
{
	int gc_seq2, gced_lnum;

	/* Read @gced_lnum before @gc_seq - pairs with GC's write ordering */
	gced_lnum = c->gced_lnum;
	smp_rmb();
	gc_seq2 = c->gc_seq;
	/* Same seq means no GC happened in the meantime */
	if (gc_seq1 == gc_seq2)
		return 0;
	/* Different by more than 1 means we do not know which LEBs - assume GC'ed */
	if (gc_seq1 + 1 != gc_seq2)
		return 1;
	/*
	 * We have seen the sequence number has increased by 1. Now we need to
	 * be sure we read the right LEB number, so re-read it.
	 */
	smp_rmb();
	if (gced_lnum != c->gced_lnum)
		return 1;
	/* Finally we can check lnum */
	if (gced_lnum == lnum)
		return 1;
	return 0;
}
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
/**
 * ubifs_tnc_locate - look up a file-system node and return it and its location.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @lnum: LEB number is returned here (may be %NULL)
 * @offs: offset is returned here (only read if @lnum is non-%NULL)
 *
 * This function looks up and reads the node with key @key. The caller has to
 * make sure the @node buffer is large enough to fit the node. For non-hashed
 * keys the read is done with @c->tnc_mutex released ("fallible"), and retried
 * under full protection if the node's LEB may have been garbage collected
 * meanwhile. Returns %0 in case of success, %-ENOENT if the node was not
 * found, and a negative error code in case of failure.
 */
int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
		     void *node, int *lnum, int *offs)
{
	int found, n, err, safely = 0, gc_seq1;
	struct ubifs_znode *znode;
	struct ubifs_zbranch zbr, *zt;

again:
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out;
	} else if (found < 0) {
		err = found;
		goto out;
	}
	zt = &znode->zbranch[n];
	if (lnum) {
		*lnum = zt->lnum;
		*offs = zt->offs;
	}
	if (is_hash_key(c, key)) {
		/*
		 * In this case the leaf node cache gets used, so we pass the
		 * address of the zbranch and keep the mutex locked.
		 */
		err = tnc_read_node_nm(c, zt, node);
		goto out;
	}
	if (safely) {
		/* Retry path: read under the mutex, no races possible */
		err = ubifs_tnc_read_node(c, zt, node);
		goto out;
	}
	/* Drop the TNC mutex and do a fallible read (copy the zbranch) */
	zbr = znode->zbranch[n];
	gc_seq1 = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);

	if (ubifs_get_wbuf(c, zbr.lnum)) {
		/* We do not GC journal heads, so no race is possible */
		err = ubifs_tnc_read_node(c, &zbr, node);
		return err;
	}

	err = fallible_read_node(c, key, &zbr, node);
	if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
		/*
		 * The node may have been GC'ed out from under us, so try
		 * again while keeping the TNC mutex locked.
		 */
		safely = 1;
		goto again;
	}
	return 0;

out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Collects into @bu->zbranch[] the data-node keys of the same inode, starting
 * at @bu->key, that reside consecutively in the same LEB, so that they can be
 * read in one go. This function returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
	int n, err = 0, lnum = -1, uninitialized_var(offs);
	int uninitialized_var(len);
	unsigned int block = key_block(c, &bu->key);
	struct ubifs_znode *znode;

	bu->cnt = 0;
	bu->blk_cnt = 0;
	bu->eof = 0;

	mutex_lock(&c->tnc_mutex);
	/* Find first key */
	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
	if (err < 0)
		goto out;
	if (err) {
		/* Key found */
		len = znode->zbranch[n].len;
		/* The buffer must be big enough for at least 1 node */
		if (len > bu->buf_len) {
			err = -EINVAL;
			goto out;
		}
		/* Add this key */
		bu->zbranch[bu->cnt++] = znode->zbranch[n];
		bu->blk_cnt += 1;
		lnum = znode->zbranch[n].lnum;
		offs = ALIGN(znode->zbranch[n].offs + len, 8);
	}
	while (1) {
		struct ubifs_zbranch *zbr;
		union ubifs_key *key;
		unsigned int next_block;

		/* Find next key */
		err = tnc_next(c, &znode, &n);
		if (err)
			goto out;
		zbr = &znode->zbranch[n];
		key = &zbr->key;
		/* See if there is another data key for this file */
		if (key_inum(c, key) != key_inum(c, &bu->key) ||
		    key_type(c, key) != UBIFS_DATA_KEY) {
			err = -ENOENT;
			goto out;
		}
		if (lnum < 0) {
			/* First key found */
			lnum = zbr->lnum;
			offs = ALIGN(zbr->offs + zbr->len, 8);
			len = zbr->len;
			if (len > bu->buf_len) {
				err = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * The data nodes must be in consecutive positions in
			 * the same LEB.
			 */
			if (zbr->lnum != lnum || zbr->offs != offs)
				goto out;
			offs += ALIGN(zbr->len, 8);
			len = ALIGN(len, 8) + zbr->len;
			/* Check buffer overflow */
			if (len > bu->buf_len)
				goto out;
		}
		/* Allow for holes between data nodes */
		next_block = key_block(c, key);
		bu->blk_cnt += (next_block - block - 1);
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		block = next_block;
		/* Add this key */
		bu->zbranch[bu->cnt++] = *zbr;
		bu->blk_cnt += 1;
		/* See if we have room for more */
		if (bu->cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
	}
out:
	if (err == -ENOENT) {
		/* Running off the end of the file is not an error */
		bu->eof = 1;
		err = 0;
	}
	bu->gc_seq = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);
	if (err)
		return err;
	/*
	 * An enormous hole could cause bulk-read to encompass too many
	 * page cache pages, so limit the number of blocks.
	 */
	if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
		bu->blk_cnt = UBIFS_MAX_BULK_READ;
	/*
	 * Ensure that bulk-read covers a whole number of page cache
	 * pages.
	 */
	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
		return 0;
	if (bu->eof) {
		/* At the end of file we can round up */
		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
		return 0;
	}
	/* Exclude data nodes that do not make up a whole page cache page */
	block = key_block(c, &bu->key) + bu->blk_cnt;
	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
	while (bu->cnt) {
		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
			break;
		bu->cnt -= 1;
	}
	return 0;
}
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * Data within the write-buffer has not reached the flash yet, so a read that
 * overlaps the wbuf region must take the overlapping part from the wbuf (under
 * its spinlock) and only the leading part from the media.
 *
 * This function returns %0 on success or a negative error code on failure.
 */
static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
		     int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int rlen, overlap;

	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(offs + len <= c->leb_size);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubi_read(c->ubi, lnum, buf, offs, len);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer while holding its lock */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0)
		/* Read head [of the node] from the flash media */
		return ubi_read(c->ubi, lnum, buf, offs, rlen);

	return 0;
}
1687
1688
1689
1690
1691
1692
1693
1694
1695
/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node to validate
 * @zbr: zbranch of data node to validate
 *
 * Checks node type, common-header validity, length, and that the node's key
 * matches the one recorded in @zbr. This function returns %0 on success or a
 * negative error code on failure.
 */
static int validate_data_node(struct ubifs_info *c, void *buf,
			      struct ubifs_zbranch *zbr)
{
	union ubifs_key key1;
	struct ubifs_ch *ch = buf;
	int err, len;

	if (ch->node_type != UBIFS_DATA_NODE) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, UBIFS_DATA_NODE);
		goto out_err;
	}

	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
		goto out;
	}

	len = le32_to_cpu(ch->len);
	if (len != zbr->len) {
		ubifs_err("bad node length %d, expected %d", len, zbr->len);
		goto out_err;
	}

	/* Make sure the key of the read node is correct */
	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
	if (!keys_eq(c, &zbr->key, &key1)) {
		ubifs_err("bad key in node at LEB %d:%d",
			  zbr->lnum, zbr->offs);
		dbg_tnc("looked for key %s found node's key %s",
			DBGKEY(&zbr->key), DBGKEY1(&key1));
		goto out_err;
	}

	return 0;

out_err:
	err = -EINVAL;
out:
	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return err;
}
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This function reads and validates the data nodes that were identified by
 * the 'ubifs_tnc_get_bu_keys()' function. Returns %0 on success, %-EAGAIN to
 * indicate a race with GC (the caller should retry), or another negative
 * error code on failure.
 */
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
{
	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
	struct ubifs_wbuf *wbuf;
	void *buf;

	/* Total span: from the first node's offset to the last node's end */
	len = bu->zbranch[bu->cnt - 1].offs;
	len += bu->zbranch[bu->cnt - 1].len - offs;
	if (len > bu->buf_len) {
		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
		return -EINVAL;
	}

	/* Do the read, via the write-buffer if one covers this LEB */
	wbuf = ubifs_get_wbuf(c, lnum);
	if (wbuf)
		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
	else
		err = ubi_read(c->ubi, lnum, bu->buf, offs, len);

	/* Check for a race with GC */
	if (maybe_leb_gced(c, lnum, bu->gc_seq))
		return -EAGAIN;

	if (err && err != -EBADMSG) {
		ubifs_err("failed to read from LEB %d:%d, error %d",
			  lnum, offs, err);
		dbg_dump_stack();
		dbg_tnc("key %s", DBGKEY(&bu->key));
		return err;
	}

	/* Validate the nodes read */
	buf = bu->buf;
	for (i = 0; i < bu->cnt; i++) {
		err = validate_data_node(c, buf, &bu->zbranch[i]);
		if (err)
			return err;
		buf = buf + ALIGN(bu->zbranch[i].len, 8);
	}

	return 0;
}
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. Since the hash may have collisions, there may be many nodes with the
 * same key, so we have to sequentially look at all of them until the needed
 * one is found. This function returns zero in case of success, %-ENOENT if
 * the node was not found, and a negative error code in case of failure.
 */
static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int found, n, err;
	struct ubifs_znode *znode;

	dbg_tnc("name '%.*s' key %s", nm->len, nm->name, DBGKEY(key));
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out_unlock;
	} else if (found < 0) {
		err = found;
		goto out_unlock;
	}

	ubifs_assert(n >= 0);

	err = resolve_collision(c, key, &znode, &n, nm);
	dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
	if (unlikely(err < 0))
		goto out_unlock;
	if (err == 0) {
		err = -ENOENT;
		goto out_unlock;
	}

	err = tnc_read_node_nm(c, &znode->zbranch[n], node);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. Since the hash may have collisions, there may be many nodes with the
 * same key, so we have to sequentially look at all of them until the needed
 * one is found. This function returns zero in case of success, %-ENOENT if
 * the node was not found, and a negative error code in case of failure.
 */
int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int err, len;
	const struct ubifs_dent_node *dent = node;

	/*
	 * We should not waste time on name-collision resolution, as most
	 * likely there is no collision at all - try a plain lookup first.
	 */
	err = ubifs_tnc_lookup(c, key, node);
	if (err)
		return err;

	len = le16_to_cpu(dent->nlen);
	if (nm->len == len && !memcmp(dent->name, nm->name, len))
		return 0;

	/*
	 * Unluckily, there is a hash collision, so iterate over all entries
	 * with this key and compare names sequentially.
	 */
	return do_lookup_nm(c, key, node, nm);
}
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891static void correct_parent_keys(const struct ubifs_info *c,
1892 struct ubifs_znode *znode)
1893{
1894 union ubifs_key *key, *key1;
1895
1896 ubifs_assert(znode->parent);
1897 ubifs_assert(znode->iip == 0);
1898
1899 key = &znode->zbranch[0].key;
1900 key1 = &znode->parent->zbranch[0].key;
1901
1902 while (keys_cmp(c, key, key1) < 0) {
1903 key_copy(c, key, key1);
1904 znode = znode->parent;
1905 znode->alt = 1;
1906 if (!znode->parent || znode->iip)
1907 break;
1908 key1 = &znode->parent->zbranch[0].key;
1909 }
1910}
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923static void insert_zbranch(struct ubifs_znode *znode,
1924 const struct ubifs_zbranch *zbr, int n)
1925{
1926 int i;
1927
1928 ubifs_assert(ubifs_zn_dirty(znode));
1929
1930 if (znode->level) {
1931 for (i = znode->child_cnt; i > n; i--) {
1932 znode->zbranch[i] = znode->zbranch[i - 1];
1933 if (znode->zbranch[i].znode)
1934 znode->zbranch[i].znode->iip = i;
1935 }
1936 if (zbr->znode)
1937 zbr->znode->iip = n;
1938 } else
1939 for (i = znode->child_cnt; i > n; i--)
1940 znode->zbranch[i] = znode->zbranch[i - 1];
1941
1942 znode->zbranch[n] = *zbr;
1943 znode->child_cnt += 1;
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959 if (n == 0)
1960 znode->alt = 1;
1961}
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
/**
 * tnc_insert - insert a zbranch into the TNC, splitting znodes as needed.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: branch to insert
 * @n: slot number to insert the new zbranch at
 *
 * If @znode has a free slot, the branch is simply inserted. Otherwise the
 * znode is split; the split point is biased so that runs of consecutive
 * data-node keys of the same inode are kept together. The new right-hand
 * znode is then inserted into the parent (tail-recursively via "again"),
 * and if the root itself splits a new root one level higher is created.
 * Returns %0 on success or %-ENOMEM if allocation fails.
 */
static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
		      struct ubifs_zbranch *zbr, int n)
{
	struct ubifs_znode *zn, *zi, *zp;
	int i, keep, move, appending = 0;
	union ubifs_key *key = &zbr->key, *key1;

	ubifs_assert(n >= 0 && n <= c->fanout);

	/* Implement naive insert for now */
again:
	zp = znode->parent;
	if (znode->child_cnt < c->fanout) {
		ubifs_assert(n != c->fanout);
		dbg_tnc("inserted at %d level %d, key %s", n, znode->level,
			DBGKEY(key));

		insert_zbranch(znode, zbr, n);

		/* Ensure parent's key is correct */
		if (n == 0 && zp && znode->iip == 0)
			correct_parent_keys(c, znode);

		return 0;
	}

	/*
	 * Unfortunately, @znode does not have more empty slots and we have to
	 * split it.
	 */
	dbg_tnc("splitting level %d, key %s", znode->level, DBGKEY(key));

	if (znode->alt)
		/*
		 * We can no longer be sure of finding this znode by key, so
		 * record its on-media position in the old-index tree.
		 */
		ins_clr_old_idx_znode(c, znode);

	zn = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zn)
		return -ENOMEM;
	zn->parent = zp;
	zn->level = znode->level;

	/* Decide where to split: try to keep same-inode data keys together */
	if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
		/* Appending right after the last key of the same inode? */
		if (n == c->fanout) {
			key1 = &znode->zbranch[n - 1].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY)
				appending = 1;
		} else
			goto check_split;
	} else if (appending && n != c->fanout) {
		/* Try not to split consecutive data keys */
		appending = 0;
check_split:
		/* Split right at @n if that keeps the inode's run intact */
		if (n >= (c->fanout + 1) / 2) {
			key1 = &znode->zbranch[0].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY) {
				key1 = &znode->zbranch[n].key;
				if (key_inum(c, key1) != key_inum(c, key) ||
				    key_type(c, key1) != UBIFS_DATA_KEY) {
					keep = n;
					move = c->fanout - keep;
					zi = znode;
					goto do_split;
				}
			}
		}
	}

	if (appending) {
		/* Keep everything; the new key goes alone into the new znode */
		keep = c->fanout;
		move = 0;
	} else {
		/* Default: split roughly in half */
		keep = (c->fanout + 1) / 2;
		move = c->fanout - keep;
	}

	/*
	 * Although we don't at present, we could look at the neighbors and
	 * see if we can move some zbranches there.
	 */

	if (n < keep) {
		/* Insert into existing znode */
		zi = znode;
		move += 1;
		keep -= 1;
	} else {
		/* Insert into new znode */
		zi = zn;
		n -= keep;
		/* Re-parent the inserted child, if any */
		if (zn->level != 0)
			zbr->znode->parent = zn;
	}

do_split:

	__set_bit(DIRTY_ZNODE, &zn->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	zn->child_cnt = move;
	znode->child_cnt = keep;

	dbg_tnc("moving %d, keeping %d", move, keep);

	/* Move zbranches to the new znode */
	for (i = 0; i < move; i++) {
		zn->zbranch[i] = znode->zbranch[keep + i];
		/* Re-parent moved children */
		if (zn->level != 0)
			if (zn->zbranch[i].znode) {
				zn->zbranch[i].znode->parent = zn;
				zn->zbranch[i].znode->iip = i;
			}
	}

	/* Insert new key and branch */
	dbg_tnc("inserting at %d level %d, key %s", n, zn->level, DBGKEY(key));

	insert_zbranch(zi, zbr, n);

	/* Insert new znode (produced by splitting) into the parent */
	if (zp) {
		if (n == 0 && zi == znode && znode->iip == 0)
			correct_parent_keys(c, znode);

		/* Locate insertion point */
		n = znode->iip + 1;

		/* Tail recursion: insert the new branch into the parent */
		zbr->key = zn->zbranch[0].key;
		zbr->znode = zn;
		zbr->lnum = 0;
		zbr->offs = 0;
		zbr->len = 0;
		znode = zp;

		goto again;
	}

	/* We have to split root znode */
	dbg_tnc("creating new zroot at level %d", znode->level + 1);

	zi = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zi)
		return -ENOMEM;

	zi->child_cnt = 2;
	zi->level = znode->level + 1;

	__set_bit(DIRTY_ZNODE, &zi->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	/* The old root keeps the old zroot media position in branch 0 */
	zi->zbranch[0].key = znode->zbranch[0].key;
	zi->zbranch[0].znode = znode;
	zi->zbranch[0].lnum = c->zroot.lnum;
	zi->zbranch[0].offs = c->zroot.offs;
	zi->zbranch[0].len = c->zroot.len;
	zi->zbranch[1].key = zn->zbranch[0].key;
	zi->zbranch[1].znode = zn;

	/* The new root is dirty and has no media position yet */
	c->zroot.lnum = 0;
	c->zroot.offs = 0;
	c->zroot.len = 0;
	c->zroot.znode = zi;

	zn->parent = zi;
	zn->iip = 1;
	znode->parent = zi;
	znode->iip = 0;

	return 0;
}
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2169 int offs, int len)
2170{
2171 int found, n, err = 0;
2172 struct ubifs_znode *znode;
2173
2174 mutex_lock(&c->tnc_mutex);
2175 dbg_tnc("%d:%d, len %d, key %s", lnum, offs, len, DBGKEY(key));
2176 found = lookup_level0_dirty(c, key, &znode, &n);
2177 if (!found) {
2178 struct ubifs_zbranch zbr;
2179
2180 zbr.znode = NULL;
2181 zbr.lnum = lnum;
2182 zbr.offs = offs;
2183 zbr.len = len;
2184 key_copy(c, key, &zbr.key);
2185 err = tnc_insert(c, znode, &zbr, n + 1);
2186 } else if (found == 1) {
2187 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2188
2189 lnc_free(zbr);
2190 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2191 zbr->lnum = lnum;
2192 zbr->offs = offs;
2193 zbr->len = len;
2194 } else
2195 err = found;
2196 if (!err)
2197 err = dbg_check_tnc(c, 0);
2198 mutex_unlock(&c->tnc_mutex);
2199
2200 return err;
2201}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @old_lnum: LEB number of the old node
 * @old_offs: old node offset
 * @lnum: LEB number of the new node
 * @offs: new node offset
 * @len: new node length
 *
 * Re-points the zbranch for @key at the new location, but only when the
 * TNC still references the node at @old_lnum:@old_offs (for hashed keys,
 * colliding entries are scanned too). If the old node is no longer
 * referenced, the NEW node is obsolete and is accounted as dirty space.
 * Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
		      int old_lnum, int old_offs, int lnum, int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("old LEB %d:%d, new LEB %d:%d, len %d, key %s", old_lnum,
		old_offs, lnum, offs, len, DBGKEY(key));
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		found = 0;
		if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
			/* TNC still points at the old node - replace it */
			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			if (err)
				goto out_unlock;
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			found = 1;
		} else if (is_hash_key(c, key)) {
			/* Collision: scan neighbours for the old position */
			found = resolve_collision_directly(c, key, &znode, &n,
							   old_lnum, old_offs);
			dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
				found, znode, n, old_lnum, old_offs);
			if (found < 0) {
				err = found;
				goto out_unlock;
			}

			if (found) {
				/* Ensure the znode is dirtied */
				if (znode->cnext || !ubifs_zn_dirty(znode)) {
					znode = dirty_cow_bottom_up(c, znode);
					if (IS_ERR(znode)) {
						err = PTR_ERR(znode);
						goto out_unlock;
					}
				}
				zbr = &znode->zbranch[n];
				lnc_free(zbr);
				err = ubifs_add_dirt(c, zbr->lnum,
						     zbr->len);
				if (err)
					goto out_unlock;
				zbr->lnum = lnum;
				zbr->offs = offs;
				zbr->len = len;
			}
		}
	}

	/* Old node not referenced: the new copy is immediately obsolete */
	if (!found)
		err = ubifs_add_dirt(c, lnum, len);

	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
/**
 * ubifs_tnc_add_nm - add a "hashed" node to the TNC.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @lnum: LEB number where the node is stored
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * Same as 'ubifs_tnc_add()' but for keys which may collide (e.g. directory
 * entry keys): the name @nm is used to disambiguate. During journal replay
 * a fallible collision resolution is used and a possible dangling branch
 * with the same key is removed after insertion. Returns %0 on success or
 * a negative error code on failure.
 */
int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
		     int lnum, int offs, int len, const struct qstr *nm)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("LEB %d:%d, name '%.*s', key %s", lnum, offs, nm->len, nm->name,
		DBGKEY(key));
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		/* Key exists - find the entry with the matching name */
		if (c->replaying)
			found = fallible_resolve_collision(c, key, &znode, &n,
							   nm, 1);
		else
			found = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
		if (found < 0) {
			err = found;
			goto out_unlock;
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		if (found == 1) {
			/* Exact name match - update the zbranch in place */
			struct ubifs_zbranch *zbr = &znode->zbranch[n];

			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			goto out_unlock;
		}
	}

	if (!found) {
		/* No matching entry - insert a new zbranch */
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
		if (err)
			goto out_unlock;
		if (c->replaying) {
			/*
			 * We did not find it in the index so there may be a
			 * dangling branch still in the index. So we remove it
			 * by passing 'ubifs_tnc_remove_nm()' the same key but
			 * an unmatchable name.
			 */
			struct qstr noname = { .len = 0, .name = "" };

			err = dbg_check_tnc(c, 0);
			mutex_unlock(&c->tnc_mutex);
			if (err)
				return err;
			return ubifs_tnc_remove_nm(c, key, &noname);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
/**
 * tnc_delete - delete a znode branch from the TNC.
 * @c: UBIFS file-system description object
 * @znode: level-0 znode to delete the branch from (must be dirty)
 * @n: slot number of the branch to delete
 *
 * Removes slot @n from @znode. If the znode becomes empty it is unlinked
 * from its parent, and the process repeats upward while each parent is
 * left with a single child. Finally, if the root ends up with only one
 * child, the tree height is collapsed. Returns %0 on success or a
 * negative error code on failure.
 */
static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *zp;
	int i, err;

	/* Delete without merge for now */
	ubifs_assert(znode->level == 0);
	ubifs_assert(n >= 0 && n < c->fanout);
	dbg_tnc("deleting %s", DBGKEY(&znode->zbranch[n].key));

	zbr = &znode->zbranch[n];
	lnc_free(zbr);

	/* The on-media node becomes dirty (obsolete) space */
	err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
	if (err) {
		dbg_dump_znode(c, znode);
		return err;
	}

	/* Shift the remaining zbranches left to close the gap */
	for (i = n; i < znode->child_cnt - 1; i++)
		znode->zbranch[i] = znode->zbranch[i + 1];
	znode->child_cnt -= 1;

	if (znode->child_cnt > 0)
		return 0;

	/*
	 * This was the last zbranch, we have to delete this znode from the
	 * parent. Keep climbing while each parent would be left with a
	 * single remaining child.
	 */

	do {
		ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
		ubifs_assert(ubifs_zn_dirty(znode));

		zp = znode->parent;
		n = znode->iip;

		atomic_long_dec(&c->dirty_zn_cnt);

		/* Remember the old on-media position of this znode */
		err = insert_old_idx_znode(c, znode);
		if (err)
			return err;

		if (znode->cnext) {
			/* Being committed - cannot free yet, mark obsolete */
			__set_bit(OBSOLETE_ZNODE, &znode->flags);
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		} else
			kfree(znode);
		znode = zp;
	} while (znode->child_cnt == 1); /* while removing last child */

	/* Remove the deleted child's slot @n from this ancestor */
	znode->child_cnt -= 1;
	ubifs_assert(znode->level != 0);
	for (i = n; i < znode->child_cnt; i++) {
		znode->zbranch[i] = znode->zbranch[i + 1];
		if (znode->zbranch[i].znode)
			znode->zbranch[i].znode->iip = i;
	}

	/*
	 * If this is the root and it has only 1 child then
	 * collapse the tree.
	 */
	if (!znode->parent) {
		while (znode->child_cnt == 1 && znode->level != 0) {
			zp = znode;
			zbr = &znode->zbranch[0];
			znode = get_znode(c, znode, 0);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* The single child becomes the new root */
			znode->parent = NULL;
			znode->iip = 0;
			if (c->zroot.len) {
				err = insert_old_idx(c, c->zroot.lnum,
						     c->zroot.offs);
				if (err)
					return err;
			}
			c->zroot.lnum = zbr->lnum;
			c->zroot.offs = zbr->offs;
			c->zroot.len = zbr->len;
			c->zroot.znode = znode;
			ubifs_assert(!test_bit(OBSOLETE_ZNODE,
				     &zp->flags));
			ubifs_assert(test_bit(DIRTY_ZNODE, &zp->flags));
			atomic_long_dec(&c->dirty_zn_cnt);

			if (zp->cnext) {
				/* Old root is being committed - mark obsolete */
				__set_bit(OBSOLETE_ZNODE, &zp->flags);
				atomic_long_inc(&c->clean_zn_cnt);
				atomic_long_inc(&ubifs_clean_zn_cnt);
			} else
				kfree(zp);
		}
	}

	return 0;
}
2498
2499
2500
2501
2502
2503
2504
2505
2506int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2507{
2508 int found, n, err = 0;
2509 struct ubifs_znode *znode;
2510
2511 mutex_lock(&c->tnc_mutex);
2512 dbg_tnc("key %s", DBGKEY(key));
2513 found = lookup_level0_dirty(c, key, &znode, &n);
2514 if (found < 0) {
2515 err = found;
2516 goto out_unlock;
2517 }
2518 if (found == 1)
2519 err = tnc_delete(c, znode, n);
2520 if (!err)
2521 err = dbg_check_tnc(c, 0);
2522
2523out_unlock:
2524 mutex_unlock(&c->tnc_mutex);
2525 return err;
2526}
2527
2528
2529
2530
2531
2532
2533
2534
2535
/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @nm: directory entry name
 *
 * Same as 'ubifs_tnc_remove()' but for keys which may collide: the name
 * @nm selects which colliding entry to delete. During journal replay a
 * fallible collision resolution is used. A missing entry is not an error.
 * Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
			const struct qstr *nm)
{
	int n, err;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("%.*s, key %s", nm->len, nm->name, DBGKEY(key));
	err = lookup_level0_dirty(c, key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* Key found - locate the entry with the matching name */
		if (c->replaying)
			err = fallible_resolve_collision(c, key, &znode, &n,
							 nm, 0);
		else
			err = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
		if (err < 0)
			goto out_unlock;
		if (err) {
			/* Ensure the znode is dirtied */
			if (znode->cnext || !ubifs_zn_dirty(znode)) {
				znode = dirty_cow_bottom_up(c, znode);
				if (IS_ERR(znode)) {
					err = PTR_ERR(znode);
					goto out_unlock;
				}
			}
			err = tnc_delete(c, znode, n);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range (inclusive)
 * @to_key: highest key in range (inclusive)
 *
 * Returns %1 if the key is in range and %0 otherwise.
 */
static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
			union ubifs_key *from_key, union ubifs_key *to_key)
{
	return keys_cmp(c, key, from_key) >= 0 &&
	       keys_cmp(c, key, to_key) <= 0;
}
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
/**
 * ubifs_tnc_remove_range - remove index entries in range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove (inclusive)
 * @to_key: highest key to remove (inclusive)
 *
 * Repeatedly finds the first level-0 znode containing keys in the range,
 * removes in bulk all in-range keys past the first from that znode, then
 * deletes the first via 'tnc_delete()' (which may restructure the tree,
 * hence the restart each iteration). Returns %0 on success or a negative
 * error code on failure.
 */
int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
			   union ubifs_key *to_key)
{
	int i, n, k, err = 0;
	struct ubifs_znode *znode;
	union ubifs_key *key;

	mutex_lock(&c->tnc_mutex);
	while (1) {
		/* Find first level 0 znode that contains keys to remove */
		err = ubifs_lookup_level0(c, from_key, &znode, &n);
		if (err < 0)
			goto out_unlock;

		if (err)
			key = from_key;
		else {
			/* @from_key itself absent - start at the next key */
			err = tnc_next(c, &znode, &n);
			if (err == -ENOENT) {
				err = 0;
				goto out_unlock;
			}
			if (err < 0)
				goto out_unlock;
			key = &znode->zbranch[n].key;
			if (!key_in_range(c, key, from_key, to_key)) {
				err = 0;
				goto out_unlock;
			}
		}

		/* Ensure the znode is dirtied */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		/* Remove all keys in range except the first */
		for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
			key = &znode->zbranch[i].key;
			if (!key_in_range(c, key, from_key, to_key))
				break;
			lnc_free(&znode->zbranch[i]);
			err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
					     znode->zbranch[i].len);
			if (err) {
				dbg_dump_znode(c, znode);
				goto out_unlock;
			}
			dbg_tnc("removing %s", DBGKEY(key));
		}
		if (k) {
			/* Close the gap left by the k removed zbranches */
			for (i = n + 1 + k; i < znode->child_cnt; i++)
				znode->zbranch[i - k] = znode->zbranch[i];
			znode->child_cnt -= k;
		}

		/* Now delete the first */
		err = tnc_delete(c, znode, n);
		if (err)
			goto out_unlock;
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
/**
 * ubifs_tnc_remove_ino - remove an inode from the TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * Walks all extended attribute entries of the inode, removing each entry
 * and the full key range of the corresponding xattr inode, then removes
 * the key range of @inum itself. Returns %0 on success or a negative
 * error code on failure.
 */
int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
{
	union ubifs_key key1, key2;
	struct ubifs_dent_node *xent, *pxent = NULL;
	struct qstr nm = { .name = NULL };

	dbg_tnc("ino %lu", (unsigned long)inum);

	/*
	 * Walk all extended attribute entries and remove them together with
	 * corresponding extended attribute inodes.
	 */
	lowest_xent_key(c, &key1, inum);
	while (1) {
		ino_t xattr_inum;
		int err;

		xent = ubifs_tnc_next_ent(c, &key1, &nm);
		if (IS_ERR(xent)) {
			err = PTR_ERR(xent);
			/* -ENOENT means no more xattr entries */
			if (err == -ENOENT)
				break;
			return err;
		}

		xattr_inum = le64_to_cpu(xent->inum);
		dbg_tnc("xent '%s', ino %lu", xent->name,
			(unsigned long)xattr_inum);

		/* Remove the xattr entry itself */
		nm.name = xent->name;
		nm.len = le16_to_cpu(xent->nlen);
		err = ubifs_tnc_remove_nm(c, &key1, &nm);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Remove everything belonging to the xattr inode */
		lowest_ino_key(c, &key1, xattr_inum);
		highest_ino_key(c, &key2, xattr_inum);
		err = ubifs_tnc_remove_range(c, &key1, &key2);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Keep @xent alive while @nm still references its name */
		kfree(pxent);
		pxent = xent;
		key_read(c, &xent->key, &key1);
	}

	kfree(pxent);
	/* Finally remove the whole key range of @inum itself */
	lowest_ino_key(c, &key1, inum);
	highest_ino_key(c, &key2, inum);

	return ubifs_tnc_remove_range(c, &key1, &key2);
}
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of last entry, or the key of the lowest possible entry
 * @nm: name of last entry found, or %NULL if this is the first iteration
 *
 * If @nm->name is given, returns the entry following the one named @nm;
 * if not, returns the entry at @key itself (or the next one if @key does
 * not exist). The returned entry is kmalloc'ed and owned by the caller.
 * Returns the entry on success, or an ERR_PTR: %-ENOENT when there are no
 * more entries for this inode, or another negative error code on failure.
 */
struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
					   union ubifs_key *key,
					   const struct qstr *nm)
{
	int n, err, type = key_type(c, key);
	struct ubifs_znode *znode;
	struct ubifs_dent_node *dent;
	struct ubifs_zbranch *zbr;
	union ubifs_key *dkey;

	dbg_tnc("%s %s", nm->name ? (char *)nm->name : "(lowest)", DBGKEY(key));
	ubifs_assert(is_hash_key(c, key));

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, key, &znode, &n);
	if (unlikely(err < 0))
		goto out_unlock;

	if (nm->name) {
		if (err) {
			/* Handle collisions: position on the named entry */
			err = resolve_collision(c, key, &znode, &n, nm);
			dbg_tnc("rc returned %d, znode %p, n %d",
				err, znode, n);
			if (unlikely(err < 0))
				goto out_unlock;
		}

		/* Now find next entry */
		err = tnc_next(c, &znode, &n);
		if (unlikely(err))
			goto out_unlock;
	} else {
		/*
		 * The full name of the entry was not given, in which case the
		 * behavior of this function is a little different and it
		 * returns current entry, not the next one.
		 */
		if (!err) {
			/*
			 * The key does not exist in the TNC, so @znode/@n
			 * point at the position before it - step forward to
			 * the following entry.
			 */
			err = tnc_next(c, &znode, &n);
			if (err)
				goto out_unlock;
		}
	}

	zbr = &znode->zbranch[n];
	dent = kmalloc(zbr->len, GFP_NOFS);
	if (unlikely(!dent)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * The above 'tnc_next()' call could lead us to the next inode, check
	 * this.
	 */
	dkey = &zbr->key;
	if (key_inum(c, dkey) != key_inum(c, key) ||
	    key_type(c, dkey) != type) {
		err = -ENOENT;
		goto out_free;
	}

	err = tnc_read_node_nm(c, zbr, dent);
	if (unlikely(err))
		goto out_free;

	mutex_unlock(&c->tnc_mutex);
	return dent;

out_free:
	kfree(dent);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return ERR_PTR(err);
}
2849
2850
2851
2852
2853
2854
2855
2856static void tnc_destroy_cnext(struct ubifs_info *c)
2857{
2858 struct ubifs_znode *cnext;
2859
2860 if (!c->cnext)
2861 return;
2862 ubifs_assert(c->cmt_state == COMMIT_BROKEN);
2863 cnext = c->cnext;
2864 do {
2865 struct ubifs_znode *znode = cnext;
2866
2867 cnext = cnext->cnext;
2868 if (test_bit(OBSOLETE_ZNODE, &znode->flags))
2869 kfree(znode);
2870 } while (cnext && cnext != c->cnext);
2871}
2872
2873
2874
2875
2876
2877void ubifs_tnc_close(struct ubifs_info *c)
2878{
2879 long clean_freed;
2880
2881 tnc_destroy_cnext(c);
2882 if (c->zroot.znode) {
2883 clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
2884 atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
2885 }
2886 kfree(c->gap_lebs);
2887 kfree(c->ilebs);
2888 destroy_old_idx(c);
2889}
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899static struct ubifs_znode *left_znode(struct ubifs_info *c,
2900 struct ubifs_znode *znode)
2901{
2902 int level = znode->level;
2903
2904 while (1) {
2905 int n = znode->iip - 1;
2906
2907
2908 znode = znode->parent;
2909 if (!znode)
2910 return NULL;
2911 if (n >= 0) {
2912
2913 znode = get_znode(c, znode, n);
2914 if (IS_ERR(znode))
2915 return znode;
2916 while (znode->level != level) {
2917 n = znode->child_cnt - 1;
2918 znode = get_znode(c, znode, n);
2919 if (IS_ERR(znode))
2920 return znode;
2921 }
2922 break;
2923 }
2924 }
2925 return znode;
2926}
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936static struct ubifs_znode *right_znode(struct ubifs_info *c,
2937 struct ubifs_znode *znode)
2938{
2939 int level = znode->level;
2940
2941 while (1) {
2942 int n = znode->iip + 1;
2943
2944
2945 znode = znode->parent;
2946 if (!znode)
2947 return NULL;
2948 if (n < znode->child_cnt) {
2949
2950 znode = get_znode(c, znode, n);
2951 if (IS_ERR(znode))
2952 return znode;
2953 while (znode->level != level) {
2954 znode = get_znode(c, znode, 0);
2955 if (IS_ERR(znode))
2956 return znode;
2957 }
2958 break;
2959 }
2960 }
2961 return znode;
2962}
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
/**
 * lookup_znode - find the znode that references an index node.
 * @c: UBIFS file-system description object
 * @key: key of the index node
 * @level: level of the index node
 * @lnum: LEB number of the index node
 * @offs: offset of the index node
 *
 * Descends the TNC by @key to the parent level (@level + 1) and checks
 * whether the corresponding zbranch points at @lnum:@offs. For hashed
 * (colliding) keys, the znodes to the left and right are scanned as well.
 * Returns the znode, %NULL if it is not referenced, or an error pointer
 * (%-EINVAL for a negative @level, which may come off flash unvalidated).
 */
static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
					union ubifs_key *key, int level,
					int lnum, int offs)
{
	struct ubifs_znode *znode, *zn;
	int n, nn;

	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);

	/*
	 * The arguments have probably been read off flash, so don't assume
	 * they are valid.
	 */
	if (level < 0)
		return ERR_PTR(-EINVAL);

	/* Get the root znode */
	znode = c->zroot.znode;
	if (!znode) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return znode;
	}
	/* Check if it is the one we are looking for */
	if (c->zroot.lnum == lnum && c->zroot.offs == offs)
		return znode;
	/* Descend to the parent level i.e. (level + 1) */
	if (level >= znode->level)
		return NULL;
	while (1) {
		ubifs_search_zbranch(c, znode, key, &n);
		if (n < 0) {
			/*
			 * We reached a znode where the leftmost key is greater
			 * than the key we are searching for. This is the same
			 * situation as the one described in a huge comment at
			 * the end of the 'ubifs_lookup_level0()' function. And
			 * for exactly the same reasons we have to try to look
			 * left before giving up.
			 */
			znode = left_znode(c, znode);
			if (!znode)
				return NULL;
			if (IS_ERR(znode))
				return znode;
			ubifs_search_zbranch(c, znode, key, &n);
			ubifs_assert(n >= 0);
		}
		if (znode->level == level + 1)
			break;
		znode = get_znode(c, znode, n);
		if (IS_ERR(znode))
			return znode;
	}
	/* Check if the child is the one we are looking for */
	if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
		return get_znode(c, znode, n);
	/* If the key is unique, there is nowhere else to look */
	if (!is_hash_key(c, key))
		return NULL;
	/*
	 * The key is not unique and so may be also in the znodes to either
	 * side.
	 */
	zn = znode;
	nn = n;
	/* Look left */
	while (1) {
		/* Move one branch to the left */
		if (n)
			n -= 1;
		else {
			znode = left_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = znode->child_cnt - 1;
		}
		/* Check it */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);
		/* Stop if the key is less than the one we are looking for */
		if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
			break;
	}
	/* Back to the middle */
	znode = zn;
	n = nn;
	/* Look right */
	while (1) {
		/* Move one branch to the right */
		if (++n >= znode->child_cnt) {
			znode = right_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = 0;
		}
		/* Check it */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);
		/* Stop if the key is greater than the one we are looking for */
		if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
			break;
	}
	return NULL;
}
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
/**
 * is_idx_node_in_tnc - determine if an index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of the index node
 * @level: index node level
 * @lnum: LEB number of the index node
 * @offs: offset of the index node
 *
 * Returns %0 if the index node is not referenced by the TNC, %1 if it is
 * (and the corresponding znode is dirty), %2 if the znode is clean, or a
 * negative error code on failure.
 */
int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
		       int lnum, int offs)
{
	struct ubifs_znode *znode = lookup_znode(c, key, level, lnum, offs);

	if (!znode)
		return 0;
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	return ubifs_zn_dirty(znode) ? 1 : 2;
}
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
/**
 * is_leaf_node_in_tnc - determine if a non-index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @lnum: LEB number of the node
 * @offs: offset of the node
 *
 * Looks the key up at level 0 and checks whether the matching zbranch
 * points at @lnum:@offs. For hashed (colliding) keys, neighbouring
 * entries with the same key are checked in both directions. Must be
 * called with the TNC mutex held. Returns %1 if the node is referenced,
 * %0 if not, or a negative error code on failure.
 */
static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
			       int lnum, int offs)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *zn;
	int n, found, err, nn;
	const int unique = !is_hash_key(c, key);

	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (found < 0)
		return found;
	if (!found)
		return 0;
	zbr = &znode->zbranch[n];
	if (lnum == zbr->lnum && offs == zbr->offs)
		return 1;
	/* If the key is unique, there is nowhere else to look */
	if (unique)
		return 0;
	/*
	 * The key is not unique and so may be also in the znodes to either
	 * side.
	 */
	zn = znode;
	nn = n;
	/* Look left */
	while (1) {
		err = tnc_prev(c, &znode, &n);
		if (err == -ENOENT)
			break;
		if (err)
			return err;
		/* Stop at the first entry with a different key */
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1;
	}
	/* Back to the middle, then look right */
	znode = zn;
	n = nn;
	while (1) {
		err = tnc_next(c, &znode, &n);
		if (err) {
			if (err == -ENOENT)
				return 0;
			return err;
		}
		/* Stop at the first entry with a different key */
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1;
	}
	return 0;
}
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3216 int lnum, int offs, int is_idx)
3217{
3218 int err;
3219
3220 mutex_lock(&c->tnc_mutex);
3221 if (is_idx) {
3222 err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3223 if (err < 0)
3224 goto out_unlock;
3225 if (err == 1)
3226
3227 err = 0;
3228 else if (err == 2)
3229
3230 err = 1;
3231 else
3232 BUG_ON(err != 0);
3233 } else
3234 err = is_leaf_node_in_tnc(c, key, lnum, offs);
3235
3236out_unlock:
3237 mutex_unlock(&c->tnc_mutex);
3238 return err;
3239}
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3256 int lnum, int offs)
3257{
3258 struct ubifs_znode *znode;
3259 int err = 0;
3260
3261 mutex_lock(&c->tnc_mutex);
3262 znode = lookup_znode(c, key, level, lnum, offs);
3263 if (!znode)
3264 goto out_unlock;
3265 if (IS_ERR(znode)) {
3266 err = PTR_ERR(znode);
3267 goto out_unlock;
3268 }
3269 znode = dirty_cow_bottom_up(c, znode);
3270 if (IS_ERR(znode)) {
3271 err = PTR_ERR(znode);
3272 goto out_unlock;
3273 }
3274
3275out_unlock:
3276 mutex_unlock(&c->tnc_mutex);
3277 return err;
3278}
3279
3280#ifdef CONFIG_UBIFS_FS_DEBUG
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
/**
 * dbg_check_inode_size - check if inode size is correct.
 * @c: UBIFS file-system description object
 * @inode: inode to check
 * @size: inode size
 *
 * Debugging helper: verifies that a regular inode has no data nodes
 * beyond @size. Returns %0 if the inode is OK (or checking is disabled),
 * %-EINVAL if data exists past the end, or another negative error code
 * on lookup failure.
 */
int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
			 loff_t size)
{
	int err, n;
	union ubifs_key from_key, to_key, *key;
	struct ubifs_znode *znode;
	unsigned int block;

	if (!S_ISREG(inode->i_mode))
		return 0;
	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	/* First block that must NOT exist: one past the last valid block */
	block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	data_key_init(c, &from_key, inode->i_ino, block);
	highest_data_key(c, &to_key, inode->i_ino);

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, &from_key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* A data node exists exactly at the out-of-range block */
		err = -EINVAL;
		key = &from_key;
		goto out_dump;
	}

	/* Otherwise check whether the next key is still within the inode */
	err = tnc_next(c, &znode, &n);
	if (err == -ENOENT) {
		err = 0;
		goto out_unlock;
	}
	if (err < 0)
		goto out_unlock;

	ubifs_assert(err == 0);
	key = &znode->zbranch[n].key;
	if (!key_in_range(c, key, &from_key, &to_key))
		goto out_unlock;

	/* Falls through with a key past @size - report it */
out_dump:
	block = key_block(c, key);
	ubifs_err("inode %lu has size %lld, but there are data at offset %lld "
		  "(data key %s)", (unsigned long)inode->i_ino, size,
		  ((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key));
	dbg_dump_inode(c, inode);
	dbg_dump_stack();
	err = -EINVAL;

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
3347
3348#endif
3349