1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/crc32.h>
34#include <linux/slab.h>
35#include "ubifs.h"
36
37
38
39
40
41
42
43
44
45
46
/*
 * Result codes returned by matches_name() and fallible_matches_name() when
 * comparing a stored directory/xattr entry name against a looked-up name
 * during hash-collision resolution.
 */
enum {
	NAME_LESS = 0,    /* stored name sorts before the looked-up name */
	NAME_MATCHES = 1, /* stored name is an exact match */
	NAME_GREATER = 2, /* stored name sorts after the looked-up name */
	NOT_ON_MEDIA = 3, /* node referred to by the zbranch is not on the media */
};
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/**
 * insert_old_idx - record an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Adds an (lnum, offs) entry to the @c->old_idx RB-tree, which records the
 * on-flash positions of index nodes that have been obsoleted since the commit
 * began, so they are not overwritten before the commit completes.
 *
 * Returns %0 on success, and a negative error code on failure.
 */
static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *old_idx, *o;
	struct rb_node **p, *parent = NULL;

	old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
	if (unlikely(!old_idx))
		return -ENOMEM;
	old_idx->lnum = lnum;
	old_idx->offs = offs;

	/* The tree is ordered by (lnum, offs); find the link point */
	p = &c->old_idx.rb_node;
	while (*p) {
		parent = *p;
		o = rb_entry(parent, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = &(*p)->rb_left;
		else if (lnum > o->lnum)
			p = &(*p)->rb_right;
		else if (offs < o->offs)
			p = &(*p)->rb_left;
		else if (offs > o->offs)
			p = &(*p)->rb_right;
		else {
			/* Duplicate - complain but treat as success */
			ubifs_err("old idx added twice!");
			kfree(old_idx);
			return 0;
		}
	}
	rb_link_node(&old_idx->rb, parent, p);
	rb_insert_color(&old_idx->rb, &c->old_idx);
	return 0;
}
110
111
112
113
114
115
116
117
/**
 * insert_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * The on-flash position of @znode is taken from the zbranch that points to it
 * (the parent's zbranch, or @c->zroot for the root znode).  A zero length
 * means the znode has no on-flash location, so there is nothing to record.
 *
 * Returns %0 on success, and a negative error code on failure.
 */
int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
{
	if (znode->parent) {
		struct ubifs_zbranch *zbr;

		zbr = &znode->parent->zbranch[znode->iip];
		if (zbr->len)
			return insert_old_idx(c, zbr->lnum, zbr->offs);
	} else
		if (c->zroot.len)
			return insert_old_idx(c, c->zroot.lnum,
					      c->zroot.offs);
	return 0;
}
132
133
134
135
136
137
138
139
/**
 * ins_clr_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Like insert_old_idx_znode(), but additionally clears the on-flash position
 * (lnum/offs/len) in the referring zbranch (or in @c->zroot), marking the
 * znode as no longer present on the media.
 *
 * Returns %0 on success, and a negative error code on failure.
 */
static int ins_clr_old_idx_znode(struct ubifs_info *c,
				 struct ubifs_znode *znode)
{
	int err;

	if (znode->parent) {
		struct ubifs_zbranch *zbr;

		zbr = &znode->parent->zbranch[znode->iip];
		if (zbr->len) {
			err = insert_old_idx(c, zbr->lnum, zbr->offs);
			if (err)
				return err;
			zbr->lnum = 0;
			zbr->offs = 0;
			zbr->len = 0;
		}
	} else
		if (c->zroot.len) {
			err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
			if (err)
				return err;
			c->zroot.lnum = 0;
			c->zroot.offs = 0;
			c->zroot.len = 0;
		}
	return 0;
}
168
169
170
171
172
173
174
175
176
177
178
/**
 * destroy_old_idx - destroy the old_idx RB-tree.
 * @c: UBIFS file-system description object
 *
 * Frees every node of the @c->old_idx RB-tree and resets the tree to empty.
 * The tree is walked in post-order and nodes are simply detached from their
 * parent and freed - no rebalancing is done since the whole tree is going
 * away anyway.
 */
void destroy_old_idx(struct ubifs_info *c)
{
	struct rb_node *this = c->old_idx.rb_node;
	struct ubifs_old_idx *old_idx;

	while (this) {
		/* Descend to a leaf first */
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		old_idx = rb_entry(this, struct ubifs_old_idx, rb);
		this = rb_parent(this);
		if (this) {
			/* Detach the freed child so it is not visited again */
			if (this->rb_left == &old_idx->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(old_idx);
	}
	c->old_idx = RB_ROOT;
}
204
205
206
207
208
209
210
211
/**
 * copy_znode - copy a dirty znode.
 * @c: UBIFS file-system description object
 * @znode: znode to copy
 *
 * A dirty znode that is being committed may not be changed, so it is copied
 * instead.  The copy is marked dirty and not copied-on-write, the original is
 * marked obsolete, and for non-leaf znodes the children are re-parented to
 * the copy.
 *
 * Returns the new znode on success and a negative error code on failure.
 */
static struct ubifs_znode *copy_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	struct ubifs_znode *zn;

	zn = kmalloc(c->max_znode_sz, GFP_NOFS);
	if (unlikely(!zn))
		return ERR_PTR(-ENOMEM);

	memcpy(zn, znode, c->max_znode_sz);
	zn->cnext = NULL;
	__set_bit(DIRTY_ZNODE, &zn->flags);
	__clear_bit(COW_ZNODE, &zn->flags);

	/* The original znode is obsoleted by its copy */
	ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
	__set_bit(OBSOLETE_ZNODE, &znode->flags);

	if (znode->level != 0) {
		int i;
		const int n = zn->child_cnt;

		/* The children now have a new parent */
		for (i = 0; i < n; i++) {
			struct ubifs_zbranch *zbr = &zn->zbranch[i];

			if (zbr->znode)
				zbr->znode->parent = zn;
		}
	}

	atomic_long_inc(&c->dirty_zn_cnt);
	return zn;
}
245
246
247
248
249
250
251
252
253
/**
 * add_idx_dirt - add dirt due to a dirty znode.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of index node
 * @dirt: size of index node
 *
 * Subtracts the (8-byte aligned) index node size from the calculated index
 * size and accounts the space as dirty in LEB @lnum.
 */
static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
{
	c->calc_idx_sz -= ALIGN(dirt, 8);
	return ubifs_add_dirt(c, lnum, dirt);
}
259
260
261
262
263
264
265
266
/**
 * dirty_cow_znode - ensure a znode is not being committed.
 * @c: UBIFS file-system description object
 * @zbr: branch of znode to check
 *
 * If the znode is not being committed (%COW_ZNODE clear), it is simply marked
 * dirty (adjusting the clean/dirty counters and dirt accounting) and returned.
 * Otherwise it is copied, the old on-flash position is recorded in the
 * old-index tree, and the zbranch is updated to point at the copy with no
 * on-flash position.
 *
 * Returns the dirtied znode on success or a negative error code on failure.
 */
static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
					   struct ubifs_zbranch *zbr)
{
	struct ubifs_znode *znode = zbr->znode;
	struct ubifs_znode *zn;
	int err;

	if (!test_bit(COW_ZNODE, &znode->flags)) {
		/* znode is not being committed */
		if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
			atomic_long_inc(&c->dirty_zn_cnt);
			atomic_long_dec(&c->clean_zn_cnt);
			atomic_long_dec(&ubifs_clean_zn_cnt);
			err = add_idx_dirt(c, zbr->lnum, zbr->len);
			if (unlikely(err))
				return ERR_PTR(err);
		}
		return znode;
	}

	zn = copy_znode(c, znode);
	if (IS_ERR(zn))
		return zn;

	if (zbr->len) {
		/* The old on-flash index node is now obsolete */
		err = insert_old_idx(c, zbr->lnum, zbr->offs);
		if (unlikely(err))
			return ERR_PTR(err);
		err = add_idx_dirt(c, zbr->lnum, zbr->len);
	} else
		err = 0;

	/* The copy has no position on the media yet */
	zbr->znode = zn;
	zbr->lnum = 0;
	zbr->offs = 0;
	zbr->len = 0;

	if (unlikely(err))
		return ERR_PTR(err);
	return zn;
}
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
/**
 * lnc_add - add a leaf node to the leaf node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * The leaf node cache (LNC) saves re-reading the same "hashed" leaf node
 * (directory entry node) over and over again.  This function does not add
 * the @node object itself, but a freshly allocated copy, because @node was
 * allocated outside the TNC subsystem and will be used with the TNC mutex
 * unlocked.
 *
 * Returns %0 on success or a negative error code if the node is invalid.
 * An allocation failure is NOT an error - caching is only an optimization.
 */
static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
		   const void *node)
{
	int err;
	void *lnc_node;
	const struct ubifs_dent_node *dent = node;

	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);
	ubifs_assert(is_hash_key(c, &zbr->key));

	err = ubifs_validate_entry(c, dent);
	if (err) {
		dbg_dump_stack();
		dbg_dump_node(c, dent);
		return err;
	}

	lnc_node = kmalloc(zbr->len, GFP_NOFS);
	if (!lnc_node)
		/* We don't have to have the cache, so no error */
		return 0;

	memcpy(lnc_node, node, zbr->len);
	zbr->leaf = lnc_node;
	return 0;
}
356
357
358
359
360
361
362
363
364
365
/**
 * lnc_add_directly - add a leaf node to the leaf-node-cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * Unlike lnc_add(), this adds @node itself to the cache - ownership of the
 * @node allocation passes to the LNC on success.
 *
 * Returns %0 on success or a negative error code if the node is invalid
 * (in which case the caller still owns @node).
 */
static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);

	err = ubifs_validate_entry(c, node);
	if (err) {
		dbg_dump_stack();
		dbg_dump_node(c, node);
		return err;
	}

	zbr->leaf = node;
	return 0;
}
384
385
386
387
388
389
390static void lnc_free(struct ubifs_zbranch *zbr)
391{
392 if (!zbr->leaf)
393 return;
394 kfree(zbr->leaf);
395 zbr->leaf = NULL;
396}
397
398
399
400
401
402
403
404
405
406
407
408
/**
 * tnc_read_node_nm - read a "hashed" leaf node.
 * @c: UBIFS file-system description object
 * @zbr: key and position of the node
 * @node: node is returned here
 *
 * Reads the "hashed" node defined by @zbr from the leaf node cache if it is
 * there, otherwise from the flash media, in which case the node is also
 * added to the LNC.  Returns %0 on success or a negative error code on
 * failure.
 */
static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(is_hash_key(c, &zbr->key));

	if (zbr->leaf) {
		/* Cached in the LNC - just copy it out */
		ubifs_assert(zbr->len != 0);
		memcpy(node, zbr->leaf, zbr->len);
		return 0;
	}

	err = ubifs_tnc_read_node(c, zbr, node);
	if (err)
		return err;

	/* Add the node to the leaf node cache */
	err = lnc_add(c, zbr, node);
	return err;
}
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
/**
 * try_read_node - read a node on flash and check it.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: LEB number of node to read
 * @offs: offset of node to read
 *
 * Tries to read a node of known type and length, checks it, and stores it
 * in @buf.  Unlike a normal node read, the node is not required to actually
 * be present: this function returns %1 if a valid node is present and %0 if
 * it is not, and a negative error code only for I/O errors.
 *
 * Note: the CRC of data nodes is skipped when @c->no_chk_data_crc is set,
 * unless @c->always_chk_crc forces checking.
 */
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
			 int len, int lnum, int offs)
{
	int err, node_len;
	struct ubifs_ch *ch = buf;
	uint32_t crc, node_crc;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	if (err) {
		ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
			  type, lnum, offs, err);
		return err;
	}

	/* The common header must carry the UBIFS magic */
	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
		return 0;

	/* The node must be of the expected type */
	if (ch->node_type != type)
		return 0;

	/* And of the expected length */
	node_len = le32_to_cpu(ch->len);
	if (node_len != len)
		return 0;

	/* Optionally skip the (expensive) data-node CRC check */
	if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc)
		return 1;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc)
		return 0;

	return 1;
}
489
490
491
492
493
494
495
496
497
498
499
/**
 * fallible_read_node - try to read a leaf node.
 * @c: UBIFS file-system description object
 * @key: key of node to read
 * @zbr: position of node
 * @node: node returned
 *
 * Tries to read a node, additionally checking that the node's key matches
 * @key.  Returns %1 if the node is present and valid, %0 if it is not, or a
 * negative error code on I/O failure.  A %0 result while replaying the
 * journal indicates a dangling branch, which is expected during recovery.
 */
static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
			      struct ubifs_zbranch *zbr, void *node)
{
	int ret;

	dbg_tnc("LEB %d:%d, key %s", zbr->lnum, zbr->offs, DBGKEY(key));

	ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
			    zbr->offs);
	if (ret == 1) {
		union ubifs_key node_key;
		struct ubifs_dent_node *dent = node;

		/* All nodes have key in the same place */
		key_read(c, &dent->key, &node_key);
		if (keys_cmp(c, key, &node_key) != 0)
			ret = 0;
	}
	if (ret == 0 && c->replaying)
		dbg_mnt("dangling branch LEB %d:%d len %d, key %s",
			zbr->lnum, zbr->offs, zbr->len, DBGKEY(key));
	return ret;
}
523
524
525
526
527
528
529
530
531
532
533
534
/**
 * matches_name - determine if a direntry or xattr entry matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * Compares the name stored in the entry pointed to by @zbr with @nm, reading
 * the entry from the media (and caching it in the LNC) if it is not already
 * cached.  Returns %NAME_MATCHES, %NAME_LESS or %NAME_GREATER according to
 * how the stored name sorts against @nm, or a negative error code.
 */
static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = ubifs_tnc_read_node(c, zbr, dent);
		if (err)
			goto out_free;

		/* Add the node to the leaf node cache */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	/* Compare the common prefix first, then fall back to length */
	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}
576
577
578
579
580
581
582
583
584
585static struct ubifs_znode *get_znode(struct ubifs_info *c,
586 struct ubifs_znode *znode, int n)
587{
588 struct ubifs_zbranch *zbr;
589
590 zbr = &znode->zbranch[n];
591 if (zbr->znode)
592 znode = zbr->znode;
593 else
594 znode = ubifs_load_znode(c, zbr, znode, n);
595 return znode;
596}
597
598
599
600
601
602
603
604
605
606
/**
 * tnc_next - find next TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * Advances (@zn, @n) to the next leaf-level entry, walking up to an ancestor
 * and back down the leftmost path when the current znode is exhausted.
 * Returns %0 on success, %-ENOENT if there is no next entry, or another
 * negative error code on failure.
 */
static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	nn += 1;
	if (nn < znode->child_cnt) {
		/* Next entry is in the same znode */
		*n = nn;
		return 0;
	}
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip + 1;
		znode = zp;
		if (nn < znode->child_cnt) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* Descend leftmost path back to level 0 */
			while (znode->level != 0) {
				znode = get_znode(c, znode, 0);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = 0;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}
642
643
644
645
646
647
648
649
650
651
/**
 * tnc_prev - find previous TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * Moves (@zn, @n) to the previous leaf-level entry, walking up to an
 * ancestor and back down the rightmost path when the start of the current
 * znode is reached.  Returns %0 on success, %-ENOENT if there is no
 * previous entry, or another negative error code on failure.
 */
static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	if (nn > 0) {
		/* Previous entry is in the same znode */
		*n = nn - 1;
		return 0;
	}
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip - 1;
		znode = zp;
		if (nn >= 0) {
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* Descend rightmost path back to level 0 */
			while (znode->level != 0) {
				nn = znode->child_cnt - 1;
				znode = get_znode(c, znode, nn);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = znode->child_cnt - 1;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
/**
 * resolve_collision - resolve a collision.
 * @c: UBIFS file-system description object
 * @key: key of a directory or extended attribute entry
 * @zn: znode is returned here
 * @n: zbranch number is passed and returned here
 * @nm: name of the entry
 *
 * Called for "hashed" keys to make sure the found entry really corresponds
 * to the looked-up name, since different names can hash to the same key.
 * Entries with equal keys are ordered by name, so this scans left or right
 * from the found position as needed.
 *
 * Returns %1 and sets @zn and @n if the collision is resolved.  Returns %0
 * if @nm is not found, with @zn and @n set to the entry after which @nm
 * would follow if it were in the TNC (@n may be %-1 meaning "before the
 * first entry of @zn").  A negative error code is returned on failure.
 */
static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
			     struct ubifs_znode **zn, int *n,
			     const struct qstr *nm)
{
	int err;

	err = matches_name(c, &(*zn)->zbranch[*n], nm);
	if (unlikely(err < 0))
		return err;
	if (err == NAME_MATCHES)
		return 1;

	if (err == NAME_GREATER) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				return 0;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/*
				 * The key differs, so there are no more
				 * entries with the sought name to the left.
				 * However, if we stopped at the last slot of
				 * a znode, the logical insertion point is at
				 * the start of the *next* znode: step
				 * forward and report "before the first
				 * entry" (*n = -1) so a subsequent insertion
				 * lands in the correct znode.
				 */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				return 0;
			}
			err = matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_LESS)
				return 0;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_GREATER);
		}
	} else {
		int nn = *n;
		struct ubifs_znode *znode = *zn;

		/* Look right */
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				return 0;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				return 0;
			err = matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				return 0;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_LESS);
		}
	}
}
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
/**
 * fallible_matches_name - determine if a dent matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * Like matches_name(), but reads the entry fallibly: if the node is not
 * actually present on the media, %NOT_ON_MEDIA is returned instead of an
 * error.  Otherwise returns %NAME_MATCHES, %NAME_LESS, %NAME_GREATER, or a
 * negative error code.
 */
static int fallible_matches_name(struct ubifs_info *c,
				 struct ubifs_zbranch *zbr,
				 const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* If possible, match against the dent in the leaf node cache */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = fallible_read_node(c, &zbr->key, zbr, dent);
		if (err < 0)
			goto out_free;
		if (err == 0) {
			/* The node was not present */
			err = NOT_ON_MEDIA;
			goto out_free;
		}
		ubifs_assert(err == 1);

		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	/* Compare the common prefix first, then fall back to length */
	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
/**
 * fallible_resolve_collision - resolve a collision even if nodes are missing.
 * @c: UBIFS file-system description object
 * @key: key
 * @zn: znode is returned here
 * @n: branch number is passed and returned here
 * @nm: name of directory entry
 * @adding: indicates caller is adding a key to the TNC
 *
 * Like resolve_collision(), but used when the journal is being replayed and
 * some referenced nodes may not be present on the media.  A branch whose
 * node is missing ("dangling") is remembered in (@o_znode, @o_n); if no real
 * match is found and we are not adding, the dangling match is returned so a
 * replayed deletion can still find its target.
 *
 * Returns %1 and sets @zn and @n if the collision is resolved.  Returns %0
 * if @nm is not found (with @zn and @n set to the previous entry, possibly
 * @n = %-1).  A negative error code is returned on failure.
 */
static int fallible_resolve_collision(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      const struct qstr *nm, int adding)
{
	struct ubifs_znode *o_znode = NULL, *znode = *zn;
	int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;

	cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
	if (unlikely(cmp < 0))
		return cmp;
	if (cmp == NAME_MATCHES)
		return 1;
	if (cmp == NOT_ON_MEDIA) {
		o_znode = znode;
		o_n = nn;
		/*
		 * We are unlucky and hit a dangling branch straight away.
		 * Now we do not really know where to go to find the needed
		 * branch - to the left or to the right. Well, let us try left.
		 */
		unsure = 1;
	} else if (!adding)
		unsure = 1; /* Remove a dangling branch wherever it is */

	if (cmp == NAME_GREATER || unsure) {
		/* Look left */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				break;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/* See resolve_collision() for details */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				break;
			}
			err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = *zn;
				o_n = *n;
				continue;
			}
			if (!adding)
				continue;
			if (err == NAME_LESS)
				break;
			else
				unsure = 0;
		}
	}

	if (cmp == NAME_LESS || unsure) {
		/* Look right */
		*zn = znode;
		*n = nn;
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				break;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				break;
			err = fallible_matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				break;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = znode;
				o_n = nn;
			}
		}
	}

	/* Never match a dangling branch when adding */
	if (adding || !o_znode)
		return 0;

	dbg_mnt("dangling match LEB %d:%d len %d %s",
		o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
		o_znode->zbranch[o_n].len, DBGKEY(key));
	*zn = o_znode;
	*n = o_n;
	return 1;
}
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
1012{
1013 if (zbr->lnum == lnum && zbr->offs == offs)
1014 return 1;
1015 else
1016 return 0;
1017}
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
/**
 * resolve_collision_directly - resolve a collision by the on-flash position.
 * @c: UBIFS file-system description object
 * @key: key of directory entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @lnum: LEB number of dent node to match
 * @offs: offset of dent node to match
 *
 * Used when the exact on-flash position of the wanted entry is already
 * known, so the collision is resolved by scanning the equal-key entries
 * left then right for a matching (@lnum, @offs) instead of comparing names.
 * Returns %1 and sets @zn and @n if found, %0 if not, or a negative error
 * code on failure.
 */
static int resolve_collision_directly(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      int lnum, int offs)
{
	struct ubifs_znode *znode;
	int nn, err;

	znode = *zn;
	nn = *n;
	if (matches_position(&znode->zbranch[nn], lnum, offs))
		return 1;

	/* Look left */
	while (1) {
		err = tnc_prev(c, &znode, &nn);
		if (err == -ENOENT)
			break;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			break;
		if (matches_position(&znode->zbranch[nn], lnum, offs)) {
			*zn = znode;
			*n = nn;
			return 1;
		}
	}

	/* Look right */
	znode = *zn;
	nn = *n;
	while (1) {
		err = tnc_next(c, &znode, &nn);
		if (err == -ENOENT)
			return 0;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			return 0;
		*zn = znode;
		*n = nn;
		if (matches_position(&znode->zbranch[nn], lnum, offs))
			return 1;
	}
}
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
/**
 * dirty_cow_bottom_up - dirty a znode and its ancestors.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * If the key that resides in a znode is not unique, the znode cannot be
 * dirtied from the top down.  Instead, this function records the path of
 * slot numbers from @znode up to the last already-dirty ancestor (or the
 * root), then walks back down that path applying dirty_cow_znode() at each
 * level.
 *
 * Returns the dirtied znode on success or a negative error code on failure.
 */
static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
					       struct ubifs_znode *znode)
{
	struct ubifs_znode *zp;
	int *path = c->bottom_up_buf, p = 0;

	ubifs_assert(c->zroot.znode);
	ubifs_assert(znode);
	/* The static buffer may be too small for a tall tree - grow it */
	if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
		kfree(c->bottom_up_buf);
		c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
					   GFP_NOFS);
		if (!c->bottom_up_buf)
			return ERR_PTR(-ENOMEM);
		path = c->bottom_up_buf;
	}
	if (c->zroot.znode->level) {
		/* Go up until parent is dirty (and not being committed) */
		while (1) {
			int n;

			zp = znode->parent;
			if (!zp)
				break;
			n = znode->iip;
			ubifs_assert(p < c->zroot.znode->level);
			path[p++] = n;
			if (!zp->cnext && ubifs_zn_dirty(znode))
				break;
			znode = zp;
		}
	}

	/* Come back down, dirtying as we go */
	while (1) {
		struct ubifs_zbranch *zbr;

		zp = znode->parent;
		if (zp) {
			ubifs_assert(path[p - 1] >= 0);
			ubifs_assert(path[p - 1] < zp->child_cnt);
			zbr = &zp->zbranch[path[--p]];
			znode = dirty_cow_znode(c, zbr);
		} else {
			ubifs_assert(znode == c->zroot.znode);
			znode = dirty_cow_znode(c, &c->zroot);
		}
		if (IS_ERR(znode) || !p)
			break;
		ubifs_assert(path[p - 1] >= 0);
		ubifs_assert(path[p - 1] < znode->child_cnt);
		znode = znode->zbranch[path[p - 1]].znode;
	}

	return znode;
}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
/**
 * ubifs_lookup_level0 - search for zero-level znode.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * Traverses the TNC from the root down to level 0, loading znodes from the
 * media as necessary.  Returns %1 if @key was found (exact match), %0 if
 * not, and a negative error code on failure.  On a %0 return, @n is set to
 * the slot of the last key less than @key (or %-1 if @key is smaller than
 * every key in @zn).
 */
int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
			struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnc("search key %s", DBGKEY(key));
	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = zbr->znode;
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * Here is a tricky place: the looked-up key is a "hashed" key, there
	 * was no exact match, and the search stopped before the first slot
	 * of this znode (*n == -1).  Because several different names may
	 * share the same hashed key, an entry with the same key may sit at
	 * the end of the *previous* znode - a znode may have been split
	 * between entries with equal keys.  So step back one entry and
	 * check whether its key matches.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
/**
 * lookup_level0_dirty - search for zero-level znode dirtying.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * Like ubifs_lookup_level0(), but every znode on the path from the root to
 * level 0 is dirtied (copy-on-write if being committed) so the caller may
 * modify the found entry.  Returns %1 if @key was found, %0 if not (with @n
 * set to the slot of the last key less than @key, possibly %-1), and a
 * negative error code on failure.
 */
static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
			       struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnc("search and dirty key %s", DBGKEY(key));

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode = dirty_cow_znode(c, &c->zroot);
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			continue;
		}

		/* znode is not in TNC cache, load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
		znode = dirty_cow_znode(c, zbr);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * See ubifs_lookup_level0(): for hashed keys an equal key may be at
	 * the end of the previous znode, so step back and check.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}

	/* The previous znode was reached without dirtying - dirty it now */
	if (znode->cnext || !ubifs_zn_dirty(znode)) {
		znode = dirty_cow_bottom_up(c, znode);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @gc_seq1: garbage collection sequence number sampled before the read
 *
 * Determines, without taking any locks, whether LEB @lnum may have been
 * garbage collected since sequence number @gc_seq1 was sampled.  Returns %1
 * if it may have been (the caller must retry safely), %0 otherwise.
 *
 * NOTE(review): the smp_rmb() calls order the reads of @c->gced_lnum and
 * @c->gc_seq against each other; do not reorder these statements.
 */
static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
{
	int gc_seq2, gced_lnum;

	gced_lnum = c->gced_lnum;
	smp_rmb();
	gc_seq2 = c->gc_seq;

	/* Same seq means no GC happened at all */
	if (gc_seq1 == gc_seq2)
		return 0;

	/* Different by more than 1 means we don't know which LEBs were GC'd */
	if (gc_seq1 + 1 != gc_seq2)
		return 1;

	/*
	 * We have seen the sequence number has increased by 1. Now we need
	 * to be sure we read the right LEB number, so re-check it.
	 */
	smp_rmb();
	if (gced_lnum != c->gced_lnum)
		return 1;

	/* Finally we can check lnum */
	if (gced_lnum == lnum)
		return 1;
	return 0;
}
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
/**
 * ubifs_tnc_locate - look up a file-system node and return it and its location.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @lnum: LEB number is returned here (may be %NULL)
 * @offs: offset is returned here (valid only if @lnum is not %NULL)
 *
 * Looks up and reads the node at @key.  For speed, the read is first
 * attempted with the TNC mutex *unlocked*; if garbage collection may have
 * moved the node in the meantime (detected via maybe_leb_gced()), the
 * lookup is retried and the node read safely under the mutex.
 *
 * Returns %0 on success, %-ENOENT if the node was not found, or another
 * negative error code on failure.
 */
int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
		     void *node, int *lnum, int *offs)
{
	int found, n, err, safely = 0, gc_seq1;
	struct ubifs_znode *znode;
	struct ubifs_zbranch zbr, *zt;

again:
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out;
	} else if (found < 0) {
		err = found;
		goto out;
	}
	zt = &znode->zbranch[n];
	if (lnum) {
		*lnum = zt->lnum;
		*offs = zt->offs;
	}
	if (is_hash_key(c, key)) {
		/*
		 * In this case the leaf node cache gets used, so we pass the
		 * address of the zbranch and keep the mutex locked
		 */
		err = tnc_read_node_nm(c, zt, node);
		goto out;
	}
	if (safely) {
		/* Retry path: read under the mutex, no races possible */
		err = ubifs_tnc_read_node(c, zt, node);
		goto out;
	}
	/* Drop the TNC mutex prematurely and race with garbage collection */
	zbr = znode->zbranch[n];
	gc_seq1 = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);

	if (ubifs_get_wbuf(c, zbr.lnum)) {
		/* We do not GC journal heads */
		err = ubifs_tnc_read_node(c, &zbr, node);
		return err;
	}

	err = fallible_read_node(c, key, &zbr, node);
	if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
		/*
		 * The node may have been moved by garbage collection while
		 * we were reading - redo the lookup the safe way.
		 */
		safely = 1;
		goto again;
	}
	return 0;

out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Collects into @bu->zbranch[] a run of consecutive data-node zbranches,
 * starting at @bu->key, that lie in consecutive positions in the same LEB
 * and fit in @bu->buf_len, so they can be read with a single media read.
 * @bu->eof is set when the end of the inode's data is reached.  Trailing
 * nodes that would only make up a partial page are dropped.
 *
 * Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
	int n, err = 0, lnum = -1, uninitialized_var(offs);
	int uninitialized_var(len);
	unsigned int block = key_block(c, &bu->key);
	struct ubifs_znode *znode;

	bu->cnt = 0;
	bu->blk_cnt = 0;
	bu->eof = 0;

	mutex_lock(&c->tnc_mutex);
	/* Find first key */
	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
	if (err < 0)
		goto out;
	if (err) {
		/* Key found */
		len = znode->zbranch[n].len;
		/* The buffer must be big enough for at least 1 node */
		if (len > bu->buf_len) {
			err = -EINVAL;
			goto out;
		}
		/* Add this key */
		bu->zbranch[bu->cnt++] = znode->zbranch[n];
		bu->blk_cnt += 1;
		lnum = znode->zbranch[n].lnum;
		offs = ALIGN(znode->zbranch[n].offs + len, 8);
	}
	while (1) {
		struct ubifs_zbranch *zbr;
		union ubifs_key *key;
		unsigned int next_block;

		/* Find next key */
		err = tnc_next(c, &znode, &n);
		if (err)
			goto out;
		zbr = &znode->zbranch[n];
		key = &zbr->key;
		/* See if there is another data key for this file */
		if (key_inum(c, key) != key_inum(c, &bu->key) ||
		    key_type(c, key) != UBIFS_DATA_KEY) {
			err = -ENOENT;
			goto out;
		}
		if (lnum < 0) {
			/* First key found */
			lnum = zbr->lnum;
			offs = ALIGN(zbr->offs + zbr->len, 8);
			len = zbr->len;
			if (len > bu->buf_len) {
				err = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * The data nodes must be in consecutive positions in
			 * the same LEB.
			 */
			if (zbr->lnum != lnum || zbr->offs != offs)
				goto out;
			offs += ALIGN(zbr->len, 8);
			len = ALIGN(len, 8) + zbr->len;
			/* Must not exceed buffer length */
			if (len > bu->buf_len)
				goto out;
		}
		/* Allow for holes between the data nodes */
		next_block = key_block(c, key);
		bu->blk_cnt += (next_block - block - 1);
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		block = next_block;
		/* Add this key */
		bu->zbranch[bu->cnt++] = *zbr;
		bu->blk_cnt += 1;
		/* See if we have room for more */
		if (bu->cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
	}
out:
	if (err == -ENOENT) {
		/* Ran off the end of the inode's data - not an error */
		bu->eof = 1;
		err = 0;
	}
	bu->gc_seq = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);
	if (err)
		return err;
	/*
	 * An enormous hole could cause bulk-read to encompass too many
	 * page cache pages, so limit the number here.
	 */
	if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
		bu->blk_cnt = UBIFS_MAX_BULK_READ;
	/*
	 * Ensure that bulk-read covers a whole number of page cache
	 * pages.
	 */
	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
		return 0;
	if (bu->eof) {
		/* At the end of file we can round up */
		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
		return 0;
	}
	/* Exclude data nodes that do not make up a whole page cache page */
	block = key_block(c, &bu->key) + bu->blk_cnt;
	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
	while (bu->cnt) {
		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
			break;
		bu->cnt -= 1;
	}
	return 0;
}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * Reads @len bytes from LEB @lnum at @offs.  If the requested range
 * overlaps data still sitting in the write-buffer, the overlapping tail is
 * copied from the wbuf (under its spinlock) and only the preceding part is
 * read from the media.  Returns %0 on success or a negative error code.
 */
static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
		     int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int rlen, overlap;

	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(offs + len <= c->leb_size);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubi_read(c->ubi, lnum, buf, offs, len);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0)
		/* Read everything that goes before the write-buffer */
		return ubi_read(c->ubi, lnum, buf, offs, rlen);

	return 0;
}
1683
1684
1685
1686
1687
1688
1689
1690
1691
/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node
 * @zbr: zbranch of data node to validate
 *
 * Checks that the node in @buf is a data node with the correct header,
 * length, and key for @zbr.  Returns %0 if the node is all right and a
 * negative error code if not.
 */
static int validate_data_node(struct ubifs_info *c, void *buf,
			      struct ubifs_zbranch *zbr)
{
	union ubifs_key key1;
	struct ubifs_ch *ch = buf;
	int err, len;

	if (ch->node_type != UBIFS_DATA_NODE) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, UBIFS_DATA_NODE);
		goto out_err;
	}

	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
		goto out;
	}

	len = le32_to_cpu(ch->len);
	if (len != zbr->len) {
		ubifs_err("bad node length %d, expected %d", len, zbr->len);
		goto out_err;
	}

	/* Make sure the key of the read node is correct */
	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
	if (!keys_eq(c, &zbr->key, &key1)) {
		ubifs_err("bad key in node at LEB %d:%d",
			  zbr->lnum, zbr->offs);
		dbg_tnc("looked for key %s found node's key %s",
			DBGKEY(&zbr->key), DBGKEY1(&key1));
		goto out_err;
	}

	return 0;

out_err:
	err = -EINVAL;
out:
	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return err;
}
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Reads and validates the data nodes described by @bu->zbranch[] (which
 * ubifs_tnc_get_bu_keys() guaranteed to be consecutive in one LEB) with a
 * single media read.  Returns %0 on success, %-EAGAIN if the LEB may have
 * been garbage collected since @bu->gc_seq (caller should fall back to
 * reading node by node), or another negative error code on failure.
 */
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
{
	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
	struct ubifs_wbuf *wbuf;
	void *buf;

	len = bu->zbranch[bu->cnt - 1].offs;
	len += bu->zbranch[bu->cnt - 1].len - offs;
	if (len > bu->buf_len) {
		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
		return -EINVAL;
	}

	/* Do the read */
	wbuf = ubifs_get_wbuf(c, lnum);
	if (wbuf)
		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
	else
		err = ubi_read(c->ubi, lnum, bu->buf, offs, len);

	/* Check for a race with GC */
	if (maybe_leb_gced(c, lnum, bu->gc_seq))
		return -EAGAIN;

	if (err && err != -EBADMSG) {
		ubifs_err("failed to read from LEB %d:%d, error %d",
			  lnum, offs, err);
		dbg_dump_stack();
		dbg_tnc("key %s", DBGKEY(&bu->key));
		return err;
	}

	/* Validate the nodes read */
	buf = bu->buf;
	for (i = 0; i < bu->cnt; i++) {
		err = validate_data_node(c, buf, &bu->zbranch[i]);
		if (err)
			return err;
		buf = buf + ALIGN(bu->zbranch[i].len, 8);
	}

	return 0;
}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * Looks up and reads a node which contains a name hash in the key.  Since
 * the hash may collide, the collision is resolved by comparing names.
 * Returns %0 on success, %-ENOENT if the node was not found, or another
 * negative error code on failure.
 */
static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int found, n, err;
	struct ubifs_znode *znode;

	dbg_tnc("name '%.*s' key %s", nm->len, nm->name, DBGKEY(key));
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out_unlock;
	} else if (found < 0) {
		err = found;
		goto out_unlock;
	}

	ubifs_assert(n >= 0);

	err = resolve_collision(c, key, &znode, &n, nm);
	dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
	if (unlikely(err < 0))
		goto out_unlock;
	if (err == 0) {
		err = -ENOENT;
		goto out_unlock;
	}

	err = tnc_read_node_nm(c, &znode->zbranch[n], node);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * Looks up and reads a node which contains a name hash in the key.  The
 * fast path assumes there are no hash collisions; only if the name read
 * back does not match @nm is the slower collision-resolving lookup done.
 * Returns %0 on success, %-ENOENT if the node was not found, or another
 * negative error code on failure.
 */
int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int err, len;
	const struct ubifs_dent_node *dent = node;

	/*
	 * We assume that in most of the cases there are no name collisions
	 * and do a fast look-up first.
	 */
	err = ubifs_tnc_lookup(c, key, node);
	if (err)
		return err;

	len = le16_to_cpu(dent->nlen);
	if (nm->len == len && !memcmp(dent->name, nm->name, len))
		return 0;

	/*
	 * Unluckily, there are hash collisions and we have to iterate over
	 * them look at each direntry with colliding name hash sequentially.
	 */
	return do_lookup_nm(c, key, node, nm);
}
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
/**
 * correct_parent_keys - propagate a changed lowest key to parent znodes.
 * @c: UBIFS file-system description object
 * @znode: znode whose leftmost key changed
 *
 * When the key of the leftmost zbranch of @znode changes, the keys stored in
 * the parent znodes may become stale.  This helper (called from
 * 'tnc_insert()') copies the new lowest key upwards: at each level the
 * parent's zbranch 0 key is overwritten while it is still greater than the
 * child's lowest key.  The walk stops at the root or at the first ancestor
 * that is not itself a leftmost child (iip != 0).
 */
static void correct_parent_keys(const struct ubifs_info *c,
				struct ubifs_znode *znode)
{
	union ubifs_key *key, *key1;

	ubifs_assert(znode->parent);
	ubifs_assert(znode->iip == 0);

	/* key: new lowest key of the subtree; key1: parent's recorded key */
	key = &znode->zbranch[0].key;
	key1 = &znode->parent->zbranch[0].key;

	while (keys_cmp(c, key, key1) < 0) {
		key_copy(c, key, key1);
		znode = znode->parent;
		/* Parent's lowest key changed - mark it "altered" */
		znode->alt = 1;
		if (!znode->parent || znode->iip)
			break;
		key1 = &znode->parent->zbranch[0].key;
	}
}
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919static void insert_zbranch(struct ubifs_znode *znode,
1920 const struct ubifs_zbranch *zbr, int n)
1921{
1922 int i;
1923
1924 ubifs_assert(ubifs_zn_dirty(znode));
1925
1926 if (znode->level) {
1927 for (i = znode->child_cnt; i > n; i--) {
1928 znode->zbranch[i] = znode->zbranch[i - 1];
1929 if (znode->zbranch[i].znode)
1930 znode->zbranch[i].znode->iip = i;
1931 }
1932 if (zbr->znode)
1933 zbr->znode->iip = n;
1934 } else
1935 for (i = znode->child_cnt; i > n; i--)
1936 znode->zbranch[i] = znode->zbranch[i - 1];
1937
1938 znode->zbranch[n] = *zbr;
1939 znode->child_cnt += 1;
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955 if (n == 0)
1956 znode->alt = 1;
1957}
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
/**
 * tnc_insert - insert a zbranch into the TNC B+-tree.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: zbranch to insert
 * @n: slot number at which to insert
 *
 * Inserts @zbr at slot @n of @znode.  If the znode is full it is split; the
 * split may propagate upwards (implemented as a goto-based tail recursion via
 * the "again" label), possibly creating a new root.  The split point is
 * biased so that runs of consecutive data keys of the same inode are not
 * broken apart.  Returns %0 on success or %-ENOMEM if a new znode could not
 * be allocated.
 */
static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
		      struct ubifs_zbranch *zbr, int n)
{
	struct ubifs_znode *zn, *zi, *zp;
	int i, keep, move, appending = 0;
	union ubifs_key *key = &zbr->key, *key1;

	ubifs_assert(n >= 0 && n <= c->fanout);

again:
	zp = znode->parent;
	if (znode->child_cnt < c->fanout) {
		/* There is room - plain insertion, no split needed */
		ubifs_assert(n != c->fanout);
		dbg_tnc("inserted at %d level %d, key %s", n, znode->level,
			DBGKEY(key));

		insert_zbranch(znode, zbr, n);

		/* Inserting at slot 0 changes the subtree's lowest key */
		if (n == 0 && zp && znode->iip == 0)
			correct_parent_keys(c, znode);

		return 0;
	}

	/* The znode is full - it has to be split */
	dbg_tnc("splitting level %d, key %s", znode->level, DBGKEY(key));

	if (znode->alt)
		/*
		 * The znode's key changed earlier, so record its on-media
		 * position in the old-index tree before mutating it further.
		 */
		ins_clr_old_idx_znode(c, znode);

	zn = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zn)
		return -ENOMEM;
	zn->parent = zp;
	zn->level = znode->level;

	/*
	 * Choose the split point.  For level-0 data keys, try to keep
	 * consecutive data nodes of the same inode together ("appending"
	 * detection for sequential writes).
	 */
	if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
		/* Inserting past the end after a same-inode data key? */
		if (n == c->fanout) {
			key1 = &znode->zbranch[n - 1].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY)
				appending = 1;
		} else
			goto check_split;
	} else if (appending && n != c->fanout) {
		/* Not appending after all - fall back to normal split */
		appending = 0;
check_split:
		/*
		 * If the insertion point lies exactly on the boundary of a
		 * same-inode data run, split right there so the run stays
		 * in one znode.
		 */
		if (n >= (c->fanout + 1) / 2) {
			key1 = &znode->zbranch[0].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY) {
				key1 = &znode->zbranch[n].key;
				if (key_inum(c, key1) != key_inum(c, key) ||
				    key_type(c, key1) != UBIFS_DATA_KEY) {
					keep = n;
					move = c->fanout - keep;
					zi = znode;
					goto do_split;
				}
			}
		}
	}

	if (appending) {
		/* Keep everything; new key goes alone into the new znode */
		keep = c->fanout;
		move = 0;
	} else {
		/* Even split */
		keep = (c->fanout + 1) / 2;
		move = c->fanout - keep;
	}

	/* Decide which of the two znodes receives the new zbranch */
	if (n < keep) {
		/* Insert into the existing znode */
		zi = znode;
		move += 1;
		keep -= 1;
	} else {
		/* Insert into the new znode */
		zi = zn;
		n -= keep;
		/* Re-parent the in-memory child, if any */
		if (zn->level != 0)
			zbr->znode->parent = zn;
	}

do_split:

	__set_bit(DIRTY_ZNODE, &zn->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	zn->child_cnt = move;
	znode->child_cnt = keep;

	dbg_tnc("moving %d, keeping %d", move, keep);

	/* Move the tail zbranches into the new znode */
	for (i = 0; i < move; i++) {
		zn->zbranch[i] = znode->zbranch[keep + i];
		/* Re-parent any in-memory children */
		if (zn->level != 0)
			if (zn->zbranch[i].znode) {
				zn->zbranch[i].znode->parent = zn;
				zn->zbranch[i].znode->iip = i;
			}
	}

	/* Insert the new key and branch */
	dbg_tnc("inserting at %d level %d, key %s", n, zn->level, DBGKEY(key));

	insert_zbranch(zi, zbr, n);

	/* Insert the new znode (produced by splitting) into the parent */
	if (zp) {
		if (n == 0 && zi == znode && znode->iip == 0)
			correct_parent_keys(c, znode);

		/* The new znode goes right after the split one */
		n = znode->iip + 1;

		/*
		 * Tail recursion: reuse @zbr to describe the new znode and
		 * insert it into the parent.
		 */
		zbr->key = zn->zbranch[0].key;
		zbr->znode = zn;
		zbr->lnum = 0;
		zbr->offs = 0;
		zbr->len = 0;
		znode = zp;

		goto again;
	}

	/* We have split the root znode - create a new root */
	dbg_tnc("creating new zroot at level %d", znode->level + 1);

	zi = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zi)
		return -ENOMEM;

	zi->child_cnt = 2;
	zi->level = znode->level + 1;

	__set_bit(DIRTY_ZNODE, &zi->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	/* Old root keeps its on-media position; new znode has none yet */
	zi->zbranch[0].key = znode->zbranch[0].key;
	zi->zbranch[0].znode = znode;
	zi->zbranch[0].lnum = c->zroot.lnum;
	zi->zbranch[0].offs = c->zroot.offs;
	zi->zbranch[0].len = c->zroot.len;
	zi->zbranch[1].key = zn->zbranch[0].key;
	zi->zbranch[1].znode = zn;

	c->zroot.lnum = 0;
	c->zroot.offs = 0;
	c->zroot.len = 0;
	c->zroot.znode = zi;

	zn->parent = zi;
	zn->iip = 1;
	znode->parent = zi;
	znode->iip = 0;

	return 0;
}
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2165 int offs, int len)
2166{
2167 int found, n, err = 0;
2168 struct ubifs_znode *znode;
2169
2170 mutex_lock(&c->tnc_mutex);
2171 dbg_tnc("%d:%d, len %d, key %s", lnum, offs, len, DBGKEY(key));
2172 found = lookup_level0_dirty(c, key, &znode, &n);
2173 if (!found) {
2174 struct ubifs_zbranch zbr;
2175
2176 zbr.znode = NULL;
2177 zbr.lnum = lnum;
2178 zbr.offs = offs;
2179 zbr.len = len;
2180 key_copy(c, key, &zbr.key);
2181 err = tnc_insert(c, znode, &zbr, n + 1);
2182 } else if (found == 1) {
2183 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2184
2185 lnc_free(zbr);
2186 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2187 zbr->lnum = lnum;
2188 zbr->offs = offs;
2189 zbr->len = len;
2190 } else
2191 err = found;
2192 if (!err)
2193 err = dbg_check_tnc(c, 0);
2194 mutex_unlock(&c->tnc_mutex);
2195
2196 return err;
2197}
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @old_lnum: LEB number of the old node position
 * @old_offs: offset of the old node position
 * @lnum: LEB number of the new node position
 * @offs: offset of the new node position
 * @len: node length
 *
 * The replacement only happens if the TNC still references the old position
 * @old_lnum:@old_offs (for hashed keys, colliding entries are searched
 * directly by position).  If the old position is no longer referenced, the
 * new node at @lnum:@offs is immediately accounted as dirty (obsolete)
 * space instead.  Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
		      int old_lnum, int old_offs, int lnum, int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("old LEB %d:%d, new LEB %d:%d, len %d, key %s", old_lnum,
		old_offs, lnum, offs, len, DBGKEY(key));
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		found = 0;
		if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
			/* The direct match still points at the old place */
			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			if (err)
				goto out_unlock;
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			found = 1;
		} else if (is_hash_key(c, key)) {
			/* A colliding entry may reference the old place */
			found = resolve_collision_directly(c, key, &znode, &n,
							   old_lnum, old_offs);
			dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
				found, znode, n, old_lnum, old_offs);
			if (found < 0) {
				err = found;
				goto out_unlock;
			}

			if (found) {
				/* Ensure the znode is dirtied before update */
				if (znode->cnext || !ubifs_zn_dirty(znode)) {
					znode = dirty_cow_bottom_up(c, znode);
					if (IS_ERR(znode)) {
						err = PTR_ERR(znode);
						goto out_unlock;
					}
				}
				zbr = &znode->zbranch[n];
				lnc_free(zbr);
				err = ubifs_add_dirt(c, zbr->lnum,
						     zbr->len);
				if (err)
					goto out_unlock;
				zbr->lnum = lnum;
				zbr->offs = offs;
				zbr->len = len;
			}
		}
	}

	/* Old position not referenced - the new node is already obsolete */
	if (!found)
		err = ubifs_add_dirt(c, lnum, len);

	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
/**
 * ubifs_tnc_add_nm - add a "hashed" node (e.g. directory entry) to the TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of the node
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * Like 'ubifs_tnc_add()', but resolves hash-key collisions by name.  During
 * journal replay a fallible collision resolution is used and, after a fresh
 * insert, a possible dangling branch with the same key is removed by
 * deleting with an unmatchable (empty) name.  Returns %0 on success or a
 * negative error code on failure.
 */
int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
		     int lnum, int offs, int len, const struct qstr *nm)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("LEB %d:%d, name '%.*s', key %s", lnum, offs, nm->len, nm->name,
		DBGKEY(key));
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		/* The key exists - find the entry with the matching name */
		if (c->replaying)
			found = fallible_resolve_collision(c, key, &znode, &n,
							   nm, 1);
		else
			found = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
		if (found < 0) {
			err = found;
			goto out_unlock;
		}

		/* Ensure the znode is dirtied before we modify it */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		if (found == 1) {
			/* Same name exists - replace its position */
			struct ubifs_zbranch *zbr = &znode->zbranch[n];

			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			goto out_unlock;
		}
	}

	if (!found) {
		/* No entry with this name - insert a new zbranch */
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
		if (err)
			goto out_unlock;
		if (c->replaying) {
			/*
			 * We did not find the entry in the index, so there may
			 * be a dangling branch with the same key still in the
			 * index.  Remove it by calling
			 * 'ubifs_tnc_remove_nm()' with the same key but an
			 * unmatchable (empty) name.  The mutex must be
			 * dropped first because that function takes it again.
			 */
			struct qstr noname = { .len = 0, .name = "" };

			err = dbg_check_tnc(c, 0);
			mutex_unlock(&c->tnc_mutex);
			if (err)
				return err;
			return ubifs_tnc_remove_nm(c, key, &noname);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
/**
 * tnc_delete - delete a znode zbranch from the TNC.
 * @c: UBIFS file-system description object
 * @znode: level-0 znode to delete the zbranch from
 * @n: slot number of the zbranch to delete
 *
 * Removes slot @n from @znode (no merging with siblings is attempted).  If
 * the znode becomes empty it is deleted, possibly cascading up through
 * ancestors that lose their last child; finally, if the root is left with a
 * single child chain, the tree is collapsed.  Returns %0 on success or a
 * negative error code on failure.
 */
static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *zp;
	int i, err;

	/* Delete without merge for now */
	ubifs_assert(znode->level == 0);
	ubifs_assert(n >= 0 && n < c->fanout);
	dbg_tnc("deleting %s", DBGKEY(&znode->zbranch[n].key));

	zbr = &znode->zbranch[n];
	lnc_free(zbr);

	/* The deleted node's on-media space becomes dirt */
	err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
	if (err) {
		dbg_dump_znode(c, znode);
		return err;
	}

	/* Close the gap left by the deleted zbranch */
	for (i = n; i < znode->child_cnt - 1; i++)
		znode->zbranch[i] = znode->zbranch[i + 1];
	znode->child_cnt -= 1;

	if (znode->child_cnt > 0)
		return 0;

	/*
	 * This was the last zbranch: the znode (and any ancestors that
	 * thereby lose their only child) must be removed from the tree.
	 */
	do {
		ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
		ubifs_assert(ubifs_zn_dirty(znode));

		zp = znode->parent;
		n = znode->iip;

		atomic_long_dec(&c->dirty_zn_cnt);

		/* Remember the znode's on-media position in the old index */
		err = insert_old_idx_znode(c, znode);
		if (err)
			return err;

		if (znode->cnext) {
			/* Part of a commit - mark obsolete, free later */
			__set_bit(OBSOLETE_ZNODE, &znode->flags);
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		} else
			kfree(znode);
		znode = zp;
	} while (znode->child_cnt == 1); /* while removing the last child */

	/* Remove the zbranch for the deleted subtree from @znode */
	znode->child_cnt -= 1;
	ubifs_assert(znode->level != 0);
	for (i = n; i < znode->child_cnt; i++) {
		znode->zbranch[i] = znode->zbranch[i + 1];
		if (znode->zbranch[i].znode)
			znode->zbranch[i].znode->iip = i;
	}

	/*
	 * If this is the root and it now has only one child, collapse the
	 * tree by promoting the single-child chain.
	 */
	if (!znode->parent) {
		while (znode->child_cnt == 1 && znode->level != 0) {
			zp = znode;
			zbr = &znode->zbranch[0];
			znode = get_znode(c, znode, 0);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode->parent = NULL;
			znode->iip = 0;
			if (c->zroot.len) {
				/* Record old root position in old index */
				err = insert_old_idx(c, c->zroot.lnum,
						     c->zroot.offs);
				if (err)
					return err;
			}
			c->zroot.lnum = zbr->lnum;
			c->zroot.offs = zbr->offs;
			c->zroot.len = zbr->len;
			c->zroot.znode = znode;
			ubifs_assert(!test_bit(OBSOLETE_ZNODE,
				     &zp->flags));
			ubifs_assert(test_bit(DIRTY_ZNODE, &zp->flags));
			atomic_long_dec(&c->dirty_zn_cnt);

			if (zp->cnext) {
				/* Part of a commit - free later */
				__set_bit(OBSOLETE_ZNODE, &zp->flags);
				atomic_long_inc(&c->clean_zn_cnt);
				atomic_long_inc(&ubifs_clean_zn_cnt);
			} else
				kfree(zp);
		}
	}

	return 0;
}
2494
2495
2496
2497
2498
2499
2500
2501
2502int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2503{
2504 int found, n, err = 0;
2505 struct ubifs_znode *znode;
2506
2507 mutex_lock(&c->tnc_mutex);
2508 dbg_tnc("key %s", DBGKEY(key));
2509 found = lookup_level0_dirty(c, key, &znode, &n);
2510 if (found < 0) {
2511 err = found;
2512 goto out_unlock;
2513 }
2514 if (found == 1)
2515 err = tnc_delete(c, znode, n);
2516 if (!err)
2517 err = dbg_check_tnc(c, 0);
2518
2519out_unlock:
2520 mutex_unlock(&c->tnc_mutex);
2521 return err;
2522}
2523
2524
2525
2526
2527
2528
2529
2530
2531
/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of the node to remove
 * @nm: name of the node to remove
 *
 * Like 'ubifs_tnc_remove()', but resolves hash-key collisions by name
 * (using the fallible variant during journal replay).  A missing entry is
 * not an error.  Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
			const struct qstr *nm)
{
	int n, err;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("%.*s, key %s", nm->len, nm->name, DBGKEY(key));
	err = lookup_level0_dirty(c, key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* The key exists - find the entry with the matching name */
		if (c->replaying)
			err = fallible_resolve_collision(c, key, &znode, &n,
							 nm, 0);
		else
			err = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
		if (err < 0)
			goto out_unlock;
		if (err) {
			/* Ensure the znode is dirtied before deleting */
			if (znode->cnext || !ubifs_zn_dirty(znode)) {
				znode = dirty_cow_bottom_up(c, znode);
				if (IS_ERR(znode)) {
					err = PTR_ERR(znode);
					goto out_unlock;
				}
			}
			err = tnc_delete(c, znode, n);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * Returns %1 if @from_key <= @key <= @to_key (inclusive on both ends),
 * and %0 otherwise.
 */
static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
			union ubifs_key *from_key, union ubifs_key *to_key)
{
	return keys_cmp(c, key, from_key) >= 0 &&
	       keys_cmp(c, key, to_key) <= 0;
}
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
/**
 * ubifs_tnc_remove_range - remove index entries in a key range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * Repeatedly finds the first level-0 znode containing keys in the range
 * [@from_key, @to_key], bulk-removes all in-range zbranches after the first
 * by compacting the znode in place, then deletes the first one via
 * 'tnc_delete()' (which handles empty-znode cleanup).  Returns %0 on
 * success or a negative error code on failure.
 */
int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
			   union ubifs_key *to_key)
{
	int i, n, k, err = 0;
	struct ubifs_znode *znode;
	union ubifs_key *key;

	mutex_lock(&c->tnc_mutex);
	while (1) {
		/* Find the first level-0 znode that contains keys to remove */
		err = ubifs_lookup_level0(c, from_key, &znode, &n);
		if (err < 0)
			goto out_unlock;

		if (err)
			key = from_key;
		else {
			/* No exact match - advance to the next key */
			err = tnc_next(c, &znode, &n);
			if (err == -ENOENT) {
				err = 0;
				goto out_unlock;
			}
			if (err < 0)
				goto out_unlock;
			key = &znode->zbranch[n].key;
			if (!key_in_range(c, key, from_key, to_key)) {
				err = 0;
				goto out_unlock;
			}
		}

		/* Ensure the znode is dirtied before modifying it */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		/* Remove all in-range zbranches after slot n in one pass */
		for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
			key = &znode->zbranch[i].key;
			if (!key_in_range(c, key, from_key, to_key))
				break;
			lnc_free(&znode->zbranch[i]);
			err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
					     znode->zbranch[i].len);
			if (err) {
				dbg_dump_znode(c, znode);
				goto out_unlock;
			}
			dbg_tnc("removing %s", DBGKEY(key));
		}
		if (k) {
			/* Shift the survivors left to close the gap */
			for (i = n + 1 + k; i < znode->child_cnt; i++)
				znode->zbranch[i - k] = znode->zbranch[i];
			znode->child_cnt -= k;
		}

		/* Delete the first in-range zbranch (may empty the znode) */
		err = tnc_delete(c, znode, n);
		if (err)
			goto out_unlock;
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
/**
 * ubifs_tnc_remove_ino - remove an inode from the TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * First walks all extended-attribute entries of the inode, removing each
 * entry and the full key range of the corresponding xattr inode; then
 * removes the key range of the inode itself.  The previous xattr entry
 * (@pxent) is kept alive across iterations because @nm.name points into it.
 * Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
{
	union ubifs_key key1, key2;
	struct ubifs_dent_node *xent, *pxent = NULL;
	struct qstr nm = { .name = NULL };

	dbg_tnc("ino %lu", (unsigned long)inum);

	/* Remove all extended attributes of this inode */
	lowest_xent_key(c, &key1, inum);
	while (1) {
		ino_t xattr_inum;
		int err;

		xent = ubifs_tnc_next_ent(c, &key1, &nm);
		if (IS_ERR(xent)) {
			err = PTR_ERR(xent);
			if (err == -ENOENT)
				break;	/* no more xattr entries */
			return err;
		}

		xattr_inum = le64_to_cpu(xent->inum);
		dbg_tnc("xent '%s', ino %lu", xent->name,
			(unsigned long)xattr_inum);

		/* Remove the xattr entry itself */
		nm.name = xent->name;
		nm.len = le16_to_cpu(xent->nlen);
		err = ubifs_tnc_remove_nm(c, &key1, &nm);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Remove all keys of the xattr inode */
		lowest_ino_key(c, &key1, xattr_inum);
		highest_ino_key(c, &key2, xattr_inum);
		err = ubifs_tnc_remove_range(c, &key1, &key2);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Keep xent alive: nm.name points into it next iteration */
		kfree(pxent);
		pxent = xent;
		key_read(c, &xent->key, &key1);
	}

	kfree(pxent);
	/* Finally, remove the whole key range of the inode itself */
	lowest_ino_key(c, &key1, inum);
	highest_ino_key(c, &key2, inum);

	return ubifs_tnc_remove_range(c, &key1, &key2);
}
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of the last entry, or lowest entry key to start from
 * @nm: name of the last entry, or %NULL name to start from the beginning
 *
 * If @nm->name is given, this function returns the entry following the
 * named one; if @nm->name is %NULL it returns the entry at @key itself (or
 * the first entry after it when @key does not exist).  The returned entry
 * is allocated with GFP_NOFS and must be freed by the caller.  Returns the
 * entry on success, or an error pointer (%-ENOENT when there are no more
 * entries for this inode) on failure.
 */
struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
					   union ubifs_key *key,
					   const struct qstr *nm)
{
	int n, err, type = key_type(c, key);
	struct ubifs_znode *znode;
	struct ubifs_dent_node *dent;
	struct ubifs_zbranch *zbr;
	union ubifs_key *dkey;

	dbg_tnc("%s %s", nm->name ? (char *)nm->name : "(lowest)", DBGKEY(key));
	ubifs_assert(is_hash_key(c, key));

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, key, &znode, &n);
	if (unlikely(err < 0))
		goto out_unlock;

	if (nm->name) {
		if (err) {
			/* Key found - handle name collisions first */
			err = resolve_collision(c, key, &znode, &n, nm);
			dbg_tnc("rc returned %d, znode %p, n %d",
				err, znode, n);
			if (unlikely(err < 0))
				goto out_unlock;
		}

		/* Now move to the entry after the named one */
		err = tnc_next(c, &znode, &n);
		if (unlikely(err))
			goto out_unlock;
	} else {
		/*
		 * No name was given: return the entry at @key itself.  If
		 * @key was not found, @znode/@n point at the preceding
		 * element, so step forward once.
		 */
		if (!err) {
			err = tnc_next(c, &znode, &n);
			if (err)
				goto out_unlock;
		}
	}

	zbr = &znode->zbranch[n];
	dent = kmalloc(zbr->len, GFP_NOFS);
	if (unlikely(!dent)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * The 'tnc_next()' call above may have walked past the last entry of
	 * this inode - verify inode number and key type still match.
	 */
	dkey = &zbr->key;
	if (key_inum(c, dkey) != key_inum(c, key) ||
	    key_type(c, dkey) != type) {
		err = -ENOENT;
		goto out_free;
	}

	err = tnc_read_node_nm(c, zbr, dent);
	if (unlikely(err))
		goto out_free;

	mutex_unlock(&c->tnc_mutex);
	return dent;

out_free:
	kfree(dent);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return ERR_PTR(err);
}
2845
2846
2847
2848
2849
2850
2851
2852static void tnc_destroy_cnext(struct ubifs_info *c)
2853{
2854 struct ubifs_znode *cnext;
2855
2856 if (!c->cnext)
2857 return;
2858 ubifs_assert(c->cmt_state == COMMIT_BROKEN);
2859 cnext = c->cnext;
2860 do {
2861 struct ubifs_znode *znode = cnext;
2862
2863 cnext = cnext->cnext;
2864 if (test_bit(OBSOLETE_ZNODE, &znode->flags))
2865 kfree(znode);
2866 } while (cnext && cnext != c->cnext);
2867}
2868
2869
2870
2871
2872
2873void ubifs_tnc_close(struct ubifs_info *c)
2874{
2875 long clean_freed;
2876
2877 tnc_destroy_cnext(c);
2878 if (c->zroot.znode) {
2879 clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
2880 atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
2881 }
2882 kfree(c->gap_lebs);
2883 kfree(c->ilebs);
2884 destroy_old_idx(c);
2885}
2886
2887
2888
2889
2890
2891
2892
2893
2894
/**
 * left_znode - get the znode to the left of the given one, at the same level.
 * @c: UBIFS file-system description object
 * @znode: znode to start from
 *
 * Walks up the tree until an ancestor with a left sibling branch is found,
 * then descends along the rightmost branches back down to the original
 * level.  Returns the left neighbour, %NULL if there is none (leftmost edge
 * of the tree), or an error pointer on failure.
 */
static struct ubifs_znode *left_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	int level = znode->level;

	while (1) {
		int n = znode->iip - 1;

		/* Go up until we can step one branch to the left */
		znode = znode->parent;
		if (!znode)
			return NULL;
		if (n >= 0) {
			/* Descend rightmost branches back to 'level' */
			znode = get_znode(c, znode, n);
			if (IS_ERR(znode))
				return znode;
			while (znode->level != level) {
				n = znode->child_cnt - 1;
				znode = get_znode(c, znode, n);
				if (IS_ERR(znode))
					return znode;
			}
			break;
		}
	}
	return znode;
}
2923
2924
2925
2926
2927
2928
2929
2930
2931
/**
 * right_znode - get the znode to the right of the given one, at the same level.
 * @c: UBIFS file-system description object
 * @znode: znode to start from
 *
 * Walks up the tree until an ancestor with a right sibling branch is found,
 * then descends along the leftmost branches back down to the original
 * level.  Returns the right neighbour, %NULL if there is none (rightmost
 * edge of the tree), or an error pointer on failure.
 */
static struct ubifs_znode *right_znode(struct ubifs_info *c,
				       struct ubifs_znode *znode)
{
	int level = znode->level;

	while (1) {
		int n = znode->iip + 1;

		/* Go up until we can step one branch to the right */
		znode = znode->parent;
		if (!znode)
			return NULL;
		if (n < znode->child_cnt) {
			/* Descend leftmost branches back to 'level' */
			znode = get_znode(c, znode, n);
			if (IS_ERR(znode))
				return znode;
			while (znode->level != level) {
				znode = get_znode(c, znode, 0);
				if (IS_ERR(znode))
					return znode;
			}
			break;
		}
	}
	return znode;
}
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
/**
 * lookup_znode - find a particular indexing node by position in the TNC.
 * @c: UBIFS file-system description object
 * @key: index node key to lookup
 * @level: index node level
 * @lnum: LEB number of the index node
 * @offs: offset of the index node
 *
 * Descends the tree by @key to the parent level (@level + 1) and checks
 * whether the child at the search position has on-media position
 * @lnum:@offs.  For non-unique (hashed) keys the neighbouring zbranches to
 * the left and right with the same key are checked as well.  Returns the
 * znode, %NULL if not found in the TNC, or an error pointer on failure.
 */
static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
					union ubifs_key *key, int level,
					int lnum, int offs)
{
	struct ubifs_znode *znode, *zn;
	int n, nn;

	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);

	/* A negative level is invalid */
	if (level < 0)
		return ERR_PTR(-EINVAL);

	/* Get the root znode, loading it if not yet in memory */
	znode = c->zroot.znode;
	if (!znode) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return znode;
	}
	/* Is the root itself the node we are looking for? */
	if (c->zroot.lnum == lnum && c->zroot.offs == offs)
		return znode;
	/* The target must be strictly below the root */
	if (level >= znode->level)
		return NULL;
	/* Descend by key to the parent level, i.e. (level + 1) */
	while (1) {
		ubifs_search_zbranch(c, znode, key, &n);
		if (n < 0) {
			/*
			 * The leftmost key of this znode is already greater
			 * than @key - the wanted branch is in the znode to
			 * the left.
			 */
			znode = left_znode(c, znode);
			if (!znode)
				return NULL;
			if (IS_ERR(znode))
				return znode;
			ubifs_search_zbranch(c, znode, key, &n);
			ubifs_assert(n >= 0);
		}
		if (znode->level == level + 1)
			break;
		znode = get_znode(c, znode, n);
		if (IS_ERR(znode))
			return znode;
	}

	/* Check whether the child at the search position matches */
	if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
		return get_znode(c, znode, n);

	/* If the key is unique, there is nowhere else to look */
	if (!is_hash_key(c, key))
		return NULL;

	/*
	 * The key is not unique, so nodes with the same key may sit in
	 * neighbouring zbranches - remember the middle position and scan
	 * both directions.
	 */
	zn = znode;
	nn = n;

	/* Look left */
	while (1) {
		/* Move one branch to the left */
		if (n)
			n -= 1;
		else {
			znode = left_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = znode->child_cnt - 1;
		}
		/* Check this position */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);
		/* Stop once the keys no longer match */
		if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
			break;
	}

	/* Back to the middle */
	znode = zn;
	n = nn;

	/* Look right */
	while (1) {
		/* Move one branch to the right */
		if (++n >= znode->child_cnt) {
			znode = right_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = 0;
		}
		/* Check this position */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);
		/* Stop once the keys no longer match */
		if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
			break;
	}
	return NULL;
}
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
/**
 * is_idx_node_in_tnc - determine if an index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of the index node
 * @level: index node level
 * @lnum: LEB number of the index node
 * @offs: offset of the index node
 *
 * Returns %0 if the index node is not referenced by the TNC, %1 if it is
 * referenced and dirty, %2 if it is referenced and clean, or a negative
 * error code on failure.
 */
int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
		       int lnum, int offs)
{
	struct ubifs_znode *zn;

	zn = lookup_znode(c, key, level, lnum, offs);
	if (!zn)
		return 0;
	if (IS_ERR(zn))
		return PTR_ERR(zn);
	if (ubifs_zn_dirty(zn))
		return 1;
	return 2;
}
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
/**
 * is_leaf_node_in_tnc - determine if a non-indexing node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @lnum: node LEB number
 * @offs: node offset
 *
 * Looks up @key at level 0 and checks whether the matching zbranch points
 * at @lnum:@offs.  For non-unique (hashed) keys the neighbouring entries
 * with the same key are scanned to the left and right as well.  Returns %1
 * if the position is referenced by the TNC, %0 if not, or a negative error
 * code on failure.  Must be called under @c->tnc_mutex (the caller,
 * 'ubifs_tnc_has_node()', holds it).
 */
static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
			       int lnum, int offs)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *zn;
	int n, found, err, nn;
	const int unique = !is_hash_key(c, key);

	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (found < 0)
		return found;	/* Error code */
	if (!found)
		return 0;
	zbr = &znode->zbranch[n];
	if (lnum == zbr->lnum && offs == zbr->offs)
		return 1;	/* Found it */
	if (unique)
		return 0;

	/*
	 * The key is not unique, so nodes with the same key may sit in
	 * neighbouring zbranches - remember the middle and scan both ways.
	 */
	zn = znode;
	nn = n;

	/* Look left */
	while (1) {
		err = tnc_prev(c, &znode, &n);
		if (err == -ENOENT)
			break;	/* Reached the beginning of the index */
		if (err)
			return err;
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;	/* Keys no longer match */
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1;
	}

	/* Look right */
	znode = zn;
	n = nn;
	while (1) {
		err = tnc_next(c, &znode, &n);
		if (err) {
			if (err == -ENOENT)
				return 0;	/* End of the index */
			return err;
		}
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;	/* Keys no longer match */
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1;
	}
	return 0;
}
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3212 int lnum, int offs, int is_idx)
3213{
3214 int err;
3215
3216 mutex_lock(&c->tnc_mutex);
3217 if (is_idx) {
3218 err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3219 if (err < 0)
3220 goto out_unlock;
3221 if (err == 1)
3222
3223 err = 0;
3224 else if (err == 2)
3225
3226 err = 1;
3227 else
3228 BUG_ON(err != 0);
3229 } else
3230 err = is_leaf_node_in_tnc(c, key, lnum, offs);
3231
3232out_unlock:
3233 mutex_unlock(&c->tnc_mutex);
3234 return err;
3235}
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3252 int lnum, int offs)
3253{
3254 struct ubifs_znode *znode;
3255 int err = 0;
3256
3257 mutex_lock(&c->tnc_mutex);
3258 znode = lookup_znode(c, key, level, lnum, offs);
3259 if (!znode)
3260 goto out_unlock;
3261 if (IS_ERR(znode)) {
3262 err = PTR_ERR(znode);
3263 goto out_unlock;
3264 }
3265 znode = dirty_cow_bottom_up(c, znode);
3266 if (IS_ERR(znode)) {
3267 err = PTR_ERR(znode);
3268 goto out_unlock;
3269 }
3270
3271out_unlock:
3272 mutex_unlock(&c->tnc_mutex);
3273 return err;
3274}
3275
3276#ifdef CONFIG_UBIFS_FS_DEBUG
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
/**
 * dbg_check_inode_size - check if there is data beyond an inode's size.
 * @c: UBIFS file-system description object
 * @inode: inode to check
 * @size: inode size
 *
 * Debugging check (active only with %UBIFS_CHK_GEN): for a regular file,
 * verifies that no data node exists in the TNC past the block containing
 * offset @size.  Returns %0 if all is well, %-EINVAL (after dumping the
 * offending key and inode) if stray data is found, or another negative
 * error code on lookup failure.
 */
int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
			 loff_t size)
{
	int err, n;
	union ubifs_key from_key, to_key, *key;
	struct ubifs_znode *znode;
	unsigned int block;

	if (!S_ISREG(inode->i_mode))
		return 0;
	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	/* Range of keys that must be empty: first block past EOF .. highest */
	block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	data_key_init(c, &from_key, inode->i_ino, block);
	highest_data_key(c, &to_key, inode->i_ino);

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, &from_key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* Exact match: a data node exists right past EOF */
		err = -EINVAL;
		key = &from_key;
		goto out_dump;
	}

	err = tnc_next(c, &znode, &n);
	if (err == -ENOENT) {
		/* No more keys at all - fine */
		err = 0;
		goto out_unlock;
	}
	if (err < 0)
		goto out_unlock;

	ubifs_assert(err == 0);
	key = &znode->zbranch[n].key;
	if (!key_in_range(c, key, &from_key, &to_key))
		goto out_unlock;

	/* Fall through: next key is in the forbidden range - report it */
out_dump:
	block = key_block(c, key);
	ubifs_err("inode %lu has size %lld, but there are data at offset %lld "
		  "(data key %s)", (unsigned long)inode->i_ino, size,
		  ((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key));
	dbg_dump_inode(c, inode);
	dbg_dump_stack();
	err = -EINVAL;

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
3343
3344#endif
3345