1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/crc32.h>
34#include "ubifs.h"
35
36
37
38
39
40
41
42
43
44
45
/*
 * Return codes of the name-comparison helpers ('matches_name()' and
 * 'fallible_matches_name()').
 *
 * NAME_LESS: the directory-entry name on the media is "less" than the
 *            name being looked up
 * NAME_MATCHES: the names match
 * NAME_GREATER: the on-media name is "greater" than the looked-up name
 * NOT_ON_MEDIA: the node referred to by the zbranch could not be found on
 *               the media (only produced by the "fallible" variant)
 */
enum {
	NAME_LESS = 0,
	NAME_MATCHES = 1,
	NAME_GREATER = 2,
	NOT_ON_MEDIA = 3,
};
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
/**
 * insert_old_idx - record the position of an obsoleted index node.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of the obsoleted index node
 * @offs: offset of the obsoleted index node
 *
 * Inserts the (@lnum, @offs) pair into the @c->old_idx red-black tree,
 * keyed by LEB number first and offset second.
 *
 * Returns %0 on success (including the duplicate case, which is only
 * reported), or %-ENOMEM if the tree node cannot be allocated.
 */
static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *old_idx, *o;
	struct rb_node **p, *parent = NULL;

	old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
	if (unlikely(!old_idx))
		return -ENOMEM;
	old_idx->lnum = lnum;
	old_idx->offs = offs;

	/* Standard rb-tree descent to find the insertion point */
	p = &c->old_idx.rb_node;
	while (*p) {
		parent = *p;
		o = rb_entry(parent, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = &(*p)->rb_left;
		else if (lnum > o->lnum)
			p = &(*p)->rb_right;
		else if (offs < o->offs)
			p = &(*p)->rb_left;
		else if (offs > o->offs)
			p = &(*p)->rb_right;
		else {
			/* Duplicate entry - complain but treat as success */
			ubifs_err("old idx added twice!");
			kfree(old_idx);
			return 0;
		}
	}
	rb_link_node(&old_idx->rb, parent, p);
	rb_insert_color(&old_idx->rb, &c->old_idx);
	return 0;
}
109
110
111
112
113
114
115
116
117int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
118{
119 if (znode->parent) {
120 struct ubifs_zbranch *zbr;
121
122 zbr = &znode->parent->zbranch[znode->iip];
123 if (zbr->len)
124 return insert_old_idx(c, zbr->lnum, zbr->offs);
125 } else
126 if (c->zroot.len)
127 return insert_old_idx(c, c->zroot.lnum,
128 c->zroot.offs);
129 return 0;
130}
131
132
133
134
135
136
137
138
139static int ins_clr_old_idx_znode(struct ubifs_info *c,
140 struct ubifs_znode *znode)
141{
142 int err;
143
144 if (znode->parent) {
145 struct ubifs_zbranch *zbr;
146
147 zbr = &znode->parent->zbranch[znode->iip];
148 if (zbr->len) {
149 err = insert_old_idx(c, zbr->lnum, zbr->offs);
150 if (err)
151 return err;
152 zbr->lnum = 0;
153 zbr->offs = 0;
154 zbr->len = 0;
155 }
156 } else
157 if (c->zroot.len) {
158 err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
159 if (err)
160 return err;
161 c->zroot.lnum = 0;
162 c->zroot.offs = 0;
163 c->zroot.len = 0;
164 }
165 return 0;
166}
167
168
169
170
171
172
173
174
175
176
177
/**
 * destroy_old_idx - free the whole old-index red-black tree.
 * @c: UBIFS file-system description object
 *
 * Frees every 'struct ubifs_old_idx' in @c->old_idx using an explicit
 * post-order walk (children before parent), detaching each freed node
 * from its parent so the tree stays consistent during teardown.  The
 * tree root is reset to empty at the end.
 */
void destroy_old_idx(struct ubifs_info *c)
{
	struct rb_node *this = c->old_idx.rb_node;
	struct ubifs_old_idx *old_idx;

	while (this) {
		/* Descend to a leaf first - children must go before parents */
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		old_idx = rb_entry(this, struct ubifs_old_idx, rb);
		this = rb_parent(this);
		if (this) {
			/* Unlink the child we are about to free */
			if (this->rb_left == &old_idx->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(old_idx);
	}
	c->old_idx = RB_ROOT;
}
203
204
205
206
207
208
209
210
/**
 * copy_znode - duplicate a znode for copy-on-write.
 * @c: UBIFS file-system description object
 * @znode: znode to copy
 *
 * The copy is marked dirty and not copy-on-write; the original is marked
 * obsolete.  For non-leaf znodes, the children's parent pointers are
 * re-targeted at the new copy.  The dirty znode counter is bumped for the
 * new copy.
 *
 * Returns the new znode on success, or an %-ENOMEM error pointer on
 * allocation failure.
 */
static struct ubifs_znode *copy_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	struct ubifs_znode *zn;

	zn = kmalloc(c->max_znode_sz, GFP_NOFS);
	if (unlikely(!zn))
		return ERR_PTR(-ENOMEM);

	memcpy(zn, znode, c->max_znode_sz);
	zn->cnext = NULL;
	__set_bit(DIRTY_ZNODE, &zn->flags);
	__clear_bit(COW_ZNODE, &zn->flags);

	/* The original must not already be obsolete */
	ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
	__set_bit(OBSOLETE_ZNODE, &znode->flags);

	if (znode->level != 0) {
		int i;
		const int n = zn->child_cnt;

		/* Re-parent any in-memory children onto the copy */
		for (i = 0; i < n; i++) {
			struct ubifs_zbranch *zbr = &zn->zbranch[i];

			if (zbr->znode)
				zbr->znode->parent = zn;
		}
	}

	atomic_long_inc(&c->dirty_zn_cnt);
	return zn;
}
244
245
246
247
248
249
250
251
252
253static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
254{
255 c->calc_idx_sz -= ALIGN(dirt, 8);
256 return ubifs_add_dirt(c, lnum, dirt);
257}
258
259
260
261
262
263
264
265
/**
 * dirty_cow_znode - make a znode dirty, copying it first if needed.
 * @c: UBIFS file-system description object
 * @zbr: zbranch referring to the znode
 *
 * If the znode is not copy-on-write protected, it is simply marked dirty
 * in place (adjusting the clean/dirty counters and index dirt accounting
 * the first time).  Otherwise a copy is made; the old on-media location
 * is recorded as old index, its dirt is accounted, and the zbranch is
 * updated to point at the copy with a cleared location.
 *
 * Returns the dirty znode (possibly the copy) on success, or an error
 * pointer on failure.
 */
static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
					   struct ubifs_zbranch *zbr)
{
	struct ubifs_znode *znode = zbr->znode;
	struct ubifs_znode *zn;
	int err;

	if (!test_bit(COW_ZNODE, &znode->flags)) {
		/* znode is not being committed - dirty it in place */
		if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
			atomic_long_inc(&c->dirty_zn_cnt);
			atomic_long_dec(&c->clean_zn_cnt);
			atomic_long_dec(&ubifs_clean_zn_cnt);
			err = add_idx_dirt(c, zbr->lnum, zbr->len);
			if (unlikely(err))
				return ERR_PTR(err);
		}
		return znode;
	}

	zn = copy_znode(c, znode);
	if (IS_ERR(zn))
		return zn;

	if (zbr->len) {
		/* Remember the old location before forgetting it */
		err = insert_old_idx(c, zbr->lnum, zbr->offs);
		if (unlikely(err))
			return ERR_PTR(err);
		err = add_idx_dirt(c, zbr->lnum, zbr->len);
	} else
		err = 0;

	/* Point the zbranch at the copy; it is not on the media yet */
	zbr->znode = zn;
	zbr->lnum = 0;
	zbr->offs = 0;
	zbr->len = 0;

	if (unlikely(err))
		return ERR_PTR(err);
	return zn;
}
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/**
 * lnc_add - add a copy of a leaf node to the leaf-node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch the leaf node belongs to
 * @node: leaf node to cache (a directory entry node)
 *
 * The node is validated first, then a copy is cached in @zbr->leaf.
 * Caching is best-effort: an allocation failure is NOT an error - the
 * node simply is not cached.  Only hash-keyed (directory entry) nodes
 * are cached.
 *
 * Returns %0 on success or on cache-allocation failure, and a negative
 * error code if the node fails validation.
 */
static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
		   const void *node)
{
	int err;
	void *lnc_node;
	const struct ubifs_dent_node *dent = node;

	ubifs_assert(!zbr->leaf);
	ubifs_assert(zbr->len != 0);
	ubifs_assert(is_hash_key(c, &zbr->key));

	err = ubifs_validate_entry(c, dent);
	if (err) {
		dbg_dump_stack();
		dbg_dump_node(c, dent);
		return err;
	}

	lnc_node = kmalloc(zbr->len, GFP_NOFS);
	if (!lnc_node)
		/* Caching is optional - failing to cache is not an error */
		return 0;

	memcpy(lnc_node, node, zbr->len);
	zbr->leaf = lnc_node;
	return 0;
}
355
356
357
358
359
360
361
362
363
364
365static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
366 void *node)
367{
368 int err;
369
370 ubifs_assert(!zbr->leaf);
371 ubifs_assert(zbr->len != 0);
372
373 err = ubifs_validate_entry(c, node);
374 if (err) {
375 dbg_dump_stack();
376 dbg_dump_node(c, node);
377 return err;
378 }
379
380 zbr->leaf = node;
381 return 0;
382}
383
384
385
386
387
388
389static void lnc_free(struct ubifs_zbranch *zbr)
390{
391 if (!zbr->leaf)
392 return;
393 kfree(zbr->leaf);
394 zbr->leaf = NULL;
395}
396
397
398
399
400
401
402
403
404
405
406
407
/**
 * tnc_read_node_nm - read a "hashed" (directory entry) leaf node.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of the node to read
 * @node: buffer the node is read into
 *
 * Serves the node from the leaf-node cache when present; otherwise reads
 * it from the media and then tries to cache it (best-effort).
 *
 * Returns %0 on success or a negative error code on failure.
 */
static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			    void *node)
{
	int err;

	ubifs_assert(is_hash_key(c, &zbr->key));

	if (zbr->leaf) {
		/* Cache hit - just copy it out */
		ubifs_assert(zbr->len != 0);
		memcpy(node, zbr->leaf, zbr->len);
		return 0;
	}

	err = ubifs_tnc_read_node(c, zbr, node);
	if (err)
		return err;

	/* Add the freshly-read node to the cache for next time */
	err = lnc_add(c, zbr, node);
	return err;
}
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
/**
 * try_read_node - read a node and check whether it looks valid.
 * @c: UBIFS file-system description object
 * @buf: buffer to read into
 * @type: expected node type
 * @len: expected node length
 * @lnum: LEB number to read from
 * @offs: offset to read from
 *
 * Unlike a normal node read, a node that fails its sanity checks (magic,
 * type, length, CRC) is NOT an error here - the caller is probing a
 * location that may legitimately no longer hold the node.
 *
 * Returns %1 if the node appears valid, %0 if it does not, and a
 * negative error code only when the underlying read itself fails.
 */
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
			 int len, int lnum, int offs)
{
	int err, node_len;
	struct ubifs_ch *ch = buf;
	uint32_t crc, node_crc;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	if (err) {
		ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
			  type, lnum, offs, err);
		return err;
	}

	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
		return 0;

	if (ch->node_type != type)
		return 0;

	node_len = le32_to_cpu(ch->len);
	if (node_len != len)
		return 0;

	/* Data-node CRC checking may be disabled by configuration */
	if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc)
		return 1;

	/* CRC covers everything after the 8-byte magic+CRC prefix */
	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc)
		return 0;

	return 1;
}
488
489
490
491
492
493
494
495
496
497
498
/**
 * fallible_read_node - read a leaf node, tolerating its absence.
 * @c: UBIFS file-system description object
 * @key: key the node is expected to have
 * @zbr: zbranch of the node to read
 * @node: buffer the node is read into
 *
 * Probes the zbranch location with 'try_read_node()' and additionally
 * checks that the node found there carries the expected key.  A missing
 * or mismatching node is not an error (it may have been garbage
 * collected); during replay such a "dangling branch" is only logged.
 *
 * Returns %1 if the node was read and matched, %0 if it was not found,
 * and a negative error code on read failure.
 */
static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
			      struct ubifs_zbranch *zbr, void *node)
{
	int ret;

	dbg_tnc("LEB %d:%d, key %s", zbr->lnum, zbr->offs, DBGKEY(key));

	ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
			    zbr->offs);
	if (ret == 1) {
		union ubifs_key node_key;
		struct ubifs_dent_node *dent = node;

		/* The node must also carry the key we were after */
		key_read(c, &dent->key, &node_key);
		if (keys_cmp(c, key, &node_key) != 0)
			ret = 0;
	}
	if (ret == 0 && c->replaying)
		dbg_mnt("dangling branch LEB %d:%d len %d, key %s",
			zbr->lnum, zbr->offs, zbr->len, DBGKEY(key));
	return ret;
}
522
523
524
525
526
527
528
529
530
531
532
533
/**
 * matches_name - compare a directory entry node's name against @nm.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of the directory entry node
 * @nm: name to compare with
 *
 * Reads the entry (from the leaf-node cache when possible; otherwise
 * from the media, after which ownership of the read buffer passes to
 * the cache via 'lnc_add_directly()').
 *
 * Returns %NAME_LESS, %NAME_MATCHES or %NAME_GREATER according to the
 * ordering of the on-media name relative to @nm, or a negative error
 * code on failure.
 */
static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
			const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* Cache miss: read the entry from the media */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = ubifs_tnc_read_node(c, zbr, dent);
		if (err)
			goto out_free;

		/* On success the cache owns @dent - do not free it below */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		/* Common prefix equal - shorter name sorts first */
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}
575
576
577
578
579
580
581
582
583
584static struct ubifs_znode *get_znode(struct ubifs_info *c,
585 struct ubifs_znode *znode, int n)
586{
587 struct ubifs_zbranch *zbr;
588
589 zbr = &znode->zbranch[n];
590 if (zbr->znode)
591 znode = zbr->znode;
592 else
593 znode = ubifs_load_znode(c, zbr, znode, n);
594 return znode;
595}
596
597
598
599
600
601
602
603
604
605
/**
 * tnc_next - step to the next leaf-level zbranch in key order.
 * @c: UBIFS file-system description object
 * @zn: in/out: current (and resulting) leaf-level znode
 * @n: in/out: current (and resulting) zbranch slot
 *
 * Advances within the current znode when possible; otherwise walks up
 * until a right sibling exists, then descends along leftmost children
 * back to level 0.
 *
 * Returns %0 on success, %-ENOENT when there is no next entry, or
 * another negative error code on load failure.
 */
static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	nn += 1;
	if (nn < znode->child_cnt) {
		*n = nn;
		return 0;
	}
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip + 1;
		znode = zp;
		if (nn < znode->child_cnt) {
			/* Found a right sibling - descend to its leftmost leaf */
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			while (znode->level != 0) {
				znode = get_znode(c, znode, 0);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = 0;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}
641
642
643
644
645
646
647
648
649
650
/**
 * tnc_prev - step to the previous leaf-level zbranch in key order.
 * @c: UBIFS file-system description object
 * @zn: in/out: current (and resulting) leaf-level znode
 * @n: in/out: current (and resulting) zbranch slot
 *
 * Mirror image of 'tnc_next()': moves left within the znode, or climbs
 * up to a left sibling and descends along rightmost children.
 *
 * Returns %0 on success, %-ENOENT when there is no previous entry, or
 * another negative error code on load failure.
 */
static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
{
	struct ubifs_znode *znode = *zn;
	int nn = *n;

	if (nn > 0) {
		*n = nn - 1;
		return 0;
	}
	while (1) {
		struct ubifs_znode *zp;

		zp = znode->parent;
		if (!zp)
			return -ENOENT;
		nn = znode->iip - 1;
		znode = zp;
		if (nn >= 0) {
			/* Found a left sibling - descend to its rightmost leaf */
			znode = get_znode(c, znode, nn);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			while (znode->level != 0) {
				nn = znode->child_cnt - 1;
				znode = get_znode(c, znode, nn);
				if (IS_ERR(znode))
					return PTR_ERR(znode);
			}
			nn = znode->child_cnt - 1;
			break;
		}
	}
	*zn = znode;
	*n = nn;
	return 0;
}
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
/**
 * resolve_collision - resolve a hash-key collision by comparing names.
 * @c: UBIFS file-system description object
 * @key: (hashed) key being looked up
 * @zn: in/out: znode of the starting zbranch; updated to the result
 * @n: in/out: zbranch slot; updated to the result
 *
 * Several directory entries can share the same hashed key; this walks
 * backwards or forwards over the run of equal-keyed zbranches comparing
 * actual names until the wanted entry is found or proven absent.  On a
 * miss, (@zn, @n) is left at the entry after which @nm would be
 * inserted (with *@n == -1 meaning "before the first slot").
 *
 * Returns %1 if the name was found, %0 if not, or a negative error code.
 */
static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
			     struct ubifs_znode **zn, int *n,
			     const struct qstr *nm)
{
	int err;

	err = matches_name(c, &(*zn)->zbranch[*n], nm);
	if (unlikely(err < 0))
		return err;
	if (err == NAME_MATCHES)
		return 1;

	if (err == NAME_GREATER) {
		/* The wanted name sorts earlier - scan backwards */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				return 0;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/*
				 * We stepped onto a different key.  If we are
				 * at the last slot of this znode, the correct
				 * insertion point is before slot 0 of the NEXT
				 * znode, so move there and report *n == -1.
				 */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				return 0;
			}
			err = matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_LESS)
				return 0;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_GREATER);
		}
	} else {
		int nn = *n;
		struct ubifs_znode *znode = *zn;

		/* The wanted name sorts later - scan forwards */
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				return 0;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				return 0;
			err = matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				return 0;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			ubifs_assert(err == NAME_LESS);
		}
	}
}
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
/**
 * fallible_matches_name - like 'matches_name()', but the node may be gone.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of the directory entry node
 * @nm: name to compare with
 *
 * Uses 'fallible_read_node()' so that a node missing from the media is
 * reported as %NOT_ON_MEDIA rather than treated as an error.  As in
 * 'matches_name()', a node read from the media is handed over to the
 * leaf-node cache on success.
 *
 * Returns %NAME_LESS, %NAME_MATCHES, %NAME_GREATER, %NOT_ON_MEDIA, or a
 * negative error code on failure.
 */
static int fallible_matches_name(struct ubifs_info *c,
				 struct ubifs_zbranch *zbr,
				 const struct qstr *nm)
{
	struct ubifs_dent_node *dent;
	int nlen, err;

	/* Cache miss: probe the media for the entry */
	if (!zbr->leaf) {
		dent = kmalloc(zbr->len, GFP_NOFS);
		if (!dent)
			return -ENOMEM;

		err = fallible_read_node(c, &zbr->key, zbr, dent);
		if (err < 0)
			goto out_free;
		if (err == 0) {
			/* The node was not present on the media */
			err = NOT_ON_MEDIA;
			goto out_free;
		}
		ubifs_assert(err == 1);

		/* On success the cache owns @dent - do not free it below */
		err = lnc_add_directly(c, zbr, dent);
		if (err)
			goto out_free;
	} else
		dent = zbr->leaf;

	nlen = le16_to_cpu(dent->nlen);
	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
	if (err == 0) {
		/* Common prefix equal - shorter name sorts first */
		if (nlen == nm->len)
			return NAME_MATCHES;
		else if (nlen < nm->len)
			return NAME_LESS;
		else
			return NAME_GREATER;
	} else if (err < 0)
		return NAME_LESS;
	else
		return NAME_GREATER;

out_free:
	kfree(dent);
	return err;
}
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
/**
 * fallible_resolve_collision - collision resolution tolerating missing nodes.
 * @c: UBIFS file-system description object
 * @key: (hashed) key being looked up
 * @zn: in/out: znode of the starting zbranch; updated to the result
 * @n: in/out: zbranch slot; updated to the result
 * @nm: name to look for
 * @adding: non-zero if the caller intends to add the entry
 *
 * Like 'resolve_collision()', but entries whose nodes are missing from
 * the media (%NOT_ON_MEDIA) do not stop the search.  When not adding,
 * the last missing-node match position is remembered in (@o_znode,
 * @o_n) and reported as a "dangling match" if no real match is found -
 * the missing node may well have been the wanted entry.
 *
 * Returns %1 if a match (possibly dangling) was found, %0 if not, or a
 * negative error code on failure.
 */
static int fallible_resolve_collision(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      const struct qstr *nm, int adding)
{
	struct ubifs_znode *o_znode = NULL, *znode = *zn;
	int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;

	cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
	if (unlikely(cmp < 0))
		return cmp;
	if (cmp == NAME_MATCHES)
		return 1;
	if (cmp == NOT_ON_MEDIA) {
		o_znode = znode;
		o_n = nn;
		/*
		 * The node is missing, so we cannot tell which direction to
		 * search - mark the result unsure and scan both ways.
		 */
		unsure = 1;
	} else if (!adding)
		unsure = 1;

	if (cmp == NAME_GREATER || unsure) {
		/* Scan backwards over the run of equal keys */
		while (1) {
			err = tnc_prev(c, zn, n);
			if (err == -ENOENT) {
				ubifs_assert(*n == 0);
				*n = -1;
				break;
			}
			if (err < 0)
				return err;
			if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/* See resolve_collision(): fix up insertion point */
				if (*n == (*zn)->child_cnt - 1) {
					err = tnc_next(c, zn, n);
					if (err) {
						/* Should be impossible */
						ubifs_assert(0);
						if (err == -ENOENT)
							err = -EINVAL;
						return err;
					}
					ubifs_assert(*n == 0);
					*n = -1;
				}
				break;
			}
			err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
			if (err < 0)
				return err;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = *zn;
				o_n = *n;
				continue;
			}
			if (!adding)
				continue;
			if (err == NAME_LESS)
				break;
			else
				unsure = 0;
		}
	}

	if (cmp == NAME_LESS || unsure) {
		/* Scan forwards, starting again from the original position */
		*zn = znode;
		*n = nn;
		while (1) {
			err = tnc_next(c, &znode, &nn);
			if (err == -ENOENT)
				break;
			if (err < 0)
				return err;
			if (keys_cmp(c, &znode->zbranch[nn].key, key))
				break;
			err = fallible_matches_name(c, &znode->zbranch[nn], nm);
			if (err < 0)
				return err;
			if (err == NAME_GREATER)
				break;
			*zn = znode;
			*n = nn;
			if (err == NAME_MATCHES)
				return 1;
			if (err == NOT_ON_MEDIA) {
				o_znode = znode;
				o_n = nn;
			}
		}
	}

	/* Only report a dangling match when looking up, not when adding */
	if (adding || !o_znode)
		return 0;

	dbg_mnt("dangling match LEB %d:%d len %d %s",
		o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
		o_znode->zbranch[o_n].len, DBGKEY(key));
	*zn = o_znode;
	*n = o_n;
	return 1;
}
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
1011{
1012 if (zbr->lnum == lnum && zbr->offs == offs)
1013 return 1;
1014 else
1015 return 0;
1016}
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
/**
 * resolve_collision_directly - resolve a collision by on-media position.
 * @c: UBIFS file-system description object
 * @key: (hashed) key being looked up
 * @zn: in/out: znode of the starting zbranch; updated to the result
 * @n: in/out: zbranch slot; updated to the result
 * @lnum: LEB number of the wanted node
 * @offs: offset of the wanted node
 *
 * Used when the caller knows exactly where the node lives on the media:
 * scans the run of equal-keyed zbranches backwards then forwards looking
 * for the one whose (lnum, offs) matches.
 *
 * Returns %1 if found, %0 if not, or a negative error code on failure.
 */
static int resolve_collision_directly(struct ubifs_info *c,
				      const union ubifs_key *key,
				      struct ubifs_znode **zn, int *n,
				      int lnum, int offs)
{
	struct ubifs_znode *znode;
	int nn, err;

	znode = *zn;
	nn = *n;
	if (matches_position(&znode->zbranch[nn], lnum, offs))
		return 1;

	/* Scan backwards over the run of equal keys */
	while (1) {
		err = tnc_prev(c, &znode, &nn);
		if (err == -ENOENT)
			break;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			break;
		if (matches_position(&znode->zbranch[nn], lnum, offs)) {
			*zn = znode;
			*n = nn;
			return 1;
		}
	}

	/* Not behind us - scan forwards from the starting position */
	znode = *zn;
	nn = *n;
	while (1) {
		err = tnc_next(c, &znode, &nn);
		if (err == -ENOENT)
			return 0;
		if (err < 0)
			return err;
		if (keys_cmp(c, &znode->zbranch[nn].key, key))
			return 0;
		*zn = znode;
		*n = nn;
		if (matches_position(&znode->zbranch[nn], lnum, offs))
			return 1;
	}
}
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
/**
 * dirty_cow_bottom_up - dirty a znode and all its ancestors, top-down.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * Copy-on-write must be applied from the root downwards (each parent
 * must point at its child's copy), so the path from @znode up to the
 * first already-dirty ancestor is recorded in @c->bottom_up_buf and then
 * replayed top-down through 'dirty_cow_znode()'.  The buffer is grown if
 * the tree is taller than %BOTTOM_UP_HEIGHT.
 *
 * Returns the dirtied @znode (possibly a copy) or an error pointer.
 */
static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
					       struct ubifs_znode *znode)
{
	struct ubifs_znode *zp;
	int *path = c->bottom_up_buf, p = 0;

	ubifs_assert(c->zroot.znode);
	ubifs_assert(znode);
	if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
		/* The preallocated path buffer is too small - grow it */
		kfree(c->bottom_up_buf);
		c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
					   GFP_NOFS);
		if (!c->bottom_up_buf)
			return ERR_PTR(-ENOMEM);
		path = c->bottom_up_buf;
	}
	if (c->zroot.znode->level) {
		/* Record the path up to the first dirty, uncommitted ancestor */
		while (1) {
			int n;

			zp = znode->parent;
			if (!zp)
				break;
			n = znode->iip;
			ubifs_assert(p < c->zroot.znode->level);
			path[p++] = n;
			if (!zp->cnext && ubifs_zn_dirty(znode))
				break;
			znode = zp;
		}
	}

	/* Replay the recorded path top-down, COW-dirtying each level */
	while (1) {
		struct ubifs_zbranch *zbr;

		zp = znode->parent;
		if (zp) {
			ubifs_assert(path[p - 1] >= 0);
			ubifs_assert(path[p - 1] < zp->child_cnt);
			zbr = &zp->zbranch[path[--p]];
			znode = dirty_cow_znode(c, zbr);
		} else {
			ubifs_assert(znode == c->zroot.znode);
			znode = dirty_cow_znode(c, &c->zroot);
		}
		if (IS_ERR(znode) || !p)
			break;
		ubifs_assert(path[p - 1] >= 0);
		ubifs_assert(path[p - 1] < znode->child_cnt);
		znode = znode->zbranch[path[p - 1]].znode;
	}

	return znode;
}
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
/**
 * ubifs_lookup_level0 - search for a key down to TNC level 0.
 * @c: UBIFS file-system description object
 * @key: key to look up
 * @zn: out: level-0 znode the search ended in
 * @n: out: zbranch slot of the match, or the slot after which @key would
 *     be inserted (-1 means "before the first slot")
 *
 * Descends from the index root, loading znodes from the media as
 * required and stamping each visited znode's access time.  For hashed
 * (directory entry) keys, a miss at *@n == -1 may actually be a hit in
 * the previous znode (equal keys can straddle a znode boundary), so one
 * extra backwards step is taken to check.
 *
 * Returns %1 on an exact match, %0 on a miss, or a negative error code.
 */
int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
			struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnc("search key %s", DBGKEY(key));

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		/* The root znode is not in memory yet - load it */
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = zbr->znode;
			continue;
		}

		/* The child is not in memory - load it from the media */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * Hashed key and *n == -1: the matching entry, if any, may be the
	 * last zbranch of the PREVIOUS level-0 znode, because entries with
	 * equal hashed keys can be split across a znode boundary.  Step
	 * back once and compare keys.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		*n = -1;
		return 0;
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
/**
 * lookup_level0_dirty - search for a key, dirtying the path as we go.
 * @c: UBIFS file-system description object
 * @key: key to look up
 * @zn: out: level-0 znode the search ended in (guaranteed dirty)
 * @n: out: zbranch slot of the match, or insertion slot (-1 means
 *     "before the first slot")
 *
 * Same search as 'ubifs_lookup_level0()', but every znode on the path is
 * passed through 'dirty_cow_znode()' so the caller may modify the result
 * directly.  If the hashed-key backwards step lands in a znode that is
 * not yet dirty, it is dirtied bottom-up.
 *
 * Returns %1 on an exact match, %0 on a miss, or a negative error code.
 */
static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
			       struct ubifs_znode **zn, int *n)
{
	int err, exact;
	struct ubifs_znode *znode;
	unsigned long time = get_seconds();

	dbg_tnc("search and dirty key %s", DBGKEY(key));

	znode = c->zroot.znode;
	if (unlikely(!znode)) {
		/* The root znode is not in memory yet - load it */
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	znode = dirty_cow_znode(c, &c->zroot);
	if (IS_ERR(znode))
		return PTR_ERR(znode);

	znode->time = time;

	while (1) {
		struct ubifs_zbranch *zbr;

		exact = ubifs_search_zbranch(c, znode, key, n);

		if (znode->level == 0)
			break;

		if (*n < 0)
			*n = 0;
		zbr = &znode->zbranch[*n];

		if (zbr->znode) {
			znode->time = time;
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			continue;
		}

		/* Load the child from the media, then dirty it */
		znode = ubifs_load_znode(c, zbr, znode, *n);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
		znode = dirty_cow_znode(c, zbr);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	*zn = znode;
	if (exact || !is_hash_key(c, key) || *n != -1) {
		dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
		return exact;
	}

	/*
	 * See ubifs_lookup_level0(): with a hashed key and *n == -1 the
	 * match may be at the end of the previous znode.
	 */
	err = tnc_prev(c, &znode, n);
	if (err == -ENOENT) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}
	if (unlikely(err < 0))
		return err;
	if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
		*n = -1;
		dbg_tnc("found 0, lvl %d, n -1", znode->level);
		return 0;
	}

	/* The previous znode was not on the dirtied path - dirty it now */
	if (znode->cnext || !ubifs_zn_dirty(znode)) {
		znode = dirty_cow_bottom_up(c, znode);
		if (IS_ERR(znode))
			return PTR_ERR(znode);
	}

	dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
	*zn = znode;
	return 1;
}
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number in question
 * @gc_seq1: garbage-collection sequence number sampled before the read
 *
 * Called after a lockless read to decide whether the data just read
 * could have been moved by GC in the meantime.  The smp_rmb() barriers
 * pair with the GC writer's ordering of @c->gced_lnum and @c->gc_seq;
 * do not reorder these reads.  The answer errs on the side of caution.
 *
 * Returns %1 if the LEB may have been garbage collected, %0 if not.
 */
static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
{
	int gc_seq2, gced_lnum;

	gced_lnum = c->gced_lnum;
	smp_rmb();
	gc_seq2 = c->gc_seq;

	/* No GC happened at all - the read is safe */
	if (gc_seq1 == gc_seq2)
		return 0;

	/* More than one GC pass - give up and assume the worst */
	if (gc_seq1 + 1 != gc_seq2)
		return 1;

	/*
	 * Exactly one GC pass: re-read gced_lnum and make sure it did not
	 * change underneath us before trusting it.
	 */
	smp_rmb();
	if (gced_lnum != c->gced_lnum)
		return 1;

	/* The one collected LEB - was it ours? */
	if (gced_lnum == lnum)
		return 1;
	return 0;
}
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
/**
 * ubifs_tnc_locate - look up a node and report its on-media position.
 * @c: UBIFS file-system description object
 * @key: key of the node to look up
 * @node: buffer the node is read into
 * @lnum: out (may be %NULL): LEB number of the node
 * @offs: out: offset of the node (only written when @lnum is non-NULL)
 *
 * For non-hashed keys the node is read WITHOUT holding @c->tnc_mutex,
 * to allow concurrent lookups; if the target LEB may have been garbage
 * collected during the read (see 'maybe_leb_gced()'), the lookup is
 * retried with the safe, locked read path.
 *
 * Returns %0 on success, %-ENOENT if the key is not in the index, or
 * another negative error code on failure.
 */
int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
		     void *node, int *lnum, int *offs)
{
	int found, n, err, safely = 0, gc_seq1;
	struct ubifs_znode *znode;
	struct ubifs_zbranch zbr, *zt;

again:
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out;
	} else if (found < 0) {
		err = found;
		goto out;
	}
	zt = &znode->zbranch[n];
	if (lnum) {
		*lnum = zt->lnum;
		*offs = zt->offs;
	}
	if (is_hash_key(c, key)) {
		/*
		 * Directory-entry nodes go through the leaf-node cache,
		 * which requires the mutex - read under the lock.
		 */
		err = tnc_read_node_nm(c, zt, node);
		goto out;
	}
	if (safely) {
		/* Second attempt: read while holding the mutex */
		err = ubifs_tnc_read_node(c, zt, node);
		goto out;
	}

	/* Copy the zbranch and drop the lock for a concurrent read */
	zbr = znode->zbranch[n];
	gc_seq1 = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);

	if (ubifs_get_wbuf(c, zbr.lnum)) {
		/* LEB has a write-buffer - the wbuf-aware read is safe */
		err = ubifs_tnc_read_node(c, &zbr, node);
		return err;
	}

	err = fallible_read_node(c, key, &zbr, node);
	if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
		/*
		 * The node was missing, or GC may have moved it while we
		 * were reading - retry with the locked path.
		 */
		safely = 1;
		goto again;
	}
	return 0;

out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
/**
 * ubifs_tnc_get_bu_keys - collect zbranches for a bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Starting at @bu->key, collects consecutive data-node zbranches of the
 * same inode that lie contiguously (8-byte aligned) in one LEB, stopping
 * when the buffer would overflow, the block count reaches
 * %UBIFS_MAX_BULK_READ, or the run ends.  @bu->eof is set when the end
 * of the inode's data was reached.  Finally, the block count is capped
 * and trimmed so a partially-covered page at the end is dropped (unless
 * at EOF, where the count is rounded up to a whole page).
 *
 * Returns %0 on success or a negative error code on failure.
 */
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
	int n, err = 0, lnum = -1, uninitialized_var(offs);
	int uninitialized_var(len);
	unsigned int block = key_block(c, &bu->key);
	struct ubifs_znode *znode;

	bu->cnt = 0;
	bu->blk_cnt = 0;
	bu->eof = 0;

	mutex_lock(&c->tnc_mutex);
	/* Find the first key */
	err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
	if (err < 0)
		goto out;
	if (err) {
		/* Exact match - seed the run with the first zbranch */
		len = znode->zbranch[n].len;
		/* The node must fit in the bulk-read buffer on its own */
		if (len > bu->buf_len) {
			err = -EINVAL;
			goto out;
		}

		bu->zbranch[bu->cnt++] = znode->zbranch[n];
		bu->blk_cnt += 1;
		lnum = znode->zbranch[n].lnum;
		offs = ALIGN(znode->zbranch[n].offs + len, 8);
	}
	while (1) {
		struct ubifs_zbranch *zbr;
		union ubifs_key *key;
		unsigned int next_block;

		/* Find the next data key of the same inode */
		err = tnc_next(c, &znode, &n);
		if (err)
			goto out;
		zbr = &znode->zbranch[n];
		key = &zbr->key;

		if (key_inum(c, key) != key_inum(c, &bu->key) ||
		    key_type(c, key) != UBIFS_DATA_KEY) {
			err = -ENOENT;
			goto out;
		}
		if (lnum < 0) {
			/* First node found - start the run here */
			lnum = zbr->lnum;
			offs = ALIGN(zbr->offs + zbr->len, 8);
			len = zbr->len;
			if (len > bu->buf_len) {
				err = -EINVAL;
				goto out;
			}
		} else {
			/*
			 * The run continues only if this node directly
			 * follows the previous one in the same LEB.
			 */
			if (zbr->lnum != lnum || zbr->offs != offs)
				goto out;
			offs += ALIGN(zbr->len, 8);
			len = ALIGN(len, 8) + zbr->len;
			/* Would the total exceed the bulk-read buffer? */
			if (len > bu->buf_len)
				goto out;
		}

		/* Account for holes (absent blocks) between the keys */
		next_block = key_block(c, key);
		bu->blk_cnt += (next_block - block - 1);
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		block = next_block;

		bu->zbranch[bu->cnt++] = *zbr;
		bu->blk_cnt += 1;
		/* Stop at the read-ahead limits */
		if (bu->cnt >= UBIFS_MAX_BULK_READ)
			goto out;
		if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
			goto out;
	}
out:
	if (err == -ENOENT) {
		/* Running off the end of the inode's data is not an error */
		bu->eof = 1;
		err = 0;
	}
	bu->gc_seq = c->gc_seq;
	mutex_unlock(&c->tnc_mutex);
	if (err)
		return err;

	/*
	 * Hole accounting may have pushed blk_cnt past the maximum -
	 * cap it.
	 */
	if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
		bu->blk_cnt = UBIFS_MAX_BULK_READ;

	/*
	 * If the run ends mid-page, trim trailing zbranches so only whole
	 * pages are read (unless we are at EOF, where a partial page is
	 * rounded up instead).
	 */
	if (UBIFS_BLOCKS_PER_PAGE == 1 ||
	    !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
		return 0;
	if (bu->eof) {
		/* At the end of file we can round up */
		bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
		return 0;
	}
	/* Exclude data nodes that do not make up a whole page */
	block = key_block(c, &bu->key) + bu->blk_cnt;
	block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
	while (bu->cnt) {
		if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
			break;
		bu->cnt -= 1;
	}
	return 0;
}
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
/**
 * read_wbuf - read data that may partly still sit in a write-buffer.
 * @wbuf: write-buffer covering the target LEB
 * @buf: buffer to read into
 * @len: number of bytes to read
 * @lnum: LEB number to read from
 * @offs: offset to read from
 *
 * If the requested range overlaps the write-buffer, the overlapping
 * tail is copied from the write-buffer (under @wbuf->lock) and only the
 * leading, already-flushed part is read from the media.  Otherwise the
 * whole range is read from the media.
 *
 * Returns %0 on success or a negative error code on failure.
 */
static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
		     int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int rlen, overlap;

	dbg_io("LEB %d:%d, length %d", lnum, offs, len);
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(offs + len <= c->leb_size);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock and read everything from the media */
		spin_unlock(&wbuf->lock);
		return ubi_read(c->ubi, lnum, buf, offs, len);
	}

	/* Bytes before wbuf->offs are on the media; the rest is in wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the overlapping tail from the write-buffer while locked */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0)
		/* Read the non-overlapping head from the media */
		return ubi_read(c->ubi, lnum, buf, offs, rlen);

	return 0;
}
1681
1682
1683
1684
1685
1686
1687
1688
1689
/**
 * validate_data_node - check a data node read during bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer holding the node
 * @zbr: zbranch the node was expected at
 *
 * Verifies the node's type, common-header sanity, length against the
 * zbranch, and that the node's key equals the zbranch key.
 *
 * Returns %0 if the node is valid, or a negative error code otherwise.
 */
static int validate_data_node(struct ubifs_info *c, void *buf,
			      struct ubifs_zbranch *zbr)
{
	union ubifs_key key1;
	struct ubifs_ch *ch = buf;
	int err, len;

	if (ch->node_type != UBIFS_DATA_NODE) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, UBIFS_DATA_NODE);
		goto out_err;
	}

	err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", UBIFS_DATA_NODE);
		goto out;
	}

	len = le32_to_cpu(ch->len);
	if (len != zbr->len) {
		ubifs_err("bad node length %d, expected %d", len, zbr->len);
		goto out_err;
	}

	/* The node must carry the same key the index says it has */
	key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
	if (!keys_eq(c, &zbr->key, &key1)) {
		ubifs_err("bad key in node at LEB %d:%d",
			  zbr->lnum, zbr->offs);
		dbg_tnc("looked for key %s found node's key %s",
			DBGKEY(&zbr->key), DBGKEY1(&key1));
		goto out_err;
	}

	return 0;

out_err:
	err = -EINVAL;
out:
	ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return err;
}
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
/**
 * ubifs_tnc_bulk_read - read the nodes described by a bulk-read request.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters (zbranches collected by
 *      'ubifs_tnc_get_bu_keys()')
 *
 * Reads the whole contiguous span covering all collected zbranches in
 * one media read (via the write-buffer-aware path when applicable), then
 * validates each data node in the buffer.  If the LEB may have been
 * garbage collected since the keys were collected, %-EAGAIN is returned
 * so the caller can restart.
 *
 * Returns %0 on success, %-EAGAIN to restart, or another negative error
 * code on failure.
 */
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
{
	int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
	struct ubifs_wbuf *wbuf;
	void *buf;

	/* Span from the first node's start to the last node's end */
	len = bu->zbranch[bu->cnt - 1].offs;
	len += bu->zbranch[bu->cnt - 1].len - offs;
	if (len > bu->buf_len) {
		ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
		return -EINVAL;
	}

	/* Do the read */
	wbuf = ubifs_get_wbuf(c, lnum);
	if (wbuf)
		err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
	else
		err = ubi_read(c->ubi, lnum, bu->buf, offs, len);

	/* Check for a race with GC */
	if (maybe_leb_gced(c, lnum, bu->gc_seq))
		return -EAGAIN;

	if (err && err != -EBADMSG) {
		ubifs_err("failed to read from LEB %d:%d, error %d",
			  lnum, offs, err);
		dbg_dump_stack();
		dbg_tnc("key %s", DBGKEY(&bu->key));
		return err;
	}

	/* Validate the nodes read */
	buf = bu->buf;
	for (i = 0; i < bu->cnt; i++) {
		err = validate_data_node(c, buf, &bu->zbranch[i]);
		if (err)
			return err;
		buf = buf + ALIGN(bu->zbranch[i].len, 8);
	}

	return 0;
}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
/**
 * do_lookup_nm - look up a directory entry by hashed key and name.
 * @c: UBIFS file-system description object
 * @key: hashed key of the entry
 * @node: buffer the entry node is read into
 * @nm: name of the entry
 *
 * Finds the key at level 0, then resolves any hash collision by
 * comparing names, all under @c->tnc_mutex.
 *
 * Returns %0 on success, %-ENOENT if no such entry exists, or another
 * negative error code on failure.
 */
static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int found, n, err;
	struct ubifs_znode *znode;

	dbg_tnc("name '%.*s' key %s", nm->len, nm->name, DBGKEY(key));
	mutex_lock(&c->tnc_mutex);
	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (!found) {
		err = -ENOENT;
		goto out_unlock;
	} else if (found < 0) {
		err = found;
		goto out_unlock;
	}

	ubifs_assert(n >= 0);

	err = resolve_collision(c, key, &znode, &n, nm);
	dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
	if (unlikely(err < 0))
		goto out_unlock;
	if (err == 0) {
		err = -ENOENT;
		goto out_unlock;
	}

	err = tnc_read_node_nm(c, &znode->zbranch[n], node);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
/**
 * ubifs_tnc_lookup_nm - look up a directory entry node by key and name.
 * @c: UBIFS file-system description object
 * @key: hashed key of the entry
 * @node: buffer the entry node is read into
 * @nm: name of the entry
 *
 * Fast path: a plain key lookup usually lands on the right entry, so
 * try that first and verify the name.  Only on a name mismatch is the
 * full collision-resolving lookup ('do_lookup_nm()') performed.
 *
 * Returns %0 on success, %-ENOENT if no such entry exists, or another
 * negative error code on failure.
 */
int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
			void *node, const struct qstr *nm)
{
	int err, len;
	const struct ubifs_dent_node *dent = node;

	/*
	 * Try the plain lookup first - in the common (no collision) case
	 * it already returns the entry we want.
	 */
	err = ubifs_tnc_lookup(c, key, node);
	if (err)
		return err;

	len = le16_to_cpu(dent->nlen);
	if (nm->len == len && !memcmp(dent->name, nm->name, len))
		return 0;

	/*
	 * The name does not match - there is a hash collision, so fall
	 * back to the collision-resolving lookup.
	 */
	return do_lookup_nm(c, key, node, nm);
}
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885static void correct_parent_keys(const struct ubifs_info *c,
1886 struct ubifs_znode *znode)
1887{
1888 union ubifs_key *key, *key1;
1889
1890 ubifs_assert(znode->parent);
1891 ubifs_assert(znode->iip == 0);
1892
1893 key = &znode->zbranch[0].key;
1894 key1 = &znode->parent->zbranch[0].key;
1895
1896 while (keys_cmp(c, key, key1) < 0) {
1897 key_copy(c, key, key1);
1898 znode = znode->parent;
1899 znode->alt = 1;
1900 if (!znode->parent || znode->iip)
1901 break;
1902 key1 = &znode->parent->zbranch[0].key;
1903 }
1904}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917static void insert_zbranch(struct ubifs_znode *znode,
1918 const struct ubifs_zbranch *zbr, int n)
1919{
1920 int i;
1921
1922 ubifs_assert(ubifs_zn_dirty(znode));
1923
1924 if (znode->level) {
1925 for (i = znode->child_cnt; i > n; i--) {
1926 znode->zbranch[i] = znode->zbranch[i - 1];
1927 if (znode->zbranch[i].znode)
1928 znode->zbranch[i].znode->iip = i;
1929 }
1930 if (zbr->znode)
1931 zbr->znode->iip = n;
1932 } else
1933 for (i = znode->child_cnt; i > n; i--)
1934 znode->zbranch[i] = znode->zbranch[i - 1];
1935
1936 znode->zbranch[n] = *zbr;
1937 znode->child_cnt += 1;
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953 if (n == 0)
1954 znode->alt = 1;
1955}
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
/**
 * tnc_insert - insert a znode branch into the TNC tree.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: branch to insert
 * @n: slot number to insert at
 *
 * Inserts @zbr at slot @n of @znode.  If the znode is full it is split,
 * and the split propagates upwards (via the 'again' loop), possibly growing
 * the tree by creating a new root.  Returns %0 on success or %-ENOMEM if a
 * new znode could not be allocated.
 */
static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
		      struct ubifs_zbranch *zbr, int n)
{
	struct ubifs_znode *zn, *zi, *zp;
	int i, keep, move, appending = 0;
	union ubifs_key *key = &zbr->key, *key1;

	ubifs_assert(n >= 0 && n <= c->fanout);

	/* Implement naive insert for now */
again:
	zp = znode->parent;
	if (znode->child_cnt < c->fanout) {
		/* There is room in this znode - just slot the branch in */
		ubifs_assert(n != c->fanout);
		dbg_tnc("inserted at %d level %d, key %s", n, znode->level,
			DBGKEY(key));

		insert_zbranch(znode, zbr, n);

		/* Adding the leftmost branch may change keys up the tree */
		if (n == 0 && zp && znode->iip == 0)
			correct_parent_keys(c, znode);

		return 0;
	}

	/*
	 * The znode is full - it has to be split.  Decide how many branches
	 * to keep here and how many to move to the new znode 'zn'.
	 */
	dbg_tnc("splitting level %d, key %s", znode->level, DBGKEY(key));

	if (znode->alt)
		/*
		 * The znode's key was altered, so it may no longer be found
		 * by key - record its on-flash position in the old index.
		 */
		ins_clr_old_idx_znode(c, znode);

	zn = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zn)
		return -ENOMEM;
	zn->parent = zp;
	zn->level = znode->level;

	/*
	 * Split-point heuristic: avoid splitting a run of consecutive data
	 * keys of the same inode across two znodes.
	 */
	if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
		/* Appending at the end after data of the same inode? */
		if (n == c->fanout) {
			key1 = &znode->zbranch[n - 1].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY)
				appending = 1;
		} else
			goto check_split;
	} else if (appending && n != c->fanout) {
		/* Not actually appending - fall through to the split check */
		appending = 0;
check_split:
		/* Prefer a split point right at @n if it separates inodes */
		if (n >= (c->fanout + 1) / 2) {
			key1 = &znode->zbranch[0].key;
			if (key_inum(c, key1) == key_inum(c, key) &&
			    key_type(c, key1) == UBIFS_DATA_KEY) {
				key1 = &znode->zbranch[n].key;
				if (key_inum(c, key1) != key_inum(c, key) ||
				    key_type(c, key1) != UBIFS_DATA_KEY) {
					keep = n;
					move = c->fanout - keep;
					zi = znode;
					goto do_split;
				}
			}
		}
	}

	if (appending) {
		/* Keep everything; the new branch starts the new znode */
		keep = c->fanout;
		move = 0;
	} else {
		/* Default: split roughly in half */
		keep = (c->fanout + 1) / 2;
		move = c->fanout - keep;
	}

	/* Decide which znode (old or new) receives the inserted branch */
	if (n < keep) {
		/* Insert into the existing znode */
		zi = znode;
		move += 1;
		keep -= 1;
	} else {
		/* Insert into the new znode */
		zi = zn;
		n -= keep;
		/* Re-parent the child of the inserted branch, if any */
		if (zn->level != 0)
			zbr->znode->parent = zn;
	}

do_split:

	__set_bit(DIRTY_ZNODE, &zn->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	zn->child_cnt = move;
	znode->child_cnt = keep;

	dbg_tnc("moving %d, keeping %d", move, keep);

	/* Move the tail branches to the new znode */
	for (i = 0; i < move; i++) {
		zn->zbranch[i] = znode->zbranch[keep + i];
		/* Re-parent any in-memory children */
		if (zn->level != 0)
			if (zn->zbranch[i].znode) {
				zn->zbranch[i].znode->parent = zn;
				zn->zbranch[i].znode->iip = i;
			}
	}

	/* Insert the new branch into whichever half was chosen */
	dbg_tnc("inserting at %d level %d, key %s", n, zn->level, DBGKEY(key));

	insert_zbranch(zi, zbr, n);

	/* Now insert the new znode 'zn' into the parent */
	if (zp) {
		if (n == 0 && zi == znode && znode->iip == 0)
			correct_parent_keys(c, znode);

		/* The new znode goes right after the split one */
		n = znode->iip + 1;

		/*
		 * Tail recursion: reuse @zbr to describe the new znode and
		 * insert it into the parent by looping back to 'again'.
		 */
		zbr->key = zn->zbranch[0].key;
		zbr->znode = zn;
		zbr->lnum = 0;
		zbr->offs = 0;
		zbr->len = 0;
		znode = zp;

		goto again;
	}

	/* We split the root - grow the tree with a new root znode */
	dbg_tnc("creating new zroot at level %d", znode->level + 1);

	zi = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!zi)
		return -ENOMEM;

	zi->child_cnt = 2;
	zi->level = znode->level + 1;

	__set_bit(DIRTY_ZNODE, &zi->flags);
	atomic_long_inc(&c->dirty_zn_cnt);

	/* Old root keeps its on-flash position; new half has none yet */
	zi->zbranch[0].key = znode->zbranch[0].key;
	zi->zbranch[0].znode = znode;
	zi->zbranch[0].lnum = c->zroot.lnum;
	zi->zbranch[0].offs = c->zroot.offs;
	zi->zbranch[0].len = c->zroot.len;
	zi->zbranch[1].key = zn->zbranch[0].key;
	zi->zbranch[1].znode = zn;

	/* The new root is dirty and not on the media yet */
	c->zroot.lnum = 0;
	c->zroot.offs = 0;
	c->zroot.len = 0;
	c->zroot.znode = zi;

	zn->parent = zi;
	zn->iip = 1;
	znode->parent = zi;
	znode->iip = 0;

	return 0;
}
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2163 int offs, int len)
2164{
2165 int found, n, err = 0;
2166 struct ubifs_znode *znode;
2167
2168 mutex_lock(&c->tnc_mutex);
2169 dbg_tnc("%d:%d, len %d, key %s", lnum, offs, len, DBGKEY(key));
2170 found = lookup_level0_dirty(c, key, &znode, &n);
2171 if (!found) {
2172 struct ubifs_zbranch zbr;
2173
2174 zbr.znode = NULL;
2175 zbr.lnum = lnum;
2176 zbr.offs = offs;
2177 zbr.len = len;
2178 key_copy(c, key, &zbr.key);
2179 err = tnc_insert(c, znode, &zbr, n + 1);
2180 } else if (found == 1) {
2181 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2182
2183 lnc_free(zbr);
2184 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2185 zbr->lnum = lnum;
2186 zbr->offs = offs;
2187 zbr->len = len;
2188 } else
2189 err = found;
2190 if (!err)
2191 err = dbg_check_tnc(c, 0);
2192 mutex_unlock(&c->tnc_mutex);
2193
2194 return err;
2195}
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node matches.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @old_lnum: LEB number of the old node
 * @old_offs: offset of the old node
 * @lnum: LEB number of the new node
 * @offs: offset of the new node
 * @len: length of the new node
 *
 * The branch for @key is re-pointed at @lnum:@offs only if it still refers
 * to @old_lnum:@old_offs.  If it does not, the new node is accounted as
 * dirty (obsolete) space instead.  Returns %0 on success or a negative
 * error code.
 */
int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
		      int old_lnum, int old_offs, int lnum, int offs, int len)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("old LEB %d:%d, new LEB %d:%d, len %d, key %s", old_lnum,
		old_offs, lnum, offs, len, DBGKEY(key));
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		found = 0;	/* Assume the old node is gone until proven */
		if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
			lnc_free(zbr);
			/* The old node becomes garbage */
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			if (err)
				goto out_unlock;
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			found = 1;
		} else if (is_hash_key(c, key)) {
			/*
			 * Key collision: another entry with the same key may
			 * be the one at @old_lnum:@old_offs - search for it
			 * by position rather than by name.
			 */
			found = resolve_collision_directly(c, key, &znode, &n,
							   old_lnum, old_offs);
			dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
				found, znode, n, old_lnum, old_offs);
			if (found < 0) {
				err = found;
				goto out_unlock;
			}

			if (found) {
				/* Ensure the znode is dirtied */
				if (znode->cnext || !ubifs_zn_dirty(znode)) {
					znode = dirty_cow_bottom_up(c, znode);
					if (IS_ERR(znode)) {
						err = PTR_ERR(znode);
						goto out_unlock;
					}
				}
				zbr = &znode->zbranch[n];
				lnc_free(zbr);
				err = ubifs_add_dirt(c, zbr->lnum,
						     zbr->len);
				if (err)
					goto out_unlock;
				zbr->lnum = lnum;
				zbr->offs = offs;
				zbr->len = len;
			}
		}
	}

	if (!found)
		/* Old node not referenced - new node is obsolete already */
		err = ubifs_add_dirt(c, lnum, len);

	if (!err)
		err = dbg_check_tnc(c, 0);

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
/**
 * ubifs_tnc_add_nm - add a "hashed" node to the TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of the node
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * Same as 'ubifs_tnc_add()', but for keys that may collide (e.g. directory
 * entry keys): the name @nm disambiguates entries sharing @key.  Returns %0
 * on success or a negative error code.
 */
int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
		     int lnum, int offs, int len, const struct qstr *nm)
{
	int found, n, err = 0;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("LEB %d:%d, name '%.*s', key %s", lnum, offs, nm->len, nm->name,
		DBGKEY(key));
	found = lookup_level0_dirty(c, key, &znode, &n);
	if (found < 0) {
		err = found;
		goto out_unlock;
	}

	if (found == 1) {
		/* Key exists - look for the entry with the matching name */
		if (c->replaying)
			found = fallible_resolve_collision(c, key, &znode, &n,
							   nm, 1);
		else
			found = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
		if (found < 0) {
			err = found;
			goto out_unlock;
		}

		/* Ensure the znode is dirtied before it is modified */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		if (found == 1) {
			/* Same name found - overwrite the old branch */
			struct ubifs_zbranch *zbr = &znode->zbranch[n];

			lnc_free(zbr);
			err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
			goto out_unlock;
		}
	}

	if (!found) {
		/* No matching entry - insert a new branch after slot @n */
		struct ubifs_zbranch zbr;

		zbr.znode = NULL;
		zbr.lnum = lnum;
		zbr.offs = offs;
		zbr.len = len;
		key_copy(c, key, &zbr.key);
		err = tnc_insert(c, znode, &zbr, n + 1);
		if (err)
			goto out_unlock;
		if (c->replaying) {
			/*
			 * NOTE(review): during replay, an obsolete entry with
			 * this key may still be present - removing with an
			 * empty name (which cannot match a real entry)
			 * presumably lets the fallible resolver drop such a
			 * dangling branch.  Confirm against replay code.
			 */
			struct qstr noname = { .len = 0, .name = "" };

			err = dbg_check_tnc(c, 0);
			mutex_unlock(&c->tnc_mutex);
			if (err)
				return err;
			return ubifs_tnc_remove_nm(c, key, &noname);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
/**
 * tnc_delete - delete a znode branch from the TNC tree.
 * @c: UBIFS file-system description object
 * @znode: level-0 znode to delete the branch from
 * @n: branch slot number to delete
 *
 * Removes zbranch @n from @znode, frees znodes which become empty as a
 * result, and collapses the root while it has a single child.  Underfull
 * znodes are never merged - only emptied znodes are freed.  Returns %0 on
 * success or a negative error code.
 */
static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *zp;
	int i, err;

	/* Delete without merge for now */
	ubifs_assert(znode->level == 0);
	ubifs_assert(n >= 0 && n < c->fanout);
	dbg_tnc("deleting %s", DBGKEY(&znode->zbranch[n].key));

	zbr = &znode->zbranch[n];
	lnc_free(zbr);

	/* The deleted on-flash node becomes dirty (garbage) space */
	err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
	if (err) {
		dbg_dump_znode(c, znode);
		return err;
	}

	/* Shift the remaining branches over the hole */
	for (i = n; i < znode->child_cnt - 1; i++)
		znode->zbranch[i] = znode->zbranch[i + 1];
	znode->child_cnt -= 1;

	if (znode->child_cnt > 0)
		return 0;

	/*
	 * The znode is now empty: dispose of it, and keep disposing of
	 * ancestors for as long as removing a child would leave them empty
	 * (i.e. while the parent has exactly one child - this one).
	 */
	do {
		ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
		ubifs_assert(ubifs_zn_dirty(znode));

		zp = znode->parent;
		n = znode->iip;

		atomic_long_dec(&c->dirty_zn_cnt);

		/* Preserve the old on-flash position in the old-index tree */
		err = insert_old_idx_znode(c, znode);
		if (err)
			return err;

		if (znode->cnext) {
			/*
			 * The znode is on the commit list - mark it obsolete
			 * for the committer instead of freeing it here.
			 */
			__set_bit(OBSOLETE_ZNODE, &znode->flags);
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		} else
			kfree(znode);
		znode = zp;
	} while (znode->child_cnt == 1);

	/* Remove the dead branch from the first non-emptied ancestor */
	znode->child_cnt -= 1;
	ubifs_assert(znode->level != 0);
	for (i = n; i < znode->child_cnt; i++) {
		znode->zbranch[i] = znode->zbranch[i + 1];
		if (znode->zbranch[i].znode)
			znode->zbranch[i].znode->iip = i;
	}

	/*
	 * If the root is left with a single child, make that child the new
	 * root, repeating while the (non-leaf) root is single-child.
	 */
	if (!znode->parent) {
		while (znode->child_cnt == 1 && znode->level != 0) {
			zp = znode;
			zbr = &znode->zbranch[0];
			znode = get_znode(c, znode, 0);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			/* The new root must be dirty (copy-on-write) */
			znode = dirty_cow_znode(c, zbr);
			if (IS_ERR(znode))
				return PTR_ERR(znode);
			znode->parent = NULL;
			znode->iip = 0;
			if (c->zroot.len) {
				/* Remember the old root's flash position */
				err = insert_old_idx(c, c->zroot.lnum,
						     c->zroot.offs);
				if (err)
					return err;
			}
			c->zroot.lnum = zbr->lnum;
			c->zroot.offs = zbr->offs;
			c->zroot.len = zbr->len;
			c->zroot.znode = znode;
			ubifs_assert(!test_bit(OBSOLETE_ZNODE,
				     &zp->flags));
			ubifs_assert(test_bit(DIRTY_ZNODE, &zp->flags));
			atomic_long_dec(&c->dirty_zn_cnt);

			if (zp->cnext) {
				/* Old root is being committed - defer free */
				__set_bit(OBSOLETE_ZNODE, &zp->flags);
				atomic_long_inc(&c->clean_zn_cnt);
				atomic_long_inc(&ubifs_clean_zn_cnt);
			} else
				kfree(zp);
		}
	}

	return 0;
}
2492
2493
2494
2495
2496
2497
2498
2499
2500int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2501{
2502 int found, n, err = 0;
2503 struct ubifs_znode *znode;
2504
2505 mutex_lock(&c->tnc_mutex);
2506 dbg_tnc("key %s", DBGKEY(key));
2507 found = lookup_level0_dirty(c, key, &znode, &n);
2508 if (found < 0) {
2509 err = found;
2510 goto out_unlock;
2511 }
2512 if (found == 1)
2513 err = tnc_delete(c, znode, n);
2514 if (!err)
2515 err = dbg_check_tnc(c, 0);
2516
2517out_unlock:
2518 mutex_unlock(&c->tnc_mutex);
2519 return err;
2520}
2521
2522
2523
2524
2525
2526
2527
2528
2529
/**
 * ubifs_tnc_remove_nm - remove the index entry of a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @nm: directory entry name
 *
 * Removes the entry for @key whose name is @nm, resolving key collisions.
 * Removing a non-existent entry is a successful no-op.  Returns %0 on
 * success or a negative error code.
 */
int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
			const struct qstr *nm)
{
	int n, err;
	struct ubifs_znode *znode;

	mutex_lock(&c->tnc_mutex);
	dbg_tnc("%.*s, key %s", nm->len, nm->name, DBGKEY(key));
	err = lookup_level0_dirty(c, key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* Key exists - find the entry with the matching name */
		if (c->replaying)
			err = fallible_resolve_collision(c, key, &znode, &n,
							 nm, 0);
		else
			err = resolve_collision(c, key, &znode, &n, nm);
		dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
		if (err < 0)
			goto out_unlock;
		if (err) {
			/* Ensure the znode is dirtied before modification */
			if (znode->cnext || !ubifs_zn_dirty(znode)) {
				znode = dirty_cow_bottom_up(c, znode);
				if (IS_ERR(znode)) {
					err = PTR_ERR(znode);
					goto out_unlock;
				}
			}
			err = tnc_delete(c, znode, n);
		}
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
/**
 * key_in_range - determine if a key falls within a key range.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * Returns %1 if @from_key <= @key <= @to_key, and %0 otherwise.
 */
static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
			union ubifs_key *from_key, union ubifs_key *to_key)
{
	return keys_cmp(c, key, from_key) >= 0 &&
	       keys_cmp(c, key, to_key) <= 0;
}
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
/**
 * ubifs_tnc_remove_range - remove index entries in a key range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * Removes all index entries with keys in [@from_key, @to_key].  Returns %0
 * on success or a negative error code.
 */
int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
			   union ubifs_key *to_key)
{
	int i, n, k, err = 0;
	struct ubifs_znode *znode;
	union ubifs_key *key;

	mutex_lock(&c->tnc_mutex);
	while (1) {
		/* Find the first key in range */
		err = ubifs_lookup_level0(c, from_key, &znode, &n);
		if (err < 0)
			goto out_unlock;

		if (err)
			key = from_key;
		else {
			/* @from_key itself is absent - try the next key */
			err = tnc_next(c, &znode, &n);
			if (err == -ENOENT) {
				err = 0;
				goto out_unlock;
			}
			if (err < 0)
				goto out_unlock;
			key = &znode->zbranch[n].key;
			if (!key_in_range(c, key, from_key, to_key)) {
				/* Nothing left in range - done */
				err = 0;
				goto out_unlock;
			}
		}

		/* Ensure the znode is dirtied before modification */
		if (znode->cnext || !ubifs_zn_dirty(znode)) {
			znode = dirty_cow_bottom_up(c, znode);
			if (IS_ERR(znode)) {
				err = PTR_ERR(znode);
				goto out_unlock;
			}
		}

		/* Account all in-range branches after slot @n as garbage */
		for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
			key = &znode->zbranch[i].key;
			if (!key_in_range(c, key, from_key, to_key))
				break;
			lnc_free(&znode->zbranch[i]);
			err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
					     znode->zbranch[i].len);
			if (err) {
				dbg_dump_znode(c, znode);
				goto out_unlock;
			}
			dbg_tnc("removing %s", DBGKEY(key));
		}
		if (k) {
			/* Shift the surviving branches over the removed run */
			for (i = n + 1 + k; i < znode->child_cnt; i++)
				znode->zbranch[i - k] = znode->zbranch[i];
			znode->child_cnt -= k;
		}

		/* Delete the first in-range branch (slot @n) and loop */
		err = tnc_delete(c, znode, n);
		if (err)
			goto out_unlock;
	}

out_unlock:
	if (!err)
		err = dbg_check_tnc(c, 0);
	mutex_unlock(&c->tnc_mutex);
	return err;
}
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
/**
 * ubifs_tnc_remove_ino - remove an inode from the TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * Removes the inode's extended attributes (each xattr entry and the nodes
 * of the corresponding xattr inode), and then all index entries of inode
 * @inum itself.  Returns %0 on success or a negative error code.
 */
int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
{
	union ubifs_key key1, key2;
	struct ubifs_dent_node *xent, *pxent = NULL;
	/* NULL name means "start from the first entry" for the iterator */
	struct qstr nm = { .name = NULL };

	dbg_tnc("ino %lu", (unsigned long)inum);

	/* Walk and remove all extended attribute entries of this inode */
	lowest_xent_key(c, &key1, inum);
	while (1) {
		ino_t xattr_inum;
		int err;

		xent = ubifs_tnc_next_ent(c, &key1, &nm);
		if (IS_ERR(xent)) {
			err = PTR_ERR(xent);
			if (err == -ENOENT)
				break;	/* No more xattr entries */
			return err;
		}

		xattr_inum = le64_to_cpu(xent->inum);
		dbg_tnc("xent '%s', ino %lu", xent->name,
			(unsigned long)xattr_inum);

		/* Remove the xattr entry itself */
		nm.name = xent->name;
		nm.len = le16_to_cpu(xent->nlen);
		err = ubifs_tnc_remove_nm(c, &key1, &nm);
		if (err) {
			kfree(xent);
			return err;
		}

		/* Remove all nodes of the xattr inode */
		lowest_ino_key(c, &key1, xattr_inum);
		highest_ino_key(c, &key2, xattr_inum);
		err = ubifs_tnc_remove_range(c, &key1, &key2);
		if (err) {
			kfree(xent);
			return err;
		}

		/*
		 * Keep @xent alive one more iteration: @nm points into it and
		 * is needed by the next 'ubifs_tnc_next_ent()' call.
		 */
		kfree(pxent);
		pxent = xent;
		key_read(c, &xent->key, &key1);
	}

	kfree(pxent);

	/* Finally remove all nodes of the inode itself */
	lowest_ino_key(c, &key1, inum);
	highest_ino_key(c, &key2, inum);

	return ubifs_tnc_remove_range(c, &key1, &key2);
}
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of the last entry found, or the lowest possible entry key of the
 *       directory when @nm->name is %NULL (start of iteration)
 * @nm: name of the last entry found, or a qstr with %NULL name to start
 *
 * Returns the next entry, kmalloc'ed (the caller must free it), or an
 * ERR_PTR: %-ENOENT when there are no more entries for this directory, or
 * another negative error code on failure.
 */
struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
					   union ubifs_key *key,
					   const struct qstr *nm)
{
	int n, err, type = key_type(c, key);
	struct ubifs_znode *znode;
	struct ubifs_dent_node *dent;
	struct ubifs_zbranch *zbr;
	union ubifs_key *dkey;

	dbg_tnc("%s %s", nm->name ? (char *)nm->name : "(lowest)", DBGKEY(key));
	ubifs_assert(is_hash_key(c, key));

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, key, &znode, &n);
	if (unlikely(err < 0))
		goto out_unlock;

	if (nm->name) {
		if (err) {
			/* Key found - handle collisions by matching @nm */
			err = resolve_collision(c, key, &znode, &n, nm);
			dbg_tnc("rc returned %d, znode %p, n %d",
				err, znode, n);
			if (unlikely(err < 0))
				goto out_unlock;
		}

		/* The next entry follows the one just found */
		err = tnc_next(c, &znode, &n);
		if (unlikely(err))
			goto out_unlock;
	} else {
		/*
		 * Starting iteration: @key is the lowest possible entry key
		 * for the directory, which normally does not exist as an
		 * actual entry.
		 */
		if (!err) {
			/*
			 * The exact key is absent - advance to the first
			 * entry with a greater key.
			 */
			err = tnc_next(c, &znode, &n);
			if (err)
				goto out_unlock;
		}
	}

	zbr = &znode->zbranch[n];
	dent = kmalloc(zbr->len, GFP_NOFS);
	if (unlikely(!dent)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * The branch we advanced to must still belong to the same inode and
	 * key type, otherwise the directory's entries are exhausted.
	 */
	dkey = &zbr->key;
	if (key_inum(c, dkey) != key_inum(c, key) ||
	    key_type(c, dkey) != type) {
		err = -ENOENT;
		goto out_free;
	}

	err = tnc_read_node_nm(c, zbr, dent);
	if (unlikely(err))
		goto out_free;

	mutex_unlock(&c->tnc_mutex);
	return dent;

out_free:
	kfree(dent);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return ERR_PTR(err);
}
2843
2844
2845
2846
2847
2848
2849
2850static void tnc_destroy_cnext(struct ubifs_info *c)
2851{
2852 struct ubifs_znode *cnext;
2853
2854 if (!c->cnext)
2855 return;
2856 ubifs_assert(c->cmt_state == COMMIT_BROKEN);
2857 cnext = c->cnext;
2858 do {
2859 struct ubifs_znode *znode = cnext;
2860
2861 cnext = cnext->cnext;
2862 if (test_bit(OBSOLETE_ZNODE, &znode->flags))
2863 kfree(znode);
2864 } while (cnext && cnext != c->cnext);
2865}
2866
2867
2868
2869
2870
2871void ubifs_tnc_close(struct ubifs_info *c)
2872{
2873 long clean_freed;
2874
2875 tnc_destroy_cnext(c);
2876 if (c->zroot.znode) {
2877 clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
2878 atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
2879 }
2880 kfree(c->gap_lebs);
2881 kfree(c->ilebs);
2882 destroy_old_idx(c);
2883}
2884
2885
2886
2887
2888
2889
2890
2891
2892
/**
 * left_znode - get the znode to the left of the given one at the same level.
 * @c: UBIFS file-system description object
 * @znode: znode to start from
 *
 * Returns the left neighbour at the same level, %NULL if there is none
 * (leftmost edge of the tree), or an ERR_PTR if loading a znode failed.
 */
static struct ubifs_znode *left_znode(struct ubifs_info *c,
				      struct ubifs_znode *znode)
{
	int level = znode->level;

	while (1) {
		int n = znode->iip - 1;

		/* Go up until it is possible to go left */
		znode = znode->parent;
		if (!znode)
			return NULL;
		if (n >= 0) {
			/* Go left */
			znode = get_znode(c, znode, n);
			if (IS_ERR(znode))
				return znode;
			/* Descend along the rightmost path to @level */
			while (znode->level != level) {
				n = znode->child_cnt - 1;
				znode = get_znode(c, znode, n);
				if (IS_ERR(znode))
					return znode;
			}
			break;
		}
	}
	return znode;
}
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930static struct ubifs_znode *right_znode(struct ubifs_info *c,
2931 struct ubifs_znode *znode)
2932{
2933 int level = znode->level;
2934
2935 while (1) {
2936 int n = znode->iip + 1;
2937
2938
2939 znode = znode->parent;
2940 if (!znode)
2941 return NULL;
2942 if (n < znode->child_cnt) {
2943
2944 znode = get_znode(c, znode, n);
2945 if (IS_ERR(znode))
2946 return znode;
2947 while (znode->level != level) {
2948 znode = get_znode(c, znode, 0);
2949 if (IS_ERR(znode))
2950 return znode;
2951 }
2952 break;
2953 }
2954 }
2955 return znode;
2956}
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
/**
 * lookup_znode - find a particular indexing node from the TNC.
 * @c: UBIFS file-system description object
 * @key: index node key to look up
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * Searches the TNC for the znode whose on-flash position is @lnum:@offs.
 * For hashed keys, neighbouring branches with equal keys are also scanned
 * in both directions.  Returns the znode, %NULL if no branch refers to
 * @lnum:@offs, or an ERR_PTR on failure.
 */
static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
					union ubifs_key *key, int level,
					int lnum, int offs)
{
	struct ubifs_znode *znode, *zn;
	int n, nn;

	/*
	 * @level may originate from the media, so do not trust it to be
	 * valid.
	 */
	if (level < 0)
		return ERR_PTR(-EINVAL);

	/* Load the root znode if it is not in memory yet */
	znode = c->zroot.znode;
	if (!znode) {
		znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(znode))
			return znode;
	}

	/* Check if it is the root znode itself */
	if (c->zroot.lnum == lnum && c->zroot.offs == offs)
		return znode;

	/* A znode at @level must hang below a znode at @level + 1 */
	if (level >= znode->level)
		return NULL;
	while (1) {
		ubifs_search_zbranch(c, znode, key, &n);
		if (n < 0) {
			/*
			 * The key is less than the leftmost branch of this
			 * znode, so the wanted branch must be in the left
			 * neighbour (the key may have shrunk since the
			 * on-flash index was written).
			 */
			znode = left_znode(c, znode);
			if (!znode)
				return NULL;
			if (IS_ERR(znode))
				return znode;
			ubifs_search_zbranch(c, znode, key, &n);
			ubifs_assert(n >= 0);
		}
		if (znode->level == level + 1)
			break;
		znode = get_znode(c, znode, n);
		if (IS_ERR(znode))
			return znode;
	}

	/* Direct hit: the branch at @n refers to @lnum:@offs */
	if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
		return get_znode(c, znode, n);

	/* Non-hashed keys are unique, so there is nothing more to try */
	if (!is_hash_key(c, key))
		return NULL;

	/*
	 * Colliding keys: the wanted branch may be a neighbour with an equal
	 * key.  Remember this position and scan left, then right.
	 */
	zn = znode;
	nn = n;

	while (1) {
		/* Move one branch to the left */
		if (n)
			n -= 1;
		else {
			znode = left_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = znode->child_cnt - 1;
		}

		/* Check it */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);

		/* Stop once the keys no longer collide */
		if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
			break;
	}

	/* Back to the starting point, now scan right */
	znode = zn;
	n = nn;

	while (1) {
		/* Move one branch to the right */
		if (++n >= znode->child_cnt) {
			znode = right_znode(c, znode);
			if (!znode)
				break;
			if (IS_ERR(znode))
				return znode;
			n = 0;
		}

		/* Check it */
		if (znode->zbranch[n].lnum == lnum &&
		    znode->zbranch[n].offs == offs)
			return get_znode(c, znode, n);

		/* Stop once the keys no longer collide */
		if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
			break;
	}
	return NULL;
}
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
/**
 * is_idx_node_in_tnc - determine if an index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of the index node
 * @level: index node level
 * @lnum: LEB number of the index node
 * @offs: offset of the index node
 *
 * Returns %0 if the node is not in the TNC, %1 if the corresponding znode
 * is dirty, %2 if it is clean, and a negative error code on failure.
 */
int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
		       int lnum, int offs)
{
	struct ubifs_znode *znode = lookup_znode(c, key, level, lnum, offs);

	if (!znode)
		return 0;
	if (IS_ERR(znode))
		return PTR_ERR(znode);
	if (ubifs_zn_dirty(znode))
		return 1;
	return 2;
}
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
/**
 * is_leaf_node_in_tnc - determine if a non-indexing node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of the node
 * @lnum: LEB number of the node
 * @offs: offset of the node
 *
 * Returns %1 if a zbranch referring to @lnum:@offs exists for @key, %0 if
 * not, and a negative error code on failure.  For hashed (colliding) keys,
 * branches with an equal key are also checked in both directions.  Called
 * with the TNC mutex held (see 'ubifs_tnc_has_node()').
 */
static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
			       int lnum, int offs)
{
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *zn;
	int n, found, err, nn;
	/* Non-hashed keys are unique, so no collision scan is needed */
	const int unique = !is_hash_key(c, key);

	found = ubifs_lookup_level0(c, key, &znode, &n);
	if (found < 0)
		return found;
	if (!found)
		return 0;
	zbr = &znode->zbranch[n];
	if (lnum == zbr->lnum && offs == zbr->offs)
		return 1;
	if (unique)
		return 0;

	/*
	 * Colliding keys: remember this position, then scan branches with
	 * an equal key backwards, then forwards.
	 */
	zn = znode;
	nn = n;

	while (1) {
		err = tnc_prev(c, &znode, &n);
		if (err == -ENOENT)
			break;
		if (err)
			return err;
		/* Stop as soon as the key differs */
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1;
	}

	/* Back to the starting point, scan forwards */
	znode = zn;
	n = nn;
	while (1) {
		err = tnc_next(c, &znode, &n);
		if (err) {
			if (err == -ENOENT)
				return 0;
			return err;
		}
		/* Stop as soon as the key differs */
		if (keys_cmp(c, key, &znode->zbranch[n].key))
			break;
		zbr = &znode->zbranch[n];
		if (lnum == zbr->lnum && offs == zbr->offs)
			return 1;
	}
	return 0;
}
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3208 int lnum, int offs, int is_idx)
3209{
3210 int err;
3211
3212 mutex_lock(&c->tnc_mutex);
3213 if (is_idx) {
3214 err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3215 if (err < 0)
3216 goto out_unlock;
3217 if (err == 1)
3218
3219 err = 0;
3220 else if (err == 2)
3221
3222 err = 1;
3223 else
3224 BUG_ON(err != 0);
3225 } else
3226 err = is_leaf_node_in_tnc(c, key, lnum, offs);
3227
3228out_unlock:
3229 mutex_unlock(&c->tnc_mutex);
3230 return err;
3231}
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3248 int lnum, int offs)
3249{
3250 struct ubifs_znode *znode;
3251 int err = 0;
3252
3253 mutex_lock(&c->tnc_mutex);
3254 znode = lookup_znode(c, key, level, lnum, offs);
3255 if (!znode)
3256 goto out_unlock;
3257 if (IS_ERR(znode)) {
3258 err = PTR_ERR(znode);
3259 goto out_unlock;
3260 }
3261 znode = dirty_cow_bottom_up(c, znode);
3262 if (IS_ERR(znode)) {
3263 err = PTR_ERR(znode);
3264 goto out_unlock;
3265 }
3266
3267out_unlock:
3268 mutex_unlock(&c->tnc_mutex);
3269 return err;
3270}
3271
3272#ifdef CONFIG_UBIFS_FS_DEBUG
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
/**
 * dbg_check_inode_size - check that no data exists beyond the inode size.
 * @c: UBIFS file-system description object
 * @inode: inode to check
 * @size: expected inode size
 *
 * Debug helper: verifies that no data node of @inode lies at or past the
 * block containing offset @size.  Returns %0 on success, %-EINVAL (after
 * dumping the offending key and inode) if stale data is found, or another
 * negative error code on lookup failure.
 */
int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
			 loff_t size)
{
	int err, n;
	union ubifs_key from_key, to_key, *key;
	struct ubifs_znode *znode;
	unsigned int block;

	/* Only regular files carry data nodes */
	if (!S_ISREG(inode->i_mode))
		return 0;
	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	/* First block that should contain no data */
	block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	data_key_init(c, &from_key, inode->i_ino, block);
	highest_data_key(c, &to_key, inode->i_ino);

	mutex_lock(&c->tnc_mutex);
	err = ubifs_lookup_level0(c, &from_key, &znode, &n);
	if (err < 0)
		goto out_unlock;

	if (err) {
		/* A data node exists exactly at the first bad block */
		err = -EINVAL;
		key = &from_key;
		goto out_dump;
	}

	/* Otherwise inspect the first key after @from_key */
	err = tnc_next(c, &znode, &n);
	if (err == -ENOENT) {
		/* No more keys at all - size is fine */
		err = 0;
		goto out_unlock;
	}
	if (err < 0)
		goto out_unlock;

	ubifs_assert(err == 0);
	key = &znode->zbranch[n].key;
	if (!key_in_range(c, key, &from_key, &to_key))
		goto out_unlock;

	/* Falls through: in-range key means data beyond @size exists */
out_dump:
	block = key_block(c, key);
	ubifs_err("inode %lu has size %lld, but there are data at offset %lld "
		  "(data key %s)", (unsigned long)inode->i_ino, size,
		  ((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key));
	dbg_dump_inode(c, inode);
	dbg_dump_stack();
	err = -EINVAL;

out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
3339
3340#endif
3341