// SPDX-License-Identifier: GPL-2.0+
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements TNC (Tree Node Cache) which caches indexing nodes of
 * the UBIFS B-tree.
 *
 * At the moment the locking rules of the TNC tree are quite simple and
 * straightforward: there is a single mutex which is locked while the tree is
 * traversed. If a znode is not in memory, it is read from flash while the
 * mutex is still held.
 */
#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <u-boot/crc.h>
#else
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/stat.h>
#endif
#include "ubifs.h"

/*
 * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
 * @NAME_LESS: name corresponding to the first argument is less than second
 * @NAME_MATCHES: names match
 * @NAME_GREATER: name corresponding to the first argument is greater than
 *                second
 * @NOT_ON_MEDIA: node referred by zbranch does not exist on the media
 */
enum {
	NAME_LESS    = 0,
	NAME_MATCHES = 1,
	NAME_GREATER = 2,
	NOT_ON_MEDIA = 3,
};

/**
 * insert_old_idx - record an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Returns %0 on success, and a negative error code on failure.
 *
 * For recovery, there must always be a complete intact version of the index
 * on flash at all times. That is called the "old index". It is the index as
 * at the time of the last successful commit. Many of the index nodes in the
 * old index may be dirty, but they must not be erased until the next
 * successful commit (at which point that index becomes the old index).
 *
 * That means that the garbage collection and the in-the-gaps method of
 * committing must be able to determine if an index node is in the old index.
 * Most of the old index nodes can be found by looking up the TNC using the
 * 'lookup_znode()' function. However, some of the old index nodes may have
 * been deleted from the current index or may have been changed so much that
 * they cannot be easily found. In those cases, an entry is added to an
 * RB-tree. That is what this function does. The RB-tree is ordered by LEB
 * number and offset because they uniquely identify the old index node.
 */
76static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
77{
78 struct ubifs_old_idx *old_idx, *o;
79 struct rb_node **p, *parent = NULL;
80
81 old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
82 if (unlikely(!old_idx))
83 return -ENOMEM;
84 old_idx->lnum = lnum;
85 old_idx->offs = offs;
86
87 p = &c->old_idx.rb_node;
88 while (*p) {
89 parent = *p;
90 o = rb_entry(parent, struct ubifs_old_idx, rb);
91 if (lnum < o->lnum)
92 p = &(*p)->rb_left;
93 else if (lnum > o->lnum)
94 p = &(*p)->rb_right;
95 else if (offs < o->offs)
96 p = &(*p)->rb_left;
97 else if (offs > o->offs)
98 p = &(*p)->rb_right;
99 else {
100 ubifs_err(c, "old idx added twice!");
101 kfree(old_idx);
102 return 0;
103 }
104 }
105 rb_link_node(&old_idx->rb, parent, p);
106 rb_insert_color(&old_idx->rb, &c->old_idx);
107 return 0;
108}

/**
 * insert_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Returns %0 on success, and a negative error code on failure.
 */
117int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
118{
119 if (znode->parent) {
120 struct ubifs_zbranch *zbr;
121
122 zbr = &znode->parent->zbranch[znode->iip];
123 if (zbr->len)
124 return insert_old_idx(c, zbr->lnum, zbr->offs);
125 } else
126 if (c->zroot.len)
127 return insert_old_idx(c, c->zroot.lnum,
128 c->zroot.offs);
129 return 0;
130}

/**
 * ins_clr_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Returns %0 on success, and a negative error code on failure.
 */
139static int ins_clr_old_idx_znode(struct ubifs_info *c,
140 struct ubifs_znode *znode)
141{
142 int err;
143
144 if (znode->parent) {
145 struct ubifs_zbranch *zbr;
146
147 zbr = &znode->parent->zbranch[znode->iip];
148 if (zbr->len) {
149 err = insert_old_idx(c, zbr->lnum, zbr->offs);
150 if (err)
151 return err;
152 zbr->lnum = 0;
153 zbr->offs = 0;
154 zbr->len = 0;
155 }
156 } else
157 if (c->zroot.len) {
158 err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
159 if (err)
160 return err;
161 c->zroot.lnum = 0;
162 c->zroot.offs = 0;
163 c->zroot.len = 0;
164 }
165 return 0;
166}

/**
 * destroy_old_idx - destroy the old_idx RB-tree.
 * @c: UBIFS file-system description object
 *
 * During commit start, the old_idx RB-tree is used to avoid overwriting index
 * nodes that were in the index last commit but have since been deleted. This
 * is necessary in order to recover the index if the commit is interrupted.
 * After the commit has completed successfully, the old_idx RB-tree is no
 * longer needed, and this function destroys it.
 */
178void destroy_old_idx(struct ubifs_info *c)
179{
180 struct ubifs_old_idx *old_idx, *n;
181
182 rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb)
183 kfree(old_idx);
184
185 c->old_idx = RB_ROOT;
186}

/**
 * copy_znode - copy a dirty znode.
 * @c: UBIFS file-system description object
 * @znode: znode to copy
 *
 * A dirty znode being committed may not be changed, so it is copied.
 */
195static struct ubifs_znode *copy_znode(struct ubifs_info *c,
196 struct ubifs_znode *znode)
197{
198 struct ubifs_znode *zn;
199
200 zn = kmalloc(c->max_znode_sz, GFP_NOFS);
201 if (unlikely(!zn))
202 return ERR_PTR(-ENOMEM);
203
204 memcpy(zn, znode, c->max_znode_sz);
205 zn->cnext = NULL;
206 __set_bit(DIRTY_ZNODE, &zn->flags);
207 __clear_bit(COW_ZNODE, &zn->flags);
208
209 ubifs_assert(!ubifs_zn_obsolete(znode));
210 __set_bit(OBSOLETE_ZNODE, &znode->flags);
211
212 if (znode->level != 0) {
213 int i;
214 const int n = zn->child_cnt;
215
216
217 for (i = 0; i < n; i++) {
218 struct ubifs_zbranch *zbr = &zn->zbranch[i];
219
220 if (zbr->znode)
221 zbr->znode->parent = zn;
222 }
223 }
224
225 atomic_long_inc(&c->dirty_zn_cnt);
226 return zn;
227}

/**
 * add_idx_dirt - add dirt due to a dirty znode.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of index node
 * @dirt: size of index node
 *
 * This function updates lprops dirty space and the new size of the index.
 */
237static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
238{
239 c->calc_idx_sz -= ALIGN(dirt, 8);
240 return ubifs_add_dirt(c, lnum, dirt);
241}

/**
 * dirty_cow_znode - ensure a znode is not being committed.
 * @c: UBIFS file-system description object
 * @zbr: branch of znode to check
 *
 * Returns dirtied znode on success or negative error code on failure.
 */
250static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
251 struct ubifs_zbranch *zbr)
252{
253 struct ubifs_znode *znode = zbr->znode;
254 struct ubifs_znode *zn;
255 int err;
256
257 if (!ubifs_zn_cow(znode)) {
258
259 if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
260 atomic_long_inc(&c->dirty_zn_cnt);
261 atomic_long_dec(&c->clean_zn_cnt);
262 atomic_long_dec(&ubifs_clean_zn_cnt);
263 err = add_idx_dirt(c, zbr->lnum, zbr->len);
264 if (unlikely(err))
265 return ERR_PTR(err);
266 }
267 return znode;
268 }
269
270 zn = copy_znode(c, znode);
271 if (IS_ERR(zn))
272 return zn;
273
274 if (zbr->len) {
275 err = insert_old_idx(c, zbr->lnum, zbr->offs);
276 if (unlikely(err))
277 return ERR_PTR(err);
278 err = add_idx_dirt(c, zbr->lnum, zbr->len);
279 } else
280 err = 0;
281
282 zbr->znode = zn;
283 zbr->lnum = 0;
284 zbr->offs = 0;
285 zbr->len = 0;
286
287 if (unlikely(err))
288 return ERR_PTR(err);
289 return zn;
290}
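
/*
 * A minimal sketch (not part of the original source) of how the copy-on-write
 * helper above is typically used before modifying the tree: the caller
 * dirties the root (or a branch) and must check for an ERR_PTR result:
 *
 *	struct ubifs_znode *znode;
 *
 *	znode = dirty_cow_znode(c, &c->zroot);
 *	if (IS_ERR(znode))
 *		return PTR_ERR(znode);
 *	(znode may now be modified until the next commit starts)
 *
 * See 'lookup_level0_dirty()' below for the real caller pattern.
 */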

/*
 * Leaf-node cache (LNC).
 *
 * The TNC caches directory entry nodes and extended attribute entry nodes in
 * their zbranches so that they do not have to be read from the flash media
 * over and over again while resolving collisions of hashed keys. Only these
 * node types are cached because only they have hashed keys which may collide.
 */

/**
 * lnc_add - add a leaf node to the leaf node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * Leaf nodes are non-index nodes, such as directory entry and data nodes.
 *
 * Note, this function does not add the @node object to LNC directly, but
 * allocates a copy of the object and adds the copy to LNC. The reason for
 * this is that @node has been allocated outside of the TNC subsystem and will
 * be used with @c->tnc_mutex unlocked upon return from the TNC subsystem.
 */
312static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
313 const void *node)
314{
315 int err;
316 void *lnc_node;
317 const struct ubifs_dent_node *dent = node;
318
319 ubifs_assert(!zbr->leaf);
320 ubifs_assert(zbr->len != 0);
321 ubifs_assert(is_hash_key(c, &zbr->key));
322
323 err = ubifs_validate_entry(c, dent);
324 if (err) {
325 dump_stack();
326 ubifs_dump_node(c, dent);
327 return err;
328 }
329
330 lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
331 if (!lnc_node)
332
333 return 0;
334
335 zbr->leaf = lnc_node;
336 return 0;
337}

/**
 * lnc_add_directly - add a leaf node to the leaf-node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * This function is similar to 'lnc_add()', but it does not create a copy of
 * @node but inserts @node to TNC directly.
 */
348static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
349 void *node)
350{
351 int err;
352
353 ubifs_assert(!zbr->leaf);
354 ubifs_assert(zbr->len != 0);
355
356 err = ubifs_validate_entry(c, node);
357 if (err) {
358 dump_stack();
359 ubifs_dump_node(c, node);
360 return err;
361 }
362
363 zbr->leaf = node;
364 return 0;
365}

/**
 * lnc_free - remove a leaf node from the leaf node cache.
 * @zbr: zbranch of leaf node
 */
372static void lnc_free(struct ubifs_zbranch *zbr)
373{
374 if (!zbr->leaf)
375 return;
376 kfree(zbr->leaf);
377 zbr->leaf = NULL;
378}

/**
 * tnc_read_node_nm - read a "hashed" leaf node.
 * @c: UBIFS file-system description object
 * @zbr: key and position of the node
 * @node: node is returned here
 *
 * This function reads a "hashed" node defined by @zbr from the leaf node
 * cache (if it is there) or from the flash media, in which case the node is
 * also added to LNC. Returns zero in case of success or a negative error code
 * in case of failure.
 */
391static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
392 void *node)
393{
394 int err;
395
396 ubifs_assert(is_hash_key(c, &zbr->key));
397
398 if (zbr->leaf) {
399
400 ubifs_assert(zbr->len != 0);
401 memcpy(node, zbr->leaf, zbr->len);
402 return 0;
403 }
404
405 err = ubifs_tnc_read_node(c, zbr, node);
406 if (err)
407 return err;
408
409
410 err = lnc_add(c, zbr, node);
411 return err;
412}

/**
 * try_read_node - read a node if it is a node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: LEB number of node to read
 * @offs: offset of node to read
 *
 * This function tries to read a node of known type and length, checks it and
 * stores it in @buf. It returns %1 if a node is present, %0 if a node is not
 * present, and a negative error code for I/O errors. It performs the same
 * task as 'ubifs_read_node()' except that it does not require a node to
 * actually be present; instead the return code indicates whether one was
 * read.
 *
 * Note, this function does not check the CRC of data nodes if
 * @c->no_chk_data_crc is true (controlled by the corresponding mount option).
 * However, if @c->mounting or @c->remounting_rw is true (we are mounting or
 * re-mounting to R/W mode), @c->no_chk_data_crc is ignored and the CRC is
 * checked, because during mounting or re-mounting from R/O to R/W mode we may
 * read journal nodes (when replaying the journal or doing recovery) and those
 * nodes may potentially be corrupted.
 */
438static int try_read_node(const struct ubifs_info *c, void *buf, int type,
439 int len, int lnum, int offs)
440{
441 int err, node_len;
442 struct ubifs_ch *ch = buf;
443 uint32_t crc, node_crc;
444
445 dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
446
447 err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
448 if (err) {
449 ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d",
450 type, lnum, offs, err);
451 return err;
452 }
453
454 if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
455 return 0;
456
457 if (ch->node_type != type)
458 return 0;
459
460 node_len = le32_to_cpu(ch->len);
461 if (node_len != len)
462 return 0;
463
464 if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting &&
465 !c->remounting_rw)
466 return 1;
467
468 crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
469 node_crc = le32_to_cpu(ch->crc);
470 if (crc != node_crc)
471 return 0;
472
473 return 1;
474}

/**
 * fallible_read_node - try to read a leaf node.
 * @c: UBIFS file-system description object
 * @key: key of node to read
 * @zbr: position of node
 * @node: node returned
 *
 * This function tries to read a node and returns %1 if the node is read, %0
 * if the node is not present, and a negative error code in the case of error.
 */
486static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
487 struct ubifs_zbranch *zbr, void *node)
488{
489 int ret;
490
491 dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);
492
493 ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
494 zbr->offs);
495 if (ret == 1) {
496 union ubifs_key node_key;
497 struct ubifs_dent_node *dent = node;
498
499
500 key_read(c, &dent->key, &node_key);
501 if (keys_cmp(c, key, &node_key) != 0)
502 ret = 0;
503 }
504 if (ret == 0 && c->replaying)
505 dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
506 zbr->lnum, zbr->offs, zbr->len);
507 return ret;
508}

/**
 * matches_name - determine if a direntry or xattr entry matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This function checks if the xentry/direntry referred to by zbranch @zbr
 * matches name @nm. Returns %NAME_MATCHES if it does, %NAME_LESS if the name
 * referred to by @zbr is less than @nm, and %NAME_GREATER if it is greater
 * than @nm. In case of failure, a negative error code is returned.
 */
521static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
522 const struct qstr *nm)
523{
524 struct ubifs_dent_node *dent;
525 int nlen, err;
526
527
528 if (!zbr->leaf) {
529 dent = kmalloc(zbr->len, GFP_NOFS);
530 if (!dent)
531 return -ENOMEM;
532
533 err = ubifs_tnc_read_node(c, zbr, dent);
534 if (err)
535 goto out_free;
536
537
538 err = lnc_add_directly(c, zbr, dent);
539 if (err)
540 goto out_free;
541 } else
542 dent = zbr->leaf;
543
544 nlen = le16_to_cpu(dent->nlen);
545 err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
546 if (err == 0) {
547 if (nlen == nm->len)
548 return NAME_MATCHES;
549 else if (nlen < nm->len)
550 return NAME_LESS;
551 else
552 return NAME_GREATER;
553 } else if (err < 0)
554 return NAME_LESS;
555 else
556 return NAME_GREATER;
557
558out_free:
559 kfree(dent);
560 return err;
561}

/**
 * get_znode - get a TNC znode that may not be loaded yet.
 * @c: UBIFS file-system description object
 * @znode: parent znode
 * @n: znode branch slot number
 *
 * This function returns the znode or a negative error code.
 */
571static struct ubifs_znode *get_znode(struct ubifs_info *c,
572 struct ubifs_znode *znode, int n)
573{
574 struct ubifs_zbranch *zbr;
575
576 zbr = &znode->zbranch[n];
577 if (zbr->znode)
578 znode = zbr->znode;
579 else
580 znode = ubifs_load_znode(c, zbr, znode, n);
581 return znode;
582}

/**
 * tnc_next - find next TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the next TNC entry is found, %-ENOENT if there
 * is no next entry, or a negative error code otherwise.
 */
593static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
594{
595 struct ubifs_znode *znode = *zn;
596 int nn = *n;
597
598 nn += 1;
599 if (nn < znode->child_cnt) {
600 *n = nn;
601 return 0;
602 }
603 while (1) {
604 struct ubifs_znode *zp;
605
606 zp = znode->parent;
607 if (!zp)
608 return -ENOENT;
609 nn = znode->iip + 1;
610 znode = zp;
611 if (nn < znode->child_cnt) {
612 znode = get_znode(c, znode, nn);
613 if (IS_ERR(znode))
614 return PTR_ERR(znode);
615 while (znode->level != 0) {
616 znode = get_znode(c, znode, 0);
617 if (IS_ERR(znode))
618 return PTR_ERR(znode);
619 }
620 nn = 0;
621 break;
622 }
623 }
624 *zn = znode;
625 *n = nn;
626 return 0;
627}

/**
 * tnc_prev - find previous TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * This function returns %0 if the previous TNC entry is found, %-ENOENT if
 * there is no previous entry, or a negative error code otherwise.
 */
638static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
639{
640 struct ubifs_znode *znode = *zn;
641 int nn = *n;
642
643 if (nn > 0) {
644 *n = nn - 1;
645 return 0;
646 }
647 while (1) {
648 struct ubifs_znode *zp;
649
650 zp = znode->parent;
651 if (!zp)
652 return -ENOENT;
653 nn = znode->iip - 1;
654 znode = zp;
655 if (nn >= 0) {
656 znode = get_znode(c, znode, nn);
657 if (IS_ERR(znode))
658 return PTR_ERR(znode);
659 while (znode->level != 0) {
660 nn = znode->child_cnt - 1;
661 znode = get_znode(c, znode, nn);
662 if (IS_ERR(znode))
663 return PTR_ERR(znode);
664 }
665 nn = znode->child_cnt - 1;
666 break;
667 }
668 }
669 *zn = znode;
670 *n = nn;
671 return 0;
672}

/**
 * resolve_collision - resolve a collision.
 * @c: UBIFS file-system description object
 * @key: key of a directory or extended attribute entry
 * @zn: znode is returned here
 * @n: zbranch number is passed and returned here
 * @nm: name of the entry
 *
 * This function is called for "hashed" keys to make sure the found entry
 * really is the one that was looked for, i.e. the one whose name is @nm. It
 * returns %1 and sets @zn and @n if the collision is resolved (the entry is
 * found), %0 with @n set to %-1 or to the closest smaller entry if the entry
 * with name @nm is not in the TNC, and a negative error code in case of
 * failure.
 */
690static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
691 struct ubifs_znode **zn, int *n,
692 const struct qstr *nm)
693{
694 int err;
695
696 err = matches_name(c, &(*zn)->zbranch[*n], nm);
697 if (unlikely(err < 0))
698 return err;
699 if (err == NAME_MATCHES)
700 return 1;
701
702 if (err == NAME_GREATER) {
703
704 while (1) {
705 err = tnc_prev(c, zn, n);
706 if (err == -ENOENT) {
707 ubifs_assert(*n == 0);
708 *n = -1;
709 return 0;
710 }
711 if (err < 0)
712 return err;
713 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
				/*
				 * We walked left past all entries with the
				 * same key and ended up on a branch with a
				 * different key. If that branch is the last
				 * one of its znode, returning now would make
				 * the caller insert into a znode whose parent
				 * key range does not cover @key; the correct
				 * place is the beginning of the next (right)
				 * znode. So step one entry to the right and
				 * report "not found" with @n set to %-1.
				 * Note, this problem does not exist when
				 * going right, because 'tnc_insert()'
				 * corrects the parent key in that case.
				 */
743 if (*n == (*zn)->child_cnt - 1) {
744 err = tnc_next(c, zn, n);
745 if (err) {
746
747 ubifs_assert(0);
748 if (err == -ENOENT)
749 err = -EINVAL;
750 return err;
751 }
752 ubifs_assert(*n == 0);
753 *n = -1;
754 }
755 return 0;
756 }
757 err = matches_name(c, &(*zn)->zbranch[*n], nm);
758 if (err < 0)
759 return err;
760 if (err == NAME_LESS)
761 return 0;
762 if (err == NAME_MATCHES)
763 return 1;
764 ubifs_assert(err == NAME_GREATER);
765 }
766 } else {
767 int nn = *n;
768 struct ubifs_znode *znode = *zn;
769
770
771 while (1) {
772 err = tnc_next(c, &znode, &nn);
773 if (err == -ENOENT)
774 return 0;
775 if (err < 0)
776 return err;
777 if (keys_cmp(c, &znode->zbranch[nn].key, key))
778 return 0;
779 err = matches_name(c, &znode->zbranch[nn], nm);
780 if (err < 0)
781 return err;
782 if (err == NAME_GREATER)
783 return 0;
784 *zn = znode;
785 *n = nn;
786 if (err == NAME_MATCHES)
787 return 1;
788 ubifs_assert(err == NAME_LESS);
789 }
790 }
791}

/**
 * fallible_matches_name - determine if a dent matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This is a "fallible" version of the 'matches_name()' function which does
 * not panic if the direntry/xentry referred to by @zbr does not exist on the
 * media. Returns %NAME_MATCHES if the names match, %NAME_LESS if the name
 * referred to by @zbr is less than @nm, %NAME_GREATER if it is greater than
 * @nm, and %NOT_ON_MEDIA if the node referred to by @zbr does not exist on
 * the media. In case of failure, a negative error code is returned.
 */
808static int fallible_matches_name(struct ubifs_info *c,
809 struct ubifs_zbranch *zbr,
810 const struct qstr *nm)
811{
812 struct ubifs_dent_node *dent;
813 int nlen, err;
814
815
816 if (!zbr->leaf) {
817 dent = kmalloc(zbr->len, GFP_NOFS);
818 if (!dent)
819 return -ENOMEM;
820
821 err = fallible_read_node(c, &zbr->key, zbr, dent);
822 if (err < 0)
823 goto out_free;
824 if (err == 0) {
825
826 err = NOT_ON_MEDIA;
827 goto out_free;
828 }
829 ubifs_assert(err == 1);
830
831 err = lnc_add_directly(c, zbr, dent);
832 if (err)
833 goto out_free;
834 } else
835 dent = zbr->leaf;
836
837 nlen = le16_to_cpu(dent->nlen);
838 err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
839 if (err == 0) {
840 if (nlen == nm->len)
841 return NAME_MATCHES;
842 else if (nlen < nm->len)
843 return NAME_LESS;
844 else
845 return NAME_GREATER;
846 } else if (err < 0)
847 return NAME_LESS;
848 else
849 return NAME_GREATER;
850
851out_free:
852 kfree(dent);
853 return err;
854}

/**
 * fallible_resolve_collision - resolve a collision even if nodes are missing.
 * @c: UBIFS file-system description object
 * @key: key
 * @zn: znode is returned here
 * @n: branch number is passed and returned here
 * @nm: name of directory entry
 * @adding: indicates caller is adding a key to the TNC
 *
 * This is a "fallible" version of the 'resolve_collision()' function which
 * does not panic if one of the nodes referred to by TNC does not exist on the
 * media. This may happen when replaying the journal if a deleted node was
 * garbage-collected and the commit was not done. A branch that refers to a
 * node that is not present is called a dangling branch. The return codes are:
 *  o if @nm was found in the TNC, %1 is returned and @zn and @n are set to
 *    the found entry;
 *  o if we are @adding and @nm was not found, %0 is returned;
 *  o if we are not @adding and @nm was not found, but a dangling branch was
 *    found, %1 is returned and @zn and @n are set to the dangling branch;
 *  o a negative error code is returned in case of failure.
 */
878static int fallible_resolve_collision(struct ubifs_info *c,
879 const union ubifs_key *key,
880 struct ubifs_znode **zn, int *n,
881 const struct qstr *nm, int adding)
882{
883 struct ubifs_znode *o_znode = NULL, *znode = *zn;
884 int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;
885
886 cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
887 if (unlikely(cmp < 0))
888 return cmp;
889 if (cmp == NAME_MATCHES)
890 return 1;
891 if (cmp == NOT_ON_MEDIA) {
892 o_znode = znode;
893 o_n = nn;
		/*
		 * We are unlucky and hit a dangling branch straight away.
		 * Now we do not really know where to go to find the needed
		 * branch - to the left or to the right. Well, let's try left.
		 */
899 unsure = 1;
900 } else if (!adding)
901 unsure = 1;
902
903 if (cmp == NAME_GREATER || unsure) {
904
905 while (1) {
906 err = tnc_prev(c, zn, n);
907 if (err == -ENOENT) {
908 ubifs_assert(*n == 0);
909 *n = -1;
910 break;
911 }
912 if (err < 0)
913 return err;
914 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
915
916 if (*n == (*zn)->child_cnt - 1) {
917 err = tnc_next(c, zn, n);
918 if (err) {
919
920 ubifs_assert(0);
921 if (err == -ENOENT)
922 err = -EINVAL;
923 return err;
924 }
925 ubifs_assert(*n == 0);
926 *n = -1;
927 }
928 break;
929 }
930 err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
931 if (err < 0)
932 return err;
933 if (err == NAME_MATCHES)
934 return 1;
935 if (err == NOT_ON_MEDIA) {
936 o_znode = *zn;
937 o_n = *n;
938 continue;
939 }
940 if (!adding)
941 continue;
942 if (err == NAME_LESS)
943 break;
944 else
945 unsure = 0;
946 }
947 }
948
949 if (cmp == NAME_LESS || unsure) {
950
951 *zn = znode;
952 *n = nn;
953 while (1) {
954 err = tnc_next(c, &znode, &nn);
955 if (err == -ENOENT)
956 break;
957 if (err < 0)
958 return err;
959 if (keys_cmp(c, &znode->zbranch[nn].key, key))
960 break;
961 err = fallible_matches_name(c, &znode->zbranch[nn], nm);
962 if (err < 0)
963 return err;
964 if (err == NAME_GREATER)
965 break;
966 *zn = znode;
967 *n = nn;
968 if (err == NAME_MATCHES)
969 return 1;
970 if (err == NOT_ON_MEDIA) {
971 o_znode = znode;
972 o_n = nn;
973 }
974 }
975 }
976
977
978 if (adding || !o_znode)
979 return 0;
980
981 dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
982 o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
983 o_znode->zbranch[o_n].len);
984 *zn = o_znode;
985 *n = o_n;
986 return 1;
987}

/**
 * matches_position - determine if a zbranch matches a given position.
 * @zbr: zbranch of dent
 * @lnum: LEB number of dent to match
 * @offs: offset of dent to match
 *
 * This function returns %1 if @lnum:@offs matches, and %0 otherwise.
 */
997static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
998{
999 if (zbr->lnum == lnum && zbr->offs == offs)
1000 return 1;
1001 else
1002 return 0;
1003}

/**
 * resolve_collision_directly - resolve a collision directly.
 * @c: UBIFS file-system description object
 * @key: key of directory entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @lnum: LEB number of dent node to match
 * @offs: offset of dent node to match
 *
 * This function is used for "hashed" keys to make sure that the found
 * directory or extended attribute entry node is what was looked for. It is
 * used when the flash address of the right node is known (@lnum:@offs), which
 * makes it much easier to resolve collisions (no need to read entries and
 * match full names). This function returns %1 and sets @zn and @n if the
 * collision is resolved, %0 if @lnum:@offs is not found and @zn and @n are
 * set to the previous entry. Otherwise a negative error code is returned.
 */
1022static int resolve_collision_directly(struct ubifs_info *c,
1023 const union ubifs_key *key,
1024 struct ubifs_znode **zn, int *n,
1025 int lnum, int offs)
1026{
1027 struct ubifs_znode *znode;
1028 int nn, err;
1029
1030 znode = *zn;
1031 nn = *n;
1032 if (matches_position(&znode->zbranch[nn], lnum, offs))
1033 return 1;
1034
1035
1036 while (1) {
1037 err = tnc_prev(c, &znode, &nn);
1038 if (err == -ENOENT)
1039 break;
1040 if (err < 0)
1041 return err;
1042 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1043 break;
1044 if (matches_position(&znode->zbranch[nn], lnum, offs)) {
1045 *zn = znode;
1046 *n = nn;
1047 return 1;
1048 }
1049 }
1050
1051
1052 znode = *zn;
1053 nn = *n;
1054 while (1) {
1055 err = tnc_next(c, &znode, &nn);
1056 if (err == -ENOENT)
1057 return 0;
1058 if (err < 0)
1059 return err;
1060 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1061 return 0;
1062 *zn = znode;
1063 *n = nn;
1064 if (matches_position(&znode->zbranch[nn], lnum, offs))
1065 return 1;
1066 }
1067}

/**
 * dirty_cow_bottom_up - dirty a znode and its ancestors.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * If we do not have a unique key that resides in a znode, then we cannot
 * dirty that znode from the top down (i.e. by using 'lookup_level0_dirty()').
 * This function records the path back to the last dirty ancestor, and then
 * dirties the znodes on that path.
 */
1079static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
1080 struct ubifs_znode *znode)
1081{
1082 struct ubifs_znode *zp;
1083 int *path = c->bottom_up_buf, p = 0;
1084
1085 ubifs_assert(c->zroot.znode);
1086 ubifs_assert(znode);
1087 if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
1088 kfree(c->bottom_up_buf);
1089 c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
1090 GFP_NOFS);
1091 if (!c->bottom_up_buf)
1092 return ERR_PTR(-ENOMEM);
1093 path = c->bottom_up_buf;
1094 }
1095 if (c->zroot.znode->level) {
1096
1097 while (1) {
1098 int n;
1099
1100 zp = znode->parent;
1101 if (!zp)
1102 break;
1103 n = znode->iip;
1104 ubifs_assert(p < c->zroot.znode->level);
1105 path[p++] = n;
1106 if (!zp->cnext && ubifs_zn_dirty(znode))
1107 break;
1108 znode = zp;
1109 }
1110 }
1111
1112
1113 while (1) {
1114 struct ubifs_zbranch *zbr;
1115
1116 zp = znode->parent;
1117 if (zp) {
1118 ubifs_assert(path[p - 1] >= 0);
1119 ubifs_assert(path[p - 1] < zp->child_cnt);
1120 zbr = &zp->zbranch[path[--p]];
1121 znode = dirty_cow_znode(c, zbr);
1122 } else {
1123 ubifs_assert(znode == c->zroot.znode);
1124 znode = dirty_cow_znode(c, &c->zroot);
1125 }
1126 if (IS_ERR(znode) || !p)
1127 break;
1128 ubifs_assert(path[p - 1] >= 0);
1129 ubifs_assert(path[p - 1] < znode->child_cnt);
1130 znode = znode->zbranch[path[p - 1]].znode;
1131 }
1132
1133 return znode;
1134}

/**
 * ubifs_lookup_level0 - search for zero-level znode.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and searches for the zero-level znode
 * which refers to key @key. The found zero-level znode is returned in @zn.
 * There are 3 cases:
 *   o exact match, i.e. the found zero-level znode contains key @key, then %1
 *     is returned and the slot number of the matched branch is stored in @n;
 *   o not exact match, which means that the zero-level znode does not contain
 *     @key, then %0 is returned and the slot number of the closest branch is
 *     stored in @n;
 *   o @key is so small that it is even less than the lowest key of the
 *     leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, in which
 * case this function reads the corresponding indexing nodes and inserts them
 * into the TNC. In case of failure, a negative error code is returned.
 */
1158int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
1159 struct ubifs_znode **zn, int *n)
1160{
1161 int err, exact;
1162 struct ubifs_znode *znode;
1163 unsigned long time = get_seconds();
1164
1165 dbg_tnck(key, "search key ");
1166 ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
1167
1168 znode = c->zroot.znode;
1169 if (unlikely(!znode)) {
1170 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1171 if (IS_ERR(znode))
1172 return PTR_ERR(znode);
1173 }
1174
1175 znode->time = time;
1176
1177 while (1) {
1178 struct ubifs_zbranch *zbr;
1179
1180 exact = ubifs_search_zbranch(c, znode, key, n);
1181
1182 if (znode->level == 0)
1183 break;
1184
1185 if (*n < 0)
1186 *n = 0;
1187 zbr = &znode->zbranch[*n];
1188
1189 if (zbr->znode) {
1190 znode->time = time;
1191 znode = zbr->znode;
1192 continue;
1193 }
1194
1195
1196 znode = ubifs_load_znode(c, zbr, znode, *n);
1197 if (IS_ERR(znode))
1198 return PTR_ERR(znode);
1199 }
1200
1201 *zn = znode;
1202 if (exact || !is_hash_key(c, key) || *n != -1) {
1203 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1204 return exact;
1205 }

	/*
	 * Here is a tricky place. We have not found the key and this is a
	 * "hashed" key, which may collide. The rest of the code deals with
	 * situations like this:
	 *
	 *                  | 3 | 5 |
	 *                  /       \
	 *          | 3 | 5 |        | 6 | 7 | (x)
	 *
	 * Looking for key "5" we may reach the znode marked "(x)", because a
	 * znode is allowed to start with a key that is equal to the next key
	 * in its parent (this happens, for example, when a znode is split in
	 * the middle of a run of colliding keys). So, when an exact match for
	 * a hashed key is not found, we still have to look at the entry to
	 * the left and return it if it carries the same key.
	 */
1250 err = tnc_prev(c, &znode, n);
1251 if (err == -ENOENT) {
1252 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1253 *n = -1;
1254 return 0;
1255 }
1256 if (unlikely(err < 0))
1257 return err;
1258 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1259 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1260 *n = -1;
1261 return 0;
1262 }
1263
1264 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1265 *zn = znode;
1266 return 1;
1267}
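
/*
 * A minimal caller sketch (not part of the original source) showing how the
 * three-way return convention of 'ubifs_lookup_level0()' is normally
 * consumed; the variable names are only illustrative:
 *
 *	struct ubifs_znode *znode;
 *	struct ubifs_zbranch *zbr;
 *	int n, found;
 *
 *	found = ubifs_lookup_level0(c, key, &znode, &n);
 *	if (found < 0)
 *		return found;                 (I/O or memory error)
 *	if (found)
 *		zbr = &znode->zbranch[n];     (exact match)
 *	else if (n == -1)
 *		;                             (key smaller than everything)
 *	else
 *		;        (n is the closest smaller branch; use tnc_next())
 */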

/**
 * lookup_level0_dirty - search for zero-level znode dirtying.
 * @c: UBIFS file-system description object
 * @key: key to lookup
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and searches for the zero-level znode
 * which refers to key @key. The found zero-level znode is returned in @zn.
 * There are 3 cases:
 *   o exact match, i.e. the found zero-level znode contains key @key, then %1
 *     is returned and the slot number of the matched branch is stored in @n;
 *   o not exact match, which means that the zero-level znode does not contain
 *     @key, then %0 is returned and the slot number of the closest branch is
 *     stored in @n;
 *   o @key is so small that it is even less than the lowest key of the
 *     leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
 *
 * Additionally all znodes on the path from the root to the located zero-level
 * znode are marked as dirty (copy-on-write), so the caller may safely modify
 * the found zbranch.
 *
 * Note, when the TNC tree is traversed, some znodes may be absent, in which
 * case this function reads the corresponding indexing nodes and inserts them
 * into the TNC. In case of failure, a negative error code is returned.
 */
1294static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
1295 struct ubifs_znode **zn, int *n)
1296{
1297 int err, exact;
1298 struct ubifs_znode *znode;
1299 unsigned long time = get_seconds();
1300
1301 dbg_tnck(key, "search and dirty key ");
1302
1303 znode = c->zroot.znode;
1304 if (unlikely(!znode)) {
1305 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1306 if (IS_ERR(znode))
1307 return PTR_ERR(znode);
1308 }
1309
1310 znode = dirty_cow_znode(c, &c->zroot);
1311 if (IS_ERR(znode))
1312 return PTR_ERR(znode);
1313
1314 znode->time = time;
1315
1316 while (1) {
1317 struct ubifs_zbranch *zbr;
1318
1319 exact = ubifs_search_zbranch(c, znode, key, n);
1320
1321 if (znode->level == 0)
1322 break;
1323
1324 if (*n < 0)
1325 *n = 0;
1326 zbr = &znode->zbranch[*n];
1327
1328 if (zbr->znode) {
1329 znode->time = time;
1330 znode = dirty_cow_znode(c, zbr);
1331 if (IS_ERR(znode))
1332 return PTR_ERR(znode);
1333 continue;
1334 }
1335
1336
1337 znode = ubifs_load_znode(c, zbr, znode, *n);
1338 if (IS_ERR(znode))
1339 return PTR_ERR(znode);
1340 znode = dirty_cow_znode(c, zbr);
1341 if (IS_ERR(znode))
1342 return PTR_ERR(znode);
1343 }
1344
1345 *zn = znode;
1346 if (exact || !is_hash_key(c, key) || *n != -1) {
1347 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1348 return exact;
1349 }

	/*
	 * See the huge comment in 'ubifs_lookup_level0()' for what the rest
	 * of the code here is doing.
	 */
1355 err = tnc_prev(c, &znode, n);
1356 if (err == -ENOENT) {
1357 *n = -1;
1358 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1359 return 0;
1360 }
1361 if (unlikely(err < 0))
1362 return err;
1363 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1364 *n = -1;
1365 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1366 return 0;
1367 }
1368
1369 if (znode->cnext || !ubifs_zn_dirty(znode)) {
1370 znode = dirty_cow_bottom_up(c, znode);
1371 if (IS_ERR(znode))
1372 return PTR_ERR(znode);
1373 }
1374
1375 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1376 *zn = znode;
1377 return 1;
1378}

/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @gc_seq1: garbage collection sequence number
 *
 * This function determines if @lnum may have been garbage collected since
 * sequence number @gc_seq1. If it may have been, then %1 is returned,
 * otherwise %0 is returned.
 */
1390static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
1391{
1392#ifndef __UBOOT__
1393 int gc_seq2, gced_lnum;
1394
1395 gced_lnum = c->gced_lnum;
1396 smp_rmb();
1397 gc_seq2 = c->gc_seq;
1398
1399 if (gc_seq1 == gc_seq2)
1400 return 0;
1401
1402 if (gc_seq1 + 1 != gc_seq2)
1403 return 1;
1404
1405
1406
1407
1408 smp_rmb();
1409 if (gced_lnum != c->gced_lnum)
1410 return 1;
1411
1412 if (gced_lnum == lnum)
1413 return 1;
1414#else
1415
1416#endif
1417 return 0;
1418}

/**
 * ubifs_tnc_locate - look up a file-system node and return it and its location.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @lnum: LEB number is returned here
 * @offs: offset is returned here
 *
 * This function looks up and reads the node with key @key. The caller has to
 * make sure the @node buffer is large enough to fit the node. Returns zero in
 * case of success, %-ENOENT if the node was not found, and a negative error
 * code in case of failure. The node location can be returned in @lnum and
 * @offs.
 */
1433int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
1434 void *node, int *lnum, int *offs)
1435{
1436 int found, n, err, safely = 0, gc_seq1;
1437 struct ubifs_znode *znode;
1438 struct ubifs_zbranch zbr, *zt;
1439
1440again:
1441 mutex_lock(&c->tnc_mutex);
1442 found = ubifs_lookup_level0(c, key, &znode, &n);
1443 if (!found) {
1444 err = -ENOENT;
1445 goto out;
1446 } else if (found < 0) {
1447 err = found;
1448 goto out;
1449 }
1450 zt = &znode->zbranch[n];
1451 if (lnum) {
1452 *lnum = zt->lnum;
1453 *offs = zt->offs;
1454 }
1455 if (is_hash_key(c, key)) {
1456
1457
1458
1459
1460 err = tnc_read_node_nm(c, zt, node);
1461 goto out;
1462 }
1463 if (safely) {
1464 err = ubifs_tnc_read_node(c, zt, node);
1465 goto out;
1466 }
1467
1468 zbr = znode->zbranch[n];
1469 gc_seq1 = c->gc_seq;
1470 mutex_unlock(&c->tnc_mutex);
1471
1472 if (ubifs_get_wbuf(c, zbr.lnum)) {
1473
1474 err = ubifs_tnc_read_node(c, &zbr, node);
1475 return err;
1476 }
1477
1478 err = fallible_read_node(c, key, &zbr, node);
1479 if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
1480
1481
1482
1483
1484 safely = 1;
1485 goto again;
1486 }
1487 return 0;
1488
1489out:
1490 mutex_unlock(&c->tnc_mutex);
1491 return err;
1492}

/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Lookup consecutive data node keys for the same inode that reside
 * consecutively in the same LEB. This function returns zero in case of
 * success and a negative error code in case of failure.
 *
 * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
 * makes sure the bulk-read nodes fit the buffer. Otherwise this function
 * prepares the maximum possible amount of nodes for bulk-read.
 */
1507int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
1508{
1509 int n, err = 0, lnum = -1, uninitialized_var(offs);
1510 int uninitialized_var(len);
1511 unsigned int block = key_block(c, &bu->key);
1512 struct ubifs_znode *znode;
1513
1514 bu->cnt = 0;
1515 bu->blk_cnt = 0;
1516 bu->eof = 0;
1517
1518 mutex_lock(&c->tnc_mutex);
1519
1520 err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
1521 if (err < 0)
1522 goto out;
1523 if (err) {
1524
1525 len = znode->zbranch[n].len;
1526
1527 if (len > bu->buf_len) {
1528 err = -EINVAL;
1529 goto out;
1530 }
1531
1532 bu->zbranch[bu->cnt++] = znode->zbranch[n];
1533 bu->blk_cnt += 1;
1534 lnum = znode->zbranch[n].lnum;
1535 offs = ALIGN(znode->zbranch[n].offs + len, 8);
1536 }
1537 while (1) {
1538 struct ubifs_zbranch *zbr;
1539 union ubifs_key *key;
1540 unsigned int next_block;
1541
1542
1543 err = tnc_next(c, &znode, &n);
1544 if (err)
1545 goto out;
1546 zbr = &znode->zbranch[n];
1547 key = &zbr->key;
1548
1549 if (key_inum(c, key) != key_inum(c, &bu->key) ||
1550 key_type(c, key) != UBIFS_DATA_KEY) {
1551 err = -ENOENT;
1552 goto out;
1553 }
1554 if (lnum < 0) {
1555
1556 lnum = zbr->lnum;
1557 offs = ALIGN(zbr->offs + zbr->len, 8);
1558 len = zbr->len;
1559 if (len > bu->buf_len) {
1560 err = -EINVAL;
1561 goto out;
1562 }
1563 } else {
1564
1565
1566
1567
1568 if (zbr->lnum != lnum || zbr->offs != offs)
1569 goto out;
1570 offs += ALIGN(zbr->len, 8);
1571 len = ALIGN(len, 8) + zbr->len;
1572
1573 if (len > bu->buf_len)
1574 goto out;
1575 }
1576
1577 next_block = key_block(c, key);
1578 bu->blk_cnt += (next_block - block - 1);
1579 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1580 goto out;
1581 block = next_block;
1582
1583 bu->zbranch[bu->cnt++] = *zbr;
1584 bu->blk_cnt += 1;
1585
1586 if (bu->cnt >= UBIFS_MAX_BULK_READ)
1587 goto out;
1588 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1589 goto out;
1590 }
1591out:
1592 if (err == -ENOENT) {
1593 bu->eof = 1;
1594 err = 0;
1595 }
1596 bu->gc_seq = c->gc_seq;
1597 mutex_unlock(&c->tnc_mutex);
1598 if (err)
1599 return err;
1600
1601
1602
1603
1604 if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
1605 bu->blk_cnt = UBIFS_MAX_BULK_READ;
1606
1607
1608
1609
1610 if (UBIFS_BLOCKS_PER_PAGE == 1 ||
1611 !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
1612 return 0;
1613 if (bu->eof) {
1614
1615 bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
1616 return 0;
1617 }
1618
1619 block = key_block(c, &bu->key) + bu->blk_cnt;
1620 block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
1621 while (bu->cnt) {
1622 if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
1623 break;
1624 bu->cnt -= 1;
1625 }
1626 return 0;
1627}

/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * This function returns %0 on success or a negative error code on failure.
 */
1639static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
1640 int offs)
1641{
1642 const struct ubifs_info *c = wbuf->c;
1643 int rlen, overlap;
1644
1645 dbg_io("LEB %d:%d, length %d", lnum, offs, len);
1646 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1647 ubifs_assert(!(offs & 7) && offs < c->leb_size);
1648 ubifs_assert(offs + len <= c->leb_size);
1649
1650 spin_lock(&wbuf->lock);
1651 overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1652 if (!overlap) {
1653
1654 spin_unlock(&wbuf->lock);
1655 return ubifs_leb_read(c, lnum, buf, offs, len, 0);
1656 }
1657
1658
1659 rlen = wbuf->offs - offs;
1660 if (rlen < 0)
1661 rlen = 0;
1662
1663
1664 memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1665 spin_unlock(&wbuf->lock);
1666
1667 if (rlen > 0)
1668
1669 return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
1670
1671 return 0;
1672}
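
/*
 * A worked example (illustrative only, not from the original source) of the
 * overlap arithmetic in 'read_wbuf()' above. Suppose the write-buffer covers
 * the same LEB with wbuf->offs == 2048, and the caller asks for offs == 1024
 * and len == 2048:
 *
 *	overlap = (1024 + 2048 > 2048)                     -> true
 *	rlen    = wbuf->offs - offs = 1024                 -> read from flash
 *	memcpy(buf + 1024, wbuf->buf + 0, 1024)            -> buffered tail
 *
 * i.e. the first 1024 bytes come from the media and the remaining 1024 bytes
 * come straight out of the write-buffer.
 */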

/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node to validate
 * @zbr: zbranch of data node to validate
 *
 * This function returns %0 on success or a negative error code on failure.
 */
1682static int validate_data_node(struct ubifs_info *c, void *buf,
1683 struct ubifs_zbranch *zbr)
1684{
1685 union ubifs_key key1;
1686 struct ubifs_ch *ch = buf;
1687 int err, len;
1688
1689 if (ch->node_type != UBIFS_DATA_NODE) {
1690 ubifs_err(c, "bad node type (%d but expected %d)",
1691 ch->node_type, UBIFS_DATA_NODE);
1692 goto out_err;
1693 }
1694
1695 err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
1696 if (err) {
1697 ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE);
1698 goto out;
1699 }
1700
1701 len = le32_to_cpu(ch->len);
1702 if (len != zbr->len) {
1703 ubifs_err(c, "bad node length %d, expected %d", len, zbr->len);
1704 goto out_err;
1705 }
1706
1707
1708 key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
1709 if (!keys_eq(c, &zbr->key, &key1)) {
1710 ubifs_err(c, "bad key in node at LEB %d:%d",
1711 zbr->lnum, zbr->offs);
1712 dbg_tnck(&zbr->key, "looked for key ");
1713 dbg_tnck(&key1, "found node's key ");
1714 goto out_err;
1715 }
1716
1717 return 0;
1718
1719out_err:
1720 err = -EINVAL;
1721out:
1722 ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs);
1723 ubifs_dump_node(c, buf);
1724 dump_stack();
1725 return err;
1726}

/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This function reads and validates the data nodes that were identified by
 * the 'ubifs_tnc_get_bu_keys()' function. It returns %0 on success, %-EAGAIN
 * to indicate a race with GC, or another negative error code on failure.
 */
1738int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
1739{
1740 int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
1741 struct ubifs_wbuf *wbuf;
1742 void *buf;
1743
1744 len = bu->zbranch[bu->cnt - 1].offs;
1745 len += bu->zbranch[bu->cnt - 1].len - offs;
1746 if (len > bu->buf_len) {
1747 ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len);
1748 return -EINVAL;
1749 }
1750
1751
1752 wbuf = ubifs_get_wbuf(c, lnum);
1753 if (wbuf)
1754 err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
1755 else
1756 err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);
1757
1758
1759 if (maybe_leb_gced(c, lnum, bu->gc_seq))
1760 return -EAGAIN;
1761
1762 if (err && err != -EBADMSG) {
1763 ubifs_err(c, "failed to read from LEB %d:%d, error %d",
1764 lnum, offs, err);
1765 dump_stack();
1766 dbg_tnck(&bu->key, "key ");
1767 return err;
1768 }
1769
1770
1771 buf = bu->buf;
1772 for (i = 0; i < bu->cnt; i++) {
1773 err = validate_data_node(c, buf, &bu->zbranch[i]);
1774 if (err)
1775 return err;
1776 buf = buf + ALIGN(bu->zbranch[i].len, 8);
1777 }
1778
1779 return 0;
1780}
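
/*
 * A minimal usage sketch (not part of the original source) of the bulk-read
 * pair above. In the real file-system driver this sequence lives in the page
 * read-ahead path; 'inum' and 'start_block' are placeholders here:
 *
 *	struct bu_info *bu;
 *
 *	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
 *	if (!bu)
 *		return -ENOMEM;
 *	bu->buf = NULL;
 *	bu->buf_len = c->max_bu_buf_len;
 *	data_key_init(c, &bu->key, inum, start_block);
 *
 *	err = ubifs_tnc_get_bu_keys(c, bu);
 *	if (!err && bu->cnt) {
 *		bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
 *		if (bu->buf)
 *			err = ubifs_tnc_bulk_read(c, bu);
 *		if (err == -EAGAIN)
 *			err = 0;   (race with GC: fall back to single reads)
 *	}
 *	kfree(bu->buf);
 *	kfree(bu);
 */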

/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. Since the hash may have collisions, there may be many nodes with the
 * same key, so we have to sequentially look at all of them until the needed
 * one is found. This function returns zero in case of success, %-ENOENT if
 * the node was not found, and a negative error code in case of failure.
 */
1795static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1796 void *node, const struct qstr *nm)
1797{
1798 int found, n, err;
1799 struct ubifs_znode *znode;
1800
1801 dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
1802 mutex_lock(&c->tnc_mutex);
1803 found = ubifs_lookup_level0(c, key, &znode, &n);
1804 if (!found) {
1805 err = -ENOENT;
1806 goto out_unlock;
1807 } else if (found < 0) {
1808 err = found;
1809 goto out_unlock;
1810 }
1811
1812 ubifs_assert(n >= 0);
1813
1814 err = resolve_collision(c, key, &znode, &n, nm);
1815 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
1816 if (unlikely(err < 0))
1817 goto out_unlock;
1818 if (err == 0) {
1819 err = -ENOENT;
1820 goto out_unlock;
1821 }
1822
1823 err = tnc_read_node_nm(c, &znode->zbranch[n], node);
1824
1825out_unlock:
1826 mutex_unlock(&c->tnc_mutex);
1827 return err;
1828}

/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to lookup
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. Since the hash may have collisions, there may be many nodes with the
 * same key, so we have to sequentially look at all of them until the needed
 * one is found. This function returns zero in case of success, %-ENOENT if
 * the node was not found, and a negative error code in case of failure.
 */
1843int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1844 void *node, const struct qstr *nm)
1845{
1846 int err, len;
1847 const struct ubifs_dent_node *dent = node;
1848
1849
1850
1851
1852
1853 err = ubifs_tnc_lookup(c, key, node);
1854 if (err)
1855 return err;
1856
1857 len = le16_to_cpu(dent->nlen);
1858 if (nm->len == len && !memcmp(dent->name, nm->name, len))
1859 return 0;
1860
1861
1862
1863
1864
1865 return do_lookup_nm(c, key, node, nm);
1866}

/**
 * correct_parent_keys - correct parent znodes' keys.
 * @c: UBIFS file-system description object
 * @znode: znode to correct parent znodes for
 *
 * This is a helper function for 'tnc_insert()'. When the key of the leftmost
 * zbranch changes, keys of parent znodes have to be corrected. This helper
 * function is called in such situations and corrects the keys if needed.
 */
1877static void correct_parent_keys(const struct ubifs_info *c,
1878 struct ubifs_znode *znode)
1879{
1880 union ubifs_key *key, *key1;
1881
1882 ubifs_assert(znode->parent);
1883 ubifs_assert(znode->iip == 0);
1884
1885 key = &znode->zbranch[0].key;
1886 key1 = &znode->parent->zbranch[0].key;
1887
1888 while (keys_cmp(c, key, key1) < 0) {
1889 key_copy(c, key, key1);
1890 znode = znode->parent;
1891 znode->alt = 1;
1892 if (!znode->parent || znode->iip)
1893 break;
1894 key1 = &znode->parent->zbranch[0].key;
1895 }
1896}

/**
 * insert_zbranch - insert a zbranch into a znode.
 * @znode: znode into which to insert
 * @zbr: zbranch to insert
 * @n: slot number to insert to
 *
 * This is a helper function for 'tnc_insert()'. UBIFS does not allow "gaps"
 * in a znode's array of zbranches and keeps the zbranches consolidated, so
 * when a new zbranch has to be inserted into the @znode->zbranch[] array at
 * the @n-th position, all the zbranches starting from @n have to be moved
 * right.
 */
1909static void insert_zbranch(struct ubifs_znode *znode,
1910 const struct ubifs_zbranch *zbr, int n)
1911{
1912 int i;
1913
1914 ubifs_assert(ubifs_zn_dirty(znode));
1915
1916 if (znode->level) {
1917 for (i = znode->child_cnt; i > n; i--) {
1918 znode->zbranch[i] = znode->zbranch[i - 1];
1919 if (znode->zbranch[i].znode)
1920 znode->zbranch[i].znode->iip = i;
1921 }
1922 if (zbr->znode)
1923 zbr->znode->iip = n;
1924 } else
1925 for (i = znode->child_cnt; i > n; i--)
1926 znode->zbranch[i] = znode->zbranch[i - 1];
1927
1928 znode->zbranch[n] = *zbr;
1929 znode->child_cnt += 1;

	/*
	 * After inserting at slot zero, the lower bound of the key range of
	 * this znode may have changed. If this znode is subsequently split,
	 * the upper bound of the key range may change too, and it could even
	 * become lower than the original lower bound. If that happens, this
	 * znode can no longer be found in the TNC using the key stored in the
	 * on-flash index node. Setting @znode->alt records that the key range
	 * has been altered, so that 'tnc_insert()' preserves the old on-flash
	 * location (see 'ins_clr_old_idx_znode()') before splitting.
	 */
1945 if (n == 0)
1946 znode->alt = 1;
1947}

/**
 * tnc_insert - insert a node into TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: branch to insert
 * @n: slot number to insert new zbranch to
 *
 * This function inserts a new node described by @zbr into znode @znode. If
 * the znode does not have a free slot for the new zbranch, it is split.
 * Parent znodes are split as well if needed. Returns zero in case of success
 * or a negative error code in case of failure.
 */
1961static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
1962 struct ubifs_zbranch *zbr, int n)
1963{
1964 struct ubifs_znode *zn, *zi, *zp;
1965 int i, keep, move, appending = 0;
1966 union ubifs_key *key = &zbr->key, *key1;
1967
1968 ubifs_assert(n >= 0 && n <= c->fanout);
1969
1970
1971again:
1972 zp = znode->parent;
1973 if (znode->child_cnt < c->fanout) {
1974 ubifs_assert(n != c->fanout);
1975 dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
1976
1977 insert_zbranch(znode, zbr, n);
1978
1979
1980 if (n == 0 && zp && znode->iip == 0)
1981 correct_parent_keys(c, znode);
1982
1983 return 0;
1984 }
1985
1986
1987
1988
1989
1990 dbg_tnck(key, "splitting level %d, key ", znode->level);
1991
1992 if (znode->alt)
1993
1994
1995
1996
1997 ins_clr_old_idx_znode(c, znode);
1998
1999 zn = kzalloc(c->max_znode_sz, GFP_NOFS);
2000 if (!zn)
2001 return -ENOMEM;
2002 zn->parent = zp;
2003 zn->level = znode->level;
2004
2005
2006 if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
2007
2008 if (n == c->fanout) {
2009 key1 = &znode->zbranch[n - 1].key;
2010 if (key_inum(c, key1) == key_inum(c, key) &&
2011 key_type(c, key1) == UBIFS_DATA_KEY)
2012 appending = 1;
2013 } else
2014 goto check_split;
2015 } else if (appending && n != c->fanout) {
2016
2017 appending = 0;
2018check_split:
2019 if (n >= (c->fanout + 1) / 2) {
2020 key1 = &znode->zbranch[0].key;
2021 if (key_inum(c, key1) == key_inum(c, key) &&
2022 key_type(c, key1) == UBIFS_DATA_KEY) {
2023 key1 = &znode->zbranch[n].key;
2024 if (key_inum(c, key1) != key_inum(c, key) ||
2025 key_type(c, key1) != UBIFS_DATA_KEY) {
2026 keep = n;
2027 move = c->fanout - keep;
2028 zi = znode;
2029 goto do_split;
2030 }
2031 }
2032 }
2033 }
2034
2035 if (appending) {
2036 keep = c->fanout;
2037 move = 0;
2038 } else {
2039 keep = (c->fanout + 1) / 2;
2040 move = c->fanout - keep;
2041 }
2042
2043
2044
2045
2046
2047
2048 if (n < keep) {
2049
2050 zi = znode;
2051 move += 1;
2052 keep -= 1;
2053 } else {
2054
2055 zi = zn;
2056 n -= keep;
2057
2058 if (zn->level != 0)
2059 zbr->znode->parent = zn;
2060 }
2061
2062do_split:
2063
2064 __set_bit(DIRTY_ZNODE, &zn->flags);
2065 atomic_long_inc(&c->dirty_zn_cnt);
2066
2067 zn->child_cnt = move;
2068 znode->child_cnt = keep;
2069
2070 dbg_tnc("moving %d, keeping %d", move, keep);
2071
2072
2073 for (i = 0; i < move; i++) {
2074 zn->zbranch[i] = znode->zbranch[keep + i];
2075
2076 if (zn->level != 0)
2077 if (zn->zbranch[i].znode) {
2078 zn->zbranch[i].znode->parent = zn;
2079 zn->zbranch[i].znode->iip = i;
2080 }
2081 }
2082
2083
2084 dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
2085
2086 insert_zbranch(zi, zbr, n);
2087
2088
2089 if (zp) {
2090 if (n == 0 && zi == znode && znode->iip == 0)
2091 correct_parent_keys(c, znode);
2092
2093
2094 n = znode->iip + 1;
2095
2096
2097 zbr->key = zn->zbranch[0].key;
2098 zbr->znode = zn;
2099 zbr->lnum = 0;
2100 zbr->offs = 0;
2101 zbr->len = 0;
2102 znode = zp;
2103
2104 goto again;
2105 }
2106
2107
2108 dbg_tnc("creating new zroot at level %d", znode->level + 1);
2109
2110 zi = kzalloc(c->max_znode_sz, GFP_NOFS);
2111 if (!zi)
2112 return -ENOMEM;
2113
2114 zi->child_cnt = 2;
2115 zi->level = znode->level + 1;
2116
2117 __set_bit(DIRTY_ZNODE, &zi->flags);
2118 atomic_long_inc(&c->dirty_zn_cnt);
2119
2120 zi->zbranch[0].key = znode->zbranch[0].key;
2121 zi->zbranch[0].znode = znode;
2122 zi->zbranch[0].lnum = c->zroot.lnum;
2123 zi->zbranch[0].offs = c->zroot.offs;
2124 zi->zbranch[0].len = c->zroot.len;
2125 zi->zbranch[1].key = zn->zbranch[0].key;
2126 zi->zbranch[1].znode = zn;
2127
2128 c->zroot.lnum = 0;
2129 c->zroot.offs = 0;
2130 c->zroot.len = 0;
2131 c->zroot.znode = zi;
2132
2133 zn->parent = zi;
2134 zn->iip = 1;
2135 znode->parent = zi;
2136 znode->iip = 0;
2137
2138 return 0;
2139}
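
/*
 * A worked example (illustrative, not from the original source) of the split
 * arithmetic used by 'tnc_insert()' above, for a fanout of 8 (the UBIFS
 * default):
 *
 *   normal split:     keep = (8 + 1) / 2 = 4,  move = 8 - 4 = 4
 *   appending data:   keep = 8,                move = 0
 *
 * In the appending case (sequential writes to the end of a file) the old
 * znode is left full and the new right-hand znode receives only the appended
 * key, which keeps the index compact for the common sequential-write pattern.
 */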

/**
 * ubifs_tnc_add - add a node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function adds a node with key @key to TNC. The node may be new or it
 * may obsolete some existing one. Returns %0 on success or a negative error
 * code on failure.
 */
2153int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2154 int offs, int len)
2155{
2156 int found, n, err = 0;
2157 struct ubifs_znode *znode;
2158
2159 mutex_lock(&c->tnc_mutex);
2160 dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
2161 found = lookup_level0_dirty(c, key, &znode, &n);
2162 if (!found) {
2163 struct ubifs_zbranch zbr;
2164
2165 zbr.znode = NULL;
2166 zbr.lnum = lnum;
2167 zbr.offs = offs;
2168 zbr.len = len;
2169 key_copy(c, key, &zbr.key);
2170 err = tnc_insert(c, znode, &zbr, n + 1);
2171 } else if (found == 1) {
2172 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2173
2174 lnc_free(zbr);
2175 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2176 zbr->lnum = lnum;
2177 zbr->offs = offs;
2178 zbr->len = len;
2179 } else
2180 err = found;
2181 if (!err)
2182 err = dbg_check_tnc(c, 0);
2183 mutex_unlock(&c->tnc_mutex);
2184
2185 return err;
2186}

/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @old_lnum: LEB number of old node
 * @old_offs: old node offset
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function replaces a node with key @key in the TNC only if the old node
 * is found. It is called by garbage collection when nodes are moved. Returns
 * %0 on success or a negative error code on failure.
 */
2202int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
2203 int old_lnum, int old_offs, int lnum, int offs, int len)
2204{
2205 int found, n, err = 0;
2206 struct ubifs_znode *znode;
2207
2208 mutex_lock(&c->tnc_mutex);
2209 dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
2210 old_offs, lnum, offs, len);
2211 found = lookup_level0_dirty(c, key, &znode, &n);
2212 if (found < 0) {
2213 err = found;
2214 goto out_unlock;
2215 }
2216
2217 if (found == 1) {
2218 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2219
2220 found = 0;
2221 if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
2222 lnc_free(zbr);
2223 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2224 if (err)
2225 goto out_unlock;
2226 zbr->lnum = lnum;
2227 zbr->offs = offs;
2228 zbr->len = len;
2229 found = 1;
2230 } else if (is_hash_key(c, key)) {
2231 found = resolve_collision_directly(c, key, &znode, &n,
2232 old_lnum, old_offs);
2233 dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
2234 found, znode, n, old_lnum, old_offs);
2235 if (found < 0) {
2236 err = found;
2237 goto out_unlock;
2238 }
2239
2240 if (found) {
2241
2242 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2243 znode = dirty_cow_bottom_up(c, znode);
2244 if (IS_ERR(znode)) {
2245 err = PTR_ERR(znode);
2246 goto out_unlock;
2247 }
2248 }
2249 zbr = &znode->zbranch[n];
2250 lnc_free(zbr);
2251 err = ubifs_add_dirt(c, zbr->lnum,
2252 zbr->len);
2253 if (err)
2254 goto out_unlock;
2255 zbr->lnum = lnum;
2256 zbr->offs = offs;
2257 zbr->len = len;
2258 }
2259 }
2260 }
2261
2262 if (!found)
2263 err = ubifs_add_dirt(c, lnum, len);
2264
2265 if (!err)
2266 err = dbg_check_tnc(c, 0);
2267
2268out_unlock:
2269 mutex_unlock(&c->tnc_mutex);
2270 return err;
2271}

/**
 * ubifs_tnc_add_nm - add a "hashed" node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
 * may have collisions, like directory entry keys.
 */
2285int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
2286 int lnum, int offs, int len, const struct qstr *nm)
2287{
2288 int found, n, err = 0;
2289 struct ubifs_znode *znode;
2290
2291 mutex_lock(&c->tnc_mutex);
2292 dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
2293 lnum, offs, nm->len, nm->name);
2294 found = lookup_level0_dirty(c, key, &znode, &n);
2295 if (found < 0) {
2296 err = found;
2297 goto out_unlock;
2298 }
2299
2300 if (found == 1) {
2301 if (c->replaying)
2302 found = fallible_resolve_collision(c, key, &znode, &n,
2303 nm, 1);
2304 else
2305 found = resolve_collision(c, key, &znode, &n, nm);
2306 dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
2307 if (found < 0) {
2308 err = found;
2309 goto out_unlock;
2310 }
2311
2312
2313 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2314 znode = dirty_cow_bottom_up(c, znode);
2315 if (IS_ERR(znode)) {
2316 err = PTR_ERR(znode);
2317 goto out_unlock;
2318 }
2319 }
2320
2321 if (found == 1) {
2322 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2323
2324 lnc_free(zbr);
2325 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2326 zbr->lnum = lnum;
2327 zbr->offs = offs;
2328 zbr->len = len;
2329 goto out_unlock;
2330 }
2331 }
2332
2333 if (!found) {
2334 struct ubifs_zbranch zbr;
2335
2336 zbr.znode = NULL;
2337 zbr.lnum = lnum;
2338 zbr.offs = offs;
2339 zbr.len = len;
2340 key_copy(c, key, &zbr.key);
2341 err = tnc_insert(c, znode, &zbr, n + 1);
2342 if (err)
2343 goto out_unlock;
2344 if (c->replaying) {
			/*
			 * We did not find it in the index so there may be a
			 * dangling branch still in the index. So we remove it
			 * by passing 'ubifs_tnc_remove_nm()' the same key but
			 * an unmatchable name.
			 */
2351 struct qstr noname = { .name = "" };
2352
2353 err = dbg_check_tnc(c, 0);
2354 mutex_unlock(&c->tnc_mutex);
2355 if (err)
2356 return err;
2357 return ubifs_tnc_remove_nm(c, key, &noname);
2358 }
2359 }
2360
2361out_unlock:
2362 if (!err)
2363 err = dbg_check_tnc(c, 0);
2364 mutex_unlock(&c->tnc_mutex);
2365 return err;
2366}

/**
 * tnc_delete - delete a znode from TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to delete from
 * @n: zbranch slot number to delete
 *
 * This function deletes a leaf node from the @n-th slot of @znode. Returns
 * zero in case of success and a negative error code in case of failure.
 */
2377static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
2378{
2379 struct ubifs_zbranch *zbr;
2380 struct ubifs_znode *zp;
2381 int i, err;
2382
2383
2384 ubifs_assert(znode->level == 0);
2385 ubifs_assert(n >= 0 && n < c->fanout);
2386 dbg_tnck(&znode->zbranch[n].key, "deleting key ");
2387
2388 zbr = &znode->zbranch[n];
2389 lnc_free(zbr);
2390
2391 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2392 if (err) {
2393 ubifs_dump_znode(c, znode);
2394 return err;
2395 }
2396
2397
2398 for (i = n; i < znode->child_cnt - 1; i++)
2399 znode->zbranch[i] = znode->zbranch[i + 1];
2400 znode->child_cnt -= 1;
2401
2402 if (znode->child_cnt > 0)
2403 return 0;
2404
2405
2406
2407
2408
2409
2410 do {
2411 ubifs_assert(!ubifs_zn_obsolete(znode));
2412 ubifs_assert(ubifs_zn_dirty(znode));
2413
2414 zp = znode->parent;
2415 n = znode->iip;
2416
2417 atomic_long_dec(&c->dirty_zn_cnt);
2418
2419 err = insert_old_idx_znode(c, znode);
2420 if (err)
2421 return err;
2422
2423 if (znode->cnext) {
2424 __set_bit(OBSOLETE_ZNODE, &znode->flags);
2425 atomic_long_inc(&c->clean_zn_cnt);
2426 atomic_long_inc(&ubifs_clean_zn_cnt);
2427 } else
2428 kfree(znode);
2429 znode = zp;
2430 } while (znode->child_cnt == 1);
2431
2432
2433 znode->child_cnt -= 1;
2434 ubifs_assert(znode->level != 0);
2435 for (i = n; i < znode->child_cnt; i++) {
2436 znode->zbranch[i] = znode->zbranch[i + 1];
2437 if (znode->zbranch[i].znode)
2438 znode->zbranch[i].znode->iip = i;
2439 }
2440
2441
2442
2443
2444
2445 if (!znode->parent) {
2446 while (znode->child_cnt == 1 && znode->level != 0) {
2447 zp = znode;
2448 zbr = &znode->zbranch[0];
2449 znode = get_znode(c, znode, 0);
2450 if (IS_ERR(znode))
2451 return PTR_ERR(znode);
2452 znode = dirty_cow_znode(c, zbr);
2453 if (IS_ERR(znode))
2454 return PTR_ERR(znode);
2455 znode->parent = NULL;
2456 znode->iip = 0;
2457 if (c->zroot.len) {
2458 err = insert_old_idx(c, c->zroot.lnum,
2459 c->zroot.offs);
2460 if (err)
2461 return err;
2462 }
2463 c->zroot.lnum = zbr->lnum;
2464 c->zroot.offs = zbr->offs;
2465 c->zroot.len = zbr->len;
2466 c->zroot.znode = znode;
2467 ubifs_assert(!ubifs_zn_obsolete(zp));
2468 ubifs_assert(ubifs_zn_dirty(zp));
2469 atomic_long_dec(&c->dirty_zn_cnt);
2470
2471 if (zp->cnext) {
2472 __set_bit(OBSOLETE_ZNODE, &zp->flags);
2473 atomic_long_inc(&c->clean_zn_cnt);
2474 atomic_long_inc(&ubifs_clean_zn_cnt);
2475 } else
2476 kfree(zp);
2477 }
2478 }
2479
2480 return 0;
2481}

/**
 * ubifs_tnc_remove - remove an index entry of a node.
 * @c: UBIFS file-system description object
 * @key: key of node
 *
 * Returns %0 on success or a negative error code on failure.
 */
2490int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2491{
2492 int found, n, err = 0;
2493 struct ubifs_znode *znode;
2494
2495 mutex_lock(&c->tnc_mutex);
2496 dbg_tnck(key, "key ");
2497 found = lookup_level0_dirty(c, key, &znode, &n);
2498 if (found < 0) {
2499 err = found;
2500 goto out_unlock;
2501 }
2502 if (found == 1)
2503 err = tnc_delete(c, znode, n);
2504 if (!err)
2505 err = dbg_check_tnc(c, 0);
2506
2507out_unlock:
2508 mutex_unlock(&c->tnc_mutex);
2509 return err;
2510}

/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of node
 * @nm: directory entry name
 *
 * Returns %0 on success or a negative error code on failure.
 */
2520int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
2521 const struct qstr *nm)
2522{
2523 int n, err;
2524 struct ubifs_znode *znode;
2525
2526 mutex_lock(&c->tnc_mutex);
2527 dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
2528 err = lookup_level0_dirty(c, key, &znode, &n);
2529 if (err < 0)
2530 goto out_unlock;
2531
2532 if (err) {
2533 if (c->replaying)
2534 err = fallible_resolve_collision(c, key, &znode, &n,
2535 nm, 0);
2536 else
2537 err = resolve_collision(c, key, &znode, &n, nm);
2538 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
2539 if (err < 0)
2540 goto out_unlock;
2541 if (err) {
2542
2543 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2544 znode = dirty_cow_bottom_up(c, znode);
2545 if (IS_ERR(znode)) {
2546 err = PTR_ERR(znode);
2547 goto out_unlock;
2548 }
2549 }
2550 err = tnc_delete(c, znode, n);
2551 }
2552 }
2553
2554out_unlock:
2555 if (!err)
2556 err = dbg_check_tnc(c, 0);
2557 mutex_unlock(&c->tnc_mutex);
2558 return err;
2559}

/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * This function returns %1 if the key is in range and %0 otherwise.
 */
2570static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
2571 union ubifs_key *from_key, union ubifs_key *to_key)
2572{
2573 if (keys_cmp(c, key, from_key) < 0)
2574 return 0;
2575 if (keys_cmp(c, key, to_key) > 0)
2576 return 0;
2577 return 1;
2578}

/**
 * ubifs_tnc_remove_range - remove index entries in range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * This function removes index entries starting at @from_key and ending at
 * @to_key. Returns zero in case of success and a negative error code in case
 * of failure.
 */
2590int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
2591 union ubifs_key *to_key)
2592{
2593 int i, n, k, err = 0;
2594 struct ubifs_znode *znode;
2595 union ubifs_key *key;
2596
2597 mutex_lock(&c->tnc_mutex);
2598 while (1) {
2599
2600 err = ubifs_lookup_level0(c, from_key, &znode, &n);
2601 if (err < 0)
2602 goto out_unlock;
2603
2604 if (err)
2605 key = from_key;
2606 else {
2607 err = tnc_next(c, &znode, &n);
2608 if (err == -ENOENT) {
2609 err = 0;
2610 goto out_unlock;
2611 }
2612 if (err < 0)
2613 goto out_unlock;
2614 key = &znode->zbranch[n].key;
2615 if (!key_in_range(c, key, from_key, to_key)) {
2616 err = 0;
2617 goto out_unlock;
2618 }
2619 }
2620
2621
2622 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2623 znode = dirty_cow_bottom_up(c, znode);
2624 if (IS_ERR(znode)) {
2625 err = PTR_ERR(znode);
2626 goto out_unlock;
2627 }
2628 }
2629
2630
2631 for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
2632 key = &znode->zbranch[i].key;
2633 if (!key_in_range(c, key, from_key, to_key))
2634 break;
2635 lnc_free(&znode->zbranch[i]);
2636 err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
2637 znode->zbranch[i].len);
2638 if (err) {
2639 ubifs_dump_znode(c, znode);
2640 goto out_unlock;
2641 }
2642 dbg_tnck(key, "removing key ");
2643 }
2644 if (k) {
2645 for (i = n + 1 + k; i < znode->child_cnt; i++)
2646 znode->zbranch[i - k] = znode->zbranch[i];
2647 znode->child_cnt -= k;
2648 }
2649
2650
2651 err = tnc_delete(c, znode, n);
2652 if (err)
2653 goto out_unlock;
2654 }
2655
2656out_unlock:
2657 if (!err)
2658 err = dbg_check_tnc(c, 0);
2659 mutex_unlock(&c->tnc_mutex);
2660 return err;
2661}

/**
 * ubifs_tnc_remove_ino - remove an inode from TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * This function removes inode @inum and all the extended attributes of this
 * inode from TNC. Returns zero in case of success and a negative error code
 * in case of failure.
 */
2672int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
2673{
2674 union ubifs_key key1, key2;
2675 struct ubifs_dent_node *xent, *pxent = NULL;
2676 struct qstr nm = { .name = NULL };
2677
2678 dbg_tnc("ino %lu", (unsigned long)inum);
2679
2680
2681
2682
2683
2684 lowest_xent_key(c, &key1, inum);
2685 while (1) {
2686 ino_t xattr_inum;
2687 int err;
2688
2689 xent = ubifs_tnc_next_ent(c, &key1, &nm);
2690 if (IS_ERR(xent)) {
2691 err = PTR_ERR(xent);
2692 if (err == -ENOENT)
2693 break;
2694 return err;
2695 }
2696
2697 xattr_inum = le64_to_cpu(xent->inum);
2698 dbg_tnc("xent '%s', ino %lu", xent->name,
2699 (unsigned long)xattr_inum);
2700
2701 nm.name = xent->name;
2702 nm.len = le16_to_cpu(xent->nlen);
2703 err = ubifs_tnc_remove_nm(c, &key1, &nm);
2704 if (err) {
2705 kfree(xent);
2706 return err;
2707 }
2708
2709 lowest_ino_key(c, &key1, xattr_inum);
2710 highest_ino_key(c, &key2, xattr_inum);
2711 err = ubifs_tnc_remove_range(c, &key1, &key2);
2712 if (err) {
2713 kfree(xent);
2714 return err;
2715 }
2716
2717 kfree(pxent);
2718 pxent = xent;
2719 key_read(c, &xent->key, &key1);
2720 }
2721
2722 kfree(pxent);
2723 lowest_ino_key(c, &key1, inum);
2724 highest_ino_key(c, &key2, inum);
2725
2726 return ubifs_tnc_remove_range(c, &key1, &key2);
2727}

/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of last entry
 * @nm: name of last entry found or %NULL
 *
 * This function finds and reads the next directory or extended attribute
 * entry after the given key (@key) if there is one. @nm is used to resolve
 * collisions.
 *
 * If the name of the current entry is not known and only the key is known,
 * @nm->name has to be %NULL. In this case the semantics of this function is a
 * little bit different: it returns the entry corresponding to this key, not
 * the next one. If the key was not found, the closest "right" entry is
 * returned.
 *
 * If the first entry has to be found, @key has to contain the lowest possible
 * key value for this inode and @nm->name has to be %NULL.
 *
 * This function returns the found directory or extended attribute entry node
 * in case of success, %-ENOENT if no entry was found, and a negative error
 * code in case of failure.
 */
2752struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2753 union ubifs_key *key,
2754 const struct qstr *nm)
2755{
2756 int n, err, type = key_type(c, key);
2757 struct ubifs_znode *znode;
2758 struct ubifs_dent_node *dent;
2759 struct ubifs_zbranch *zbr;
2760 union ubifs_key *dkey;
2761
2762 dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
2763 ubifs_assert(is_hash_key(c, key));
2764
2765 mutex_lock(&c->tnc_mutex);
2766 err = ubifs_lookup_level0(c, key, &znode, &n);
2767 if (unlikely(err < 0))
2768 goto out_unlock;
2769
2770 if (nm->name) {
2771 if (err) {
2772
2773 err = resolve_collision(c, key, &znode, &n, nm);
2774 dbg_tnc("rc returned %d, znode %p, n %d",
2775 err, znode, n);
2776 if (unlikely(err < 0))
2777 goto out_unlock;
2778 }
2779
2780
2781 err = tnc_next(c, &znode, &n);
2782 if (unlikely(err))
2783 goto out_unlock;
2784 } else {
2785
2786
2787
2788
2789
2790 if (!err) {
2791
2792
2793
2794
2795
2796 err = tnc_next(c, &znode, &n);
2797 if (err)
2798 goto out_unlock;
2799 }
2800 }
2801
2802 zbr = &znode->zbranch[n];
2803 dent = kmalloc(zbr->len, GFP_NOFS);
2804 if (unlikely(!dent)) {
2805 err = -ENOMEM;
2806 goto out_unlock;
2807 }
2808
2809
2810
2811
2812
2813 dkey = &zbr->key;
2814 if (key_inum(c, dkey) != key_inum(c, key) ||
2815 key_type(c, dkey) != type) {
2816 err = -ENOENT;
2817 goto out_free;
2818 }
2819
2820 err = tnc_read_node_nm(c, zbr, dent);
2821 if (unlikely(err))
2822 goto out_free;
2823
2824 mutex_unlock(&c->tnc_mutex);
2825 return dent;
2826
2827out_free:
2828 kfree(dent);
2829out_unlock:
2830 mutex_unlock(&c->tnc_mutex);
2831 return ERR_PTR(err);
2832}
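
/*
 * A minimal sketch (not part of the original source) of how
 * 'ubifs_tnc_next_ent()' is typically driven in a readdir-style walk; compare
 * with the xattr loop in 'ubifs_tnc_remove_ino()' above. 'dir_inum' is a
 * placeholder for the directory inode number, and the previous entry must be
 * kept alive while @nm still points into it:
 *
 *	union ubifs_key key;
 *	struct ubifs_dent_node *ent, *pent = NULL;
 *	struct qstr nm = { .name = NULL };
 *
 *	lowest_dent_key(c, &key, dir_inum);
 *	while (1) {
 *		ent = ubifs_tnc_next_ent(c, &key, &nm);
 *		if (IS_ERR(ent))
 *			break;          (stop on -ENOENT or a real error)
 *		(consume ent->name, ent->inum, ent->type here)
 *		nm.name = ent->name;
 *		nm.len = le16_to_cpu(ent->nlen);
 *		key_read(c, &ent->key, &key);
 *		kfree(pent);
 *		pent = ent;
 *	}
 *	kfree(pent);
 */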

/**
 * tnc_destroy_cnext - destroy left-over obsolete znodes from a failed commit.
 * @c: UBIFS file-system description object
 *
 * Destroy left-over obsolete znodes from a failed commit.
 */
2840static void tnc_destroy_cnext(struct ubifs_info *c)
2841{
2842 struct ubifs_znode *cnext;
2843
2844 if (!c->cnext)
2845 return;
2846 ubifs_assert(c->cmt_state == COMMIT_BROKEN);
2847 cnext = c->cnext;
2848 do {
2849 struct ubifs_znode *znode = cnext;
2850
2851 cnext = cnext->cnext;
2852 if (ubifs_zn_obsolete(znode))
2853 kfree(znode);
2854 } while (cnext && cnext != c->cnext);
2855}

/**
 * ubifs_tnc_close - close TNC subsystem and free all related resources.
 * @c: UBIFS file-system description object
 */
2861void ubifs_tnc_close(struct ubifs_info *c)
2862{
2863 tnc_destroy_cnext(c);
2864 if (c->zroot.znode) {
2865 long n, freed;
2866
2867 n = atomic_long_read(&c->clean_zn_cnt);
2868 freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
2869 ubifs_assert(freed == n);
2870 atomic_long_sub(n, &ubifs_clean_zn_cnt);
2871 }
2872 kfree(c->gap_lebs);
2873 kfree(c->ilebs);
2874 destroy_old_idx(c);
2875}

/**
 * left_znode - get the znode to the left.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the left of @znode or NULL
 * if there is not one. A negative error code is returned on failure.
 */
2885static struct ubifs_znode *left_znode(struct ubifs_info *c,
2886 struct ubifs_znode *znode)
2887{
2888 int level = znode->level;
2889
2890 while (1) {
2891 int n = znode->iip - 1;
2892
2893
2894 znode = znode->parent;
2895 if (!znode)
2896 return NULL;
2897 if (n >= 0) {
2898
2899 znode = get_znode(c, znode, n);
2900 if (IS_ERR(znode))
2901 return znode;
2902 while (znode->level != level) {
2903 n = znode->child_cnt - 1;
2904 znode = get_znode(c, znode, n);
2905 if (IS_ERR(znode))
2906 return znode;
2907 }
2908 break;
2909 }
2910 }
2911 return znode;
2912}

/**
 * right_znode - get the znode to the right.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the right of @znode or NULL
 * if there is not one. A negative error code is returned on failure.
 */
2922static struct ubifs_znode *right_znode(struct ubifs_info *c,
2923 struct ubifs_znode *znode)
2924{
2925 int level = znode->level;
2926
2927 while (1) {
2928 int n = znode->iip + 1;
2929
2930
2931 znode = znode->parent;
2932 if (!znode)
2933 return NULL;
2934 if (n < znode->child_cnt) {
2935
2936 znode = get_znode(c, znode, n);
2937 if (IS_ERR(znode))
2938 return znode;
2939 while (znode->level != level) {
2940 znode = get_znode(c, znode, 0);
2941 if (IS_ERR(znode))
2942 return znode;
2943 }
2944 break;
2945 }
2946 }
2947 return znode;
2948}

/**
 * lookup_znode - find a particular indexing node from TNC.
 * @c: UBIFS file-system description object
 * @key: index node key to lookup
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * This function searches an indexing node by its first key @key and its
 * address @lnum:@offs. It looks up the indexing tree by pulling all indexing
 * nodes it traverses into the TNC. It is called for indexing nodes which were
 * found on the media by scanning, for example when garbage-collecting or when
 * doing an in-the-gaps commit. This means that the indexing node which is
 * looked for does not have to have exactly the same leftmost key @key,
 * because the leftmost key may have been changed, in which case TNC will
 * contain a dirty znode which still refers to the same @lnum:@offs. This
 * function is clever enough to recognize such indexing nodes.
 *
 * Note, if a znode was deleted or changed too much, then this function will
 * not find it. For situations like this UBIFS has the old index RB-tree
 * (indexed by @lnum and @offs).
 *
 * This function returns a pointer to the znode found or %NULL if it is not
 * found. A negative error code is returned on failure.
 */
2975static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
2976 union ubifs_key *key, int level,
2977 int lnum, int offs)
2978{
2979 struct ubifs_znode *znode, *zn;
2980 int n, nn;
2981
2982 ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
2983
2984
2985
2986
2987
2988 if (level < 0)
2989 return ERR_PTR(-EINVAL);
2990
2991
2992 znode = c->zroot.znode;
2993 if (!znode) {
2994 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
2995 if (IS_ERR(znode))
2996 return znode;
2997 }
2998
2999 if (c->zroot.lnum == lnum && c->zroot.offs == offs)
3000 return znode;
3001
3002 if (level >= znode->level)
3003 return NULL;
3004 while (1) {
3005 ubifs_search_zbranch(c, znode, key, &n);
3006 if (n < 0) {
3007
3008
3009
3010
3011
3012
3013
3014
3015 znode = left_znode(c, znode);
3016 if (!znode)
3017 return NULL;
3018 if (IS_ERR(znode))
3019 return znode;
3020 ubifs_search_zbranch(c, znode, key, &n);
3021 ubifs_assert(n >= 0);
3022 }
3023 if (znode->level == level + 1)
3024 break;
3025 znode = get_znode(c, znode, n);
3026 if (IS_ERR(znode))
3027 return znode;
3028 }
3029
3030 if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
3031 return get_znode(c, znode, n);
3032
3033 if (!is_hash_key(c, key))
3034 return NULL;
3035
3036
3037
3038
3039 zn = znode;
3040 nn = n;
3041
3042 while (1) {
3043
3044 if (n)
3045 n -= 1;
3046 else {
3047 znode = left_znode(c, znode);
3048 if (!znode)
3049 break;
3050 if (IS_ERR(znode))
3051 return znode;
3052 n = znode->child_cnt - 1;
3053 }
3054
3055 if (znode->zbranch[n].lnum == lnum &&
3056 znode->zbranch[n].offs == offs)
3057 return get_znode(c, znode, n);
3058
3059 if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
3060 break;
3061 }
3062
3063 znode = zn;
3064 n = nn;
3065
3066 while (1) {
3067
3068 if (++n >= znode->child_cnt) {
3069 znode = right_znode(c, znode);
3070 if (!znode)
3071 break;
3072 if (IS_ERR(znode))
3073 return znode;
3074 n = 0;
3075 }
3076
3077 if (znode->zbranch[n].lnum == lnum &&
3078 znode->zbranch[n].offs == offs)
3079 return get_znode(c, znode, n);
3080
3081 if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
3082 break;
3083 }
3084 return NULL;
3085}

/**
 * is_idx_node_in_tnc - determine if an index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * This function returns %0 if the index node is not referred to in the TNC,
 * %1 if the index node is referred to in the TNC and the corresponding znode
 * is dirty, %2 if an index node is referred to in the TNC and the
 * corresponding znode is clean, and a negative error code in case of failure.
 *
 * Note, the @key argument has to be the key of the first child. Also note,
 * this function relies on the fact that 0:0 is never a valid LEB number and
 * offset for a main-area node.
 */
3104int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
3105 int lnum, int offs)
3106{
3107 struct ubifs_znode *znode;
3108
3109 znode = lookup_znode(c, key, level, lnum, offs);
3110 if (!znode)
3111 return 0;
3112 if (IS_ERR(znode))
3113 return PTR_ERR(znode);
3114
3115 return ubifs_zn_dirty(znode) ? 1 : 2;
3116}

/**
 * is_leaf_node_in_tnc - determine if a non-indexing node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @lnum: node LEB number
 * @offs: node offset
 *
 * This function returns %1 if the node is referred to in the TNC, %0 if it is
 * not, and a negative error code in case of failure.
 *
 * Note, this function relies on the fact that 0:0 is never a valid LEB number
 * and offset for a main-area node.
 */
3131static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
3132 int lnum, int offs)
3133{
3134 struct ubifs_zbranch *zbr;
3135 struct ubifs_znode *znode, *zn;
3136 int n, found, err, nn;
3137 const int unique = !is_hash_key(c, key);
3138
3139 found = ubifs_lookup_level0(c, key, &znode, &n);
3140 if (found < 0)
3141 return found;
3142 if (!found)
3143 return 0;
3144 zbr = &znode->zbranch[n];
3145 if (lnum == zbr->lnum && offs == zbr->offs)
3146 return 1;
3147 if (unique)
3148 return 0;
3149
3150
3151
3152
3153 zn = znode;
3154 nn = n;
3155
3156 while (1) {
3157 err = tnc_prev(c, &znode, &n);
3158 if (err == -ENOENT)
3159 break;
3160 if (err)
3161 return err;
3162 if (keys_cmp(c, key, &znode->zbranch[n].key))
3163 break;
3164 zbr = &znode->zbranch[n];
3165 if (lnum == zbr->lnum && offs == zbr->offs)
3166 return 1;
3167 }
3168
3169 znode = zn;
3170 n = nn;
3171 while (1) {
3172 err = tnc_next(c, &znode, &n);
3173 if (err) {
3174 if (err == -ENOENT)
3175 return 0;
3176 return err;
3177 }
3178 if (keys_cmp(c, key, &znode->zbranch[n].key))
3179 break;
3180 zbr = &znode->zbranch[n];
3181 if (lnum == zbr->lnum && offs == zbr->offs)
3182 return 1;
3183 }
3184 return 0;
3185}

/**
 * ubifs_tnc_has_node - determine whether a node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @level: index node level (if it is an index node)
 * @lnum: node LEB number
 * @offs: node offset
 * @is_idx: non-zero if the node is an index node
 *
 * This function returns %1 if the node is in the TNC, %0 if it is not, and a
 * negative error code in case of failure. For index nodes, @key has to be the
 * key of the first child. An index node is considered to be in the TNC only
 * if the corresponding znode is clean or has not been loaded.
 */
3201int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3202 int lnum, int offs, int is_idx)
3203{
3204 int err;
3205
3206 mutex_lock(&c->tnc_mutex);
3207 if (is_idx) {
3208 err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3209 if (err < 0)
3210 goto out_unlock;
3211 if (err == 1)
3212
3213 err = 0;
3214 else if (err == 2)
3215
3216 err = 1;
3217 else
3218 BUG_ON(err != 0);
3219 } else
3220 err = is_leaf_node_in_tnc(c, key, lnum, offs);
3221
3222out_unlock:
3223 mutex_unlock(&c->tnc_mutex);
3224 return err;
3225}

/**
 * ubifs_dirty_idx_node - dirty an index node.
 * @c: UBIFS file-system description object
 * @key: index node key
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * This function loads and dirties an index node so that it can be garbage
 * collected. The @key argument has to be the key of the first child. This
 * function relies on the fact that 0:0 is never a valid LEB number and offset
 * for a main-area node. Returns %0 on success and a negative error code on
 * failure.
 */
3241int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3242 int lnum, int offs)
3243{
3244 struct ubifs_znode *znode;
3245 int err = 0;
3246
3247 mutex_lock(&c->tnc_mutex);
3248 znode = lookup_znode(c, key, level, lnum, offs);
3249 if (!znode)
3250 goto out_unlock;
3251 if (IS_ERR(znode)) {
3252 err = PTR_ERR(znode);
3253 goto out_unlock;
3254 }
3255 znode = dirty_cow_bottom_up(c, znode);
3256 if (IS_ERR(znode)) {
3257 err = PTR_ERR(znode);
3258 goto out_unlock;
3259 }
3260
3261out_unlock:
3262 mutex_unlock(&c->tnc_mutex);
3263 return err;
3264}

/**
 * dbg_check_inode_size - check if inode size is correct.
 * @c: UBIFS file-system description object
 * @inode: inode to check
 * @size: inode size
 *
 * This function makes sure that the inode size (@size) is correct and it does
 * not have any pages beyond @size. Returns zero if the inode is OK, %-EINVAL
 * if it has a data page beyond @size, and another negative error code in case
 * of other errors.
 */
3277int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
3278 loff_t size)
3279{
3280 int err, n;
3281 union ubifs_key from_key, to_key, *key;
3282 struct ubifs_znode *znode;
3283 unsigned int block;
3284
3285 if (!S_ISREG(inode->i_mode))
3286 return 0;
3287 if (!dbg_is_chk_gen(c))
3288 return 0;
3289
3290 block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
3291 data_key_init(c, &from_key, inode->i_ino, block);
3292 highest_data_key(c, &to_key, inode->i_ino);
3293
3294 mutex_lock(&c->tnc_mutex);
3295 err = ubifs_lookup_level0(c, &from_key, &znode, &n);
3296 if (err < 0)
3297 goto out_unlock;
3298
3299 if (err) {
3300 key = &from_key;
3301 goto out_dump;
3302 }
3303
3304 err = tnc_next(c, &znode, &n);
3305 if (err == -ENOENT) {
3306 err = 0;
3307 goto out_unlock;
3308 }
3309 if (err < 0)
3310 goto out_unlock;
3311
3312 ubifs_assert(err == 0);
3313 key = &znode->zbranch[n].key;
3314 if (!key_in_range(c, key, &from_key, &to_key))
3315 goto out_unlock;
3316
3317out_dump:
3318 block = key_block(c, key);
3319 ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld",
3320 (unsigned long)inode->i_ino, size,
3321 ((loff_t)block) << UBIFS_BLOCK_SHIFT);
3322 mutex_unlock(&c->tnc_mutex);
3323 ubifs_dump_inode(c, inode);
3324 dump_stack();
3325 return -EINVAL;
3326
3327out_unlock:
3328 mutex_unlock(&c->tnc_mutex);
3329 return err;
3330}