/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy
 */

/*
 * This file implements TNC (Tree Node Cache) which caches indexing nodes of
 * the UBIFS B-tree.
 *
 * At the moment the locking rules of the TNC tree are quite simple and
 * straightforward. We just have a mutex and lock it when we traverse the
 * tree. If a znode is not in memory, we read it from flash while still
 * having the mutex locked.
 */
21#ifndef __UBOOT__
22#include <log.h>
23#include <dm/devres.h>
24#include <linux/crc32.h>
25#include <linux/slab.h>
26#include <u-boot/crc.h>
27#else
28#include <linux/bitops.h>
29#include <linux/bug.h>
30#include <linux/compat.h>
31#include <linux/err.h>
32#include <linux/stat.h>
33#endif
34#include "ubifs.h"
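
/*
 * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
 * @NAME_LESS: name corresponding to the first argument is less than second
 * @NAME_MATCHES: names match
 * @NAME_GREATER: name corresponding to the first argument is greater than
 *                second
 * @NOT_ON_MEDIA: node referred by zbranch does not exist
 */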
46enum {
47 NAME_LESS = 0,
48 NAME_MATCHES = 1,
49 NAME_GREATER = 2,
50 NOT_ON_MEDIA = 3,
51};
52
53static int try_read_node(const struct ubifs_info *c, void *buf, int type,
54 int len, int lnum, int offs);
55static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
56 struct ubifs_zbranch *zbr, void *node);
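
/**
 * insert_old_idx - record an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * For recovery there must always be a complete, intact version of the index
 * on flash - the "old index" as at the time of the last successful commit.
 * Garbage collection and the in-the-gaps commit method must therefore be able
 * to tell whether an index node belongs to the old index. Old index nodes
 * that have been deleted from the current index, or changed so much that they
 * cannot be found through the TNC, are recorded in the @c->old_idx RB-tree,
 * which is ordered by LEB number and offset.
 *
 * Returns %0 on success, and a negative error code on failure.
 */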
81static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
82{
83 struct ubifs_old_idx *old_idx, *o;
84 struct rb_node **p, *parent = NULL;
85
86 old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
87 if (unlikely(!old_idx))
88 return -ENOMEM;
89 old_idx->lnum = lnum;
90 old_idx->offs = offs;
91
92 p = &c->old_idx.rb_node;
93 while (*p) {
94 parent = *p;
95 o = rb_entry(parent, struct ubifs_old_idx, rb);
96 if (lnum < o->lnum)
97 p = &(*p)->rb_left;
98 else if (lnum > o->lnum)
99 p = &(*p)->rb_right;
100 else if (offs < o->offs)
101 p = &(*p)->rb_left;
102 else if (offs > o->offs)
103 p = &(*p)->rb_right;
104 else {
105 ubifs_err(c, "old idx added twice!");
106 kfree(old_idx);
107 return 0;
108 }
109 }
110 rb_link_node(&old_idx->rb, parent, p);
111 rb_insert_color(&old_idx->rb, &c->old_idx);
112 return 0;
113}
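
/**
 * insert_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Returns %0 on success, and a negative error code on failure.
 */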
122int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
123{
124 if (znode->parent) {
125 struct ubifs_zbranch *zbr;
126
127 zbr = &znode->parent->zbranch[znode->iip];
128 if (zbr->len)
129 return insert_old_idx(c, zbr->lnum, zbr->offs);
130 } else
131 if (c->zroot.len)
132 return insert_old_idx(c, c->zroot.lnum,
133 c->zroot.offs);
134 return 0;
135}
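
/**
 * ins_clr_old_idx_znode - record a znode obsoleted since last commit start.
 * @c: UBIFS file-system description object
 * @znode: znode of obsoleted index node
 *
 * Unlike 'insert_old_idx_znode()', this function also clears the recorded
 * on-flash position (lnum/offs/len) in the parent zbranch or in @c->zroot.
 * Returns %0 on success, and a negative error code on failure.
 */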
144static int ins_clr_old_idx_znode(struct ubifs_info *c,
145 struct ubifs_znode *znode)
146{
147 int err;
148
149 if (znode->parent) {
150 struct ubifs_zbranch *zbr;
151
152 zbr = &znode->parent->zbranch[znode->iip];
153 if (zbr->len) {
154 err = insert_old_idx(c, zbr->lnum, zbr->offs);
155 if (err)
156 return err;
157 zbr->lnum = 0;
158 zbr->offs = 0;
159 zbr->len = 0;
160 }
161 } else
162 if (c->zroot.len) {
163 err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
164 if (err)
165 return err;
166 c->zroot.lnum = 0;
167 c->zroot.offs = 0;
168 c->zroot.len = 0;
169 }
170 return 0;
171}
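
/**
 * destroy_old_idx - destroy the old_idx RB-tree.
 * @c: UBIFS file-system description object
 *
 * The old_idx RB-tree records the on-flash positions of index nodes obsoleted
 * since the last commit start. This function frees all of its entries and
 * resets the tree.
 */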
183void destroy_old_idx(struct ubifs_info *c)
184{
185 struct ubifs_old_idx *old_idx, *n;
186
187 rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb)
188 kfree(old_idx);
189
190 c->old_idx = RB_ROOT;
191}
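
/**
 * copy_znode - copy a dirty znode.
 * @c: UBIFS file-system description object
 * @znode: znode to copy
 *
 * A dirty znode being committed may not be changed, so it is copied. The
 * original znode is marked obsolete, the copy is marked dirty, and the
 * children's parent pointers are updated to point to the copy. Returns the
 * copy, or an error pointer in case of failure.
 */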
200static struct ubifs_znode *copy_znode(struct ubifs_info *c,
201 struct ubifs_znode *znode)
202{
203 struct ubifs_znode *zn;
204
205 zn = kmalloc(c->max_znode_sz, GFP_NOFS);
206 if (unlikely(!zn))
207 return ERR_PTR(-ENOMEM);
208
209 memcpy(zn, znode, c->max_znode_sz);
210 zn->cnext = NULL;
211 __set_bit(DIRTY_ZNODE, &zn->flags);
212 __clear_bit(COW_ZNODE, &zn->flags);
213
214 ubifs_assert(!ubifs_zn_obsolete(znode));
215 __set_bit(OBSOLETE_ZNODE, &znode->flags);
216
217 if (znode->level != 0) {
218 int i;
219 const int n = zn->child_cnt;
220
221
222 for (i = 0; i < n; i++) {
223 struct ubifs_zbranch *zbr = &zn->zbranch[i];
224
225 if (zbr->znode)
226 zbr->znode->parent = zn;
227 }
228 }
229
230 atomic_long_inc(&c->dirty_zn_cnt);
231 return zn;
232}
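
/**
 * add_idx_dirt - add dirt due to a dirty znode.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of index node
 * @dirt: size of index node
 *
 * This function subtracts the size of the obsoleted index node from
 * @c->calc_idx_sz and adds the corresponding amount of dirty space to @lnum.
 */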
242static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
243{
244 c->calc_idx_sz -= ALIGN(dirt, 8);
245 return ubifs_add_dirt(c, lnum, dirt);
246}
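
/**
 * dirty_cow_znode - ensure a znode is not being committed.
 * @c: UBIFS file-system description object
 * @zbr: branch of znode to check
 *
 * Returns the dirtied znode on success or an error pointer on failure.
 */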
255static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
256 struct ubifs_zbranch *zbr)
257{
258 struct ubifs_znode *znode = zbr->znode;
259 struct ubifs_znode *zn;
260 int err;
261
262 if (!ubifs_zn_cow(znode)) {
263
264 if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
265 atomic_long_inc(&c->dirty_zn_cnt);
266 atomic_long_dec(&c->clean_zn_cnt);
267 atomic_long_dec(&ubifs_clean_zn_cnt);
268 err = add_idx_dirt(c, zbr->lnum, zbr->len);
269 if (unlikely(err))
270 return ERR_PTR(err);
271 }
272 return znode;
273 }
274
275 zn = copy_znode(c, znode);
276 if (IS_ERR(zn))
277 return zn;
278
279 if (zbr->len) {
280 err = insert_old_idx(c, zbr->lnum, zbr->offs);
281 if (unlikely(err))
282 return ERR_PTR(err);
283 err = add_idx_dirt(c, zbr->lnum, zbr->len);
284 } else
285 err = 0;
286
287 zbr->znode = zn;
288 zbr->lnum = 0;
289 zbr->offs = 0;
290 zbr->len = 0;
291
292 if (unlikely(err))
293 return ERR_PTR(err);
294 return zn;
295}
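
/**
 * lnc_add - add a leaf node to the leaf node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * Leaf nodes are non-index nodes like directory entry nodes. The leaf node
 * cache (LNC) saves re-reading the same leaf node over and over again, which
 * the file system needs for readdir and for resolving hash collisions.
 *
 * Note, this function does not add the @node object to the LNC directly, but
 * allocates a copy and adds the copy, because @node was allocated outside of
 * the TNC subsystem and may be used with @c->tnc_mutex unlocked. The entry is
 * also validated before it is cached.
 */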
317static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
318 const void *node)
319{
320 int err;
321 void *lnc_node;
322 const struct ubifs_dent_node *dent = node;
323
324 ubifs_assert(!zbr->leaf);
325 ubifs_assert(zbr->len != 0);
326 ubifs_assert(is_hash_key(c, &zbr->key));
327
328 err = ubifs_validate_entry(c, dent);
329 if (err) {
330 dump_stack();
331 ubifs_dump_node(c, dent);
332 return err;
333 }
334
335 lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
336 if (!lnc_node)
 /* We may just not have memory for the LNC, so not an error */
338 return 0;
339
340 zbr->leaf = lnc_node;
341 return 0;
342}
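
/**
 * lnc_add_directly - add a leaf node to the leaf node cache.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of leaf node
 * @node: leaf node
 *
 * This function is similar to 'lnc_add()', but it does not create a copy of
 * @node - it adds @node to the LNC directly.
 */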
353static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
354 void *node)
355{
356 int err;
357
358 ubifs_assert(!zbr->leaf);
359 ubifs_assert(zbr->len != 0);
360
361 err = ubifs_validate_entry(c, node);
362 if (err) {
363 dump_stack();
364 ubifs_dump_node(c, node);
365 return err;
366 }
367
368 zbr->leaf = node;
369 return 0;
370}
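
/**
 * lnc_free - remove a leaf node from the leaf node cache.
 * @zbr: zbranch of leaf node
 */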
377static void lnc_free(struct ubifs_zbranch *zbr)
378{
379 if (!zbr->leaf)
380 return;
381 kfree(zbr->leaf);
382 zbr->leaf = NULL;
383}
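
/**
 * tnc_read_node_nm - read a "hashed" leaf node.
 * @c: UBIFS file-system description object
 * @zbr: key and position of the node
 * @node: node is returned here
 *
 * This function reads a "hashed" node defined by @zbr from the leaf node
 * cache (if it is there) or from the flash media, in which case the node is
 * also added to the LNC. Returns zero in case of success or a negative error
 * code in case of failure.
 */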
396static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
397 void *node)
398{
399 int err;
400
401 ubifs_assert(is_hash_key(c, &zbr->key));
402
403 if (zbr->leaf) {
404
405 ubifs_assert(zbr->len != 0);
406 memcpy(node, zbr->leaf, zbr->len);
407 return 0;
408 }
409
410 if (c->replaying) {
411 err = fallible_read_node(c, &zbr->key, zbr, node);
412
413
414
415
416 if (err == 0)
417 err = -ENOENT;
418 else if (err == 1)
419 err = 0;
420 } else {
421 err = ubifs_tnc_read_node(c, zbr, node);
422 }
423 if (err)
424 return err;
425
426
427 err = lnc_add(c, zbr, node);
428 return err;
429}
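
/**
 * try_read_node - read a node if it is a node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: LEB number of node to read
 * @offs: offset of node to read
 *
 * This function tries to read a node of known type and length, checks it and
 * stores it in @buf. It returns %1 if a valid node is present and %0 if it is
 * not; a negative error code is returned for I/O errors.
 *
 * Note, the CRC of data nodes is not checked if @c->no_chk_data_crc is set,
 * unless we are mounting or re-mounting R/O to R/W.
 */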
455static int try_read_node(const struct ubifs_info *c, void *buf, int type,
456 int len, int lnum, int offs)
457{
458 int err, node_len;
459 struct ubifs_ch *ch = buf;
460 uint32_t crc, node_crc;
461
462 dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
463
464 err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
465 if (err) {
466 ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d",
467 type, lnum, offs, err);
468 return err;
469 }
470
471 if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
472 return 0;
473
474 if (ch->node_type != type)
475 return 0;
476
477 node_len = le32_to_cpu(ch->len);
478 if (node_len != len)
479 return 0;
480
481 if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting &&
482 !c->remounting_rw)
483 return 1;
484
485 crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
486 node_crc = le32_to_cpu(ch->crc);
487 if (crc != node_crc)
488 return 0;
489
490 return 1;
491}
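
/**
 * fallible_read_node - try to read a leaf node.
 * @c: UBIFS file-system description object
 * @key: key of node to read
 * @zbr: position of node
 * @node: node returned
 *
 * This function tries to read a node and returns %1 if the node is read, %0
 * if the node is not present, and a negative error code in case of error.
 */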
503static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
504 struct ubifs_zbranch *zbr, void *node)
505{
506 int ret;
507
508 dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);
509
510 ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
511 zbr->offs);
512 if (ret == 1) {
513 union ubifs_key node_key;
514 struct ubifs_dent_node *dent = node;
515
516
517 key_read(c, &dent->key, &node_key);
518 if (keys_cmp(c, key, &node_key) != 0)
519 ret = 0;
520 }
521 if (ret == 0 && c->replaying)
522 dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
523 zbr->lnum, zbr->offs, zbr->len);
524 return ret;
525}
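
/**
 * matches_name - determine if a direntry or xattr entry matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This function checks if the xentry/direntry referred to by zbranch @zbr
 * matches name @nm. Returns %NAME_MATCHES if it does, %NAME_LESS if the name
 * referred to by @zbr is less than @nm, and %NAME_GREATER if it is greater
 * than @nm. In case of failure, a negative error code is returned.
 */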
538static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
539 const struct qstr *nm)
540{
541 struct ubifs_dent_node *dent;
542 int nlen, err;
543
544
545 if (!zbr->leaf) {
546 dent = kmalloc(zbr->len, GFP_NOFS);
547 if (!dent)
548 return -ENOMEM;
549
550 err = ubifs_tnc_read_node(c, zbr, dent);
551 if (err)
552 goto out_free;
553
554
555 err = lnc_add_directly(c, zbr, dent);
556 if (err)
557 goto out_free;
558 } else
559 dent = zbr->leaf;
560
561 nlen = le16_to_cpu(dent->nlen);
562 err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
563 if (err == 0) {
564 if (nlen == nm->len)
565 return NAME_MATCHES;
566 else if (nlen < nm->len)
567 return NAME_LESS;
568 else
569 return NAME_GREATER;
570 } else if (err < 0)
571 return NAME_LESS;
572 else
573 return NAME_GREATER;
574
575out_free:
576 kfree(dent);
577 return err;
578}
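
/**
 * get_znode - get a TNC znode that may not be loaded yet.
 * @c: UBIFS file-system description object
 * @znode: parent znode
 * @n: znode branch slot number
 *
 * This function returns the znode or an error pointer.
 */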
588static struct ubifs_znode *get_znode(struct ubifs_info *c,
589 struct ubifs_znode *znode, int n)
590{
591 struct ubifs_zbranch *zbr;
592
593 zbr = &znode->zbranch[n];
594 if (zbr->znode)
595 znode = zbr->znode;
596 else
597 znode = ubifs_load_znode(c, zbr, znode, n);
598 return znode;
599}
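
/**
 * tnc_next - find next TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * Returns %0 if the next TNC entry is found, %-ENOENT if there is no next
 * entry, or a negative error code otherwise.
 */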
610static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
611{
612 struct ubifs_znode *znode = *zn;
613 int nn = *n;
614
615 nn += 1;
616 if (nn < znode->child_cnt) {
617 *n = nn;
618 return 0;
619 }
620 while (1) {
621 struct ubifs_znode *zp;
622
623 zp = znode->parent;
624 if (!zp)
625 return -ENOENT;
626 nn = znode->iip + 1;
627 znode = zp;
628 if (nn < znode->child_cnt) {
629 znode = get_znode(c, znode, nn);
630 if (IS_ERR(znode))
631 return PTR_ERR(znode);
632 while (znode->level != 0) {
633 znode = get_znode(c, znode, 0);
634 if (IS_ERR(znode))
635 return PTR_ERR(znode);
636 }
637 nn = 0;
638 break;
639 }
640 }
641 *zn = znode;
642 *n = nn;
643 return 0;
644}
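
/**
 * tnc_prev - find previous TNC entry.
 * @c: UBIFS file-system description object
 * @zn: znode is passed and returned here
 * @n: znode branch slot number is passed and returned here
 *
 * Returns %0 if the previous TNC entry is found, %-ENOENT if there is no
 * previous entry, or a negative error code otherwise.
 */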
655static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
656{
657 struct ubifs_znode *znode = *zn;
658 int nn = *n;
659
660 if (nn > 0) {
661 *n = nn - 1;
662 return 0;
663 }
664 while (1) {
665 struct ubifs_znode *zp;
666
667 zp = znode->parent;
668 if (!zp)
669 return -ENOENT;
670 nn = znode->iip - 1;
671 znode = zp;
672 if (nn >= 0) {
673 znode = get_znode(c, znode, nn);
674 if (IS_ERR(znode))
675 return PTR_ERR(znode);
676 while (znode->level != 0) {
677 nn = znode->child_cnt - 1;
678 znode = get_znode(c, znode, nn);
679 if (IS_ERR(znode))
680 return PTR_ERR(znode);
681 }
682 nn = znode->child_cnt - 1;
683 break;
684 }
685 }
686 *zn = znode;
687 *n = nn;
688 return 0;
689}
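
/**
 * resolve_collision - resolve a collision.
 * @c: UBIFS file-system description object
 * @key: key of a directory or extended attribute entry
 * @zn: znode is returned here
 * @n: zbranch number is passed and returned here
 * @nm: name of the entry
 *
 * This function is called for "hashed" keys to make sure that the found key
 * really corresponds to the looked up node (directory or extended attribute
 * entry). It returns %1 and sets @zn and @n if the collision is resolved.
 * %0 is returned if @nm is not found and @zn and @n are set to the previous
 * entry, i.e. to the entry after which @nm could follow if it were in TNC;
 * @n may then be %-1 if that previous entry is in the znode to the left.
 * A negative error code is returned on failure.
 */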
707static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
708 struct ubifs_znode **zn, int *n,
709 const struct qstr *nm)
710{
711 int err;
712
713 err = matches_name(c, &(*zn)->zbranch[*n], nm);
714 if (unlikely(err < 0))
715 return err;
716 if (err == NAME_MATCHES)
717 return 1;
718
719 if (err == NAME_GREATER) {
720
721 while (1) {
722 err = tnc_prev(c, zn, n);
723 if (err == -ENOENT) {
724 ubifs_assert(*n == 0);
725 *n = -1;
726 return 0;
727 }
728 if (err < 0)
729 return err;
730 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
 /*
  * The key is different, so the entry we are resolving is not
  * present and @zn/@n now point at the entry after which it
  * would go. If this is the last branch of the znode, step to
  * the next znode and set @n to -1, so that an insertion would
  * go before the first branch of that znode rather than after
  * the last branch of this one.
  */
760 if (*n == (*zn)->child_cnt - 1) {
761 err = tnc_next(c, zn, n);
762 if (err) {
763
764 ubifs_assert(0);
765 if (err == -ENOENT)
766 err = -EINVAL;
767 return err;
768 }
769 ubifs_assert(*n == 0);
770 *n = -1;
771 }
772 return 0;
773 }
774 err = matches_name(c, &(*zn)->zbranch[*n], nm);
775 if (err < 0)
776 return err;
777 if (err == NAME_LESS)
778 return 0;
779 if (err == NAME_MATCHES)
780 return 1;
781 ubifs_assert(err == NAME_GREATER);
782 }
783 } else {
784 int nn = *n;
785 struct ubifs_znode *znode = *zn;
786
787
788 while (1) {
789 err = tnc_next(c, &znode, &nn);
790 if (err == -ENOENT)
791 return 0;
792 if (err < 0)
793 return err;
794 if (keys_cmp(c, &znode->zbranch[nn].key, key))
795 return 0;
796 err = matches_name(c, &znode->zbranch[nn], nm);
797 if (err < 0)
798 return err;
799 if (err == NAME_GREATER)
800 return 0;
801 *zn = znode;
802 *n = nn;
803 if (err == NAME_MATCHES)
804 return 1;
805 ubifs_assert(err == NAME_LESS);
806 }
807 }
808}
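
/**
 * fallible_matches_name - determine if a dent matches a given name.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of dent
 * @nm: name to match
 *
 * This is a "fallible" version of 'matches_name()' which does not fail if the
 * direntry/xentry referred to by @zbr does not exist on the media. It returns
 * %NAME_MATCHES, %NAME_LESS or %NAME_GREATER as 'matches_name()' does,
 * %NOT_ON_MEDIA if the node referred to by @zbr is not on the media, and a
 * negative error code in case of failure.
 */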
825static int fallible_matches_name(struct ubifs_info *c,
826 struct ubifs_zbranch *zbr,
827 const struct qstr *nm)
828{
829 struct ubifs_dent_node *dent;
830 int nlen, err;
831
832
833 if (!zbr->leaf) {
834 dent = kmalloc(zbr->len, GFP_NOFS);
835 if (!dent)
836 return -ENOMEM;
837
838 err = fallible_read_node(c, &zbr->key, zbr, dent);
839 if (err < 0)
840 goto out_free;
841 if (err == 0) {
842
843 err = NOT_ON_MEDIA;
844 goto out_free;
845 }
846 ubifs_assert(err == 1);
847
848 err = lnc_add_directly(c, zbr, dent);
849 if (err)
850 goto out_free;
851 } else
852 dent = zbr->leaf;
853
854 nlen = le16_to_cpu(dent->nlen);
855 err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
856 if (err == 0) {
857 if (nlen == nm->len)
858 return NAME_MATCHES;
859 else if (nlen < nm->len)
860 return NAME_LESS;
861 else
862 return NAME_GREATER;
863 } else if (err < 0)
864 return NAME_LESS;
865 else
866 return NAME_GREATER;
867
868out_free:
869 kfree(dent);
870 return err;
871}
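
/**
 * fallible_resolve_collision - resolve a collision even if nodes are missing.
 * @c: UBIFS file-system description object
 * @key: key
 * @zn: znode is returned here
 * @n: branch number is passed and returned here
 * @nm: name of directory entry
 * @adding: indicates caller is adding a key to the TNC
 *
 * This is a "fallible" version of 'resolve_collision()' which does not fail
 * if one of the nodes referred to by the TNC does not exist on the media,
 * which may happen when replaying the journal. A branch that refers to a node
 * that is not present is called a dangling branch. Return codes:
 *  o if @nm was found, %1 is returned and @zn and @n are set to the found
 *    branch;
 *  o if we are @adding and @nm was not found, %0 is returned;
 *  o if we are not @adding and @nm was not found, but a dangling branch was
 *    found, then %1 is returned and @zn and @n are set to the dangling branch;
 *  o a negative error code is returned in case of failure.
 */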
895static int fallible_resolve_collision(struct ubifs_info *c,
896 const union ubifs_key *key,
897 struct ubifs_znode **zn, int *n,
898 const struct qstr *nm, int adding)
899{
900 struct ubifs_znode *o_znode = NULL, *znode = *zn;
901 int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;
902
903 cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
904 if (unlikely(cmp < 0))
905 return cmp;
906 if (cmp == NAME_MATCHES)
907 return 1;
908 if (cmp == NOT_ON_MEDIA) {
909 o_znode = znode;
910 o_n = nn;
 /*
  * The first branch we looked at is dangling (not on the media),
  * so we do not know whether the entry we need is to the left or
  * to the right of it - check both directions.
  */
916 unsure = 1;
917 } else if (!adding)
918 unsure = 1;
919
920 if (cmp == NAME_GREATER || unsure) {
921
922 while (1) {
923 err = tnc_prev(c, zn, n);
924 if (err == -ENOENT) {
925 ubifs_assert(*n == 0);
926 *n = -1;
927 break;
928 }
929 if (err < 0)
930 return err;
931 if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
932
933 if (*n == (*zn)->child_cnt - 1) {
934 err = tnc_next(c, zn, n);
935 if (err) {
936
937 ubifs_assert(0);
938 if (err == -ENOENT)
939 err = -EINVAL;
940 return err;
941 }
942 ubifs_assert(*n == 0);
943 *n = -1;
944 }
945 break;
946 }
947 err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
948 if (err < 0)
949 return err;
950 if (err == NAME_MATCHES)
951 return 1;
952 if (err == NOT_ON_MEDIA) {
953 o_znode = *zn;
954 o_n = *n;
955 continue;
956 }
957 if (!adding)
958 continue;
959 if (err == NAME_LESS)
960 break;
961 else
962 unsure = 0;
963 }
964 }
965
966 if (cmp == NAME_LESS || unsure) {
967
968 *zn = znode;
969 *n = nn;
970 while (1) {
971 err = tnc_next(c, &znode, &nn);
972 if (err == -ENOENT)
973 break;
974 if (err < 0)
975 return err;
976 if (keys_cmp(c, &znode->zbranch[nn].key, key))
977 break;
978 err = fallible_matches_name(c, &znode->zbranch[nn], nm);
979 if (err < 0)
980 return err;
981 if (err == NAME_GREATER)
982 break;
983 *zn = znode;
984 *n = nn;
985 if (err == NAME_MATCHES)
986 return 1;
987 if (err == NOT_ON_MEDIA) {
988 o_znode = znode;
989 o_n = nn;
990 }
991 }
992 }
993
994
995 if (adding || !o_znode)
996 return 0;
997
998 dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
999 o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
1000 o_znode->zbranch[o_n].len);
1001 *zn = o_znode;
1002 *n = o_n;
1003 return 1;
1004}
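
/**
 * matches_position - determine if a zbranch matches a given position.
 * @zbr: zbranch of dent
 * @lnum: LEB number of dent to match
 * @offs: offset of dent to match
 *
 * This function returns %1 if @lnum:@offs matches, and %0 otherwise.
 */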
1014static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
1015{
1016 if (zbr->lnum == lnum && zbr->offs == offs)
1017 return 1;
1018 else
1019 return 0;
1020}
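
/**
 * resolve_collision_directly - resolve a collision directly.
 * @c: UBIFS file-system description object
 * @key: key of directory entry
 * @zn: znode is passed and returned here
 * @n: zbranch number is passed and returned here
 * @lnum: LEB number of dent node to match
 * @offs: offset of dent node to match
 *
 * This function is used for "hashed" keys when the flash address of the
 * wanted node is known (@lnum:@offs), which makes collisions much easier to
 * resolve (no need to read and compare full names). It returns %1 and sets
 * @zn and @n if the collision is resolved, %0 if @lnum:@offs is not found,
 * or a negative error code in case of failure.
 */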
1039static int resolve_collision_directly(struct ubifs_info *c,
1040 const union ubifs_key *key,
1041 struct ubifs_znode **zn, int *n,
1042 int lnum, int offs)
1043{
1044 struct ubifs_znode *znode;
1045 int nn, err;
1046
1047 znode = *zn;
1048 nn = *n;
1049 if (matches_position(&znode->zbranch[nn], lnum, offs))
1050 return 1;
1051
1052
1053 while (1) {
1054 err = tnc_prev(c, &znode, &nn);
1055 if (err == -ENOENT)
1056 break;
1057 if (err < 0)
1058 return err;
1059 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1060 break;
1061 if (matches_position(&znode->zbranch[nn], lnum, offs)) {
1062 *zn = znode;
1063 *n = nn;
1064 return 1;
1065 }
1066 }
1067
1068
1069 znode = *zn;
1070 nn = *n;
1071 while (1) {
1072 err = tnc_next(c, &znode, &nn);
1073 if (err == -ENOENT)
1074 return 0;
1075 if (err < 0)
1076 return err;
1077 if (keys_cmp(c, &znode->zbranch[nn].key, key))
1078 return 0;
1079 *zn = znode;
1080 *n = nn;
1081 if (matches_position(&znode->zbranch[nn], lnum, offs))
1082 return 1;
1083 }
1084}
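
/**
 * dirty_cow_bottom_up - dirty a znode and its ancestors.
 * @c: UBIFS file-system description object
 * @znode: znode to dirty
 *
 * If we do not have a unique key that resides in a znode, then we cannot
 * dirty that znode from the top down (i.e. by using 'lookup_level0_dirty()').
 * This function records the path back to the last dirty ancestor, and then
 * dirties the znodes on that path.
 */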
1096static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
1097 struct ubifs_znode *znode)
1098{
1099 struct ubifs_znode *zp;
1100 int *path = c->bottom_up_buf, p = 0;
1101
1102 ubifs_assert(c->zroot.znode);
1103 ubifs_assert(znode);
1104 if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
1105 kfree(c->bottom_up_buf);
1106 c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
1107 GFP_NOFS);
1108 if (!c->bottom_up_buf)
1109 return ERR_PTR(-ENOMEM);
1110 path = c->bottom_up_buf;
1111 }
1112 if (c->zroot.znode->level) {
1113
1114 while (1) {
1115 int n;
1116
1117 zp = znode->parent;
1118 if (!zp)
1119 break;
1120 n = znode->iip;
1121 ubifs_assert(p < c->zroot.znode->level);
1122 path[p++] = n;
1123 if (!zp->cnext && ubifs_zn_dirty(znode))
1124 break;
1125 znode = zp;
1126 }
1127 }
1128
1129
1130 while (1) {
1131 struct ubifs_zbranch *zbr;
1132
1133 zp = znode->parent;
1134 if (zp) {
1135 ubifs_assert(path[p - 1] >= 0);
1136 ubifs_assert(path[p - 1] < zp->child_cnt);
1137 zbr = &zp->zbranch[path[--p]];
1138 znode = dirty_cow_znode(c, zbr);
1139 } else {
1140 ubifs_assert(znode == c->zroot.znode);
1141 znode = dirty_cow_znode(c, &c->zroot);
1142 }
1143 if (IS_ERR(znode) || !p)
1144 break;
1145 ubifs_assert(path[p - 1] >= 0);
1146 ubifs_assert(path[p - 1] < znode->child_cnt);
1147 znode = znode->zbranch[path[p - 1]].znode;
1148 }
1149
1150 return znode;
1151}
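
/**
 * ubifs_lookup_level0 - search for zero-level znode.
 * @c: UBIFS file-system description object
 * @key: key to look up
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function looks up the TNC tree and searches for the zero-level znode
 * which refers to key @key. There are 3 cases:
 *   o exact match - %1 is returned and the slot number of the matched branch
 *     is stored in @n;
 *   o no exact match - %0 is returned and the slot number of the closest
 *     branch (or %-1) is stored in @n;
 *   o @key is less than the lowest key of the leftmost zero-level znode -
 *     %0 is returned and %-1 is stored in @n.
 *
 * Absent znodes are read from the flash media and inserted into the TNC as
 * the tree is traversed. A negative error code is returned on failure.
 */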
1175int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
1176 struct ubifs_znode **zn, int *n)
1177{
1178 int err, exact;
1179 struct ubifs_znode *znode;
1180 unsigned long time = get_seconds();
1181
1182 dbg_tnck(key, "search key ");
1183 ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
1184
1185 znode = c->zroot.znode;
1186 if (unlikely(!znode)) {
1187 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1188 if (IS_ERR(znode))
1189 return PTR_ERR(znode);
1190 }
1191
1192 znode->time = time;
1193
1194 while (1) {
1195 struct ubifs_zbranch *zbr;
1196
1197 exact = ubifs_search_zbranch(c, znode, key, n);
1198
1199 if (znode->level == 0)
1200 break;
1201
1202 if (*n < 0)
1203 *n = 0;
1204 zbr = &znode->zbranch[*n];
1205
1206 if (zbr->znode) {
1207 znode->time = time;
1208 znode = zbr->znode;
1209 continue;
1210 }
1211
1212
1213 znode = ubifs_load_znode(c, zbr, znode, *n);
1214 if (IS_ERR(znode))
1215 return PTR_ERR(znode);
1216 }
1217
1218 *zn = znode;
1219 if (exact || !is_hash_key(c, key) || *n != -1) {
1220 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1221 return exact;
1222 }
1223
 /*
  * Here is a tricky place. The key was not found, but it is a
  * "hashed" key, which may collide with other keys. The entry we
  * are looking for may therefore be in the zero-level znode to the
  * left of the one found by the search, so step one branch back
  * and check whether the previous key is the same.
  */
1267 err = tnc_prev(c, &znode, n);
1268 if (err == -ENOENT) {
1269 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1270 *n = -1;
1271 return 0;
1272 }
1273 if (unlikely(err < 0))
1274 return err;
1275 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1276 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1277 *n = -1;
1278 return 0;
1279 }
1280
1281 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1282 *zn = znode;
1283 return 1;
1284}
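
/**
 * lookup_level0_dirty - search for zero-level znode dirtying.
 * @c: UBIFS file-system description object
 * @key: key to look up
 * @zn: znode is returned here
 * @n: znode branch slot number is returned here
 *
 * This function is similar to 'ubifs_lookup_level0()', but additionally all
 * znodes on the path from the root to the found zero-level znode are dirtied
 * (copied-on-write if necessary).
 */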
1311static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
1312 struct ubifs_znode **zn, int *n)
1313{
1314 int err, exact;
1315 struct ubifs_znode *znode;
1316 unsigned long time = get_seconds();
1317
1318 dbg_tnck(key, "search and dirty key ");
1319
1320 znode = c->zroot.znode;
1321 if (unlikely(!znode)) {
1322 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1323 if (IS_ERR(znode))
1324 return PTR_ERR(znode);
1325 }
1326
1327 znode = dirty_cow_znode(c, &c->zroot);
1328 if (IS_ERR(znode))
1329 return PTR_ERR(znode);
1330
1331 znode->time = time;
1332
1333 while (1) {
1334 struct ubifs_zbranch *zbr;
1335
1336 exact = ubifs_search_zbranch(c, znode, key, n);
1337
1338 if (znode->level == 0)
1339 break;
1340
1341 if (*n < 0)
1342 *n = 0;
1343 zbr = &znode->zbranch[*n];
1344
1345 if (zbr->znode) {
1346 znode->time = time;
1347 znode = dirty_cow_znode(c, zbr);
1348 if (IS_ERR(znode))
1349 return PTR_ERR(znode);
1350 continue;
1351 }
1352
1353
1354 znode = ubifs_load_znode(c, zbr, znode, *n);
1355 if (IS_ERR(znode))
1356 return PTR_ERR(znode);
1357 znode = dirty_cow_znode(c, zbr);
1358 if (IS_ERR(znode))
1359 return PTR_ERR(znode);
1360 }
1361
1362 *zn = znode;
1363 if (exact || !is_hash_key(c, key) || *n != -1) {
1364 dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1365 return exact;
1366 }
1367
1368
1369
1370
1371
1372 err = tnc_prev(c, &znode, n);
1373 if (err == -ENOENT) {
1374 *n = -1;
1375 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1376 return 0;
1377 }
1378 if (unlikely(err < 0))
1379 return err;
1380 if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1381 *n = -1;
1382 dbg_tnc("found 0, lvl %d, n -1", znode->level);
1383 return 0;
1384 }
1385
1386 if (znode->cnext || !ubifs_zn_dirty(znode)) {
1387 znode = dirty_cow_bottom_up(c, znode);
1388 if (IS_ERR(znode))
1389 return PTR_ERR(znode);
1390 }
1391
1392 dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1393 *zn = znode;
1394 return 1;
1395}
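
/**
 * maybe_leb_gced - determine if a LEB may have been garbage collected.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @gc_seq1: garbage collection sequence number at the time of the lookup
 *
 * This function determines if @lnum may have been garbage collected since
 * sequence number @gc_seq1. If it may have been, %1 is returned, otherwise %0.
 */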
1407static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
1408{
1409#ifndef __UBOOT__
1410 int gc_seq2, gced_lnum;
1411
1412 gced_lnum = c->gced_lnum;
1413 smp_rmb();
1414 gc_seq2 = c->gc_seq;
1415
1416 if (gc_seq1 == gc_seq2)
1417 return 0;
1418
1419 if (gc_seq1 + 1 != gc_seq2)
1420 return 1;
1421
1422
1423
1424
1425 smp_rmb();
1426 if (gced_lnum != c->gced_lnum)
1427 return 1;
1428
1429 if (gced_lnum == lnum)
1430 return 1;
1431#else
1432
1433#endif
1434 return 0;
1435}
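
/**
 * ubifs_tnc_locate - look up a file-system node and return it and its location.
 * @c: UBIFS file-system description object
 * @key: node key to look up
 * @node: the node is returned here
 * @lnum: LEB number is returned here (may be %NULL)
 * @offs: offset is returned here
 *
 * This function looks up and reads the node with key @key. The caller has to
 * make sure the @node buffer is large enough to fit the node. Returns zero in
 * case of success, %-ENOENT if the node was not found, and a negative error
 * code in case of failure.
 */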
1450int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
1451 void *node, int *lnum, int *offs)
1452{
1453 int found, n, err, safely = 0, gc_seq1;
1454 struct ubifs_znode *znode;
1455 struct ubifs_zbranch zbr, *zt;
1456
1457again:
1458 mutex_lock(&c->tnc_mutex);
1459 found = ubifs_lookup_level0(c, key, &znode, &n);
1460 if (!found) {
1461 err = -ENOENT;
1462 goto out;
1463 } else if (found < 0) {
1464 err = found;
1465 goto out;
1466 }
1467 zt = &znode->zbranch[n];
1468 if (lnum) {
1469 *lnum = zt->lnum;
1470 *offs = zt->offs;
1471 }
1472 if (is_hash_key(c, key)) {
1473
1474
1475
1476
1477 err = tnc_read_node_nm(c, zt, node);
1478 goto out;
1479 }
1480 if (safely) {
1481 err = ubifs_tnc_read_node(c, zt, node);
1482 goto out;
1483 }
1484
1485 zbr = znode->zbranch[n];
1486 gc_seq1 = c->gc_seq;
1487 mutex_unlock(&c->tnc_mutex);
1488
1489 if (ubifs_get_wbuf(c, zbr.lnum)) {
1490
1491 err = ubifs_tnc_read_node(c, &zbr, node);
1492 return err;
1493 }
1494
1495 err = fallible_read_node(c, key, &zbr, node);
1496 if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
1497
1498
1499
1500
1501 safely = 1;
1502 goto again;
1503 }
1504 return 0;
1505
1506out:
1507 mutex_unlock(&c->tnc_mutex);
1508 return err;
1509}
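
/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Looks up consecutive data node keys for the same inode that reside
 * consecutively in the same LEB. Returns zero in case of success and a
 * negative error code in case of failure.
 */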
1524int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
1525{
1526 int n, err = 0, lnum = -1, uninitialized_var(offs);
1527 int uninitialized_var(len);
1528 unsigned int block = key_block(c, &bu->key);
1529 struct ubifs_znode *znode;
1530
1531 bu->cnt = 0;
1532 bu->blk_cnt = 0;
1533 bu->eof = 0;
1534
1535 mutex_lock(&c->tnc_mutex);
1536
1537 err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
1538 if (err < 0)
1539 goto out;
1540 if (err) {
1541
1542 len = znode->zbranch[n].len;
1543
1544 if (len > bu->buf_len) {
1545 err = -EINVAL;
1546 goto out;
1547 }
1548
1549 bu->zbranch[bu->cnt++] = znode->zbranch[n];
1550 bu->blk_cnt += 1;
1551 lnum = znode->zbranch[n].lnum;
1552 offs = ALIGN(znode->zbranch[n].offs + len, 8);
1553 }
1554 while (1) {
1555 struct ubifs_zbranch *zbr;
1556 union ubifs_key *key;
1557 unsigned int next_block;
1558
1559
1560 err = tnc_next(c, &znode, &n);
1561 if (err)
1562 goto out;
1563 zbr = &znode->zbranch[n];
1564 key = &zbr->key;
1565
1566 if (key_inum(c, key) != key_inum(c, &bu->key) ||
1567 key_type(c, key) != UBIFS_DATA_KEY) {
1568 err = -ENOENT;
1569 goto out;
1570 }
1571 if (lnum < 0) {
1572
1573 lnum = zbr->lnum;
1574 offs = ALIGN(zbr->offs + zbr->len, 8);
1575 len = zbr->len;
1576 if (len > bu->buf_len) {
1577 err = -EINVAL;
1578 goto out;
1579 }
1580 } else {
1581
1582
1583
1584
1585 if (zbr->lnum != lnum || zbr->offs != offs)
1586 goto out;
1587 offs += ALIGN(zbr->len, 8);
1588 len = ALIGN(len, 8) + zbr->len;
1589
1590 if (len > bu->buf_len)
1591 goto out;
1592 }
1593
1594 next_block = key_block(c, key);
1595 bu->blk_cnt += (next_block - block - 1);
1596 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1597 goto out;
1598 block = next_block;
1599
1600 bu->zbranch[bu->cnt++] = *zbr;
1601 bu->blk_cnt += 1;
1602
1603 if (bu->cnt >= UBIFS_MAX_BULK_READ)
1604 goto out;
1605 if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1606 goto out;
1607 }
1608out:
1609 if (err == -ENOENT) {
1610 bu->eof = 1;
1611 err = 0;
1612 }
1613 bu->gc_seq = c->gc_seq;
1614 mutex_unlock(&c->tnc_mutex);
1615 if (err)
1616 return err;
1617
1618
1619
1620
1621 if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
1622 bu->blk_cnt = UBIFS_MAX_BULK_READ;
1623
1624
1625
1626
1627 if (UBIFS_BLOCKS_PER_PAGE == 1 ||
1628 !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
1629 return 0;
1630 if (bu->eof) {
1631
1632 bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
1633 return 0;
1634 }
1635
1636 block = key_block(c, &bu->key) + bu->blk_cnt;
1637 block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
1638 while (bu->cnt) {
1639 if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
1640 break;
1641 bu->cnt -= 1;
1642 }
1643 return 0;
1644}
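
/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * This function returns %0 on success or a negative error code on failure.
 */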
1656static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
1657 int offs)
1658{
1659 const struct ubifs_info *c = wbuf->c;
1660 int rlen, overlap;
1661
1662 dbg_io("LEB %d:%d, length %d", lnum, offs, len);
1663 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1664 ubifs_assert(!(offs & 7) && offs < c->leb_size);
1665 ubifs_assert(offs + len <= c->leb_size);
1666
1667 spin_lock(&wbuf->lock);
1668 overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1669 if (!overlap) {
1670
1671 spin_unlock(&wbuf->lock);
1672 return ubifs_leb_read(c, lnum, buf, offs, len, 0);
1673 }
1674
1675
1676 rlen = wbuf->offs - offs;
1677 if (rlen < 0)
1678 rlen = 0;
1679
1680
1681 memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1682 spin_unlock(&wbuf->lock);
1683
1684 if (rlen > 0)
1685
1686 return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
1687
1688 return 0;
1689}
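
/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node to validate
 * @zbr: zbranch of data node to validate
 *
 * This function returns %0 on success or a negative error code on failure.
 */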
1699static int validate_data_node(struct ubifs_info *c, void *buf,
1700 struct ubifs_zbranch *zbr)
1701{
1702 union ubifs_key key1;
1703 struct ubifs_ch *ch = buf;
1704 int err, len;
1705
1706 if (ch->node_type != UBIFS_DATA_NODE) {
1707 ubifs_err(c, "bad node type (%d but expected %d)",
1708 ch->node_type, UBIFS_DATA_NODE);
1709 goto out_err;
1710 }
1711
1712 err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
1713 if (err) {
1714 ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE);
1715 goto out;
1716 }
1717
1718 len = le32_to_cpu(ch->len);
1719 if (len != zbr->len) {
1720 ubifs_err(c, "bad node length %d, expected %d", len, zbr->len);
1721 goto out_err;
1722 }
1723
1724
1725 key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
1726 if (!keys_eq(c, &zbr->key, &key1)) {
1727 ubifs_err(c, "bad key in node at LEB %d:%d",
1728 zbr->lnum, zbr->offs);
1729 dbg_tnck(&zbr->key, "looked for key ");
1730 dbg_tnck(&key1, "found node's key ");
1731 goto out_err;
1732 }
1733
1734 return 0;
1735
1736out_err:
1737 err = -EINVAL;
1738out:
1739 ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs);
1740 ubifs_dump_node(c, buf);
1741 dump_stack();
1742 return err;
1743}
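
/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This function reads and validates the data nodes that were identified by
 * the 'ubifs_tnc_get_bu_keys()' function. It returns %0 on success, %-EAGAIN
 * to indicate a race with GC, or another negative error code on failure.
 */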
1755int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
1756{
1757 int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
1758 struct ubifs_wbuf *wbuf;
1759 void *buf;
1760
1761 len = bu->zbranch[bu->cnt - 1].offs;
1762 len += bu->zbranch[bu->cnt - 1].len - offs;
1763 if (len > bu->buf_len) {
1764 ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len);
1765 return -EINVAL;
1766 }
1767
1768
1769 wbuf = ubifs_get_wbuf(c, lnum);
1770 if (wbuf)
1771 err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
1772 else
1773 err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);
1774
1775
1776 if (maybe_leb_gced(c, lnum, bu->gc_seq))
1777 return -EAGAIN;
1778
1779 if (err && err != -EBADMSG) {
1780 ubifs_err(c, "failed to read from LEB %d:%d, error %d",
1781 lnum, offs, err);
1782 dump_stack();
1783 dbg_tnck(&bu->key, "key ");
1784 return err;
1785 }
1786
1787
1788 buf = bu->buf;
1789 for (i = 0; i < bu->cnt; i++) {
1790 err = validate_data_node(c, buf, &bu->zbranch[i]);
1791 if (err)
1792 return err;
1793 buf = buf + ALIGN(bu->zbranch[i].len, 8);
1794 }
1795
1796 return 0;
1797}
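
/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to look up
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. Since the hash may collide, there may be many nodes with the same key,
 * so we have to sequentially look at all of them until the needed one is
 * found. Returns zero in case of success, %-ENOENT if the node was not found,
 * and a negative error code in case of failure.
 */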
1812static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1813 void *node, const struct qstr *nm)
1814{
1815 int found, n, err;
1816 struct ubifs_znode *znode;
1817
1818 dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
1819 mutex_lock(&c->tnc_mutex);
1820 found = ubifs_lookup_level0(c, key, &znode, &n);
1821 if (!found) {
1822 err = -ENOENT;
1823 goto out_unlock;
1824 } else if (found < 0) {
1825 err = found;
1826 goto out_unlock;
1827 }
1828
1829 ubifs_assert(n >= 0);
1830
1831 err = resolve_collision(c, key, &znode, &n, nm);
1832 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
1833 if (unlikely(err < 0))
1834 goto out_unlock;
1835 if (err == 0) {
1836 err = -ENOENT;
1837 goto out_unlock;
1838 }
1839
1840 err = tnc_read_node_nm(c, &znode->zbranch[n], node);
1841
1842out_unlock:
1843 mutex_unlock(&c->tnc_mutex);
1844 return err;
1845}
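
/**
 * ubifs_tnc_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: node key to look up
 * @node: the node is returned here
 * @nm: node name
 *
 * This function looks up and reads a node which contains a name hash in the
 * key. The collision-resolving path of 'do_lookup_nm()' is only taken if the
 * node found by a plain lookup does not match @nm. Returns zero in case of
 * success, %-ENOENT if the node was not found, and a negative error code in
 * case of failure.
 */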
1860int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1861 void *node, const struct qstr *nm)
1862{
1863 int err, len;
1864 const struct ubifs_dent_node *dent = node;
1865
1866
1867
1868
1869
1870 err = ubifs_tnc_lookup(c, key, node);
1871 if (err)
1872 return err;
1873
1874 len = le16_to_cpu(dent->nlen);
1875 if (nm->len == len && !memcmp(dent->name, nm->name, len))
1876 return 0;
1877
1878
1879
1880
1881
1882 return do_lookup_nm(c, key, node, nm);
1883}
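
/**
 * correct_parent_keys - correct parent znodes' keys.
 * @c: UBIFS file-system description object
 * @znode: znode to correct parent znodes for
 *
 * This is a helper function for 'tnc_insert()'. When the key of the leftmost
 * zbranch changes, the keys of the parent znodes have to be corrected. This
 * helper function is called in such situations and corrects the keys if
 * needed.
 */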
1894static void correct_parent_keys(const struct ubifs_info *c,
1895 struct ubifs_znode *znode)
1896{
1897 union ubifs_key *key, *key1;
1898
1899 ubifs_assert(znode->parent);
1900 ubifs_assert(znode->iip == 0);
1901
1902 key = &znode->zbranch[0].key;
1903 key1 = &znode->parent->zbranch[0].key;
1904
1905 while (keys_cmp(c, key, key1) < 0) {
1906 key_copy(c, key, key1);
1907 znode = znode->parent;
1908 znode->alt = 1;
1909 if (!znode->parent || znode->iip)
1910 break;
1911 key1 = &znode->parent->zbranch[0].key;
1912 }
1913}
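
/**
 * insert_zbranch - insert a zbranch into a znode.
 * @znode: znode into which to insert
 * @zbr: zbranch to insert
 * @n: slot number to insert to
 *
 * This is a helper function for 'tnc_insert()'. UBIFS does not allow "gaps"
 * in a znode's array of zbranches, so when a new zbranch is inserted at slot
 * @n, the zbranches starting from @n are moved right.
 */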
1926static void insert_zbranch(struct ubifs_znode *znode,
1927 const struct ubifs_zbranch *zbr, int n)
1928{
1929 int i;
1930
1931 ubifs_assert(ubifs_zn_dirty(znode));
1932
1933 if (znode->level) {
1934 for (i = znode->child_cnt; i > n; i--) {
1935 znode->zbranch[i] = znode->zbranch[i - 1];
1936 if (znode->zbranch[i].znode)
1937 znode->zbranch[i].znode->iip = i;
1938 }
1939 if (zbr->znode)
1940 zbr->znode->iip = n;
1941 } else
1942 for (i = znode->child_cnt; i > n; i--)
1943 znode->zbranch[i] = znode->zbranch[i - 1];
1944
1945 znode->zbranch[n] = *zbr;
1946 znode->child_cnt += 1;
1947
 /*
  * After inserting at slot zero, the lower bound of the key range
  * of this znode may have changed. If this znode is later split,
  * the key of the on-flash index node that refers to it may no
  * longer be found in the TNC, and the old index node could wrongly
  * be treated as obsolete and overwritten. Znodes with an insertion
  * at slot zero are therefore marked with @alt so that, if they are
  * split, their old position is added to the old-index RB-tree (see
  * 'tnc_insert()').
  */
1962 if (n == 0)
1963 znode->alt = 1;
1964}
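
/**
 * tnc_insert - insert a node into TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to insert into
 * @zbr: branch to insert
 * @n: slot number to insert new zbranch to
 *
 * This function inserts a new node described by @zbr into znode @znode. If
 * the znode does not have a free slot for the new zbranch, it is split.
 * Parent znodes are split as well if needed. Returns zero in case of success
 * or a negative error code in case of failure.
 */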
1978static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
1979 struct ubifs_zbranch *zbr, int n)
1980{
1981 struct ubifs_znode *zn, *zi, *zp;
1982 int i, keep, move, appending = 0;
1983 union ubifs_key *key = &zbr->key, *key1;
1984
1985 ubifs_assert(n >= 0 && n <= c->fanout);
1986
1987
1988again:
1989 zp = znode->parent;
1990 if (znode->child_cnt < c->fanout) {
1991 ubifs_assert(n != c->fanout);
1992 dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
1993
1994 insert_zbranch(znode, zbr, n);
1995
1996
1997 if (n == 0 && zp && znode->iip == 0)
1998 correct_parent_keys(c, znode);
1999
2000 return 0;
2001 }
2002
2003
2004
2005
2006
2007 dbg_tnck(key, "splitting level %d, key ", znode->level);
2008
2009 if (znode->alt)
2010
2011
2012
2013
2014 ins_clr_old_idx_znode(c, znode);
2015
2016 zn = kzalloc(c->max_znode_sz, GFP_NOFS);
2017 if (!zn)
2018 return -ENOMEM;
2019 zn->parent = zp;
2020 zn->level = znode->level;
2021
2022
2023 if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
2024
2025 if (n == c->fanout) {
2026 key1 = &znode->zbranch[n - 1].key;
2027 if (key_inum(c, key1) == key_inum(c, key) &&
2028 key_type(c, key1) == UBIFS_DATA_KEY)
2029 appending = 1;
2030 } else
2031 goto check_split;
2032 } else if (appending && n != c->fanout) {
2033
2034 appending = 0;
2035check_split:
2036 if (n >= (c->fanout + 1) / 2) {
2037 key1 = &znode->zbranch[0].key;
2038 if (key_inum(c, key1) == key_inum(c, key) &&
2039 key_type(c, key1) == UBIFS_DATA_KEY) {
2040 key1 = &znode->zbranch[n].key;
2041 if (key_inum(c, key1) != key_inum(c, key) ||
2042 key_type(c, key1) != UBIFS_DATA_KEY) {
2043 keep = n;
2044 move = c->fanout - keep;
2045 zi = znode;
2046 goto do_split;
2047 }
2048 }
2049 }
2050 }
2051
2052 if (appending) {
2053 keep = c->fanout;
2054 move = 0;
2055 } else {
2056 keep = (c->fanout + 1) / 2;
2057 move = c->fanout - keep;
2058 }
2059
2060
2061
2062
2063
2064
2065 if (n < keep) {
2066
2067 zi = znode;
2068 move += 1;
2069 keep -= 1;
2070 } else {
2071
2072 zi = zn;
2073 n -= keep;
2074
2075 if (zn->level != 0)
2076 zbr->znode->parent = zn;
2077 }
2078
2079do_split:
2080
2081 __set_bit(DIRTY_ZNODE, &zn->flags);
2082 atomic_long_inc(&c->dirty_zn_cnt);
2083
2084 zn->child_cnt = move;
2085 znode->child_cnt = keep;
2086
2087 dbg_tnc("moving %d, keeping %d", move, keep);
2088
2089
2090 for (i = 0; i < move; i++) {
2091 zn->zbranch[i] = znode->zbranch[keep + i];
2092
2093 if (zn->level != 0)
2094 if (zn->zbranch[i].znode) {
2095 zn->zbranch[i].znode->parent = zn;
2096 zn->zbranch[i].znode->iip = i;
2097 }
2098 }
2099
2100
2101 dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
2102
2103 insert_zbranch(zi, zbr, n);
2104
2105
2106 if (zp) {
2107 if (n == 0 && zi == znode && znode->iip == 0)
2108 correct_parent_keys(c, znode);
2109
2110
2111 n = znode->iip + 1;
2112
2113
2114 zbr->key = zn->zbranch[0].key;
2115 zbr->znode = zn;
2116 zbr->lnum = 0;
2117 zbr->offs = 0;
2118 zbr->len = 0;
2119 znode = zp;
2120
2121 goto again;
2122 }
2123
2124
2125 dbg_tnc("creating new zroot at level %d", znode->level + 1);
2126
2127 zi = kzalloc(c->max_znode_sz, GFP_NOFS);
2128 if (!zi)
2129 return -ENOMEM;
2130
2131 zi->child_cnt = 2;
2132 zi->level = znode->level + 1;
2133
2134 __set_bit(DIRTY_ZNODE, &zi->flags);
2135 atomic_long_inc(&c->dirty_zn_cnt);
2136
2137 zi->zbranch[0].key = znode->zbranch[0].key;
2138 zi->zbranch[0].znode = znode;
2139 zi->zbranch[0].lnum = c->zroot.lnum;
2140 zi->zbranch[0].offs = c->zroot.offs;
2141 zi->zbranch[0].len = c->zroot.len;
2142 zi->zbranch[1].key = zn->zbranch[0].key;
2143 zi->zbranch[1].znode = zn;
2144
2145 c->zroot.lnum = 0;
2146 c->zroot.offs = 0;
2147 c->zroot.len = 0;
2148 c->zroot.znode = zi;
2149
2150 zn->parent = zi;
2151 zn->iip = 1;
2152 znode->parent = zi;
2153 znode->iip = 0;
2154
2155 return 0;
2156}
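
/**
 * ubifs_tnc_add - add a node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 *
 * This function adds a node with key @key to TNC. The node may be new or it
 * may obsolete an existing one. Returns %0 on success or a negative error
 * code on failure.
 */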
2170int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2171 int offs, int len)
2172{
2173 int found, n, err = 0;
2174 struct ubifs_znode *znode;
2175
2176 mutex_lock(&c->tnc_mutex);
2177 dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
2178 found = lookup_level0_dirty(c, key, &znode, &n);
2179 if (!found) {
2180 struct ubifs_zbranch zbr;
2181
2182 zbr.znode = NULL;
2183 zbr.lnum = lnum;
2184 zbr.offs = offs;
2185 zbr.len = len;
2186 key_copy(c, key, &zbr.key);
2187 err = tnc_insert(c, znode, &zbr, n + 1);
2188 } else if (found == 1) {
2189 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2190
2191 lnc_free(zbr);
2192 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2193 zbr->lnum = lnum;
2194 zbr->offs = offs;
2195 zbr->len = len;
2196 } else
2197 err = found;
2198 if (!err)
2199 err = dbg_check_tnc(c, 0);
2200 mutex_unlock(&c->tnc_mutex);
2201
2202 return err;
2203}
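
/**
 * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @old_lnum: LEB number of old node
 * @old_offs: old node offset
 * @lnum: LEB number of new node
 * @offs: new node offset
 * @len: node length
 *
 * This function replaces a node with key @key in the TNC only if the old node
 * at @old_lnum:@old_offs is still referred to. It is called by garbage
 * collection when nodes are moved. Returns %0 on success or a negative error
 * code on failure.
 */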
2219int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
2220 int old_lnum, int old_offs, int lnum, int offs, int len)
2221{
2222 int found, n, err = 0;
2223 struct ubifs_znode *znode;
2224
2225 mutex_lock(&c->tnc_mutex);
2226 dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
2227 old_offs, lnum, offs, len);
2228 found = lookup_level0_dirty(c, key, &znode, &n);
2229 if (found < 0) {
2230 err = found;
2231 goto out_unlock;
2232 }
2233
2234 if (found == 1) {
2235 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2236
2237 found = 0;
2238 if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
2239 lnc_free(zbr);
2240 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2241 if (err)
2242 goto out_unlock;
2243 zbr->lnum = lnum;
2244 zbr->offs = offs;
2245 zbr->len = len;
2246 found = 1;
2247 } else if (is_hash_key(c, key)) {
2248 found = resolve_collision_directly(c, key, &znode, &n,
2249 old_lnum, old_offs);
2250 dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
2251 found, znode, n, old_lnum, old_offs);
2252 if (found < 0) {
2253 err = found;
2254 goto out_unlock;
2255 }
2256
2257 if (found) {
2258
2259 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2260 znode = dirty_cow_bottom_up(c, znode);
2261 if (IS_ERR(znode)) {
2262 err = PTR_ERR(znode);
2263 goto out_unlock;
2264 }
2265 }
2266 zbr = &znode->zbranch[n];
2267 lnc_free(zbr);
2268 err = ubifs_add_dirt(c, zbr->lnum,
2269 zbr->len);
2270 if (err)
2271 goto out_unlock;
2272 zbr->lnum = lnum;
2273 zbr->offs = offs;
2274 zbr->len = len;
2275 }
2276 }
2277 }
2278
2279 if (!found)
2280 err = ubifs_add_dirt(c, lnum, len);
2281
2282 if (!err)
2283 err = dbg_check_tnc(c, 0);
2284
2285out_unlock:
2286 mutex_unlock(&c->tnc_mutex);
2287 return err;
2288}
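
/**
 * ubifs_tnc_add_nm - add a "hashed" node to TNC.
 * @c: UBIFS file-system description object
 * @key: key to add
 * @lnum: LEB number of node
 * @offs: node offset
 * @len: node length
 * @nm: node name
 *
 * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
 * may collide, like directory entry keys.
 */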
2302int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
2303 int lnum, int offs, int len, const struct qstr *nm)
2304{
2305 int found, n, err = 0;
2306 struct ubifs_znode *znode;
2307
2308 mutex_lock(&c->tnc_mutex);
2309 dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
2310 lnum, offs, nm->len, nm->name);
2311 found = lookup_level0_dirty(c, key, &znode, &n);
2312 if (found < 0) {
2313 err = found;
2314 goto out_unlock;
2315 }
2316
2317 if (found == 1) {
2318 if (c->replaying)
2319 found = fallible_resolve_collision(c, key, &znode, &n,
2320 nm, 1);
2321 else
2322 found = resolve_collision(c, key, &znode, &n, nm);
2323 dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
2324 if (found < 0) {
2325 err = found;
2326 goto out_unlock;
2327 }
2328
2329
2330 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2331 znode = dirty_cow_bottom_up(c, znode);
2332 if (IS_ERR(znode)) {
2333 err = PTR_ERR(znode);
2334 goto out_unlock;
2335 }
2336 }
2337
2338 if (found == 1) {
2339 struct ubifs_zbranch *zbr = &znode->zbranch[n];
2340
2341 lnc_free(zbr);
2342 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2343 zbr->lnum = lnum;
2344 zbr->offs = offs;
2345 zbr->len = len;
2346 goto out_unlock;
2347 }
2348 }
2349
2350 if (!found) {
2351 struct ubifs_zbranch zbr;
2352
2353 zbr.znode = NULL;
2354 zbr.lnum = lnum;
2355 zbr.offs = offs;
2356 zbr.len = len;
2357 key_copy(c, key, &zbr.key);
2358 err = tnc_insert(c, znode, &zbr, n + 1);
2359 if (err)
2360 goto out_unlock;
2361 if (c->replaying) {
 /*
  * During replay the TNC may still contain a dangling branch with
  * the same key but a name that no longer exists on the media.
  * Remove it by calling 'ubifs_tnc_remove_nm()' with the same key
  * but an unmatchable (empty) name.
  */
2368 struct qstr noname = { .name = "" };
2369
2370 err = dbg_check_tnc(c, 0);
2371 mutex_unlock(&c->tnc_mutex);
2372 if (err)
2373 return err;
2374 return ubifs_tnc_remove_nm(c, key, &noname);
2375 }
2376 }
2377
2378out_unlock:
2379 if (!err)
2380 err = dbg_check_tnc(c, 0);
2381 mutex_unlock(&c->tnc_mutex);
2382 return err;
2383}
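
/**
 * tnc_delete - delete a leaf entry from the TNC.
 * @c: UBIFS file-system description object
 * @znode: znode to delete from
 * @n: zbranch slot number to delete
 *
 * This function deletes a leaf node from the @n-th slot of @znode. Returns
 * zero in case of success and a negative error code in case of failure.
 */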
2394static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
2395{
2396 struct ubifs_zbranch *zbr;
2397 struct ubifs_znode *zp;
2398 int i, err;
2399
2400
2401 ubifs_assert(znode->level == 0);
2402 ubifs_assert(n >= 0 && n < c->fanout);
2403 dbg_tnck(&znode->zbranch[n].key, "deleting key ");
2404
2405 zbr = &znode->zbranch[n];
2406 lnc_free(zbr);
2407
2408 err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2409 if (err) {
2410 ubifs_dump_znode(c, znode);
2411 return err;
2412 }
2413
2414
2415 for (i = n; i < znode->child_cnt - 1; i++)
2416 znode->zbranch[i] = znode->zbranch[i + 1];
2417 znode->child_cnt -= 1;
2418
2419 if (znode->child_cnt > 0)
2420 return 0;
2421
 /*
  * This was the last zbranch of the znode, so the znode itself (and
  * possibly empty ancestors) must be deleted from its parent.
  */
2427 do {
2428 ubifs_assert(!ubifs_zn_obsolete(znode));
2429 ubifs_assert(ubifs_zn_dirty(znode));
2430
2431 zp = znode->parent;
2432 n = znode->iip;
2433
2434 atomic_long_dec(&c->dirty_zn_cnt);
2435
2436 err = insert_old_idx_znode(c, znode);
2437 if (err)
2438 return err;
2439
2440 if (znode->cnext) {
2441 __set_bit(OBSOLETE_ZNODE, &znode->flags);
2442 atomic_long_inc(&c->clean_zn_cnt);
2443 atomic_long_inc(&ubifs_clean_zn_cnt);
2444 } else
2445 kfree(znode);
2446 znode = zp;
2447 } while (znode->child_cnt == 1);
2448
2449
2450 znode->child_cnt -= 1;
2451 ubifs_assert(znode->level != 0);
2452 for (i = n; i < znode->child_cnt; i++) {
2453 znode->zbranch[i] = znode->zbranch[i + 1];
2454 if (znode->zbranch[i].znode)
2455 znode->zbranch[i].znode->iip = i;
2456 }
2457
2458
2459
2460
2461
2462 if (!znode->parent) {
2463 while (znode->child_cnt == 1 && znode->level != 0) {
2464 zp = znode;
2465 zbr = &znode->zbranch[0];
2466 znode = get_znode(c, znode, 0);
2467 if (IS_ERR(znode))
2468 return PTR_ERR(znode);
2469 znode = dirty_cow_znode(c, zbr);
2470 if (IS_ERR(znode))
2471 return PTR_ERR(znode);
2472 znode->parent = NULL;
2473 znode->iip = 0;
2474 if (c->zroot.len) {
2475 err = insert_old_idx(c, c->zroot.lnum,
2476 c->zroot.offs);
2477 if (err)
2478 return err;
2479 }
2480 c->zroot.lnum = zbr->lnum;
2481 c->zroot.offs = zbr->offs;
2482 c->zroot.len = zbr->len;
2483 c->zroot.znode = znode;
2484 ubifs_assert(!ubifs_zn_obsolete(zp));
2485 ubifs_assert(ubifs_zn_dirty(zp));
2486 atomic_long_dec(&c->dirty_zn_cnt);
2487
2488 if (zp->cnext) {
2489 __set_bit(OBSOLETE_ZNODE, &zp->flags);
2490 atomic_long_inc(&c->clean_zn_cnt);
2491 atomic_long_inc(&ubifs_clean_zn_cnt);
2492 } else
2493 kfree(zp);
2494 }
2495 }
2496
2497 return 0;
2498}
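
/**
 * ubifs_tnc_remove - remove an index entry of a node.
 * @c: UBIFS file-system description object
 * @key: key of node
 *
 * Returns %0 on success or a negative error code on failure.
 */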
2507int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2508{
2509 int found, n, err = 0;
2510 struct ubifs_znode *znode;
2511
2512 mutex_lock(&c->tnc_mutex);
2513 dbg_tnck(key, "key ");
2514 found = lookup_level0_dirty(c, key, &znode, &n);
2515 if (found < 0) {
2516 err = found;
2517 goto out_unlock;
2518 }
2519 if (found == 1)
2520 err = tnc_delete(c, znode, n);
2521 if (!err)
2522 err = dbg_check_tnc(c, 0);
2523
2524out_unlock:
2525 mutex_unlock(&c->tnc_mutex);
2526 return err;
2527}
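
/**
 * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
 * @c: UBIFS file-system description object
 * @key: key of node
 * @nm: directory entry name
 *
 * Returns %0 on success or a negative error code on failure.
 */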
2537int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
2538 const struct qstr *nm)
2539{
2540 int n, err;
2541 struct ubifs_znode *znode;
2542
2543 mutex_lock(&c->tnc_mutex);
2544 dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
2545 err = lookup_level0_dirty(c, key, &znode, &n);
2546 if (err < 0)
2547 goto out_unlock;
2548
2549 if (err) {
2550 if (c->replaying)
2551 err = fallible_resolve_collision(c, key, &znode, &n,
2552 nm, 0);
2553 else
2554 err = resolve_collision(c, key, &znode, &n, nm);
2555 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
2556 if (err < 0)
2557 goto out_unlock;
2558 if (err) {
2559
2560 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2561 znode = dirty_cow_bottom_up(c, znode);
2562 if (IS_ERR(znode)) {
2563 err = PTR_ERR(znode);
2564 goto out_unlock;
2565 }
2566 }
2567 err = tnc_delete(c, znode, n);
2568 }
2569 }
2570
2571out_unlock:
2572 if (!err)
2573 err = dbg_check_tnc(c, 0);
2574 mutex_unlock(&c->tnc_mutex);
2575 return err;
2576}
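
/**
 * key_in_range - determine if a key falls within a range of keys.
 * @c: UBIFS file-system description object
 * @key: key to check
 * @from_key: lowest key in range
 * @to_key: highest key in range
 *
 * This function returns %1 if the key is in range and %0 otherwise.
 */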
2587static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
2588 union ubifs_key *from_key, union ubifs_key *to_key)
2589{
2590 if (keys_cmp(c, key, from_key) < 0)
2591 return 0;
2592 if (keys_cmp(c, key, to_key) > 0)
2593 return 0;
2594 return 1;
2595}
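
/**
 * ubifs_tnc_remove_range - remove index entries in range.
 * @c: UBIFS file-system description object
 * @from_key: lowest key to remove
 * @to_key: highest key to remove
 *
 * This function removes index entries starting at @from_key and ending at
 * @to_key. Returns zero in case of success and a negative error code in case
 * of failure.
 */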
2607int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
2608 union ubifs_key *to_key)
2609{
2610 int i, n, k, err = 0;
2611 struct ubifs_znode *znode;
2612 union ubifs_key *key;
2613
2614 mutex_lock(&c->tnc_mutex);
2615 while (1) {
2616
2617 err = ubifs_lookup_level0(c, from_key, &znode, &n);
2618 if (err < 0)
2619 goto out_unlock;
2620
2621 if (err)
2622 key = from_key;
2623 else {
2624 err = tnc_next(c, &znode, &n);
2625 if (err == -ENOENT) {
2626 err = 0;
2627 goto out_unlock;
2628 }
2629 if (err < 0)
2630 goto out_unlock;
2631 key = &znode->zbranch[n].key;
2632 if (!key_in_range(c, key, from_key, to_key)) {
2633 err = 0;
2634 goto out_unlock;
2635 }
2636 }
2637
2638
2639 if (znode->cnext || !ubifs_zn_dirty(znode)) {
2640 znode = dirty_cow_bottom_up(c, znode);
2641 if (IS_ERR(znode)) {
2642 err = PTR_ERR(znode);
2643 goto out_unlock;
2644 }
2645 }
2646
2647
2648 for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
2649 key = &znode->zbranch[i].key;
2650 if (!key_in_range(c, key, from_key, to_key))
2651 break;
2652 lnc_free(&znode->zbranch[i]);
2653 err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
2654 znode->zbranch[i].len);
2655 if (err) {
2656 ubifs_dump_znode(c, znode);
2657 goto out_unlock;
2658 }
2659 dbg_tnck(key, "removing key ");
2660 }
2661 if (k) {
2662 for (i = n + 1 + k; i < znode->child_cnt; i++)
2663 znode->zbranch[i - k] = znode->zbranch[i];
2664 znode->child_cnt -= k;
2665 }
2666
2667
2668 err = tnc_delete(c, znode, n);
2669 if (err)
2670 goto out_unlock;
2671 }
2672
2673out_unlock:
2674 if (!err)
2675 err = dbg_check_tnc(c, 0);
2676 mutex_unlock(&c->tnc_mutex);
2677 return err;
2678}
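
/**
 * ubifs_tnc_remove_ino - remove an inode from TNC.
 * @c: UBIFS file-system description object
 * @inum: inode number to remove
 *
 * This function removes inode @inum and all the extended attributes
 * associated with this inode from TNC and returns zero in case of success or
 * a negative error code in case of failure.
 */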
2689int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
2690{
2691 union ubifs_key key1, key2;
2692 struct ubifs_dent_node *xent, *pxent = NULL;
2693 struct qstr nm = { .name = NULL };
2694
2695 dbg_tnc("ino %lu", (unsigned long)inum);
2696
2697
2698
2699
2700
2701 lowest_xent_key(c, &key1, inum);
2702 while (1) {
2703 ino_t xattr_inum;
2704 int err;
2705
2706 xent = ubifs_tnc_next_ent(c, &key1, &nm);
2707 if (IS_ERR(xent)) {
2708 err = PTR_ERR(xent);
2709 if (err == -ENOENT)
2710 break;
2711 return err;
2712 }
2713
2714 xattr_inum = le64_to_cpu(xent->inum);
2715 dbg_tnc("xent '%s', ino %lu", xent->name,
2716 (unsigned long)xattr_inum);
2717
2718 nm.name = xent->name;
2719 nm.len = le16_to_cpu(xent->nlen);
2720 err = ubifs_tnc_remove_nm(c, &key1, &nm);
2721 if (err) {
2722 kfree(xent);
2723 return err;
2724 }
2725
2726 lowest_ino_key(c, &key1, xattr_inum);
2727 highest_ino_key(c, &key2, xattr_inum);
2728 err = ubifs_tnc_remove_range(c, &key1, &key2);
2729 if (err) {
2730 kfree(xent);
2731 return err;
2732 }
2733
2734 kfree(pxent);
2735 pxent = xent;
2736 key_read(c, &xent->key, &key1);
2737 }
2738
2739 kfree(pxent);
2740 lowest_ino_key(c, &key1, inum);
2741 highest_ino_key(c, &key2, inum);
2742
2743 return ubifs_tnc_remove_range(c, &key1, &key2);
2744}
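
/**
 * ubifs_tnc_next_ent - walk directory or extended attribute entries.
 * @c: UBIFS file-system description object
 * @key: key of last entry
 * @nm: name of last entry found or %NULL
 *
 * This function finds and reads the next directory or extended attribute
 * entry after the given key (@key) if there is one. @nm is used to resolve
 * collisions.
 *
 * If the name of the current entry is not known and only the key is known,
 * @nm->name has to be %NULL. In this case the function returns the entry
 * corresponding to this key, not the next one; if the key is not found, the
 * closest "right" entry is returned. To find the first entry, @key has to
 * contain the lowest possible key value for this inode and @nm->name has to
 * be %NULL.
 *
 * This function returns the found directory or extended attribute entry node
 * in case of success, and an error pointer (%-ENOENT if no next entry was
 * found) in case of failure.
 */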
2769struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2770 union ubifs_key *key,
2771 const struct qstr *nm)
2772{
2773 int n, err, type = key_type(c, key);
2774 struct ubifs_znode *znode;
2775 struct ubifs_dent_node *dent;
2776 struct ubifs_zbranch *zbr;
2777 union ubifs_key *dkey;
2778
2779 dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
2780 ubifs_assert(is_hash_key(c, key));
2781
2782 mutex_lock(&c->tnc_mutex);
2783 err = ubifs_lookup_level0(c, key, &znode, &n);
2784 if (unlikely(err < 0))
2785 goto out_unlock;
2786
2787 if (nm->name) {
2788 if (err) {
2789
2790 if (c->replaying)
2791 err = fallible_resolve_collision(c, key, &znode, &n,
2792 nm, 0);
2793 else
2794 err = resolve_collision(c, key, &znode, &n, nm);
2795 dbg_tnc("rc returned %d, znode %p, n %d",
2796 err, znode, n);
2797 if (unlikely(err < 0))
2798 goto out_unlock;
2799 }
2800
2801
2802 err = tnc_next(c, &znode, &n);
2803 if (unlikely(err))
2804 goto out_unlock;
2805 } else {
2806
2807
2808
2809
2810
2811 if (!err) {
2812
2813
2814
2815
2816
2817 err = tnc_next(c, &znode, &n);
2818 if (err)
2819 goto out_unlock;
2820 }
2821 }
2822
2823 zbr = &znode->zbranch[n];
2824 dent = kmalloc(zbr->len, GFP_NOFS);
2825 if (unlikely(!dent)) {
2826 err = -ENOMEM;
2827 goto out_unlock;
2828 }
2829
2830
2831
2832
2833
2834 dkey = &zbr->key;
2835 if (key_inum(c, dkey) != key_inum(c, key) ||
2836 key_type(c, dkey) != type) {
2837 err = -ENOENT;
2838 goto out_free;
2839 }
2840
2841 err = tnc_read_node_nm(c, zbr, dent);
2842 if (unlikely(err))
2843 goto out_free;
2844
2845 mutex_unlock(&c->tnc_mutex);
2846 return dent;
2847
2848out_free:
2849 kfree(dent);
2850out_unlock:
2851 mutex_unlock(&c->tnc_mutex);
2852 return ERR_PTR(err);
2853}
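
/**
 * tnc_destroy_cnext - destroy left-over obsolete znodes from a failed commit.
 * @c: UBIFS file-system description object
 */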
2861static void tnc_destroy_cnext(struct ubifs_info *c)
2862{
2863 struct ubifs_znode *cnext;
2864
2865 if (!c->cnext)
2866 return;
2867 ubifs_assert(c->cmt_state == COMMIT_BROKEN);
2868 cnext = c->cnext;
2869 do {
2870 struct ubifs_znode *znode = cnext;
2871
2872 cnext = cnext->cnext;
2873 if (ubifs_zn_obsolete(znode))
2874 kfree(znode);
2875 } while (cnext && cnext != c->cnext);
2876}
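
/**
 * ubifs_tnc_close - close TNC subsystem and free all related resources.
 * @c: UBIFS file-system description object
 */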
2882void ubifs_tnc_close(struct ubifs_info *c)
2883{
2884 tnc_destroy_cnext(c);
2885 if (c->zroot.znode) {
2886 long n, freed;
2887
2888 n = atomic_long_read(&c->clean_zn_cnt);
2889 freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
2890 ubifs_assert(freed == n);
2891 atomic_long_sub(n, &ubifs_clean_zn_cnt);
2892 }
2893 kfree(c->gap_lebs);
2894 kfree(c->ilebs);
2895 destroy_old_idx(c);
2896}
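
/**
 * left_znode - get the znode to the left.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the left of @znode at the
 * same level, %NULL if there is none, or an error pointer on failure.
 */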
2906static struct ubifs_znode *left_znode(struct ubifs_info *c,
2907 struct ubifs_znode *znode)
2908{
2909 int level = znode->level;
2910
2911 while (1) {
2912 int n = znode->iip - 1;
2913
2914
2915 znode = znode->parent;
2916 if (!znode)
2917 return NULL;
2918 if (n >= 0) {
2919
2920 znode = get_znode(c, znode, n);
2921 if (IS_ERR(znode))
2922 return znode;
2923 while (znode->level != level) {
2924 n = znode->child_cnt - 1;
2925 znode = get_znode(c, znode, n);
2926 if (IS_ERR(znode))
2927 return znode;
2928 }
2929 break;
2930 }
2931 }
2932 return znode;
2933}
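
/**
 * right_znode - get the znode to the right.
 * @c: UBIFS file-system description object
 * @znode: znode
 *
 * This function returns a pointer to the znode to the right of @znode at the
 * same level, %NULL if there is none, or an error pointer on failure.
 */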
2943static struct ubifs_znode *right_znode(struct ubifs_info *c,
2944 struct ubifs_znode *znode)
2945{
2946 int level = znode->level;
2947
2948 while (1) {
2949 int n = znode->iip + 1;
2950
2951
2952 znode = znode->parent;
2953 if (!znode)
2954 return NULL;
2955 if (n < znode->child_cnt) {
2956
2957 znode = get_znode(c, znode, n);
2958 if (IS_ERR(znode))
2959 return znode;
2960 while (znode->level != level) {
2961 znode = get_znode(c, znode, 0);
2962 if (IS_ERR(znode))
2963 return znode;
2964 }
2965 break;
2966 }
2967 }
2968 return znode;
2969}
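
/**
 * lookup_znode - find a particular indexing node from TNC.
 * @c: UBIFS file-system description object
 * @key: index node key to look up
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * This function searches for an indexing node by its first key @key and its
 * address @lnum:@offs, pulling the indexing nodes it traverses into the TNC.
 * It is called for index nodes found on the media by scanning, for example
 * when garbage collecting or doing an in-the-gaps commit, so the node looked
 * for does not have to have exactly the same leftmost key @key - the TNC may
 * contain a dirty znode with a changed key that still refers to @lnum:@offs.
 *
 * Returns a pointer to the znode found, %NULL if it is not found, or an error
 * pointer on failure.
 */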
2996static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
2997 union ubifs_key *key, int level,
2998 int lnum, int offs)
2999{
3000 struct ubifs_znode *znode, *zn;
3001 int n, nn;
3002
3003 ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
3004
3005
3006
3007
3008
3009 if (level < 0)
3010 return ERR_PTR(-EINVAL);
3011
3012
3013 znode = c->zroot.znode;
3014 if (!znode) {
3015 znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
3016 if (IS_ERR(znode))
3017 return znode;
3018 }
3019
3020 if (c->zroot.lnum == lnum && c->zroot.offs == offs)
3021 return znode;
3022
3023 if (level >= znode->level)
3024 return NULL;
3025 while (1) {
3026 ubifs_search_zbranch(c, znode, key, &n);
3027 if (n < 0) {
 /*
  * The leftmost key of this znode is greater than the key we are
  * looking for, which can happen because leftmost keys may change
  * (see 'ubifs_lookup_level0()'). Try the znode to the left before
  * giving up.
  */
3036 znode = left_znode(c, znode);
3037 if (!znode)
3038 return NULL;
3039 if (IS_ERR(znode))
3040 return znode;
3041 ubifs_search_zbranch(c, znode, key, &n);
3042 ubifs_assert(n >= 0);
3043 }
3044 if (znode->level == level + 1)
3045 break;
3046 znode = get_znode(c, znode, n);
3047 if (IS_ERR(znode))
3048 return znode;
3049 }
3050
3051 if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
3052 return get_znode(c, znode, n);
3053
3054 if (!is_hash_key(c, key))
3055 return NULL;
3056
3057
3058
3059
3060 zn = znode;
3061 nn = n;
3062
3063 while (1) {
3064
3065 if (n)
3066 n -= 1;
3067 else {
3068 znode = left_znode(c, znode);
3069 if (!znode)
3070 break;
3071 if (IS_ERR(znode))
3072 return znode;
3073 n = znode->child_cnt - 1;
3074 }
3075
3076 if (znode->zbranch[n].lnum == lnum &&
3077 znode->zbranch[n].offs == offs)
3078 return get_znode(c, znode, n);
3079
3080 if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
3081 break;
3082 }
3083
3084 znode = zn;
3085 n = nn;
3086
3087 while (1) {
3088
3089 if (++n >= znode->child_cnt) {
3090 znode = right_znode(c, znode);
3091 if (!znode)
3092 break;
3093 if (IS_ERR(znode))
3094 return znode;
3095 n = 0;
3096 }
3097
3098 if (znode->zbranch[n].lnum == lnum &&
3099 znode->zbranch[n].offs == offs)
3100 return get_znode(c, znode, n);
3101
3102 if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
3103 break;
3104 }
3105 return NULL;
3106}
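
/**
 * is_idx_node_in_tnc - determine if an index node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * This function returns %0 if the index node is not referred to in the TNC,
 * %1 if it is referred to and the corresponding znode is dirty, %2 if it is
 * referred to and the corresponding znode is clean, and a negative error code
 * in case of failure.
 *
 * Note, the @key argument has to be the key of the first child.
 */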
3125int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
3126 int lnum, int offs)
3127{
3128 struct ubifs_znode *znode;
3129
3130 znode = lookup_znode(c, key, level, lnum, offs);
3131 if (!znode)
3132 return 0;
3133 if (IS_ERR(znode))
3134 return PTR_ERR(znode);
3135
3136 return ubifs_zn_dirty(znode) ? 1 : 2;
3137}
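
/**
 * is_leaf_node_in_tnc - determine if a non-indexing node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @lnum: node LEB number
 * @offs: node offset
 *
 * This function returns %1 if the node is referred to in the TNC, %0 if it is
 * not, and a negative error code in case of failure.
 */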
3152static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
3153 int lnum, int offs)
3154{
3155 struct ubifs_zbranch *zbr;
3156 struct ubifs_znode *znode, *zn;
3157 int n, found, err, nn;
3158 const int unique = !is_hash_key(c, key);
3159
3160 found = ubifs_lookup_level0(c, key, &znode, &n);
3161 if (found < 0)
3162 return found;
3163 if (!found)
3164 return 0;
3165 zbr = &znode->zbranch[n];
3166 if (lnum == zbr->lnum && offs == zbr->offs)
3167 return 1;
3168 if (unique)
3169 return 0;
3170
3171
3172
3173
3174 zn = znode;
3175 nn = n;
3176
3177 while (1) {
3178 err = tnc_prev(c, &znode, &n);
3179 if (err == -ENOENT)
3180 break;
3181 if (err)
3182 return err;
3183 if (keys_cmp(c, key, &znode->zbranch[n].key))
3184 break;
3185 zbr = &znode->zbranch[n];
3186 if (lnum == zbr->lnum && offs == zbr->offs)
3187 return 1;
3188 }
3189
3190 znode = zn;
3191 n = nn;
3192 while (1) {
3193 err = tnc_next(c, &znode, &n);
3194 if (err) {
3195 if (err == -ENOENT)
3196 return 0;
3197 return err;
3198 }
3199 if (keys_cmp(c, key, &znode->zbranch[n].key))
3200 break;
3201 zbr = &znode->zbranch[n];
3202 if (lnum == zbr->lnum && offs == zbr->offs)
3203 return 1;
3204 }
3205 return 0;
3206}
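
/**
 * ubifs_tnc_has_node - determine whether a node is in the TNC.
 * @c: UBIFS file-system description object
 * @key: node key
 * @level: index node level (if it is an index node)
 * @lnum: node LEB number
 * @offs: node offset
 * @is_idx: non-zero if the node is an index node
 *
 * This function returns %1 if the node is in the TNC, %0 if it is not, and a
 * negative error code in case of failure. For index nodes, @key has to be the
 * key of the first child, and an index node is considered to be in the TNC
 * only if the corresponding znode is clean or has not been loaded.
 */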
3222int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3223 int lnum, int offs, int is_idx)
3224{
3225 int err;
3226
3227 mutex_lock(&c->tnc_mutex);
3228 if (is_idx) {
3229 err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3230 if (err < 0)
3231 goto out_unlock;
3232 if (err == 1)
3233
3234 err = 0;
3235 else if (err == 2)
3236
3237 err = 1;
3238 else
3239 BUG_ON(err != 0);
3240 } else
3241 err = is_leaf_node_in_tnc(c, key, lnum, offs);
3242
3243out_unlock:
3244 mutex_unlock(&c->tnc_mutex);
3245 return err;
3246}
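
/**
 * ubifs_dirty_idx_node - dirty an index node.
 * @c: UBIFS file-system description object
 * @key: index node key
 * @level: index node level
 * @lnum: index node LEB number
 * @offs: index node offset
 *
 * This function loads and dirties an index node so that it can be garbage
 * collected. The @key argument has to be the key of the first child. Returns
 * %0 on success and a negative error code on failure.
 */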
3262int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3263 int lnum, int offs)
3264{
3265 struct ubifs_znode *znode;
3266 int err = 0;
3267
3268 mutex_lock(&c->tnc_mutex);
3269 znode = lookup_znode(c, key, level, lnum, offs);
3270 if (!znode)
3271 goto out_unlock;
3272 if (IS_ERR(znode)) {
3273 err = PTR_ERR(znode);
3274 goto out_unlock;
3275 }
3276 znode = dirty_cow_bottom_up(c, znode);
3277 if (IS_ERR(znode)) {
3278 err = PTR_ERR(znode);
3279 goto out_unlock;
3280 }
3281
3282out_unlock:
3283 mutex_unlock(&c->tnc_mutex);
3284 return err;
3285}
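
/**
 * dbg_check_inode_size - check if inode size is correct.
 * @c: UBIFS file-system description object
 * @inode: inode to check
 * @size: inode size
 *
 * This function makes sure that the inode size (@size) is correct and that it
 * does not have any data nodes beyond @size. Returns zero if the inode is OK,
 * %-EINVAL if it has a data node beyond @size, and another negative error
 * code in case of other errors.
 */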
3298int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
3299 loff_t size)
3300{
3301 int err, n;
3302 union ubifs_key from_key, to_key, *key;
3303 struct ubifs_znode *znode;
3304 unsigned int block;
3305
3306 if (!S_ISREG(inode->i_mode))
3307 return 0;
3308 if (!dbg_is_chk_gen(c))
3309 return 0;
3310
3311 block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
3312 data_key_init(c, &from_key, inode->i_ino, block);
3313 highest_data_key(c, &to_key, inode->i_ino);
3314
3315 mutex_lock(&c->tnc_mutex);
3316 err = ubifs_lookup_level0(c, &from_key, &znode, &n);
3317 if (err < 0)
3318 goto out_unlock;
3319
3320 if (err) {
3321 key = &from_key;
3322 goto out_dump;
3323 }
3324
3325 err = tnc_next(c, &znode, &n);
3326 if (err == -ENOENT) {
3327 err = 0;
3328 goto out_unlock;
3329 }
3330 if (err < 0)
3331 goto out_unlock;
3332
3333 ubifs_assert(err == 0);
3334 key = &znode->zbranch[n].key;
3335 if (!key_in_range(c, key, &from_key, &to_key))
3336 goto out_unlock;
3337
3338out_dump:
3339 block = key_block(c, key);
3340 ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld",
3341 (unsigned long)inode->i_ino, size,
3342 ((loff_t)block) << UBIFS_BLOCK_SHIFT);
3343 mutex_unlock(&c->tnc_mutex);
3344 ubifs_dump_inode(c, inode);
3345 dump_stack();
3346 return -EINVAL;
3347
3348out_unlock:
3349 mutex_unlock(&c->tnc_mutex);
3350 return err;
3351}
3352