1
2
3
4
5
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_mount.h"
14#include "xfs_defer.h"
15#include "xfs_inode.h"
16#include "xfs_trans.h"
17#include "xfs_inode_item.h"
18#include "xfs_buf_item.h"
19#include "xfs_btree.h"
20#include "xfs_errortag.h"
21#include "xfs_error.h"
22#include "xfs_trace.h"
23#include "xfs_cksum.h"
24#include "xfs_alloc.h"
25#include "xfs_log.h"
26
27
28
29
/* Slab cache for btree cursors; cursors are freed in xfs_btree_del_cursor(). */
kmem_zone_t	*xfs_btree_cur_zone;
31
32
33
34
/*
 * On-disk btree magic numbers, indexed by [crc][btnum].  Row 0 holds the
 * pre-CRC magics, row 1 the CRC-enabled magics.  A zero entry means that
 * btree type does not exist in that format.
 */
static const uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
	{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
	  XFS_FIBT_MAGIC, 0 },
	{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
	  XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC,
	  XFS_REFC_CRC_MAGIC }
};
42
43uint32_t
44xfs_btree_magic(
45 int crc,
46 xfs_btnum_t btnum)
47{
48 uint32_t magic = xfs_magics[crc][btnum];
49
50
51 ASSERT(magic != 0);
52 return magic;
53}
54
55
56
57
58
/*
 * Check a long-format (64-bit pointer) btree block header for obvious
 * corruption.  Returns the failing check's address for error reporting,
 * or NULL if the header passes all checks.
 */
xfs_failaddr_t
__xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,	/* expected tree level of the block */
	struct xfs_buf		*bp)	/* may be NULL for an inode root */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_btnum_t		btnum = cur->bc_btnum;
	int			crc = xfs_sb_version_hascrc(&mp->m_sb);

	/* CRC-format blocks carry extra self-describing header fields. */
	if (crc) {
		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (block->bb_u.l.bb_blkno !=
		    cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
			return __this_address;
		if (block->bb_u.l.bb_pad != cpu_to_be32(0))
			return __this_address;
	}

	/* Magic, level and record count must match expectations. */
	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
		return __this_address;
	if (be16_to_cpu(block->bb_level) != level)
		return __this_address;
	if (be16_to_cpu(block->bb_numrecs) >
	    cur->bc_ops->get_maxrecs(cur, level))
		return __this_address;
	/* Sibling pointers must be null or point at valid fs blocks. */
	if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
	    !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib),
			level + 1))
		return __this_address;
	if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
	    !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib),
			level + 1))
		return __this_address;

	return NULL;
}
98
99
100static int
101xfs_btree_check_lblock(
102 struct xfs_btree_cur *cur,
103 struct xfs_btree_block *block,
104 int level,
105 struct xfs_buf *bp)
106{
107 struct xfs_mount *mp = cur->bc_mp;
108 xfs_failaddr_t fa;
109
110 fa = __xfs_btree_check_lblock(cur, block, level, bp);
111 if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
112 XFS_ERRTAG_BTREE_CHECK_LBLOCK))) {
113 if (bp)
114 trace_xfs_btree_corrupt(bp, _RET_IP_);
115 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
116 return -EFSCORRUPTED;
117 }
118 return 0;
119}
120
121
122
123
124
/*
 * Check a short-format (32-bit pointer, AG-rooted) btree block header for
 * obvious corruption.  Returns the failing check's address for error
 * reporting, or NULL if the header passes all checks.
 */
xfs_failaddr_t
__xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,	/* expected tree level of the block */
	struct xfs_buf		*bp)	/* may be NULL */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_btnum_t		btnum = cur->bc_btnum;
	int			crc = xfs_sb_version_hascrc(&mp->m_sb);

	/* CRC-format blocks carry extra self-describing header fields. */
	if (crc) {
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (block->bb_u.s.bb_blkno !=
		    cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
			return __this_address;
	}

	/* Magic, level and record count must match expectations. */
	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
		return __this_address;
	if (be16_to_cpu(block->bb_level) != level)
		return __this_address;
	if (be16_to_cpu(block->bb_numrecs) >
	    cur->bc_ops->get_maxrecs(cur, level))
		return __this_address;
	/* Sibling pointers must be null or point at valid AG blocks. */
	if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
	    !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib),
			level + 1))
		return __this_address;
	if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
	    !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib),
			level + 1))
		return __this_address;

	return NULL;
}
162
163
164STATIC int
165xfs_btree_check_sblock(
166 struct xfs_btree_cur *cur,
167 struct xfs_btree_block *block,
168 int level,
169 struct xfs_buf *bp)
170{
171 struct xfs_mount *mp = cur->bc_mp;
172 xfs_failaddr_t fa;
173
174 fa = __xfs_btree_check_sblock(cur, block, level, bp);
175 if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
176 XFS_ERRTAG_BTREE_CHECK_SBLOCK))) {
177 if (bp)
178 trace_xfs_btree_corrupt(bp, _RET_IP_);
179 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
180 return -EFSCORRUPTED;
181 }
182 return 0;
183}
184
185
186
187
188int
189xfs_btree_check_block(
190 struct xfs_btree_cur *cur,
191 struct xfs_btree_block *block,
192 int level,
193 struct xfs_buf *bp)
194{
195 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
196 return xfs_btree_check_lblock(cur, block, level, bp);
197 else
198 return xfs_btree_check_sblock(cur, block, level, bp);
199}
200
201
202bool
203xfs_btree_check_lptr(
204 struct xfs_btree_cur *cur,
205 xfs_fsblock_t fsbno,
206 int level)
207{
208 if (level <= 0)
209 return false;
210 return xfs_verify_fsbno(cur->bc_mp, fsbno);
211}
212
213
214bool
215xfs_btree_check_sptr(
216 struct xfs_btree_cur *cur,
217 xfs_agblock_t agbno,
218 int level)
219{
220 if (level <= 0)
221 return false;
222 return xfs_verify_agbno(cur->bc_mp, cur->bc_private.a.agno, agbno);
223}
224
225
226
227
228
/*
 * Check the btree pointer at ptr[index] for the given level, logging a
 * corruption message on failure.  Returns 0 if the pointer is valid,
 * -EFSCORRUPTED otherwise.
 */
static int
xfs_btree_check_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			index,	/* array offset from ptr */
	int			level)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (xfs_btree_check_lptr(cur, be64_to_cpu((&ptr->l)[index]),
				level))
			return 0;
		/* Long-pointer btrees are rooted in an inode fork. */
		xfs_err(cur->bc_mp,
"Inode %llu fork %d: Corrupt btree %d pointer at level %d index %d.",
				cur->bc_private.b.ip->i_ino,
				cur->bc_private.b.whichfork, cur->bc_btnum,
				level, index);
	} else {
		/* Short-pointer btrees are rooted in an AG. */
		if (xfs_btree_check_sptr(cur, be32_to_cpu((&ptr->s)[index]),
				level))
			return 0;
		xfs_err(cur->bc_mp,
"AG %u: Corrupt btree %d pointer at level %d index %d.",
				cur->bc_private.a.agno, cur->bc_btnum,
				level, index);
	}

	return -EFSCORRUPTED;
}
257
/*
 * In DEBUG builds, verify btree pointers as they are used; in production
 * builds the check compiles away to a constant success (0).
 */
#ifdef DEBUG
# define xfs_btree_debug_check_ptr	xfs_btree_check_ptr
#else
# define xfs_btree_debug_check_ptr(...)	(0)
#endif
263
264
265
266
267
268
269
270
271
272void
273xfs_btree_lblock_calc_crc(
274 struct xfs_buf *bp)
275{
276 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
277 struct xfs_buf_log_item *bip = bp->b_log_item;
278
279 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
280 return;
281 if (bip)
282 block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
283 xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
284}
285
286bool
287xfs_btree_lblock_verify_crc(
288 struct xfs_buf *bp)
289{
290 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
291 struct xfs_mount *mp = bp->b_target->bt_mount;
292
293 if (xfs_sb_version_hascrc(&mp->m_sb)) {
294 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
295 return false;
296 return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
297 }
298
299 return true;
300}
301
302
303
304
305
306
307
308
309
310void
311xfs_btree_sblock_calc_crc(
312 struct xfs_buf *bp)
313{
314 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
315 struct xfs_buf_log_item *bip = bp->b_log_item;
316
317 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
318 return;
319 if (bip)
320 block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
321 xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
322}
323
324bool
325xfs_btree_sblock_verify_crc(
326 struct xfs_buf *bp)
327{
328 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
329 struct xfs_mount *mp = bp->b_target->bt_mount;
330
331 if (xfs_sb_version_hascrc(&mp->m_sb)) {
332 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
333 return __this_address;
334 return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
335 }
336
337 return true;
338}
339
340static int
341xfs_btree_free_block(
342 struct xfs_btree_cur *cur,
343 struct xfs_buf *bp)
344{
345 int error;
346
347 error = cur->bc_ops->free_block(cur, bp);
348 if (!error) {
349 xfs_trans_binval(cur->bc_tp, bp);
350 XFS_BTREE_STATS_INC(cur, free);
351 }
352 return error;
353}
354
355
356
357
/*
 * Delete the btree cursor, releasing any buffers it holds.
 */
void
xfs_btree_del_cursor(
	xfs_btree_cur_t		*cur,		/* btree cursor */
	int			error)		/* deleting because of error? */
{
	int			i;		/* btree level */

	/*
	 * Clear the buffer pointers and release the buffers.  If we're doing
	 * this in the face of an error, we need to walk all levels because
	 * code working from level n down to 0 may not have initialized every
	 * entry; otherwise we can stop at the first empty slot.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_bufs[i])
			xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
		else if (!error)
			break;
	}

	/*
	 * Can't free a bmap cursor without having dealt with the allocated
	 * indirect blocks' accounting.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);

	/* Return the cursor to its slab cache. */
	kmem_zone_free(xfs_btree_cur_zone, cur);
}
392
393
394
395
396
/*
 * Duplicate the btree cursor: allocate a new cursor like the old one,
 * copy the current record, and re-get the buffers for each level.  On
 * failure the partially built cursor is torn down and *ncur is NULL.
 */
int						/* error */
xfs_btree_dup_cursor(
	xfs_btree_cur_t	*cur,			/* input cursor */
	xfs_btree_cur_t	**ncur)			/* output cursor */
{
	xfs_buf_t	*bp;			/* btree block's buffer pointer */
	int		error;			/* error return value */
	int		i;			/* level number of btree block */
	xfs_mount_t	*mp;			/* mount structure for filesystem */
	xfs_btree_cur_t	*new;			/* new cursor value */
	xfs_trans_t	*tp;			/* transaction pointer, can be NULL */

	tp = cur->bc_tp;
	mp = cur->bc_mp;

	/*
	 * Allocate a new cursor like the old one.
	 */
	new = cur->bc_ops->dup_cursor(cur);

	/*
	 * Copy the record currently in the cursor.
	 */
	new->bc_rec = cur->bc_rec;

	/*
	 * For each level, re-get the buffer and copy the ptr value.
	 */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_ptrs[i] = cur->bc_ptrs[i];
		new->bc_ra[i] = cur->bc_ra[i];
		bp = cur->bc_bufs[i];
		if (bp) {
			error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
						   XFS_BUF_ADDR(bp), mp->m_bsize,
						   0, &bp,
						   cur->bc_ops->buf_ops);
			if (error) {
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
		}
		new->bc_bufs[i] = bp;
	}
	*ncur = new;
	return 0;
}
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
524{
525 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
526 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
527 return XFS_BTREE_LBLOCK_CRC_LEN;
528 return XFS_BTREE_LBLOCK_LEN;
529 }
530 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
531 return XFS_BTREE_SBLOCK_CRC_LEN;
532 return XFS_BTREE_SBLOCK_LEN;
533}
534
535
536
537
538static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
539{
540 return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
541 sizeof(__be64) : sizeof(__be32);
542}
543
544
545
546
547STATIC size_t
548xfs_btree_rec_offset(
549 struct xfs_btree_cur *cur,
550 int n)
551{
552 return xfs_btree_block_len(cur) +
553 (n - 1) * cur->bc_ops->rec_len;
554}
555
556
557
558
559STATIC size_t
560xfs_btree_key_offset(
561 struct xfs_btree_cur *cur,
562 int n)
563{
564 return xfs_btree_block_len(cur) +
565 (n - 1) * cur->bc_ops->key_len;
566}
567
568
569
570
571STATIC size_t
572xfs_btree_high_key_offset(
573 struct xfs_btree_cur *cur,
574 int n)
575{
576 return xfs_btree_block_len(cur) +
577 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
578}
579
580
581
582
583STATIC size_t
584xfs_btree_ptr_offset(
585 struct xfs_btree_cur *cur,
586 int n,
587 int level)
588{
589 return xfs_btree_block_len(cur) +
590 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
591 (n - 1) * xfs_btree_ptr_len(cur);
592}
593
594
595
596
/* Return a pointer to the n'th (1-based) record in the btree block. */
union xfs_btree_rec *
xfs_btree_rec_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_rec *)(base + xfs_btree_rec_offset(cur, n));
}
606
607
608
609
/* Return a pointer to the n'th (1-based) key in the btree block. */
union xfs_btree_key *
xfs_btree_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_key *)(base + xfs_btree_key_offset(cur, n));
}
619
620
621
622
/* Return a pointer to the n'th (1-based) high key in the btree block. */
union xfs_btree_key *
xfs_btree_high_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_key *)
			(base + xfs_btree_high_key_offset(cur, n));
}
632
633
634
635
636union xfs_btree_ptr *
637xfs_btree_ptr_addr(
638 struct xfs_btree_cur *cur,
639 int n,
640 struct xfs_btree_block *block)
641{
642 int level = xfs_btree_get_level(block);
643
644 ASSERT(block->bb_level != 0);
645
646 return (union xfs_btree_ptr *)
647 ((char *)block + xfs_btree_ptr_offset(cur, n, level));
648}
649
650
651
652
653
654
655
656STATIC struct xfs_btree_block *
657xfs_btree_get_iroot(
658 struct xfs_btree_cur *cur)
659{
660 struct xfs_ifork *ifp;
661
662 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
663 return (struct xfs_btree_block *)ifp->if_broot;
664}
665
666
667
668
669
670struct xfs_btree_block *
671xfs_btree_get_block(
672 struct xfs_btree_cur *cur,
673 int level,
674 struct xfs_buf **bpp)
675{
676 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
677 (level == cur->bc_nlevels - 1)) {
678 *bpp = NULL;
679 return xfs_btree_get_iroot(cur);
680 }
681
682 *bpp = cur->bc_bufs[level];
683 return XFS_BUF_TO_BLOCK(*bpp);
684}
685
686
687
688
689
690xfs_buf_t *
691xfs_btree_get_bufl(
692 xfs_mount_t *mp,
693 xfs_trans_t *tp,
694 xfs_fsblock_t fsbno,
695 uint lock)
696{
697 xfs_daddr_t d;
698
699 ASSERT(fsbno != NULLFSBLOCK);
700 d = XFS_FSB_TO_DADDR(mp, fsbno);
701 return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
702}
703
704
705
706
707
708xfs_buf_t *
709xfs_btree_get_bufs(
710 xfs_mount_t *mp,
711 xfs_trans_t *tp,
712 xfs_agnumber_t agno,
713 xfs_agblock_t agbno,
714 uint lock)
715{
716 xfs_daddr_t d;
717
718 ASSERT(agno != NULLAGNUMBER);
719 ASSERT(agbno != NULLAGBLOCK);
720 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
721 return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
722}
723
724
725
726
727int
728xfs_btree_islastblock(
729 xfs_btree_cur_t *cur,
730 int level)
731{
732 struct xfs_btree_block *block;
733 xfs_buf_t *bp;
734
735 block = xfs_btree_get_block(cur, level, &bp);
736 xfs_btree_check_block(cur, block, level, bp);
737 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
738 return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
739 else
740 return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
741}
742
743
744
745
746
/*
 * Point the cursor at the first record of the given level.  Other levels
 * are unaffected.  Returns 1 on success, 0 if the block is empty or
 * fails its header check.
 */
STATIC int
xfs_btree_firstrec(
	xfs_btree_cur_t		*cur,		/* btree cursor */
	int			level)		/* level to change */
{
	struct xfs_btree_block	*block;		/* generic btree block pointer */
	xfs_buf_t		*bp;		/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	if (xfs_btree_check_block(cur, block, level, bp))
		return 0;
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to 1, that's the first record/key.
	 */
	cur->bc_ptrs[level] = 1;
	return 1;
}
772
773
774
775
776
/*
 * Point the cursor at the last record of the given level.  Other levels
 * are unaffected.  Returns 1 on success, 0 if the block is empty or
 * fails its header check.
 */
STATIC int
xfs_btree_lastrec(
	xfs_btree_cur_t		*cur,		/* btree cursor */
	int			level)		/* level to change */
{
	struct xfs_btree_block	*block;		/* generic btree block pointer */
	xfs_buf_t		*bp;		/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	if (xfs_btree_check_block(cur, block, level, bp))
		return 0;
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to numrecs, that's the last record/key.
	 */
	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
	return 1;
}
802
803
804
805
806
/*
 * Compute the first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets,
 * with the entry after the last field being the total structure size.
 */
void
xfs_btree_offsets(
	int64_t		fields,		/* bitmask of fields present */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of bits in the bitmask */
	int		*first,		/* output: first byte offset */
	int		*last)		/* output: last byte offset */
{
	int		i;		/* current bit number */
	int64_t		imask;		/* mask for current bit number */

	ASSERT(fields != 0);
	/*
	 * Find the lowest set bit; that field's offset is the first byte.
	 */
	for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
		if (imask & fields) {
			*first = offsets[i];
			break;
		}
	}
	/*
	 * Find the highest set bit; that field ends at the byte before the
	 * next field's offset (offsets[i + 1] - 1).
	 */
	for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
		if (imask & fields) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}
838
839
840
841
842
/*
 * Read in the buffer at the given fs block number, verifying it with
 * "ops" and tagging it with "refval" for the buffer cache.  The block
 * number is validated before any I/O is issued.  Returns 0 or a
 * negative errno; the buffer is returned via *bpp.
 */
int
xfs_btree_read_bufl(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	uint			lock,		/* lock flags for read_buf */
	struct xfs_buf		**bpp,		/* buffer for fsbno */
	int			refval,		/* ref count value for buffer */
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;		/* return value */
	xfs_daddr_t		d;		/* real disk block address */
	int			error;

	if (!xfs_verify_fsbno(mp, fsbno))
		return -EFSCORRUPTED;
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
				   mp->m_bsize, lock, &bp, ops);
	if (error)
		return error;
	if (bp)
		xfs_buf_set_ref(bp, refval);
	*bpp = bp;
	return 0;
}
869
870
871
872
873
874
875void
876xfs_btree_reada_bufl(
877 struct xfs_mount *mp,
878 xfs_fsblock_t fsbno,
879 xfs_extlen_t count,
880 const struct xfs_buf_ops *ops)
881{
882 xfs_daddr_t d;
883
884 ASSERT(fsbno != NULLFSBLOCK);
885 d = XFS_FSB_TO_DADDR(mp, fsbno);
886 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
887}
888
889
890
891
892
893
894void
895xfs_btree_reada_bufs(
896 struct xfs_mount *mp,
897 xfs_agnumber_t agno,
898 xfs_agblock_t agbno,
899 xfs_extlen_t count,
900 const struct xfs_buf_ops *ops)
901{
902 xfs_daddr_t d;
903
904 ASSERT(agno != NULLAGNUMBER);
905 ASSERT(agbno != NULLAGBLOCK);
906 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
907 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
908}
909
910STATIC int
911xfs_btree_readahead_lblock(
912 struct xfs_btree_cur *cur,
913 int lr,
914 struct xfs_btree_block *block)
915{
916 int rval = 0;
917 xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib);
918 xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib);
919
920 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
921 xfs_btree_reada_bufl(cur->bc_mp, left, 1,
922 cur->bc_ops->buf_ops);
923 rval++;
924 }
925
926 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
927 xfs_btree_reada_bufl(cur->bc_mp, right, 1,
928 cur->bc_ops->buf_ops);
929 rval++;
930 }
931
932 return rval;
933}
934
935STATIC int
936xfs_btree_readahead_sblock(
937 struct xfs_btree_cur *cur,
938 int lr,
939 struct xfs_btree_block *block)
940{
941 int rval = 0;
942 xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib);
943 xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib);
944
945
946 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
947 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
948 left, 1, cur->bc_ops->buf_ops);
949 rval++;
950 }
951
952 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
953 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
954 right, 1, cur->bc_ops->buf_ops);
955 rval++;
956 }
957
958 return rval;
959}
960
961
962
963
964
/*
 * Initiate readahead for the siblings of the block at the given cursor
 * level, in the direction(s) given by lr.  Returns the number of
 * readaheads issued.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right direction bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the btree
	 * root is stored in the inode - there are no siblings to read.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	/* Skip directions we have already issued readahead for. */
	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}
991
992STATIC int
993xfs_btree_ptr_to_daddr(
994 struct xfs_btree_cur *cur,
995 union xfs_btree_ptr *ptr,
996 xfs_daddr_t *daddr)
997{
998 xfs_fsblock_t fsbno;
999 xfs_agblock_t agbno;
1000 int error;
1001
1002 error = xfs_btree_check_ptr(cur, ptr, 0, 1);
1003 if (error)
1004 return error;
1005
1006 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1007 fsbno = be64_to_cpu(ptr->l);
1008 *daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
1009 } else {
1010 agbno = be32_to_cpu(ptr->s);
1011 *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
1012 agbno);
1013 }
1014
1015 return 0;
1016}
1017
1018
1019
1020
1021
1022
1023
1024STATIC void
1025xfs_btree_readahead_ptr(
1026 struct xfs_btree_cur *cur,
1027 union xfs_btree_ptr *ptr,
1028 xfs_extlen_t count)
1029{
1030 xfs_daddr_t daddr;
1031
1032 if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr))
1033 return;
1034 xfs_buf_readahead(cur->bc_mp->m_ddev_targp, daddr,
1035 cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
1036}
1037
1038
1039
1040
1041
/*
 * Set the buffer for level "lev" in the cursor to bp, releasing any
 * previous buffer.  Readahead state is reset; directions with no sibling
 * are pre-marked done so we never read ahead past the edge of the tree.
 */
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	xfs_buf_t		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}
1068
1069bool
1070xfs_btree_ptr_is_null(
1071 struct xfs_btree_cur *cur,
1072 union xfs_btree_ptr *ptr)
1073{
1074 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1075 return ptr->l == cpu_to_be64(NULLFSBLOCK);
1076 else
1077 return ptr->s == cpu_to_be32(NULLAGBLOCK);
1078}
1079
1080STATIC void
1081xfs_btree_set_ptr_null(
1082 struct xfs_btree_cur *cur,
1083 union xfs_btree_ptr *ptr)
1084{
1085 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1086 ptr->l = cpu_to_be64(NULLFSBLOCK);
1087 else
1088 ptr->s = cpu_to_be32(NULLAGBLOCK);
1089}
1090
1091
1092
1093
1094void
1095xfs_btree_get_sibling(
1096 struct xfs_btree_cur *cur,
1097 struct xfs_btree_block *block,
1098 union xfs_btree_ptr *ptr,
1099 int lr)
1100{
1101 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1102
1103 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1104 if (lr == XFS_BB_RIGHTSIB)
1105 ptr->l = block->bb_u.l.bb_rightsib;
1106 else
1107 ptr->l = block->bb_u.l.bb_leftsib;
1108 } else {
1109 if (lr == XFS_BB_RIGHTSIB)
1110 ptr->s = block->bb_u.s.bb_rightsib;
1111 else
1112 ptr->s = block->bb_u.s.bb_leftsib;
1113 }
1114}
1115
1116STATIC void
1117xfs_btree_set_sibling(
1118 struct xfs_btree_cur *cur,
1119 struct xfs_btree_block *block,
1120 union xfs_btree_ptr *ptr,
1121 int lr)
1122{
1123 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1124
1125 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1126 if (lr == XFS_BB_RIGHTSIB)
1127 block->bb_u.l.bb_rightsib = ptr->l;
1128 else
1129 block->bb_u.l.bb_leftsib = ptr->l;
1130 } else {
1131 if (lr == XFS_BB_RIGHTSIB)
1132 block->bb_u.s.bb_rightsib = ptr->s;
1133 else
1134 block->bb_u.s.bb_leftsib = ptr->s;
1135 }
1136}
1137
/*
 * Initialise a new btree block header.  The pointer format is taken from
 * "flags" (XFS_BTREE_LONG_PTRS or not), and the CRC-only fields are
 * filled in only on CRC-enabled filesystems.  Sibling pointers start out
 * null; the CRC itself is computed later at write time.
 */
void
xfs_btree_init_block_int(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	xfs_daddr_t		blkno,		/* disk address for bb_blkno */
	xfs_btnum_t		btnum,
	__u16			level,
	__u16			numrecs,
	__u64			owner,
	unsigned int		flags)
{
	int			crc = xfs_sb_version_hascrc(&mp->m_sb);
	__u32			magic = xfs_btree_magic(crc, btnum);

	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (flags & XFS_BTREE_LONG_PTRS) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (crc) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		/* owner is a 32 bit value on short blocks */
		__u32 __owner = (__u32)owner;

		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (crc) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}
1180
/*
 * Initialise a new btree block header in the given buffer, using the
 * buffer's own disk address for the self-describing blkno field.
 */
void
xfs_btree_init_block(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_btnum_t	btnum,
	__u16		level,
	__u16		numrecs,
	__u64		owner,
	unsigned int	flags)
{
	xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 btnum, level, numrecs, owner, flags);
}
1194
/*
 * Initialise a new btree block header using the owner implied by the
 * cursor.
 */
STATIC void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{
	__u64			owner;

	/*
	 * We can pull the owner from the cursor right now as the different
	 * owners align directly with the pointer size of the btree: long
	 * pointer btrees are rooted in an inode (owner = inode number),
	 * short pointer btrees in an AG (owner = AG number).
	 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 cur->bc_btnum, level, numrecs,
				 owner, cur->bc_flags);
}
1219
1220
1221
1222
1223
1224
1225STATIC int
1226xfs_btree_is_lastrec(
1227 struct xfs_btree_cur *cur,
1228 struct xfs_btree_block *block,
1229 int level)
1230{
1231 union xfs_btree_ptr ptr;
1232
1233 if (level > 0)
1234 return 0;
1235 if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
1236 return 0;
1237
1238 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1239 if (!xfs_btree_ptr_is_null(cur, &ptr))
1240 return 0;
1241 return 1;
1242}
1243
1244STATIC void
1245xfs_btree_buf_to_ptr(
1246 struct xfs_btree_cur *cur,
1247 struct xfs_buf *bp,
1248 union xfs_btree_ptr *ptr)
1249{
1250 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1251 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
1252 XFS_BUF_ADDR(bp)));
1253 else {
1254 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
1255 XFS_BUF_ADDR(bp)));
1256 }
1257}
1258
/*
 * Tag the buffer with a btree-type-specific reference value via
 * xfs_buf_set_ref().  NOTE(review): presumably this influences buffer
 * cache retention per btree type — confirm against xfs_buf_set_ref().
 */
STATIC void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	switch (cur->bc_btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
		break;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
		break;
	case XFS_BTNUM_BMAP:
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		break;
	case XFS_BTNUM_RMAP:
		xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
		break;
	case XFS_BTNUM_REFC:
		xfs_buf_set_ref(bp, XFS_REFC_BTREE_REF);
		break;
	default:
		/* Unknown btree type - should be unreachable. */
		ASSERT(0);
	}
}
1286
/*
 * Get a buffer for the block the given btree pointer refers to, without
 * reading it from disk (the caller will initialise its contents).  The
 * buffer is returned via *bpp and the block header via *block.
 */
STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* Trylock semantics are not supported here. */
	ASSERT(!(flags & XBF_TRYLOCK));

	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
	if (error)
		return error;
	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
				 mp->m_bsize, flags);

	if (!*bpp)
		return -ENOMEM;

	/* Attach the verifier ops so the block is checked at write time. */
	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1315
1316
1317
1318
1319
/*
 * Read in the buffer for the block the given btree pointer refers to,
 * verifying it with the cursor's buffer ops.  The buffer is returned via
 * *bpp and the block header via *block.
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* Trylock semantics are not supported here. */
	ASSERT(!(flags & XBF_TRYLOCK));

	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
	if (error)
		return error;
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp,
				   cur->bc_ops->buf_ops);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1348
1349
1350
1351
1352STATIC void
1353xfs_btree_copy_keys(
1354 struct xfs_btree_cur *cur,
1355 union xfs_btree_key *dst_key,
1356 union xfs_btree_key *src_key,
1357 int numkeys)
1358{
1359 ASSERT(numkeys >= 0);
1360 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
1361}
1362
1363
1364
1365
1366STATIC void
1367xfs_btree_copy_recs(
1368 struct xfs_btree_cur *cur,
1369 union xfs_btree_rec *dst_rec,
1370 union xfs_btree_rec *src_rec,
1371 int numrecs)
1372{
1373 ASSERT(numrecs >= 0);
1374 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
1375}
1376
1377
1378
1379
1380STATIC void
1381xfs_btree_copy_ptrs(
1382 struct xfs_btree_cur *cur,
1383 union xfs_btree_ptr *dst_ptr,
1384 union xfs_btree_ptr *src_ptr,
1385 int numptrs)
1386{
1387 ASSERT(numptrs >= 0);
1388 memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
1389}
1390
1391
1392
1393
1394STATIC void
1395xfs_btree_shift_keys(
1396 struct xfs_btree_cur *cur,
1397 union xfs_btree_key *key,
1398 int dir,
1399 int numkeys)
1400{
1401 char *dst_key;
1402
1403 ASSERT(numkeys >= 0);
1404 ASSERT(dir == 1 || dir == -1);
1405
1406 dst_key = (char *)key + (dir * cur->bc_ops->key_len);
1407 memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
1408}
1409
1410
1411
1412
1413STATIC void
1414xfs_btree_shift_recs(
1415 struct xfs_btree_cur *cur,
1416 union xfs_btree_rec *rec,
1417 int dir,
1418 int numrecs)
1419{
1420 char *dst_rec;
1421
1422 ASSERT(numrecs >= 0);
1423 ASSERT(dir == 1 || dir == -1);
1424
1425 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
1426 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
1427}
1428
1429
1430
1431
1432STATIC void
1433xfs_btree_shift_ptrs(
1434 struct xfs_btree_cur *cur,
1435 union xfs_btree_ptr *ptr,
1436 int dir,
1437 int numptrs)
1438{
1439 char *dst_ptr;
1440
1441 ASSERT(numptrs >= 0);
1442 ASSERT(dir == 1 || dir == -1);
1443
1444 dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
1445 memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
1446}
1447
1448
1449
1450
/*
 * Log key values [first, last] (1-based) from the btree block.  For an
 * inode-rooted block (bp == NULL) the whole fork root is logged instead.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{

	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}
}
1469
1470
1471
1472
/*
 * Log record values [first, last] (1-based) from the btree block held in
 * bp.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{

	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

}
1487
1488
1489
1490
/*
 * Log block pointer fields [first, last] (1-based) from the btree block.
 * For an inode-rooted block (bp == NULL) the whole fork root is logged.
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{

	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

}
1513
1514
1515
1516
/*
 * Log header fields from a btree block, selected by the "fields"
 * bitmask.  The offset tables below must match the bit definitions;
 * the final entry is the total header size.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	if (bp) {
		int nbits;

		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
			/*
			 * We don't log the CRC when updating a btree block
			 * but instead recalculate it during log recovery.
			 * As the log buffers have checksums of their own
			 * this is safe and avoids logging a CRC update in a
			 * lot of places.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		/* Inode-rooted block: log the whole fork root. */
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}
}
1581
1582
1583
1584
1585
/*
 * Increment the cursor by one record at the given @level.
 * On return *stat is 1 if the cursor now points at the next record,
 * or 0 if the cursor was already at the rightmost entry of the tree.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers; set each level to its first entry.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;
}
1683
1684
1685
1686
1687
/*
 * Decrement the cursor by one record at the given @level.
 * On return *stat is 1 if the cursor now points at the previous record,
 * or 0 if the cursor was already at the leftmost entry of the tree.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;

		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are seriously confused,
	 * or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers; set each level to its last entry.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;
}
1775
/*
 * Read in the btree block at @level pointed to by @pp and return it in
 * *blkp.  Performs basic sanity checks on the block and attaches the
 * buffer to the cursor on success.
 */
int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp) /* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	xfs_daddr_t		daddr;
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level for the disk address we are
	 * looking for re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
	if (error)
		return error;
	if (bp && XFS_BUF_ADDR(bp) == daddr) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	/* Check the inode owner since the verifiers don't. */
	if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
	    !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_INVALID_OWNER) &&
	    (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
	    be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
			cur->bc_private.b.ip->i_ino)
		goto out_bad;

	/* Did we get the level we were looking for? */
	if (be16_to_cpu((*blkp)->bb_level) != level)
		goto out_bad;

	/* Check that internal nodes are at least half full. */
	if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0)
		goto out_bad;

	xfs_btree_setbuf(cur, level, bp);
	return 0;

out_bad:
	*blkp = NULL;
	xfs_trans_brelse(cur->bc_tp, bp);
	return -EFSCORRUPTED;
}
1837
1838
1839
1840
1841
1842
1843STATIC union xfs_btree_key *
1844xfs_lookup_get_search_key(
1845 struct xfs_btree_cur *cur,
1846 int level,
1847 int keyno,
1848 struct xfs_btree_block *block,
1849 union xfs_btree_key *kp)
1850{
1851 if (level == 0) {
1852 cur->bc_ops->init_key_from_rec(kp,
1853 xfs_btree_rec_addr(cur, keyno, block));
1854 return kp;
1855 }
1856
1857 return xfs_btree_key_addr(cur, keyno, block);
1858}
1859
1860
1861
1862
1863
/*
 * Lookup the record.  The cursor is made to point to it, based on @dir.
 * On return *stat is 1 if a matching record was found per @dir's
 * semantics (EQ/LE/GE), 0 otherwise.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	int64_t			diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_STATS_INC(cur, lookup);

	/* No such thing as a zero-level tree. */
	if (cur->bc_nlevels == 0)
		return -EFSCORRUPTED;

	block = NULL;
	keyno = 0;

	/* initialise start pointer from the cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level,
			 * we know we need to use the first entry in this
			 * block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				if (level != 0 || cur->bc_nlevels != 1) {
					XFS_CORRUPTION_ERROR(__func__,
							XFS_ERRLEVEL_LOW,
							cur->bc_mp, block,
							sizeof(*block));
					return -EFSCORRUPTED;
				}

				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

			error = xfs_btree_debug_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;

			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			cur->bc_ptrs[0] = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_ptrs[0] = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	return 0;

error0:
	return error;
}
2021
2022
2023union xfs_btree_key *
2024xfs_btree_high_key_from_key(
2025 struct xfs_btree_cur *cur,
2026 union xfs_btree_key *key)
2027{
2028 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
2029 return (union xfs_btree_key *)((char *)key +
2030 (cur->bc_ops->key_len / 2));
2031}
2032
2033
2034STATIC void
2035xfs_btree_get_leaf_keys(
2036 struct xfs_btree_cur *cur,
2037 struct xfs_btree_block *block,
2038 union xfs_btree_key *key)
2039{
2040 union xfs_btree_key max_hkey;
2041 union xfs_btree_key hkey;
2042 union xfs_btree_rec *rec;
2043 union xfs_btree_key *high;
2044 int n;
2045
2046 rec = xfs_btree_rec_addr(cur, 1, block);
2047 cur->bc_ops->init_key_from_rec(key, rec);
2048
2049 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2050
2051 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
2052 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
2053 rec = xfs_btree_rec_addr(cur, n, block);
2054 cur->bc_ops->init_high_key_from_rec(&hkey, rec);
2055 if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
2056 > 0)
2057 max_hkey = hkey;
2058 }
2059
2060 high = xfs_btree_high_key_from_key(cur, key);
2061 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
2062 }
2063}
2064
2065
/*
 * Determine the low (and, for overlapping btrees, high) keys of a node
 * block and store them in *key.
 */
STATIC void
xfs_btree_get_node_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*max_hkey;
	union xfs_btree_key	*high;
	int			n;

	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		/* Low key is the first entry's low key (half a slot). */
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len / 2);

		/* High key is the largest high key of any entry. */
		max_hkey = xfs_btree_high_key_addr(cur, 1, block);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			hkey = xfs_btree_high_key_addr(cur, n, block);
			if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
				max_hkey = hkey;
		}

		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
	} else {
		/* Non-overlapping: key of a node is just its first key. */
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len);
	}
}
2095
2096
2097void
2098xfs_btree_get_keys(
2099 struct xfs_btree_cur *cur,
2100 struct xfs_btree_block *block,
2101 union xfs_btree_key *key)
2102{
2103 if (be16_to_cpu(block->bb_level) == 0)
2104 xfs_btree_get_leaf_keys(cur, block, key);
2105 else
2106 xfs_btree_get_node_keys(cur, block, key);
2107}
2108
2109
2110
2111
2112
2113
2114
2115
2116static inline bool
2117xfs_btree_needs_key_update(
2118 struct xfs_btree_cur *cur,
2119 int ptr)
2120{
2121 return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
2122}
2123
2124
2125
2126
2127
2128
/*
 * Update the low and high parent keys of the given @level, progressing
 * towards the root.  If @force_all is false, stop as soon as a parent's
 * keys already match what we would write.  Only used for overlapping
 * btrees, where each entry carries both a low and a high key.
 */
STATIC int
__xfs_btree_updkeys(
	struct xfs_btree_cur	*cur,
	int			level,
	struct xfs_btree_block	*block,
	struct xfs_buf		*bp0,
	bool			force_all)
{
	union xfs_btree_key	key;	/* composite low+high key buffer */
	union xfs_btree_key	*lkey;	/* low key */
	union xfs_btree_key	*hkey;	/* high key */
	union xfs_btree_key	*nlkey;	/* parent's current low key */
	union xfs_btree_key	*nhkey;	/* parent's current high key */
	struct xfs_buf		*bp;
	int			ptr;

	ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);

	/* Exit if there aren't any parent levels to update. */
	if (level + 1 >= cur->bc_nlevels)
		return 0;

	trace_xfs_btree_updkeys(cur, level, bp0);

	lkey = &key;
	hkey = xfs_btree_high_key_from_key(cur, lkey);
	xfs_btree_get_keys(cur, block, lkey);
	for (level++; level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
		trace_xfs_btree_updkeys(cur, level, bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error)
			return error;
#endif
		ptr = cur->bc_ptrs[level];
		nlkey = xfs_btree_key_addr(cur, ptr, block);
		nhkey = xfs_btree_high_key_addr(cur, ptr, block);
		/* Stop if the parent's keys already match ours. */
		if (!force_all &&
		    !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
		      cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
			break;
		xfs_btree_copy_keys(cur, nlkey, lkey, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
		/* No parent above this level, so we're done. */
		if (level + 1 >= cur->bc_nlevels)
			break;
		/* Recompute the keys to push up to the next level. */
		xfs_btree_get_node_keys(cur, block, lkey);
	}

	return 0;
}
2183
2184
2185STATIC int
2186xfs_btree_updkeys_force(
2187 struct xfs_btree_cur *cur,
2188 int level)
2189{
2190 struct xfs_buf *bp;
2191 struct xfs_btree_block *block;
2192
2193 block = xfs_btree_get_block(cur, level, &bp);
2194 return __xfs_btree_updkeys(cur, level, block, bp, true);
2195}
2196
2197
2198
2199
/*
 * Update the parent keys of the given @level, progressing towards the
 * root.  Overlapping btrees delegate to __xfs_btree_updkeys(); for
 * regular btrees only the first key of each block is propagated upward.
 */
STATIC int
xfs_btree_update_keys(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_key	*kp;
	union xfs_btree_key	key;
	int			ptr;

	ASSERT(level >= 0);

	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
		return __xfs_btree_updkeys(cur, level, block, bp, false);

	/*
	 * Go up the tree from this level toward the root.
	 * At each level, update the key values to the value it would have
	 * given the contents from the current block.  Stop when we reach a
	 * level where the cursor is not pointing at the first entry in the
	 * block.
	 */
	xfs_btree_get_keys(cur, block, &key);
	for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error)
			return error;
#endif
		ptr = cur->bc_ptrs[level];
		kp = xfs_btree_key_addr(cur, ptr, block);
		xfs_btree_copy_keys(cur, kp, &key, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
	}

	return 0;
}
2242
2243
2244
2245
2246
2247
/*
 * Update the record referred to by @cur to the value in the given
 * record @rec.  The cursor must already point at the record (level 0).
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;
	int			ptr;
	union xfs_btree_rec	*rp;

	/* Pick up the current block. */
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		goto error0;
#endif

	/* Get the address of the rec to be updated. */
	ptr = cur->bc_ptrs[0];
	rp = xfs_btree_rec_addr(cur, ptr, block);

	/* Fill in the new contents and log them. */
	xfs_btree_copy_recs(cur, rp, rec, 1);
	xfs_btree_log_recs(cur, bp, ptr, ptr);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, 0)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_UPDATE);
	}

	/* Pass new key value up to our parent, if necessary. */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, 0);
		if (error)
			goto error0;
	}

	return 0;

error0:
	return error;
}
2296
2297
2298
2299
2300
/*
 * Move one record left from cur/@level if possible.
 * Update cur to reflect the new state; *stat is 1 if a record was
 * moved, 0 if the shift was not possible.
 */
STATIC int					/* error */
xfs_btree_lshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs;		/* left record count */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			rrecs;		/* right record count */
	union xfs_btree_ptr	lptr;		/* left btree pointer */
	union xfs_btree_key	*rkp = NULL;	/* right btree key */
	union xfs_btree_ptr	*rpp = NULL;	/* right address pointer */
	union xfs_btree_rec	*rrp = NULL;	/* right record pointer */
	int			error;		/* error return value */
	int			i;

	/* The root held in the inode has no left sibling to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1)
		goto out0;

	/* Set up variables for this block as "right". */
	right = xfs_btree_get_block(cur, level, &rbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, right, level, rbp);
	if (error)
		goto error0;
#endif

	/* If we've got no left sibling then we can't shift an entry left. */
	xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &lptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	if (cur->bc_ptrs[level] <= 1)
		goto out0;

	/* Set up the left neighbor as "left". */
	error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	lrecs = xfs_btree_get_numrecs(left);
	if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	rrecs = xfs_btree_get_numrecs(right);

	/*
	 * We add one entry to the left side and remove one for the right side.
	 * Account for it here, the changes will be updated on disk and logged
	 * later.
	 */
	lrecs++;
	rrecs--;

	XFS_BTREE_STATS_INC(cur, lshift);
	XFS_BTREE_STATS_ADD(cur, moves, 1);

	/*
	 * If non-leaf, copy a key and a ptr to the left block.
	 * Log the changes to the left block.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);

		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

		error = xfs_btree_debug_check_ptr(cur, rpp, 0, level);
		if (error)
			goto error0;

		xfs_btree_copy_keys(cur, lkp, rkp, 1);
		xfs_btree_copy_ptrs(cur, lpp, rpp, 1);

		xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->keys_inorder(cur,
			xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, 1);
		xfs_btree_log_recs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->recs_inorder(cur,
			xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
	}

	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Slide the contents of right down one entry.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
		int		i;		/* loop index */

		for (i = 0; i < rrecs; i++) {
			error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
			if (error)
				goto error0;
		}

		xfs_btree_shift_keys(cur,
				xfs_btree_key_addr(cur, 2, right),
				-1, rrecs);
		xfs_btree_shift_ptrs(cur,
				xfs_btree_ptr_addr(cur, 2, right),
				-1, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
	} else {
		/* It's a leaf. operate on records */
		xfs_btree_shift_recs(cur,
			xfs_btree_rec_addr(cur, 2, right),
			-1, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);
	}

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the left.
	 */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error1;

		/* Update the parent high keys of the left block, if needed. */
		error = xfs_btree_update_keys(tcur, level);
		if (error)
			goto error1;

		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(cur, level);
	if (error)
		goto error0;

	/* Slide the cursor value left one. */
	cur->bc_ptrs[level]--;

	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;

error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
2492
2493
2494
2495
2496
/*
 * Move one record right from cur/@level if possible.
 * Update cur to reflect the new state; *stat is 1 if a record was
 * moved, 0 if the shift was not possible.
 */
STATIC int					/* error */
xfs_btree_rshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	union xfs_btree_ptr	rptr;		/* right block pointer */
	union xfs_btree_key	*rkp;		/* right btree key */
	int			rrecs;		/* right record count */
	int			lrecs;		/* left record count */
	int			error;		/* error return value */
	int			i;		/* loop counter */

	/* The root held in the inode has no right sibling to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1))
		goto out0;

	/* Set up variables for this block as "left". */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	/* If we've got no right sibling then we can't shift an entry right. */
	xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &rptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	if (cur->bc_ptrs[level] >= lrecs)
		goto out0;

	/* Set up the right neighbor as "right". */
	error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	rrecs = xfs_btree_get_numrecs(right);
	if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, rshift);
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/*
	 * Make a hole at the start of the right neighbor block, then
	 * copy the last left block entry to the hole.
	 */
	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;
		union xfs_btree_ptr	*rpp;

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

		for (i = rrecs - 1; i >= 0; i--) {
			error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}

		xfs_btree_shift_keys(cur, rkp, 1, rrecs);
		xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);

		error = xfs_btree_debug_check_ptr(cur, lpp, 0, level);
		if (error)
			goto error0;

		/* Now put the new data in, and log it. */
		xfs_btree_copy_keys(cur, rkp, lkp, 1);
		xfs_btree_copy_ptrs(cur, rpp, lpp, 1);

		xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);

		ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
			xfs_btree_key_addr(cur, 2, right)));
	} else {
		/* It's a leaf. make a hole in the records */
		union xfs_btree_rec	*lrp;
		union xfs_btree_rec	*rrp;

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_shift_recs(cur, rrp, 1, rrecs);

		/* Now put the new data in, and log it. */
		xfs_btree_copy_recs(cur, rrp, lrp, 1);
		xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
	}

	/*
	 * Decrement and log left's numrecs, bump and log right's numrecs.
	 */
	xfs_btree_set_numrecs(left, --lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, ++rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the right.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;
	i = xfs_btree_lastrec(tcur, level);
	XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

	error = xfs_btree_increment(tcur, level, &i);
	if (error)
		goto error1;

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error1;
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(tcur, level);
	if (error)
		goto error1;

	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;

error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
2657
2658
2659
2660
2661
2662
/*
 * Split cur/@level block in half.
 * Return new block number and the key to its first record.
 * If there are more levels above, a duplicate cursor pointing at the
 * new block's parent entry is returned in *curp so the caller can
 * insert the new key/pointer there.
 */
STATIC int					/* error */
__xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,		/* out: ptr to the new block */
	union xfs_btree_key	*key,		/* out: key of new block's 1st rec */
	struct xfs_btree_cur	**curp,		/* out: cursor for the parent level */
	int			*stat)		/* success/failure */
{
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	union xfs_btree_ptr	rrptr;		/* right-right sibling ptr */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	int			lrecs;
	int			rrecs;
	int			src_index;
	int			error;		/* error return value */
	int			i;

	XFS_BTREE_STATS_INC(cur, split);

	/* Set up left block (current one). */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	xfs_btree_buf_to_ptr(cur, lbp, &lptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block as "right". */
	error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* Fill in the btree header for the new right block. */
	xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);

	/*
	 * Split the entries between the old and the new block evenly.
	 * Make sure that if there's an odd number of entries now, that
	 * each new block will have the same number of entries.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	rrecs = lrecs / 2;
	if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
		rrecs++;
	src_index = (lrecs - rrecs + 1);

	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/* Adjust numrecs for the later get_*_keys() calls. */
	lrecs -= rrecs;
	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);

	/*
	 * Copy btree block entries from the left block over to the
	 * new block, the right. Update the right block and log the
	 * changes.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, src_index, left);
		lpp = xfs_btree_ptr_addr(cur, src_index, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

		for (i = src_index; i < rrecs; i++) {
			error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}

		/* Copy the keys & pointers to the new block. */
		xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
		xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_node_keys(cur, right, key);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, src_index, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		/* Copy records to the new block. */
		xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_leaf_keys(cur, right, key);
	}

	/*
	 * Find the left block number by looking in the buffer.
	 * Adjust sibling pointers.
	 */
	xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);

	xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/*
	 * If there's a block to the new block's right, make that block
	 * point back to right instead of to left.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
		error = xfs_btree_read_buf_block(cur, &rrptr,
							0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the cursor is really in the right block, move it there.
	 * If it's just pointing past the last entry in left, then we'll
	 * insert there, so don't change anything in that case.
	 */
	if (cur->bc_ptrs[level] > lrecs + 1) {
		xfs_btree_setbuf(cur, level, rbp);
		cur->bc_ptrs[level] -= lrecs;
	}
	/*
	 * If there are more levels, we'll need another cursor which refers
	 * to the right block, no matter where this cursor was.
	 */
	if (level + 1 < cur->bc_nlevels) {
		error = xfs_btree_dup_cursor(cur, curp);
		if (error)
			goto error0;
		(*curp)->bc_ptrs[level + 1]++;
	}
	*ptrp = rptr;
	*stat = 1;
	return 0;
out0:
	*stat = 0;
	return 0;

error0:
	return error;
}
2843
/*
 * Argument bundle for deferring a btree split to a workqueue thread
 * (see xfs_btree_split_worker()).
 */
struct xfs_btree_split_args {
	struct xfs_btree_cur	*cur;		/* btree being split */
	int			level;		/* level at which to split */
	union xfs_btree_ptr	*ptrp;		/* out: ptr to the new block */
	union xfs_btree_key	*key;		/* out: new block's first key */
	struct xfs_btree_cur	**curp;		/* out: parent-level cursor */
	int			*stat;		/* out: success/failure */
	int			result;		/* __xfs_btree_split() return value */
	bool			kswapd;	/* allocation in kswapd context */
	struct completion	*done;		/* completed when the worker finishes */
	struct work_struct	work;		/* workqueue item */
};
2856
2857
2858
2859
/*
 * Stack switching interfaces for allocation: run the btree split on a
 * workqueue thread and signal the waiting caller via args->done.
 */
static void
xfs_btree_split_worker(
	struct work_struct	*work)
{
	struct xfs_btree_split_args	*args = container_of(work,
						struct xfs_btree_split_args, work);
	unsigned long		pflags;
	unsigned long		new_pflags = PF_MEMALLOC_NOFS;

	/*
	 * we are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory reclaim
	 * in any way.
	 */
	if (args->kswapd)
		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	current_set_flags_nested(&pflags, new_pflags);

	/* Do the split and hand the result back through args->result. */
	args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
					 args->key, args->curp, args->stat);
	complete(args->done);

	current_restore_flags_nested(&pflags, new_pflags);
}
2886
2887
2888
2889
2890
2891
/*
 * BMBT split requests are deferred to a worker thread with a fresh
 * stack (queued on xfs_alloc_wq) and waited on synchronously; all
 * other btree types are split directly on the current stack.
 */
STATIC int					/* error */
xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_split_args	args;
	DECLARE_COMPLETION_ONSTACK(done);

	if (cur->bc_btnum != XFS_BTNUM_BMAP)
		return __xfs_btree_split(cur, level, ptrp, key, curp, stat);

	args.cur = cur;
	args.level = level;
	args.ptrp = ptrp;
	args.key = key;
	args.curp = curp;
	args.stat = stat;
	args.done = &done;
	args.kswapd = current_is_kswapd();
	INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
	queue_work(xfs_alloc_wq, &args.work);
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}
2921
2922
2923
2924
2925
2926
/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.  This adds one level to the tree: the in-inode
 * root becomes a single-entry node pointing at the new child block.
 * On success *stat is 1 and inode log flags are OR'd into *logflags;
 * *stat is 0 if block allocation failed.
 */
int						/* error */
xfs_btree_new_iroot(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			*logflags,	/* logging flags for inode */
	int			*stat)		/* return status - 0 fail */
{
	struct xfs_buf		*cbp;		/* buffer for cblock */
	struct xfs_btree_block	*block;		/* btree block */
	struct xfs_btree_block	*cblock;	/* child btree block */
	union xfs_btree_key	*ckp;		/* child key pointer */
	union xfs_btree_ptr	*cpp;		/* child ptr pointer */
	union xfs_btree_key	*kp;		/* pointer to btree key */
	union xfs_btree_ptr	*pp;		/* pointer to block addr */
	union xfs_btree_ptr	nptr;		/* new block addr */
	int			level;		/* btree level */
	int			error;		/* error return code */
	int			i;		/* loop counter */

	XFS_BTREE_STATS_INC(cur, newroot);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);

	level = cur->bc_nlevels - 1;

	block = xfs_btree_get_iroot(cur);
	pp = xfs_btree_ptr_addr(cur, 1, block);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		return 0;

	XFS_BTREE_STATS_INC(cur, alloc);

	/* Copy the root into a real block. */
	error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
	if (error)
		goto error0;

	/*
	 * we can't just memcpy() the root in for CRC enabled btree blocks.
	 * In that case have to also ensure the blkno remains correct
	 */
	memcpy(cblock, block, xfs_btree_block_len(cur));
	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
		if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
			cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
		else
			cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
	}

	/* The root becomes a one-entry node one level higher up. */
	be16_add_cpu(&block->bb_level, 1);
	xfs_btree_set_numrecs(block, 1);
	cur->bc_nlevels++;
	cur->bc_ptrs[level + 1] = 1;

	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));

	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
		error = xfs_btree_debug_check_ptr(cur, pp, i, level);
		if (error)
			goto error0;
	}

	xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));

	error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level);
	if (error)
		goto error0;

	/* The inode root now holds a single pointer to the child block. */
	xfs_btree_copy_ptrs(cur, pp, &nptr, 1);

	/* Shrink the inode fork root to the one remaining entry. */
	xfs_iroot_realloc(cur->bc_private.b.ip,
			  1 - xfs_btree_get_numrecs(cblock),
			  cur->bc_private.b.whichfork);

	xfs_btree_setbuf(cur, level, cbp);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
	xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
	xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));

	*logflags |=
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
	*stat = 1;
	return 0;
error0:
	return error;
}
3025
3026
3027
3028
/*
 * Allocate a new root block, fill it in.  Called when an entry must be
 * inserted above the current topmost on-disk level of a tree whose root
 * lives outside the blocks themselves (set via the set_root operation).
 */
STATIC int				/* error */
xfs_btree_new_root(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* one half of the old root block */
	struct xfs_buf		*bp;	/* buffer containing block */
	int			error;	/* error return value */
	struct xfs_buf		*lbp;	/* left buffer pointer */
	struct xfs_btree_block	*left;	/* left btree block */
	struct xfs_buf		*nbp;	/* new (root) buffer */
	struct xfs_btree_block	*new;	/* new (root) btree block */
	int			nptr;	/* new value for key index, 1 or 2 */
	struct xfs_buf		*rbp;	/* right buffer pointer */
	struct xfs_btree_block	*right;	/* right btree block */
	union xfs_btree_ptr	rptr;
	union xfs_btree_ptr	lptr;

	XFS_BTREE_STATS_INC(cur, newroot);

	/* initialise our start point from the cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &rptr);

	/* Allocate the new block.  If we can't do it, we're toast.  Give up. */
	error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block. */
	error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
	if (error)
		goto error0;

	/* Set the root in the holding structure, increasing the level by 1. */
	cur->bc_ops->set_root(cur, &lptr, 1);

	/*
	 * At the previous root level there are now two blocks: the old root,
	 * and the new block generated when it was split.  We don't know which
	 * one the cursor is pointing at, so we set up variables "left" and
	 * "right" for each case.
	 */
	block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
	if (error)
		goto error0;
#endif

	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/* Our block is left, pick up the right block. */
		lbp = bp;
		xfs_btree_buf_to_ptr(cur, lbp, &lptr);
		left = block;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;
		bp = rbp;
		nptr = 1;
	} else {
		/* Our block is right, pick up the left block. */
		rbp = bp;
		xfs_btree_buf_to_ptr(cur, rbp, &rptr);
		right = block;
		xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;
		bp = lbp;
		nptr = 2;
	}

	/* Fill in the new block's btree header and log it. */
	xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
	xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
	ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
			!xfs_btree_ptr_is_null(cur, &rptr));

	/* Fill in the key data in the new root. */
	if (xfs_btree_get_level(left) > 0) {
		/*
		 * Get the keys for the left block's keys and put them directly
		 * in the parent block.  Do the same for the right block.
		 */
		xfs_btree_get_node_keys(cur, left,
				xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_node_keys(cur, right,
				xfs_btree_key_addr(cur, 2, new));
	} else {
		/*
		 * Get the keys for the left block's records and put them
		 * directly in the parent block.  Do the same for the right
		 * block.
		 */
		xfs_btree_get_leaf_keys(cur, left,
			xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_leaf_keys(cur, right,
			xfs_btree_key_addr(cur, 2, new));
	}
	xfs_btree_log_keys(cur, nbp, 1, 2);

	/* Fill in the pointer data in the new root. */
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
	xfs_btree_log_ptrs(cur, nbp, 1, 2);

	/* Fix up the cursor. */
	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
	cur->bc_ptrs[cur->bc_nlevels] = nptr;
	cur->bc_nlevels++;
	*stat = 1;
	return 0;
error0:
	return error;
out0:
	*stat = 0;
	return 0;
}
3154
/*
 * Make room for a new entry in the block the cursor points to at "level":
 * first try shifting entries into a sibling, then splitting the block, or
 * (for an inode-rooted tree at the root level) growing or replacing the
 * root.  On a split, *ncur/*nptr/*key describe the new right block.
 */
STATIC int
xfs_btree_make_block_unfull(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* btree level */
	int			numrecs,/* # of recs in block */
	int			*oindex,/* old tree index */
	int			*index,	/* new tree index */
	union xfs_btree_ptr	*nptr,	/* new btree ptr */
	struct xfs_btree_cur	**ncur,	/* new btree cursor */
	union xfs_btree_key	*key,	/* key of new block */
	int			*stat)	/* success/failure */
{
	int			error = 0;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_inode *ip = cur->bc_private.b.ip;

		if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
			/* A root block that can be made bigger. */
			xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
			*stat = 1;
		} else {
			/* A root block that needs replacing */
			int	logflags = 0;

			error = xfs_btree_new_iroot(cur, &logflags, stat);
			if (error || *stat == 0)
				return error;

			xfs_trans_log_inode(cur->bc_tp, ip, logflags);
		}

		return 0;
	}

	/* First, try shifting an entry to the right neighbor. */
	error = xfs_btree_rshift(cur, level, stat);
	if (error || *stat)
		return error;

	/* Next, try shifting an entry to the left neighbor. */
	error = xfs_btree_lshift(cur, level, stat);
	if (error)
		return error;

	if (*stat) {
		*oindex = *index = cur->bc_ptrs[level];
		return 0;
	}

	/*
	 * Next, try splitting the current block in half.
	 *
	 * If this works we have to re-set our variables because we
	 * could be in a different block now.
	 */
	error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
	if (error || *stat == 0)
		return error;

	/* The split moved things around, so re-read our position. */
	*index = cur->bc_ptrs[level];
	return 0;
}
3220
3221
3222
3223
3224
/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int
xfs_btree_insrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	union xfs_btree_ptr	*ptrp,	/* i/o: block number inserted */
	union xfs_btree_rec	*rec,	/* record to insert */
	union xfs_btree_key	*key,	/* i/o: block key for ptrp */
	struct xfs_btree_cur	**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* btree block */
	struct xfs_buf		*bp;	/* buffer for block */
	union xfs_btree_ptr	nptr;	/* new block ptr */
	struct xfs_btree_cur	*ncur;	/* new btree cursor */
	union xfs_btree_key	nkey;	/* new block key */
	union xfs_btree_key	*lkey;
	int			optr;	/* old key/record index */
	int			ptr;	/* key/record index */
	int			numrecs;/* number of records */
	int			error;	/* error return value */
	int			i;
	xfs_daddr_t		old_bn;	/* disk addr of block before insert */

	ncur = NULL;
	lkey = &nkey;

	/*
	 * If we have an external root pointer, and we've made it to the
	 * root level, allocate a new root block and we're done.
	 */
	if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level >= cur->bc_nlevels)) {
		error = xfs_btree_new_root(cur, stat);
		xfs_btree_set_ptr_null(cur, ptrp);

		return error;
	}

	/* If we're off the left edge, return failure. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}

	optr = ptr;

	XFS_BTREE_STATS_INC(cur, insrec);

	/* Get pointers to the btree buffer and block. */
	block = xfs_btree_get_block(cur, level, &bp);
	old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;

	/* Check that the new entry is being inserted in the right place. */
	if (ptr <= numrecs) {
		if (level == 0) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rec,
				xfs_btree_rec_addr(cur, ptr, block)));
		} else {
			ASSERT(cur->bc_ops->keys_inorder(cur, key,
				xfs_btree_key_addr(cur, ptr, block)));
		}
	}
#endif

	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
	xfs_btree_set_ptr_null(cur, &nptr);
	if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
		error = xfs_btree_make_block_unfull(cur, level, numrecs,
					&optr, &ptr, &nptr, &ncur, lkey, stat);
		if (error || *stat == 0)
			goto error0;
	}

	/*
	 * The current block may have changed if the block was
	 * previously full and we have just made space in it.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		return error;
#endif

	/*
	 * At this point we know there's room for our new entry in the block
	 * we're pointing at.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);

	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*kp;
		union xfs_btree_ptr	*pp;

		kp = xfs_btree_key_addr(cur, ptr, block);
		pp = xfs_btree_ptr_addr(cur, ptr, block);

		for (i = numrecs - ptr; i >= 0; i--) {
			error = xfs_btree_debug_check_ptr(cur, pp, i, level);
			if (error)
				return error;
		}

		xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
		xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);

		error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level);
		if (error)
			goto error0;

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_keys(cur, kp, key, 1);
		xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
		numrecs++;
		xfs_btree_set_numrecs(block, numrecs);
		xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
		xfs_btree_log_keys(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->keys_inorder(cur, kp,
				xfs_btree_key_addr(cur, ptr + 1, block)));
		}
#endif
	} else {
		/* It's a leaf. make a hole in the records */
		union xfs_btree_rec	*rp;

		rp = xfs_btree_rec_addr(cur, ptr, block);

		xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_recs(cur, rp, rec, 1);
		xfs_btree_set_numrecs(block, ++numrecs);
		xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rp,
				xfs_btree_rec_addr(cur, ptr + 1, block)));
		}
#endif
	}

	/* Log the new number of records in the btree header. */
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we just inserted into a new tree block, we have to
	 * recalculate nkey here because nkey is out of date.
	 *
	 * Otherwise we're just updating an existing block (having shoved
	 * some records into the new tree block), so use the regular key
	 * update mechanism.
	 */
	if (bp && bp->b_bn != old_bn) {
		xfs_btree_get_keys(cur, block, lkey);
	} else if (xfs_btree_needs_key_update(cur, optr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_INSREC);
	}

	/*
	 * Return the new block number, if any.
	 * If there is one, give back a record value and a cursor too.
	 */
	*ptrp = nptr;
	if (!xfs_btree_ptr_is_null(cur, &nptr)) {
		xfs_btree_copy_keys(cur, key, lkey, 1);
		*curp = ncur;
	}

	*stat = 1;
	return 0;

error0:
	return error;
}
3425
3426
3427
3428
3429
3430
3431
3432
/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor.  All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 */
int
xfs_btree_insert(
	struct xfs_btree_cur	*cur,
	int			*stat)
{
	int			error;	/* error return value */
	int			i;	/* result value, 0 for failure */
	int			level;	/* current level number in btree */
	union xfs_btree_ptr	nptr;	/* new block number (split result) */
	struct xfs_btree_cur	*ncur;	/* new cursor (split result) */
	struct xfs_btree_cur	*pcur;	/* previous level's cursor */
	union xfs_btree_key	bkey;	/* key of block to insert */
	union xfs_btree_key	*key;
	union xfs_btree_rec	rec;	/* record to insert */

	level = 0;
	ncur = NULL;
	pcur = cur;
	key = &bkey;

	xfs_btree_set_ptr_null(cur, &nptr);

	/* Make a key out of the record data to be inserted, and save it. */
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(key, &rec);

	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split.
	 */
	do {
		/*
		 * Insert nrec/nptr into this level of the tree.
		 * Note if we fail, nptr will be null.
		 */
		error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
				&ncur, &i);
		if (error) {
			if (pcur != cur)
				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
			goto error0;
		}

		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		level++;

		/*
		 * See if the cursor we just used is trash.
		 * Can't trash the caller's cursor, but otherwise we should
		 * if ncur is a new cursor or we're about to be done.
		 */
		if (pcur != cur &&
		    (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
			/* Save the state from the cursor before we trash it */
			if (cur->bc_ops->update_cursor)
				cur->bc_ops->update_cursor(pcur, cur);
			cur->bc_nlevels = pcur->bc_nlevels;
			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
		}
		/* If we got a new cursor, switch to it. */
		if (ncur) {
			pcur = ncur;
			ncur = NULL;
		}
	} while (!xfs_btree_ptr_is_null(cur, &nptr));

	*stat = i;
	return 0;
error0:
	return error;
}
3505
3506
3507
3508
3509
3510
3511
3512
3513
/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the killroot names comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * btree rooted in an inode we need to kill the child block of the
 * inode root instead.
 */
STATIC int
xfs_btree_kill_iroot(
	struct xfs_btree_cur	*cur)
{
	int			whichfork = cur->bc_private.b.whichfork;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*cblock;
	union xfs_btree_key	*kp;
	union xfs_btree_key	*ckp;
	union xfs_btree_ptr	*pp;
	union xfs_btree_ptr	*cpp;
	struct xfs_buf		*cbp;
	int			level;
	int			index;
	int			numrecs;
	int			error;
#ifdef DEBUG
	union xfs_btree_ptr	ptr;
#endif
	int			i;

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
	ASSERT(cur->bc_nlevels > 1);

	/*
	 * Don't deal with the root block needs to be a leaf case.
	 * We're just going to turn the thing back into extents anyway.
	 */
	level = cur->bc_nlevels - 1;
	if (level == 1)
		goto out0;

	/*
	 * Give up if the root has multiple children.
	 */
	block = xfs_btree_get_iroot(cur);
	if (xfs_btree_get_numrecs(block) != 1)
		goto out0;

	cblock = xfs_btree_get_block(cur, level - 1, &cbp);
	numrecs = xfs_btree_get_numrecs(cblock);

	/*
	 * Only do this if the next level will fit.
	 * Then the data must be copied up to the inode,
	 * instead of freeing the root you free the next level.
	 */
	if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, killroot);

#ifdef DEBUG
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif

	/* Grow the inode root to hold the child block's entries. */
	index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
	if (index) {
		xfs_iroot_realloc(cur->bc_private.b.ip, index,
				  cur->bc_private.b.whichfork);
		block = ifp->if_broot;
	}

	be16_add_cpu(&block->bb_numrecs, index);
	ASSERT(block->bb_numrecs == cblock->bb_numrecs);

	/* Copy the child's keys and pointers up into the inode root. */
	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, kp, ckp, numrecs);

	pp = xfs_btree_ptr_addr(cur, 1, block);
	cpp = xfs_btree_ptr_addr(cur, 1, cblock);

	for (i = 0; i < numrecs; i++) {
		error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
		if (error)
			return error;
	}

	xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);

	/* Free the child block; the tree is now one level shorter. */
	error = xfs_btree_free_block(cur, cbp);
	if (error)
		return error;

	cur->bc_bufs[level - 1] = NULL;
	be16_add_cpu(&block->bb_level, -1);
	xfs_trans_log_inode(cur->bc_tp, ip,
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	cur->bc_nlevels--;
out0:
	return 0;
}
3612
3613
3614
3615
/*
 * Kill the old root block at the given level, replacing it with the block
 * pointed to by newroot (its only remaining child), and shrink the tree
 * by one level.
 */
STATIC int
xfs_btree_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,	/* buffer holding the old root block */
	int			level,	/* level of the old root */
	union xfs_btree_ptr	*newroot) /* pointer to the new root block */
{
	int			error;

	XFS_BTREE_STATS_INC(cur, killroot);

	/*
	 * Update the root pointer, decreasing the level by 1 and then
	 * free the old root.
	 */
	cur->bc_ops->set_root(cur, newroot, -1);

	error = xfs_btree_free_block(cur, bp);
	if (error)
		return error;

	/* The cursor no longer references the freed block at this level. */
	cur->bc_bufs[level] = NULL;
	cur->bc_ra[level] = 0;
	cur->bc_nlevels--;

	return 0;
}
3643
3644STATIC int
3645xfs_btree_dec_cursor(
3646 struct xfs_btree_cur *cur,
3647 int level,
3648 int *stat)
3649{
3650 int error;
3651 int i;
3652
3653 if (level > 0) {
3654 error = xfs_btree_decrement(cur, level, &i);
3655 if (error)
3656 return error;
3657 }
3658
3659 *stat = 1;
3660 return 0;
3661}
3662
3663
3664
3665
3666
3667
3668
/*
 * Single level of the btree record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int					/* error */
xfs_btree_delrec(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			level,		/* level removing record from */
	int			*stat)		/* fail/done/go-on */
{
	struct xfs_btree_block	*block;		/* btree block */
	union xfs_btree_ptr	cptr;		/* current block ptr */
	struct xfs_buf		*bp;		/* buffer for block */
	int			error;		/* error return value */
	int			i;		/* loop counter */
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs = 0;	/* left record count */
	int			ptr;		/* key/record index */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	int			rrecs = 0;	/* right record count */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			numrecs;	/* temporary numrec count */

	tcur = NULL;

	/* Get the index of the entry being deleted, check for nothing there. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}

	/* Get the buffer & block containing the record or key/ptr. */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we're off the end of the block. */
	if (ptr > numrecs) {
		*stat = 0;
		return 0;
	}

	XFS_BTREE_STATS_INC(cur, delrec);
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);

	/* Excise the entries being deleted. */
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;

		lkp = xfs_btree_key_addr(cur, ptr + 1, block);
		lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);

		for (i = 0; i < numrecs - ptr; i++) {
			error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}

		if (ptr < numrecs) {
			xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
			xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
			xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
			xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
		}
	} else {
		/* It's a leaf. operate on records */
		if (ptr < numrecs) {
			xfs_btree_shift_recs(cur,
				xfs_btree_rec_addr(cur, ptr + 1, block),
				-1, numrecs - ptr);
			xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
		}
	}

	/*
	 * Decrement and log the number of entries in the block.
	 */
	xfs_btree_set_numrecs(block, --numrecs);
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, NULL,
					    ptr, LASTREC_DELREC);
	}

	/*
	 * We're at the root level.  First, shrink the root block in-memory.
	 * Try to get rid of the next level down.  If we can't then there's
	 * nothing left to do.
	 */
	if (level == cur->bc_nlevels - 1) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			xfs_iroot_realloc(cur->bc_private.b.ip, -1,
					  cur->bc_private.b.whichfork);

			error = xfs_btree_kill_iroot(cur);
			if (error)
				goto error0;

			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			*stat = 1;
			return 0;
		}

		/*
		 * If this is the root level, and there's only one entry left,
		 * and it's NOT the leaf level, then we can get rid of this
		 * level.
		 */
		if (numrecs == 1 && level > 0) {
			union xfs_btree_ptr	*pp;
			/*
			 * pp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			pp = xfs_btree_ptr_addr(cur, 1, block);
			error = xfs_btree_kill_root(cur, bp, level, pp);
			if (error)
				goto error0;
		} else if (level > 0) {
			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
		}
		*stat = 1;
		return 0;
	}

	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
		/*
		 * One child of root, need to get a chance to copy its contents
		 * into the root and delete it.  Can't go up to next level,
		 * there's nothing to delete there.
		 */
		if (xfs_btree_ptr_is_null(cur, &rptr) &&
		    xfs_btree_ptr_is_null(cur, &lptr) &&
		    level == cur->bc_nlevels - 2) {
			error = xfs_btree_kill_iroot(cur);
			if (!error)
				error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			return 0;
		}
	}

	ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
	       !xfs_btree_ptr_is_null(cur, &lptr));

	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;

	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_increment(tcur, level, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		right = xfs_btree_get_block(tcur, level, &rbp);
#ifdef DEBUG
		error = xfs_btree_check_block(tcur, right, level, rbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);

		/*
		 * If right block is full enough so that removing an entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(right) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_lshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));

				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;

				error = xfs_btree_dec_cursor(cur, level, stat);
				if (error)
					goto error0;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = xfs_btree_get_numrecs(right);
		if (!xfs_btree_ptr_is_null(cur, &lptr)) {
			i = xfs_btree_firstrec(tcur, level);
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

			error = xfs_btree_decrement(tcur, level, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		}
	}

	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &lptr)) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		left = xfs_btree_get_block(tcur, level, &lbp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, left, level, lbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);

		/*
		 * If left block is full enough so that removing an entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(left) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_rshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;
				if (level == 0)
					cur->bc_ptrs[0]++;

				*stat = 1;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in left for
		 * future reference.
		 */
		lrecs = xfs_btree_get_numrecs(left);
	}

	/* Delete the temp cursor, we're done with it. */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	tcur = NULL;

	/* If here, we need to do a join to keep the tree balanced. */
	ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));

	if (!xfs_btree_ptr_is_null(cur, &lptr) &&
	    lrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rptr = cptr;
		right = block;
		rbp = bp;
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;

	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
		   rrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lptr = cptr;
		left = block;
		lbp = bp;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;

	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	} else {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	rrecs = xfs_btree_get_numrecs(right);
	lrecs = xfs_btree_get_numrecs(left);

	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

		for (i = 1; i < rrecs; i++) {
			error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}

		xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
		xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);

		xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
		xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}

	XFS_BTREE_STATS_INC(cur, join);

	/*
	 * Fix up the number of records and right block pointer in the
	 * surviving block, and log it.
	 */
	xfs_btree_set_numrecs(left, lrecs + rrecs);
	/*
	 * NOTE(review): the next statement ends with a comma operator rather
	 * than a semicolon.  It is harmless (the two calls still execute in
	 * order) but should be a ';'.
	 */
	xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB),
	xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/* If there is a right sibling, point it to the remaining block. */
	xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &cptr)) {
		error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Free the deleted block. */
	error = xfs_btree_free_block(cur, rbp);
	if (error)
		goto error0;

	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		cur->bc_bufs[level] = lbp;
		cur->bc_ptrs[level] += lrecs;
		cur->bc_ra[level] = 0;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
		 (level + 1 < cur->bc_nlevels)) {
		error = xfs_btree_increment(cur, level + 1, &i);
		if (error)
			goto error0;
	}

	/*
	 * Readjust the ptr at this level if it's not a leaf, since it's
	 * still pointing at the deletion point, which makes the cursor
	 * inconsistent.  If this makes the ptr 0, the caller fixes it up.
	 * We can't use decrement because it would change the next level up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;

	/*
	 * We combined blocks, so we have to update the parent keys if the
	 * btree supports overlapped intervals.  However, bc_ptrs[level + 1]
	 * points to the old block so that the caller knows which record to
	 * delete.  Therefore, the caller must be savvy enough to call updkeys
	 * for us if we return stat == 2.  The other exit points from this
	 * function don't require deletions further up the tree, so they can
	 * call updkeys directly.
	 */

	/* Return value means the next level up has something to do. */
	*stat = 2;
	return 0;

error0:
	if (tcur)
		xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
4166
4167
4168
4169
4170
4171
/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
int					/* error */
xfs_btree_delete(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			level;	/* current btree level */
	int			i;	/* per-level delrec result */
	bool			joined = false;	/* any level merged blocks */

	/*
	 * Go up the tree, starting at leaf level.
	 *
	 * If 2 is returned then a join was done; go to the next level.
	 * Otherwise we are done.
	 */
	for (level = 0, i = 2; i == 2; level++) {
		error = xfs_btree_delrec(cur, level, &i);
		if (error)
			goto error0;
		if (i == 2)
			joined = true;
	}

	/*
	 * If we combined blocks as part of deleting the record, delrec won't
	 * have updated the parent high keys so we have to do that here.
	 */
	if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
		error = xfs_btree_updkeys_force(cur, 0);
		if (error)
			goto error0;
	}

	/*
	 * If the delete failed (i == 0), walk up the tree and step any level
	 * whose index ran off the left edge back to a valid position.
	 */
	if (i == 0) {
		for (level = 1; level < cur->bc_nlevels; level++) {
			if (cur->bc_ptrs[level] == 0) {
				error = xfs_btree_decrement(cur, level, &i);
				if (error)
					goto error0;
				break;
			}
		}
	}

	*stat = i;
	return 0;
error0:
	return error;
}
4222
4223
4224
4225
4226int
4227xfs_btree_get_rec(
4228 struct xfs_btree_cur *cur,
4229 union xfs_btree_rec **recp,
4230 int *stat)
4231{
4232 struct xfs_btree_block *block;
4233 struct xfs_buf *bp;
4234 int ptr;
4235#ifdef DEBUG
4236 int error;
4237#endif
4238
4239 ptr = cur->bc_ptrs[0];
4240 block = xfs_btree_get_block(cur, 0, &bp);
4241
4242#ifdef DEBUG
4243 error = xfs_btree_check_block(cur, block, 0, bp);
4244 if (error)
4245 return error;
4246#endif
4247
4248
4249
4250
4251 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
4252 *stat = 0;
4253 return 0;
4254 }
4255
4256
4257
4258
4259 *recp = xfs_btree_rec_addr(cur, ptr, block);
4260 *stat = 1;
4261 return 0;
4262}
4263
4264
4265STATIC int
4266xfs_btree_visit_block(
4267 struct xfs_btree_cur *cur,
4268 int level,
4269 xfs_btree_visit_blocks_fn fn,
4270 void *data)
4271{
4272 struct xfs_btree_block *block;
4273 struct xfs_buf *bp;
4274 union xfs_btree_ptr rptr;
4275 int error;
4276
4277
4278 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4279 block = xfs_btree_get_block(cur, level, &bp);
4280
4281
4282 error = fn(cur, level, data);
4283 if (error)
4284 return error;
4285
4286
4287 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4288 if (xfs_btree_ptr_is_null(cur, &rptr))
4289 return -ENOENT;
4290
4291 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4292}
4293
4294
4295
/* Visit every block in a btree, one level at a time, left to right. */
int
xfs_btree_visit_blocks(
	struct xfs_btree_cur		*cur,
	xfs_btree_visit_blocks_fn	fn,
	void				*data)
{
	union xfs_btree_ptr		lptr;
	int				level;
	struct xfs_btree_block		*block = NULL;
	int				error = 0;

	cur->bc_ops->init_ptr_from_cur(cur, &lptr);

	/* for each level of the btree... */
	for (level = cur->bc_nlevels - 1; level >= 0; level--) {
		/* grab the left hand block */
		error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
		if (error)
			return error;

		/* readahead the left most block for the next level down */
		if (level > 0) {
			union xfs_btree_ptr     *ptr;

			ptr = xfs_btree_ptr_addr(cur, 1, block);
			xfs_btree_readahead_ptr(cur, ptr, 1);

			/* save for the next iteration of the loop */
			xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
		}

		/* for each buffer in the level */
		do {
			error = xfs_btree_visit_block(cur, level, fn, data);
		} while (!error);

		/* -ENOENT just means we walked off the end of the level */
		if (error != -ENOENT)
			return error;
	}

	return 0;
}
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
/*
 * Context passed down the xfs_btree_change_owner() walk: the owner id to
 * stamp into every block, and the delwri list that modified buffers are
 * queued to when the change is made outside a transaction.
 */
struct xfs_btree_block_change_owner_info {
	uint64_t		new_owner;	/* owner id to write into bb_owner */
	struct list_head	*buffer_list;	/* delwri queue (non-transactional case) */
};
4367
/*
 * Rewrite the owner field of one btree block.  Callback for
 * xfs_btree_visit_blocks(); returns -EAGAIN when a transactional buffer
 * could not be ordered and had to be physically logged instead.
 */
static int
xfs_btree_block_change_owner(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	struct xfs_btree_block_change_owner_info	*bbcoi = data;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	/* modify the owner */
	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
			return 0;
		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
	} else {
		if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner))
			return 0;
		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
	}

	/*
	 * If the block is a root block hosted in an inode, we might not have a
	 * buffer pointer here and we shouldn't attempt to log the change as
	 * the information is already held in the inode and discarded when the
	 * root block is formatted into the on-disk inode fork.  We still
	 * change it, though, so everything is consistent in memory.
	 */
	if (!bp) {
		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
		ASSERT(level == cur->bc_nlevels - 1);
		return 0;
	}

	if (cur->bc_tp) {
		/* Ordered buffers pass through the log without their data. */
		if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
			return -EAGAIN;
		}
	} else {
		xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
	}

	return 0;
}
4414
4415int
4416xfs_btree_change_owner(
4417 struct xfs_btree_cur *cur,
4418 uint64_t new_owner,
4419 struct list_head *buffer_list)
4420{
4421 struct xfs_btree_block_change_owner_info bbcoi;
4422
4423 bbcoi.new_owner = new_owner;
4424 bbcoi.buffer_list = buffer_list;
4425
4426 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
4427 &bbcoi);
4428}
4429
4430
/*
 * Verify the v5 (CRC-enabled) fields of a long-format btree block header:
 * metadata UUID, self-describing block number, and (optionally) owner.
 * Returns the failing check's address, or NULL if the header is good.
 */
xfs_failaddr_t
xfs_btree_lblock_v5hdr_verify(
	struct xfs_buf		*bp,
	uint64_t		owner)	/* expected owner, or XFS_RMAP_OWN_UNKNOWN */
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return __this_address;
	if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (block->bb_u.l.bb_blkno != cpu_to_be64(bp->b_bn))
		return __this_address;
	if (owner != XFS_RMAP_OWN_UNKNOWN &&
	    be64_to_cpu(block->bb_u.l.bb_owner) != owner)
		return __this_address;
	return NULL;
}
4450
4451
/*
 * Verify a long-format btree block: record count within bounds and sibling
 * pointers that are either NULL or valid filesystem block numbers.
 * Returns the failing check's address, or NULL if the block is good.
 */
xfs_failaddr_t
xfs_btree_lblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)	/* most records this block can hold */
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > max_recs)
		return __this_address;

	/* sibling pointer verification */
	if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
	    !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))
		return __this_address;
	if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
	    !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))
		return __this_address;

	return NULL;
}
4474
4475
4476
4477
4478
4479
4480
4481
4482
/*
 * Verify the v5 (CRC-enabled) fields of a short-format btree block header:
 * metadata UUID, self-describing block number, and AG-number owner.
 * Returns the failing check's address, or NULL if the header is good.
 */
xfs_failaddr_t
xfs_btree_sblock_v5hdr_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return __this_address;
	if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
		return __this_address;
	/* short-format blocks are owned by the AG they live in */
	if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
		return __this_address;
	return NULL;
}
4501
4502
4503
4504
4505
4506
4507
/*
 * Verify a short-format btree block: record count within bounds and sibling
 * pointers that are either NULL or valid AG block numbers within this AG.
 * Returns the failing check's address, or NULL if the block is good.
 */
xfs_failaddr_t
xfs_btree_sblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)	/* most records this block can hold */
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_agblock_t		agno;

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > max_recs)
		return __this_address;

	/* sibling pointer verification */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
	    !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
		return __this_address;
	if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
	    !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)))
		return __this_address;

	return NULL;
}
4532
4533
4534
4535
4536
4537uint
4538xfs_btree_compute_maxlevels(
4539 uint *limits,
4540 unsigned long len)
4541{
4542 uint level;
4543 unsigned long maxblocks;
4544
4545 maxblocks = (len + limits[0] - 1) / limits[0];
4546 for (level = 1; maxblocks > 1; level++)
4547 maxblocks = (maxblocks + limits[1] - 1) / limits[1];
4548 return level;
4549}
4550
4551
4552
4553
4554
4555
/*
 * Query a regular (non-overlapping) btree for all records overlapping a
 * given interval.  Start with a LE lookup of the low key and walk right,
 * invoking fn on each record, until we find a record whose key is greater
 * than high_key.
 */
STATIC int
xfs_btree_simple_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*low_key,
	union xfs_btree_key		*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_rec		*recp;
	union xfs_btree_key		rec_key;
	int64_t				diff;
	int				stat;
	bool				firstrec = true;
	int				error;

	ASSERT(cur->bc_ops->init_high_key_from_rec);
	ASSERT(cur->bc_ops->diff_two_keys);

	/*
	 * Find the leftmost record.  The query_range function expects us to
	 * reference the last record before the query range.
	 */
	stat = 0;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
	if (error)
		goto out;

	/* Nothing?  See if there's anything to the right. */
	if (!stat) {
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			goto out;
	}

	while (stat) {
		/* Find the record. */
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || !stat)
			break;

		/* Skip if high_key(rec) < low_key. */
		if (firstrec) {
			cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
			firstrec = false;
			diff = cur->bc_ops->diff_two_keys(cur, low_key,
					&rec_key);
			if (diff > 0)
				goto advloop;
		}

		/* Stop if low_key(rec) > high_key. */
		cur->bc_ops->init_key_from_rec(&rec_key, recp);
		diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
		if (diff > 0)
			break;

		/* Callback */
		error = fn(cur, recp, priv);
		if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
			break;

advloop:
		/* Move on to the next record. */
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
	}

out:
	return error;
}
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
/*
 * Query an overlapped-interval btree for all records overlapping
 * [low_key, high_key].
 *
 * The walk starts at the root and iterates with an explicit per-level cursor
 * position (cur->bc_ptrs[level]) instead of recursion.  At an interior node,
 * a child is descended into when its [low key, high key] range intersects the
 * query range; at a leaf, @fn is invoked for every intersecting record.  When
 * a node is exhausted (or its keys pass the query's high key), control pops
 * back up one level via the pop_up label.
 */
STATIC int
xfs_btree_overlapped_query_range(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*low_key,
	union xfs_btree_key	*high_key,
	xfs_btree_query_range_fn fn,
	void			*priv)
{
	union xfs_btree_ptr	ptr;
	union xfs_btree_ptr	*pp;
	union xfs_btree_key	rec_key;
	union xfs_btree_key	rec_hkey;
	union xfs_btree_key	*lkp;
	union xfs_btree_key	*hkp;
	union xfs_btree_rec	*recp;
	struct xfs_btree_block	*block;
	int64_t			ldiff;
	int64_t			hdiff;
	int			level;
	struct xfs_buf		*bp;
	int			i;
	int			error;

	/* Load the root of the btree. */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
	if (error)
		return error;
	xfs_btree_get_block(cur, level, &bp);
	trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto out;
#endif
	cur->bc_ptrs[level] = 1;

	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		/* End of node, pop back towards the root. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
pop_up:
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		if (level == 0) {
			/* Handle a leaf node. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

			cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
			ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
					low_key);

			cur->bc_ops->init_key_from_rec(&rec_key, recp);
			hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
					&rec_key);

			/*
			 * If (record's high key >= query's low key) and
			 *    (query's high key >= record's low key), then
			 * this record overlaps the query range; invoke the
			 * callback.
			 */
			if (ldiff >= 0 && hdiff >= 0) {
				error = fn(cur, recp, priv);
				if (error < 0 ||
				    error == XFS_BTREE_QUERY_RANGE_ABORT)
					break;
			} else if (hdiff < 0) {
				/* Record is past the high key; pop up. */
				goto pop_up;
			}
			cur->bc_ptrs[level]++;
			continue;
		}

		/* Handle an internal node. */
		lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
		hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);

		ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
		hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);

		/*
		 * If (pointer's high key >= query's low key) and
		 *    (query's high key >= pointer's low key), then
		 * this subtree may overlap the query range; descend into
		 * the child block.
		 */
		if (ldiff >= 0 && hdiff >= 0) {
			level--;
			error = xfs_btree_lookup_get_block(cur, level, pp,
					&block);
			if (error)
				goto out;
			xfs_btree_get_block(cur, level, &bp);
			trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
			error = xfs_btree_check_block(cur, block, level, bp);
			if (error)
				goto out;
#endif
			cur->bc_ptrs[level] = 1;
			continue;
		} else if (hdiff < 0) {
			/* Child's low key is past the high key; pop up. */
			goto pop_up;
		}
		cur->bc_ptrs[level]++;
	}

out:
	/*
	 * If the walk did not end with the cursor pointing at a leaf block,
	 * a subsequent non-error cursor teardown would not release the
	 * node-level buffers, leaking them.  This is quite possible with a
	 * zero-result range query, so release any held buffers here when no
	 * leaf is pinned.
	 */
	if (cur->bc_bufs[0] == NULL) {
		for (i = 0; i < cur->bc_nlevels; i++) {
			if (cur->bc_bufs[i]) {
				xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
				cur->bc_bufs[i] = NULL;
				cur->bc_ptrs[i] = 0;
				cur->bc_ra[i] = 0;
			}
		}
	}

	return error;
}
4783
4784
4785
4786
4787
4788
4789
4790
4791int
4792xfs_btree_query_range(
4793 struct xfs_btree_cur *cur,
4794 union xfs_btree_irec *low_rec,
4795 union xfs_btree_irec *high_rec,
4796 xfs_btree_query_range_fn fn,
4797 void *priv)
4798{
4799 union xfs_btree_rec rec;
4800 union xfs_btree_key low_key;
4801 union xfs_btree_key high_key;
4802
4803
4804 cur->bc_rec = *high_rec;
4805 cur->bc_ops->init_rec_from_cur(cur, &rec);
4806 cur->bc_ops->init_key_from_rec(&high_key, &rec);
4807
4808 cur->bc_rec = *low_rec;
4809 cur->bc_ops->init_rec_from_cur(cur, &rec);
4810 cur->bc_ops->init_key_from_rec(&low_key, &rec);
4811
4812
4813 if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
4814 return -EINVAL;
4815
4816 if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
4817 return xfs_btree_simple_query_range(cur, &low_key,
4818 &high_key, fn, priv);
4819 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
4820 fn, priv);
4821}
4822
4823
4824int
4825xfs_btree_query_all(
4826 struct xfs_btree_cur *cur,
4827 xfs_btree_query_range_fn fn,
4828 void *priv)
4829{
4830 union xfs_btree_key low_key;
4831 union xfs_btree_key high_key;
4832
4833 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
4834 memset(&low_key, 0, sizeof(low_key));
4835 memset(&high_key, 0xFF, sizeof(high_key));
4836
4837 return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv);
4838}
4839
4840
4841
4842
4843
4844unsigned long long
4845xfs_btree_calc_size(
4846 uint *limits,
4847 unsigned long long len)
4848{
4849 int level;
4850 int maxrecs;
4851 unsigned long long rval;
4852
4853 maxrecs = limits[0];
4854 for (level = 0, rval = 0; len > 1; level++) {
4855 len += maxrecs - 1;
4856 do_div(len, maxrecs);
4857 maxrecs = limits[1];
4858 rval += len;
4859 }
4860 return rval;
4861}
4862
4863static int
4864xfs_btree_count_blocks_helper(
4865 struct xfs_btree_cur *cur,
4866 int level,
4867 void *data)
4868{
4869 xfs_extlen_t *blocks = data;
4870 (*blocks)++;
4871
4872 return 0;
4873}
4874
4875
/*
 * Count the number of blocks in the btree attached to @cur, storing the
 * result in *@blocks.  Visits every block in the tree, so the cost is
 * proportional to the btree's size.
 */
int
xfs_btree_count_blocks(
	struct xfs_btree_cur	*cur,
	xfs_extlen_t		*blocks)
{
	*blocks = 0;
	return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
			blocks);
}
4885
4886
4887int64_t
4888xfs_btree_diff_two_ptrs(
4889 struct xfs_btree_cur *cur,
4890 const union xfs_btree_ptr *a,
4891 const union xfs_btree_ptr *b)
4892{
4893 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
4894 return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
4895 return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
4896}
4897
4898
/*
 * Callback for xfs_btree_has_record: abort on the first record found, so
 * existence can be detected without walking the rest of the range.
 */
STATIC int
xfs_btree_has_record_helper(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	void			*priv)
{
	return XFS_BTREE_QUERY_RANGE_ABORT;
}
4907
4908
4909int
4910xfs_btree_has_record(
4911 struct xfs_btree_cur *cur,
4912 union xfs_btree_irec *low,
4913 union xfs_btree_irec *high,
4914 bool *exists)
4915{
4916 int error;
4917
4918 error = xfs_btree_query_range(cur, low, high,
4919 &xfs_btree_has_record_helper, NULL);
4920 if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
4921 *exists = true;
4922 return 0;
4923 }
4924 *exists = false;
4925 return error;
4926}
4927
4928
4929bool
4930xfs_btree_has_more_records(
4931 struct xfs_btree_cur *cur)
4932{
4933 struct xfs_btree_block *block;
4934 struct xfs_buf *bp;
4935
4936 block = xfs_btree_get_block(cur, 0, &bp);
4937
4938
4939 if (cur->bc_ptrs[0] < xfs_btree_get_numrecs(block))
4940 return true;
4941
4942
4943 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
4944 return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK);
4945 else
4946 return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
4947}
4948