1
2
3
4
5
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_mount.h"
14#include "xfs_inode.h"
15#include "xfs_trans.h"
16#include "xfs_buf_item.h"
17#include "xfs_btree.h"
18#include "xfs_errortag.h"
19#include "xfs_error.h"
20#include "xfs_trace.h"
21#include "xfs_alloc.h"
22#include "xfs_log.h"
23
24
25
26
/* Slab cache used for all btree cursor allocations. */
kmem_zone_t	*xfs_btree_cur_zone;
28
29
30
31
/*
 * Btree magic numbers, indexed by [crc-enabled][btree type].  A zero
 * entry marks a btree type that does not exist in that format (e.g.
 * rmap and refcount btrees require CRC-enabled filesystems).
 */
static const uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
	{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
	  XFS_FIBT_MAGIC, 0 },
	{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
	  XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC,
	  XFS_REFC_CRC_MAGIC }
};
39
40uint32_t
41xfs_btree_magic(
42 int crc,
43 xfs_btnum_t btnum)
44{
45 uint32_t magic = xfs_magics[crc][btnum];
46
47
48 ASSERT(magic != 0);
49 return magic;
50}
51
52
53
54
55
/*
 * Verify the header of a long-format (64-bit pointer) btree block.
 * Returns the address of the failing check for corruption reporting,
 * or NULL if the block header is acceptable.
 */
xfs_failaddr_t
__xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_btnum_t		btnum = cur->bc_btnum;
	int			crc = xfs_sb_version_hascrc(&mp->m_sb);

	if (crc) {
		/* uuid/blkno/pad fields only exist on CRC filesystems */
		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (block->bb_u.l.bb_blkno !=
		    cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
			return __this_address;
		if (block->bb_u.l.bb_pad != cpu_to_be32(0))
			return __this_address;
	}

	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
		return __this_address;
	if (be16_to_cpu(block->bb_level) != level)
		return __this_address;
	if (be16_to_cpu(block->bb_numrecs) >
	    cur->bc_ops->get_maxrecs(cur, level))
		return __this_address;
	/* level + 1 so that sibling pointers of a level-0 block still pass */
	if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
	    !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib),
			level + 1))
		return __this_address;
	if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
	    !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib),
			level + 1))
		return __this_address;

	return NULL;
}
95
96
/*
 * Check a long-format btree block, converting a verifier failure (or an
 * injected XFS_ERRTAG_BTREE_CHECK_LBLOCK error) into -EFSCORRUPTED.
 */
static int
xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_failaddr_t		fa;

	fa = __xfs_btree_check_lblock(cur, block, level, bp);
	if (XFS_IS_CORRUPT(mp, fa != NULL) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK)) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		return -EFSCORRUPTED;
	}
	return 0;
}
116
117
118
119
120
/*
 * Verify the header of a short-format (32-bit pointer) btree block.
 * Returns the address of the failing check for corruption reporting,
 * or NULL if the block header is acceptable.
 */
xfs_failaddr_t
__xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_btnum_t		btnum = cur->bc_btnum;
	int			crc = xfs_sb_version_hascrc(&mp->m_sb);

	if (crc) {
		/* uuid/blkno fields only exist on CRC filesystems */
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (block->bb_u.s.bb_blkno !=
		    cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
			return __this_address;
	}

	if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
		return __this_address;
	if (be16_to_cpu(block->bb_level) != level)
		return __this_address;
	if (be16_to_cpu(block->bb_numrecs) >
	    cur->bc_ops->get_maxrecs(cur, level))
		return __this_address;
	/* level + 1 so that sibling pointers of a level-0 block still pass */
	if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
	    !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib),
			level + 1))
		return __this_address;
	if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
	    !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib),
			level + 1))
		return __this_address;

	return NULL;
}
158
159
/*
 * Check a short-format btree block, converting a verifier failure (or an
 * injected XFS_ERRTAG_BTREE_CHECK_SBLOCK error) into -EFSCORRUPTED.
 */
STATIC int
xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_failaddr_t		fa;

	fa = __xfs_btree_check_sblock(cur, block, level, bp);
	if (XFS_IS_CORRUPT(mp, fa != NULL) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_SBLOCK)) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		return -EFSCORRUPTED;
	}
	return 0;
}
179
180
181
182
183int
184xfs_btree_check_block(
185 struct xfs_btree_cur *cur,
186 struct xfs_btree_block *block,
187 int level,
188 struct xfs_buf *bp)
189{
190 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
191 return xfs_btree_check_lblock(cur, block, level, bp);
192 else
193 return xfs_btree_check_sblock(cur, block, level, bp);
194}
195
196
197bool
198xfs_btree_check_lptr(
199 struct xfs_btree_cur *cur,
200 xfs_fsblock_t fsbno,
201 int level)
202{
203 if (level <= 0)
204 return false;
205 return xfs_verify_fsbno(cur->bc_mp, fsbno);
206}
207
208
209bool
210xfs_btree_check_sptr(
211 struct xfs_btree_cur *cur,
212 xfs_agblock_t agbno,
213 int level)
214{
215 if (level <= 0)
216 return false;
217 return xfs_verify_agbno(cur->bc_mp, cur->bc_private.a.agno, agbno);
218}
219
220
221
222
223
/*
 * Check the pointer at slot @index of @ptr for btree level @level.
 * Logs a corruption message and returns -EFSCORRUPTED if invalid,
 * 0 otherwise.
 */
static int
xfs_btree_check_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			index,
	int			level)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (xfs_btree_check_lptr(cur, be64_to_cpu((&ptr->l)[index]),
				level))
			return 0;
		/* long-pointer btrees here are inode-rooted (bmap) */
		xfs_err(cur->bc_mp,
"Inode %llu fork %d: Corrupt btree %d pointer at level %d index %d.",
				cur->bc_private.b.ip->i_ino,
				cur->bc_private.b.whichfork, cur->bc_btnum,
				level, index);
	} else {
		if (xfs_btree_check_sptr(cur, be32_to_cpu((&ptr->s)[index]),
				level))
			return 0;
		xfs_err(cur->bc_mp,
"AG %u: Corrupt btree %d pointer at level %d index %d.",
				cur->bc_private.a.agno, cur->bc_btnum,
				level, index);
	}

	return -EFSCORRUPTED;
}
252
/*
 * On debug kernels validate btree pointers at every dereference site;
 * production builds expand to (0) so callers' error checks still compile.
 */
#ifdef DEBUG
# define xfs_btree_debug_check_ptr	xfs_btree_check_ptr
#else
# define xfs_btree_debug_check_ptr(...)	(0)
#endif
258
259
260
261
262
263
264
265
266
/*
 * Stamp the last-modification LSN into a long-format btree block and
 * recompute its checksum before the buffer goes to disk.  A no-op on
 * non-CRC filesystems.
 */
void
xfs_btree_lblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!xfs_sb_version_hascrc(&bp->b_mount->m_sb))
		return;
	/* bip is NULL when the buffer carries no log item */
	if (bip)
		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
}
280
281bool
282xfs_btree_lblock_verify_crc(
283 struct xfs_buf *bp)
284{
285 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
286 struct xfs_mount *mp = bp->b_mount;
287
288 if (xfs_sb_version_hascrc(&mp->m_sb)) {
289 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
290 return false;
291 return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
292 }
293
294 return true;
295}
296
297
298
299
300
301
302
303
304
/*
 * Stamp the last-modification LSN into a short-format btree block and
 * recompute its checksum before the buffer goes to disk.  A no-op on
 * non-CRC filesystems.
 */
void
xfs_btree_sblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!xfs_sb_version_hascrc(&bp->b_mount->m_sb))
		return;
	/* bip is NULL when the buffer carries no log item */
	if (bip)
		block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}
318
319bool
320xfs_btree_sblock_verify_crc(
321 struct xfs_buf *bp)
322{
323 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
324 struct xfs_mount *mp = bp->b_mount;
325
326 if (xfs_sb_version_hascrc(&mp->m_sb)) {
327 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
328 return false;
329 return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
330 }
331
332 return true;
333}
334
335static int
336xfs_btree_free_block(
337 struct xfs_btree_cur *cur,
338 struct xfs_buf *bp)
339{
340 int error;
341
342 error = cur->bc_ops->free_block(cur, bp);
343 if (!error) {
344 xfs_trans_binval(cur->bc_tp, bp);
345 XFS_BTREE_STATS_INC(cur, free);
346 }
347 return error;
348}
349
350
351
352
/*
 * Delete the btree cursor, releasing any buffers it still holds.
 * @error is nonzero when the caller is tearing down after a failure.
 */
void
xfs_btree_del_cursor(
	xfs_btree_cur_t	*cur,		/* btree cursor */
	int		error)		/* del because of error */
{
	int		i;		/* btree level */

	/*
	 * Clear the buffer pointers and release the buffers.  On the
	 * success path, stop at the first empty slot: levels above it
	 * cannot hold buffers either.  On the error path keep scanning,
	 * since intermediate slots may still be populated.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_bufs[i])
			xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
		else if (!error)
			break;
	}

	/*
	 * A bmap cursor must not be freed with blocks still allocated
	 * and unaccounted for.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);

	/* Return the cursor to its slab cache. */
	kmem_cache_free(xfs_btree_cur_zone, cur);
}
387
388
389
390
391
/*
 * Duplicate the btree cursor: allocate a new cursor, copy the current
 * record, and re-read the buffer at each active level so the duplicate
 * holds its own buffer references.  On error the partially built cursor
 * is torn down and *ncur is set to NULL.
 */
int
xfs_btree_dup_cursor(
	xfs_btree_cur_t	*cur,		/* input cursor */
	xfs_btree_cur_t	**ncur)		/* output cursor */
{
	xfs_buf_t	*bp;		/* btree block's buffer pointer */
	int		error;		/* error return value */
	int		i;		/* level number of btree block */
	xfs_mount_t	*mp;		/* mount structure for filesystem */
	xfs_btree_cur_t	*new;		/* new cursor value */
	xfs_trans_t	*tp;		/* transaction pointer */

	tp = cur->bc_tp;
	mp = cur->bc_mp;

	/* Allocate a new cursor like the old one. */
	new = cur->bc_ops->dup_cursor(cur);

	/* Copy the record currently in the cursor. */
	new->bc_rec = cur->bc_rec;

	/* For each level current, re-get the buffer and copy the ptr. */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_ptrs[i] = cur->bc_ptrs[i];
		new->bc_ra[i] = cur->bc_ra[i];
		bp = cur->bc_bufs[i];
		if (bp) {
			error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
						   XFS_BUF_ADDR(bp), mp->m_bsize,
						   0, &bp,
						   cur->bc_ops->buf_ops);
			if (error) {
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
		}
		new->bc_bufs[i] = bp;
	}
	*ncur = new;
	return 0;
}
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
519{
520 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
521 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
522 return XFS_BTREE_LBLOCK_CRC_LEN;
523 return XFS_BTREE_LBLOCK_LEN;
524 }
525 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
526 return XFS_BTREE_SBLOCK_CRC_LEN;
527 return XFS_BTREE_SBLOCK_LEN;
528}
529
530
531
532
533static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
534{
535 return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
536 sizeof(__be64) : sizeof(__be32);
537}
538
539
540
541
542STATIC size_t
543xfs_btree_rec_offset(
544 struct xfs_btree_cur *cur,
545 int n)
546{
547 return xfs_btree_block_len(cur) +
548 (n - 1) * cur->bc_ops->rec_len;
549}
550
551
552
553
554STATIC size_t
555xfs_btree_key_offset(
556 struct xfs_btree_cur *cur,
557 int n)
558{
559 return xfs_btree_block_len(cur) +
560 (n - 1) * cur->bc_ops->key_len;
561}
562
563
564
565
/*
 * Byte offset of the high key within key slot @n (1-based): halfway into
 * the key_len-sized slot, since each slot holds a low/high key pair.
 */
STATIC size_t
xfs_btree_high_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
}
574
575
576
577
/*
 * Byte offset of pointer @n (1-based) in a block at @level: pointers are
 * laid out after the header and maxrecs(level) key slots.
 */
STATIC size_t
xfs_btree_ptr_offset(
	struct xfs_btree_cur	*cur,
	int			n,
	int			level)
{
	return xfs_btree_block_len(cur) +
		cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
		(n - 1) * xfs_btree_ptr_len(cur);
}
588
589
590
591
/* Return the address of record @n (1-based) inside @block. */
union xfs_btree_rec *
xfs_btree_rec_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_rec *)(base + xfs_btree_rec_offset(cur, n));
}
601
602
603
604
/* Return the address of key @n (1-based) inside @block. */
union xfs_btree_key *
xfs_btree_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_key *)(base + xfs_btree_key_offset(cur, n));
}
614
615
616
617
/* Return the address of the high key in slot @n (1-based) inside @block. */
union xfs_btree_key *
xfs_btree_high_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	char			*base = (char *)block;

	return (union xfs_btree_key *)
		(base + xfs_btree_high_key_offset(cur, n));
}
627
628
629
630
/*
 * Return the address of pointer @n (1-based) inside @block.  Only valid
 * for interior blocks; leaf blocks (level 0) contain records instead.
 */
union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	int			level = xfs_btree_get_level(block);

	/* zero is the same in any endianness, so no byte swap needed */
	ASSERT(block->bb_level != 0);

	return (union xfs_btree_ptr *)
		((char *)block + xfs_btree_ptr_offset(cur, n, level));
}
644
645
646
647
648
649
650
651STATIC struct xfs_btree_block *
652xfs_btree_get_iroot(
653 struct xfs_btree_cur *cur)
654{
655 struct xfs_ifork *ifp;
656
657 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
658 return (struct xfs_btree_block *)ifp->if_broot;
659}
660
661
662
663
664
665struct xfs_btree_block *
666xfs_btree_get_block(
667 struct xfs_btree_cur *cur,
668 int level,
669 struct xfs_buf **bpp)
670{
671 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
672 (level == cur->bc_nlevels - 1)) {
673 *bpp = NULL;
674 return xfs_btree_get_iroot(cur);
675 }
676
677 *bpp = cur->bc_bufs[level];
678 return XFS_BUF_TO_BLOCK(*bpp);
679}
680
681
682
683
684
/*
 * Set the cursor to point to the first record at the given level.
 * Other levels are unaffected.  Returns 1 on success, 0 if the block
 * is bad or empty.
 */
STATIC int
xfs_btree_firstrec(
	xfs_btree_cur_t		*cur,		/* btree cursor */
	int			level)		/* level to change */
{
	struct xfs_btree_block	*block;		/* generic btree block pointer */
	xfs_buf_t		*bp;		/* buffer containing block */

	/* Get the block pointer for this level. */
	block = xfs_btree_get_block(cur, level, &bp);
	if (xfs_btree_check_block(cur, block, level, bp))
		return 0;

	/* It's empty, there is no such record (zero is endian-neutral). */
	if (!block->bb_numrecs)
		return 0;

	/* Set the ptr value to 1, that's the first record/key. */
	cur->bc_ptrs[level] = 1;
	return 1;
}
710
711
712
713
714
/*
 * Set the cursor to point to the last record at the given level.
 * Other levels are unaffected.  Returns 1 on success, 0 if the block
 * is bad or empty.
 */
STATIC int
xfs_btree_lastrec(
	xfs_btree_cur_t		*cur,		/* btree cursor */
	int			level)		/* level to change */
{
	struct xfs_btree_block	*block;		/* generic btree block pointer */
	xfs_buf_t		*bp;		/* buffer containing block */

	/* Get the block pointer for this level. */
	block = xfs_btree_get_block(cur, level, &bp);
	if (xfs_btree_check_block(cur, block, level, bp))
		return 0;

	/* It's empty, there is no such record (zero is endian-neutral). */
	if (!block->bb_numrecs)
		return 0;

	/* Set the ptr value to numrecs, that's the last record/key. */
	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
	return 1;
}
740
741
742
743
744
745void
746xfs_btree_offsets(
747 int64_t fields,
748 const short *offsets,
749 int nbits,
750 int *first,
751 int *last)
752{
753 int i;
754 int64_t imask;
755
756 ASSERT(fields != 0);
757
758
759
760 for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
761 if (imask & fields) {
762 *first = offsets[i];
763 break;
764 }
765 }
766
767
768
769 for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
770 if (imask & fields) {
771 *last = offsets[i + 1] - 1;
772 break;
773 }
774 }
775}
776
777
778
779
780
/*
 * Read the block at the given long-format block number into a buffer,
 * validating the fsbno first, and set the buffer's reference count hint.
 * Returns 0 or an error code.
 */
int
xfs_btree_read_bufl(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	struct xfs_buf		**bpp,		/* buffer for fsbno */
	int			refval,		/* ref count value for buffer */
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;		/* return value */
	xfs_daddr_t		d;		/* real disk block address */
	int			error;

	/* reject garbage block numbers before issuing any I/O */
	if (!xfs_verify_fsbno(mp, fsbno))
		return -EFSCORRUPTED;
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
				   mp->m_bsize, 0, &bp, ops);
	if (error)
		return error;
	if (bp)
		xfs_buf_set_ref(bp, refval);
	*bpp = bp;
	return 0;
}
806
807
808
809
810
811
812void
813xfs_btree_reada_bufl(
814 struct xfs_mount *mp,
815 xfs_fsblock_t fsbno,
816 xfs_extlen_t count,
817 const struct xfs_buf_ops *ops)
818{
819 xfs_daddr_t d;
820
821 ASSERT(fsbno != NULLFSBLOCK);
822 d = XFS_FSB_TO_DADDR(mp, fsbno);
823 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
824}
825
826
827
828
829
830
831void
832xfs_btree_reada_bufs(
833 struct xfs_mount *mp,
834 xfs_agnumber_t agno,
835 xfs_agblock_t agbno,
836 xfs_extlen_t count,
837 const struct xfs_buf_ops *ops)
838{
839 xfs_daddr_t d;
840
841 ASSERT(agno != NULLAGNUMBER);
842 ASSERT(agbno != NULLAGBLOCK);
843 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
844 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
845}
846
/*
 * Issue readahead for the left and/or right siblings of a long-format
 * block, as selected by @lr.  Returns the number of readaheads started.
 */
STATIC int
xfs_btree_readahead_lblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, left, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, right, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}
871
/*
 * Issue readahead for the left and/or right siblings of a short-format
 * block, as selected by @lr.  Returns the number of readaheads started.
 */
STATIC int
xfs_btree_readahead_sblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);


	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     left, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     right, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}
897
898
899
900
901
/*
 * Read-ahead btree sibling blocks at the given level; @lr is a mask of
 * XFS_BTCUR_{LEFT,RIGHT}RA.  Returns the number of readaheads issued.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the
	 * btree root is stored in the inode.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	/* skip directions we have already read ahead at this level */
	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}
928
/*
 * Convert a btree pointer (long or short form) into a disk address,
 * validating the pointer first.  Returns 0 or -EFSCORRUPTED.
 */
STATIC int
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	xfs_daddr_t		*daddr)
{
	xfs_fsblock_t		fsbno;
	xfs_agblock_t		agbno;
	int			error;

	error = xfs_btree_check_ptr(cur, ptr, 0, 1);
	if (error)
		return error;

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		fsbno = be64_to_cpu(ptr->l);
		*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
	} else {
		agbno = be32_to_cpu(ptr->s);
		*daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
					  agbno);
	}

	return 0;
}
954
955
956
957
958
959
960
/*
 * Readahead @count btree blocks at the given btree pointer.  An invalid
 * pointer is silently ignored; readahead is only an optimization.
 */
STATIC void
xfs_btree_readahead_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	xfs_extlen_t		count)
{
	xfs_daddr_t		daddr;

	if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr))
		return;
	xfs_buf_readahead(cur->bc_mp->m_ddev_targp, daddr,
			  cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
}
974
975
976
977
978
/*
 * Set the buffer for level "lev" in the cursor to bp, releasing any
 * previous buffer.  Sibling directions that are already null are marked
 * as read-ahead-done so no readahead is wasted on them later.
 */
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	xfs_buf_t		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}
1005
1006bool
1007xfs_btree_ptr_is_null(
1008 struct xfs_btree_cur *cur,
1009 union xfs_btree_ptr *ptr)
1010{
1011 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1012 return ptr->l == cpu_to_be64(NULLFSBLOCK);
1013 else
1014 return ptr->s == cpu_to_be32(NULLAGBLOCK);
1015}
1016
1017STATIC void
1018xfs_btree_set_ptr_null(
1019 struct xfs_btree_cur *cur,
1020 union xfs_btree_ptr *ptr)
1021{
1022 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1023 ptr->l = cpu_to_be64(NULLFSBLOCK);
1024 else
1025 ptr->s = cpu_to_be32(NULLAGBLOCK);
1026}
1027
1028
1029
1030
1031void
1032xfs_btree_get_sibling(
1033 struct xfs_btree_cur *cur,
1034 struct xfs_btree_block *block,
1035 union xfs_btree_ptr *ptr,
1036 int lr)
1037{
1038 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1039
1040 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1041 if (lr == XFS_BB_RIGHTSIB)
1042 ptr->l = block->bb_u.l.bb_rightsib;
1043 else
1044 ptr->l = block->bb_u.l.bb_leftsib;
1045 } else {
1046 if (lr == XFS_BB_RIGHTSIB)
1047 ptr->s = block->bb_u.s.bb_rightsib;
1048 else
1049 ptr->s = block->bb_u.s.bb_leftsib;
1050 }
1051}
1052
1053STATIC void
1054xfs_btree_set_sibling(
1055 struct xfs_btree_cur *cur,
1056 struct xfs_btree_block *block,
1057 union xfs_btree_ptr *ptr,
1058 int lr)
1059{
1060 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1061
1062 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1063 if (lr == XFS_BB_RIGHTSIB)
1064 block->bb_u.l.bb_rightsib = ptr->l;
1065 else
1066 block->bb_u.l.bb_leftsib = ptr->l;
1067 } else {
1068 if (lr == XFS_BB_RIGHTSIB)
1069 block->bb_u.s.bb_rightsib = ptr->s;
1070 else
1071 block->bb_u.s.bb_leftsib = ptr->s;
1072 }
1073}
1074
/*
 * Initialize a new btree block header in @buf with empty sibling
 * pointers, filling in the CRC-only fields when the filesystem has
 * v5 metadata.  @flags selects long vs short pointer format.
 */
void
xfs_btree_init_block_int(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	xfs_daddr_t		blkno,
	xfs_btnum_t		btnum,
	__u16			level,
	__u16			numrecs,
	__u64			owner,
	unsigned int		flags)
{
	int			crc = xfs_sb_version_hascrc(&mp->m_sb);
	__u32			magic = xfs_btree_magic(crc, btnum);

	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (flags & XFS_BTREE_LONG_PTRS) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (crc) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		/* short-form blocks store a 32-bit owner (the AG number) */
		__u32		__owner = (__u32)owner;

		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (crc) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}
1117
/*
 * Initialize a new btree block header backed by buffer @bp; thin wrapper
 * around xfs_btree_init_block_int() using the buffer's disk address and
 * short-pointer format (flags == 0).
 */
void
xfs_btree_init_block(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_btnum_t	btnum,
	__u16		level,
	__u16		numrecs,
	__u64		owner)
{
	xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 btnum, level, numrecs, owner, 0);
}
1130
/*
 * Initialize a new btree block header using the cursor's btree type and
 * flags.  The owner is the inode number for inode-rooted (long pointer)
 * btrees, otherwise the AG number.
 */
STATIC void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{
	__u64			owner;

	/*
	 * we can pull the owner from the cursor right now as the different
	 * owners align directly with the pointer size of the btree this
	 * cursor was allocated for.
	 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 cur->bc_btnum, level, numrecs,
				 owner, cur->bc_flags);
}
1155
1156
1157
1158
1159
1160
1161STATIC int
1162xfs_btree_is_lastrec(
1163 struct xfs_btree_cur *cur,
1164 struct xfs_btree_block *block,
1165 int level)
1166{
1167 union xfs_btree_ptr ptr;
1168
1169 if (level > 0)
1170 return 0;
1171 if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
1172 return 0;
1173
1174 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1175 if (!xfs_btree_ptr_is_null(cur, &ptr))
1176 return 0;
1177 return 1;
1178}
1179
1180STATIC void
1181xfs_btree_buf_to_ptr(
1182 struct xfs_btree_cur *cur,
1183 struct xfs_buf *bp,
1184 union xfs_btree_ptr *ptr)
1185{
1186 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1187 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
1188 XFS_BUF_ADDR(bp)));
1189 else {
1190 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
1191 XFS_BUF_ADDR(bp)));
1192 }
1193}
1194
/*
 * Set the buffer's LRU reference count hint according to the btree type
 * it belongs to, so hotter metadata stays cached longer.
 */
STATIC void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	switch (cur->bc_btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
		break;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
		break;
	case XFS_BTNUM_BMAP:
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		break;
	case XFS_BTNUM_RMAP:
		xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
		break;
	case XFS_BTNUM_REFC:
		xfs_buf_set_ref(bp, XFS_REFC_BTREE_REF);
		break;
	default:
		/* unknown btree type: debug builds trip, production ignores */
		ASSERT(0);
	}
}
1222
/*
 * Get an uninitialised buffer for the block pointed to by @ptr; no disk
 * read is performed, so the buffer contents are not assumed valid.
 */
STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
	if (error)
		return error;
	error = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, mp->m_bsize,
				  0, bpp);
	if (error)
		return error;

	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1246
1247
1248
1249
1250
/*
 * Read in the buffer at the block pointed to by @ptr, verifying it with
 * the btree's buf_ops, and return the block it holds.  The read always
 * waits for completion (XBF_TRYLOCK is not supported here).
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
	if (error)
		return error;
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp,
				   cur->bc_ops->buf_ops);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1279
1280
1281
1282
1283STATIC void
1284xfs_btree_copy_keys(
1285 struct xfs_btree_cur *cur,
1286 union xfs_btree_key *dst_key,
1287 union xfs_btree_key *src_key,
1288 int numkeys)
1289{
1290 ASSERT(numkeys >= 0);
1291 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
1292}
1293
1294
1295
1296
1297STATIC void
1298xfs_btree_copy_recs(
1299 struct xfs_btree_cur *cur,
1300 union xfs_btree_rec *dst_rec,
1301 union xfs_btree_rec *src_rec,
1302 int numrecs)
1303{
1304 ASSERT(numrecs >= 0);
1305 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
1306}
1307
1308
1309
1310
1311STATIC void
1312xfs_btree_copy_ptrs(
1313 struct xfs_btree_cur *cur,
1314 union xfs_btree_ptr *dst_ptr,
1315 union xfs_btree_ptr *src_ptr,
1316 int numptrs)
1317{
1318 ASSERT(numptrs >= 0);
1319 memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
1320}
1321
1322
1323
1324
1325STATIC void
1326xfs_btree_shift_keys(
1327 struct xfs_btree_cur *cur,
1328 union xfs_btree_key *key,
1329 int dir,
1330 int numkeys)
1331{
1332 char *dst_key;
1333
1334 ASSERT(numkeys >= 0);
1335 ASSERT(dir == 1 || dir == -1);
1336
1337 dst_key = (char *)key + (dir * cur->bc_ops->key_len);
1338 memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
1339}
1340
1341
1342
1343
1344STATIC void
1345xfs_btree_shift_recs(
1346 struct xfs_btree_cur *cur,
1347 union xfs_btree_rec *rec,
1348 int dir,
1349 int numrecs)
1350{
1351 char *dst_rec;
1352
1353 ASSERT(numrecs >= 0);
1354 ASSERT(dir == 1 || dir == -1);
1355
1356 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
1357 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
1358}
1359
1360
1361
1362
1363STATIC void
1364xfs_btree_shift_ptrs(
1365 struct xfs_btree_cur *cur,
1366 union xfs_btree_ptr *ptr,
1367 int dir,
1368 int numptrs)
1369{
1370 char *dst_ptr;
1371
1372 ASSERT(numptrs >= 0);
1373 ASSERT(dir == 1 || dir == -1);
1374
1375 dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
1376 memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
1377}
1378
1379
1380
1381
/*
 * Log key values from the btree block: through the buffer when one is
 * given, otherwise through the owning inode (root-in-inode btrees).
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{

	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}
}
1400
1401
1402
1403
/*
 * Log record values from the btree block.  Unlike xfs_btree_log_keys()
 * there is no inode-root fallback here: callers always pass a buffer.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{

	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

}
1418
1419
1420
1421
/*
 * Log block pointer fields from a btree block: through the buffer when
 * one is given, otherwise through the owning inode (root-in-inode
 * btrees).
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{

	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

}
1444
1445
1446
1447
/*
 * Log fields from a btree block header; @fields is a bitmask of XFS_BB_*
 * flags.  Logs through the buffer, or through the owning inode for
 * root-in-inode btrees (bp == NULL).
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	/* short-form header field offsets; final entry is total length */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	/* long-form header field offsets; final entry is total length */
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	if (bp) {
		int nbits;

		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
			/*
			 * Widen "all bits" to cover the CRC-only fields
			 * and use the CRC bit count for the tables above.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}
}
1512
1513
1514
1515
1516
/*
 * Increment the cursor to the next record at the given level.
 * Walks up until a block with a right neighbour is found, then back down
 * re-reading blocks along the path.  *stat is 1 if there is a next
 * record, 0 if we ran off the right edge of the tree.
 */
int
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;
}
1614
1615
1616
1617
1618
/*
 * Decrement the cursor by one record at the given level.
 * For nonzero levels the leaf-to-root path is followed up to the first
 * block whose pointer can be decremented, then walked back down to the
 * leaf taking the rightmost path at each level.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the move. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;

		/* Read-ahead the left block for the next loop iteration. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are seriously confused.
	 * Or we have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers (rightmost entry at each level).
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	*stat = 1;
	return 0;

out0:
	*stat = 0;
	return 0;

error0:
	return error;
}
1706
/*
 * Get the block pointed to by @pp at @level for a lookup, reusing the
 * cursor's cached buffer for that level when it already holds the right
 * block.  Performs basic sanity checks on the block read from disk and
 * returns -EFSCORRUPTED (releasing the buffer) if they fail.
 */
int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp) /* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	xfs_daddr_t		daddr;
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level for the disk address we are
	 * looking for re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
	if (error)
		return error;
	if (bp && XFS_BUF_ADDR(bp) == daddr) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	/* Check the inode owner since the verifiers don't. */
	if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
	    !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_INVALID_OWNER) &&
	    (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
	    be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
			cur->bc_private.b.ip->i_ino)
		goto out_bad;

	/* Did we get the level we were looking for? */
	if (be16_to_cpu((*blkp)->bb_level) != level)
		goto out_bad;

	/* Check that internal nodes contain at least one record. */
	if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0)
		goto out_bad;

	xfs_btree_setbuf(cur, level, bp);
	return 0;

out_bad:
	*blkp = NULL;
	xfs_buf_corruption_error(bp);
	xfs_trans_brelse(cur->bc_tp, bp);
	return -EFSCORRUPTED;
}
1769
1770
1771
1772
1773
1774
1775STATIC union xfs_btree_key *
1776xfs_lookup_get_search_key(
1777 struct xfs_btree_cur *cur,
1778 int level,
1779 int keyno,
1780 struct xfs_btree_block *block,
1781 union xfs_btree_key *kp)
1782{
1783 if (level == 0) {
1784 cur->bc_ops->init_key_from_rec(kp,
1785 xfs_btree_rec_addr(cur, keyno, block));
1786 return kp;
1787 }
1788
1789 return xfs_btree_key_addr(cur, keyno, block);
1790}
1791
1792
1793
1794
1795
/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if can't find any such record, 1 for success.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	int64_t			diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_STATS_INC(cur, lookup);

	/* No such thing as a zero-level tree. */
	if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0))
		return -EFSCORRUPTED;

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level, we
			 * know we need to use the first entry in this block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				if (level != 0 || cur->bc_nlevels != 1) {
					XFS_CORRUPTION_ERROR(__func__,
							XFS_ERRLEVEL_LOW,
							cur->bc_mp, block,
							sizeof(*block));
					return -EFSCORRUPTED;
				}

				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

			error = xfs_btree_debug_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;

			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			cur->bc_ptrs[0] = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
				return -EFSCORRUPTED;
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_ptrs[0] = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	return 0;

error0:
	return error;
}
1954
1955
1956union xfs_btree_key *
1957xfs_btree_high_key_from_key(
1958 struct xfs_btree_cur *cur,
1959 union xfs_btree_key *key)
1960{
1961 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
1962 return (union xfs_btree_key *)((char *)key +
1963 (cur->bc_ops->key_len / 2));
1964}
1965
1966
/* Determine the low (and high, for overlapping btrees) keys of a leaf block */
STATIC void
xfs_btree_get_leaf_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	max_hkey;
	union xfs_btree_key	hkey;
	union xfs_btree_rec	*rec;
	union xfs_btree_key	*high;
	int			n;

	/* Low key is derived from the first record. */
	rec = xfs_btree_rec_addr(cur, 1, block);
	cur->bc_ops->init_key_from_rec(key, rec);

	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		/* High key is the maximum of all records' high keys. */
		cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			rec = xfs_btree_rec_addr(cur, n, block);
			cur->bc_ops->init_high_key_from_rec(&hkey, rec);
			if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
					> 0)
				max_hkey = hkey;
		}

		/* Store it in the second half of the composite key. */
		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
	}
}
1997
1998
1999STATIC void
2000xfs_btree_get_node_keys(
2001 struct xfs_btree_cur *cur,
2002 struct xfs_btree_block *block,
2003 union xfs_btree_key *key)
2004{
2005 union xfs_btree_key *hkey;
2006 union xfs_btree_key *max_hkey;
2007 union xfs_btree_key *high;
2008 int n;
2009
2010 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2011 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2012 cur->bc_ops->key_len / 2);
2013
2014 max_hkey = xfs_btree_high_key_addr(cur, 1, block);
2015 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
2016 hkey = xfs_btree_high_key_addr(cur, n, block);
2017 if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
2018 max_hkey = hkey;
2019 }
2020
2021 high = xfs_btree_high_key_from_key(cur, key);
2022 memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
2023 } else {
2024 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2025 cur->bc_ops->key_len);
2026 }
2027}
2028
2029
2030void
2031xfs_btree_get_keys(
2032 struct xfs_btree_cur *cur,
2033 struct xfs_btree_block *block,
2034 union xfs_btree_key *key)
2035{
2036 if (be16_to_cpu(block->bb_level) == 0)
2037 xfs_btree_get_leaf_keys(cur, block, key);
2038 else
2039 xfs_btree_get_node_keys(cur, block, key);
2040}
2041
2042
2043
2044
2045
2046
2047
2048
2049static inline bool
2050xfs_btree_needs_key_update(
2051 struct xfs_btree_cur *cur,
2052 int ptr)
2053{
2054 return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
2055}
2056
2057
2058
2059
2060
2061
/*
 * Update the low and high parent keys of the given level, progressing
 * towards the root.  If force_all is false, stop at the first level
 * whose keys did not change.
 */
STATIC int
__xfs_btree_updkeys(
	struct xfs_btree_cur	*cur,
	int			level,
	struct xfs_btree_block	*block,
	struct xfs_buf		*bp0,
	bool			force_all)
{
	union xfs_btree_key	key;	/* keys from current level */
	union xfs_btree_key	*lkey;	/* keys from the next level up */
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*nlkey;	/* keys from the next level up */
	union xfs_btree_key	*nhkey;
	struct xfs_buf		*bp;
	int			ptr;

	ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);

	/* Exit if there aren't any parent levels to update. */
	if (level + 1 >= cur->bc_nlevels)
		return 0;

	trace_xfs_btree_updkeys(cur, level, bp0);

	/* key holds both halves of the composite (low/high) key. */
	lkey = &key;
	hkey = xfs_btree_high_key_from_key(cur, lkey);
	xfs_btree_get_keys(cur, block, lkey);
	for (level++; level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
		trace_xfs_btree_updkeys(cur, level, bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error)
			return error;
#endif
		ptr = cur->bc_ptrs[level];
		nlkey = xfs_btree_key_addr(cur, ptr, block);
		nhkey = xfs_btree_high_key_addr(cur, ptr, block);
		/* Stop early if neither half of the key changed. */
		if (!force_all &&
		    !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
		      cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
			break;
		xfs_btree_copy_keys(cur, nlkey, lkey, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
		if (level + 1 >= cur->bc_nlevels)
			break;
		/* Recompute this level's keys for the next iteration. */
		xfs_btree_get_node_keys(cur, block, lkey);
	}

	return 0;
}
2116
2117
2118STATIC int
2119xfs_btree_updkeys_force(
2120 struct xfs_btree_cur *cur,
2121 int level)
2122{
2123 struct xfs_buf *bp;
2124 struct xfs_btree_block *block;
2125
2126 block = xfs_btree_get_block(cur, level, &bp);
2127 return __xfs_btree_updkeys(cur, level, block, bp, true);
2128}
2129
2130
2131
2132
/*
 * Update the parent keys of the given level, progressing towards the root.
 */
STATIC int
xfs_btree_update_keys(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_key	*kp;
	union xfs_btree_key	key;
	int			ptr;

	ASSERT(level >= 0);

	block = xfs_btree_get_block(cur, level, &bp);
	/* Overlapping btrees must also keep the high keys up to date. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
		return __xfs_btree_updkeys(cur, level, block, bp, false);

	/*
	 * Go up the tree from this level toward the root.
	 * At each level, update the key value to the value input.
	 * Stop when we reach a level where the cursor isn't pointing
	 * at the first entry in the block.
	 */
	xfs_btree_get_keys(cur, block, &key);
	for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error)
			return error;
#endif
		ptr = cur->bc_ptrs[level];
		kp = xfs_btree_key_addr(cur, ptr, block);
		xfs_btree_copy_keys(cur, kp, &key, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
	}

	return 0;
}
2175
2176
2177
2178
2179
2180
/*
 * Update the record referred to by cur to the value in the given record.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;
	int			ptr;
	union xfs_btree_rec	*rp;

	/* Pick up the current block. */
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		goto error0;
#endif

	/* Get the address of the rec to be updated. */
	ptr = cur->bc_ptrs[0];
	rp = xfs_btree_rec_addr(cur, ptr, block);

	/* Fill in the new contents and log them. */
	xfs_btree_copy_recs(cur, rp, rec, 1);
	xfs_btree_log_recs(cur, bp, ptr, ptr);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, 0)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_UPDATE);
	}

	/* Pass new key value up to our parent. */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, 0);
		if (error)
			goto error0;
	}

	return 0;

error0:
	return error;
}
2229
2230
2231
2232
2233
2234STATIC int
2235xfs_btree_lshift(
2236 struct xfs_btree_cur *cur,
2237 int level,
2238 int *stat)
2239{
2240 struct xfs_buf *lbp;
2241 struct xfs_btree_block *left;
2242 int lrecs;
2243 struct xfs_buf *rbp;
2244 struct xfs_btree_block *right;
2245 struct xfs_btree_cur *tcur;
2246 int rrecs;
2247 union xfs_btree_ptr lptr;
2248 union xfs_btree_key *rkp = NULL;
2249 union xfs_btree_ptr *rpp = NULL;
2250 union xfs_btree_rec *rrp = NULL;
2251 int error;
2252 int i;
2253
2254 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2255 level == cur->bc_nlevels - 1)
2256 goto out0;
2257
2258
2259 right = xfs_btree_get_block(cur, level, &rbp);
2260
2261#ifdef DEBUG
2262 error = xfs_btree_check_block(cur, right, level, rbp);
2263 if (error)
2264 goto error0;
2265#endif
2266
2267
2268 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2269 if (xfs_btree_ptr_is_null(cur, &lptr))
2270 goto out0;
2271
2272
2273
2274
2275
2276 if (cur->bc_ptrs[level] <= 1)
2277 goto out0;
2278
2279
2280 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
2281 if (error)
2282 goto error0;
2283
2284
2285 lrecs = xfs_btree_get_numrecs(left);
2286 if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
2287 goto out0;
2288
2289 rrecs = xfs_btree_get_numrecs(right);
2290
2291
2292
2293
2294
2295
2296 lrecs++;
2297 rrecs--;
2298
2299 XFS_BTREE_STATS_INC(cur, lshift);
2300 XFS_BTREE_STATS_ADD(cur, moves, 1);
2301
2302
2303
2304
2305
2306 if (level > 0) {
2307
2308 union xfs_btree_key *lkp;
2309 union xfs_btree_ptr *lpp;
2310
2311 lkp = xfs_btree_key_addr(cur, lrecs, left);
2312 rkp = xfs_btree_key_addr(cur, 1, right);
2313
2314 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2315 rpp = xfs_btree_ptr_addr(cur, 1, right);
2316
2317 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level);
2318 if (error)
2319 goto error0;
2320
2321 xfs_btree_copy_keys(cur, lkp, rkp, 1);
2322 xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
2323
2324 xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
2325 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
2326
2327 ASSERT(cur->bc_ops->keys_inorder(cur,
2328 xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
2329 } else {
2330
2331 union xfs_btree_rec *lrp;
2332
2333 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2334 rrp = xfs_btree_rec_addr(cur, 1, right);
2335
2336 xfs_btree_copy_recs(cur, lrp, rrp, 1);
2337 xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
2338
2339 ASSERT(cur->bc_ops->recs_inorder(cur,
2340 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
2341 }
2342
2343 xfs_btree_set_numrecs(left, lrecs);
2344 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2345
2346 xfs_btree_set_numrecs(right, rrecs);
2347 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2348
2349
2350
2351
2352 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
2353 if (level > 0) {
2354
2355 for (i = 0; i < rrecs; i++) {
2356 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
2357 if (error)
2358 goto error0;
2359 }
2360
2361 xfs_btree_shift_keys(cur,
2362 xfs_btree_key_addr(cur, 2, right),
2363 -1, rrecs);
2364 xfs_btree_shift_ptrs(cur,
2365 xfs_btree_ptr_addr(cur, 2, right),
2366 -1, rrecs);
2367
2368 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2369 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2370 } else {
2371
2372 xfs_btree_shift_recs(cur,
2373 xfs_btree_rec_addr(cur, 2, right),
2374 -1, rrecs);
2375 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2376 }
2377
2378
2379
2380
2381
2382 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2383 error = xfs_btree_dup_cursor(cur, &tcur);
2384 if (error)
2385 goto error0;
2386 i = xfs_btree_firstrec(tcur, level);
2387 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
2388 error = -EFSCORRUPTED;
2389 goto error0;
2390 }
2391
2392 error = xfs_btree_decrement(tcur, level, &i);
2393 if (error)
2394 goto error1;
2395
2396
2397 error = xfs_btree_update_keys(tcur, level);
2398 if (error)
2399 goto error1;
2400
2401 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
2402 }
2403
2404
2405 error = xfs_btree_update_keys(cur, level);
2406 if (error)
2407 goto error0;
2408
2409
2410 cur->bc_ptrs[level]--;
2411
2412 *stat = 1;
2413 return 0;
2414
2415out0:
2416 *stat = 0;
2417 return 0;
2418
2419error0:
2420 return error;
2421
2422error1:
2423 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2424 return error;
2425}
2426
2427
2428
2429
2430
2431STATIC int
2432xfs_btree_rshift(
2433 struct xfs_btree_cur *cur,
2434 int level,
2435 int *stat)
2436{
2437 struct xfs_buf *lbp;
2438 struct xfs_btree_block *left;
2439 struct xfs_buf *rbp;
2440 struct xfs_btree_block *right;
2441 struct xfs_btree_cur *tcur;
2442 union xfs_btree_ptr rptr;
2443 union xfs_btree_key *rkp;
2444 int rrecs;
2445 int lrecs;
2446 int error;
2447 int i;
2448
2449 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2450 (level == cur->bc_nlevels - 1))
2451 goto out0;
2452
2453
2454 left = xfs_btree_get_block(cur, level, &lbp);
2455
2456#ifdef DEBUG
2457 error = xfs_btree_check_block(cur, left, level, lbp);
2458 if (error)
2459 goto error0;
2460#endif
2461
2462
2463 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2464 if (xfs_btree_ptr_is_null(cur, &rptr))
2465 goto out0;
2466
2467
2468
2469
2470
2471 lrecs = xfs_btree_get_numrecs(left);
2472 if (cur->bc_ptrs[level] >= lrecs)
2473 goto out0;
2474
2475
2476 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
2477 if (error)
2478 goto error0;
2479
2480
2481 rrecs = xfs_btree_get_numrecs(right);
2482 if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
2483 goto out0;
2484
2485 XFS_BTREE_STATS_INC(cur, rshift);
2486 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2487
2488
2489
2490
2491
2492 if (level > 0) {
2493
2494 union xfs_btree_key *lkp;
2495 union xfs_btree_ptr *lpp;
2496 union xfs_btree_ptr *rpp;
2497
2498 lkp = xfs_btree_key_addr(cur, lrecs, left);
2499 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2500 rkp = xfs_btree_key_addr(cur, 1, right);
2501 rpp = xfs_btree_ptr_addr(cur, 1, right);
2502
2503 for (i = rrecs - 1; i >= 0; i--) {
2504 error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
2505 if (error)
2506 goto error0;
2507 }
2508
2509 xfs_btree_shift_keys(cur, rkp, 1, rrecs);
2510 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
2511
2512 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level);
2513 if (error)
2514 goto error0;
2515
2516
2517 xfs_btree_copy_keys(cur, rkp, lkp, 1);
2518 xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
2519
2520 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
2521 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
2522
2523 ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
2524 xfs_btree_key_addr(cur, 2, right)));
2525 } else {
2526
2527 union xfs_btree_rec *lrp;
2528 union xfs_btree_rec *rrp;
2529
2530 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2531 rrp = xfs_btree_rec_addr(cur, 1, right);
2532
2533 xfs_btree_shift_recs(cur, rrp, 1, rrecs);
2534
2535
2536 xfs_btree_copy_recs(cur, rrp, lrp, 1);
2537 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
2538 }
2539
2540
2541
2542
2543 xfs_btree_set_numrecs(left, --lrecs);
2544 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2545
2546 xfs_btree_set_numrecs(right, ++rrecs);
2547 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2548
2549
2550
2551
2552
2553 error = xfs_btree_dup_cursor(cur, &tcur);
2554 if (error)
2555 goto error0;
2556 i = xfs_btree_lastrec(tcur, level);
2557 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
2558 error = -EFSCORRUPTED;
2559 goto error0;
2560 }
2561
2562 error = xfs_btree_increment(tcur, level, &i);
2563 if (error)
2564 goto error1;
2565
2566
2567 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2568 error = xfs_btree_update_keys(cur, level);
2569 if (error)
2570 goto error1;
2571 }
2572
2573
2574 error = xfs_btree_update_keys(tcur, level);
2575 if (error)
2576 goto error1;
2577
2578 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
2579
2580 *stat = 1;
2581 return 0;
2582
2583out0:
2584 *stat = 0;
2585 return 0;
2586
2587error0:
2588 return error;
2589
2590error1:
2591 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2592 return error;
2593}
2594
2595
2596
2597
2598
2599
/*
 * Split cur/level block in half.
 * Return new block number and the key to its first
 * record (to be inserted into parent).
 */
STATIC int					/* error */
__xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	union xfs_btree_ptr	rrptr;		/* right-right sibling ptr */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	int			lrecs;
	int			rrecs;
	int			src_index;
	int			error;		/* error return value */
	int			i;

	XFS_BTREE_STATS_INC(cur, split);

	/* Set up left block (current one). */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	xfs_btree_buf_to_ptr(cur, lbp, &lptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block as "right". */
	error = xfs_btree_get_buf_block(cur, &rptr, &right, &rbp);
	if (error)
		goto error0;

	/* Fill in the btree header for the new right block. */
	xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);

	/*
	 * Split the entries between the old and the new block evenly.
	 * Make sure that if there's an odd number of entries now, that
	 * each new block will have the same number of entries.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	rrecs = lrecs / 2;
	if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
		rrecs++;
	src_index = (lrecs - rrecs + 1);

	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/* Adjust numrecs for the later get_*_keys() calls. */
	lrecs -= rrecs;
	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);

	/*
	 * Copy btree block entries from the left block over to the
	 * new block, the right. Update the right block and log the
	 * changes.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, src_index, left);
		lpp = xfs_btree_ptr_addr(cur, src_index, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

		for (i = src_index; i < rrecs; i++) {
			error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}

		/* Copy the keys & pointers to the new block. */
		xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
		xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_node_keys(cur, right, key);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, src_index, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		/* Copy records to the new block. */
		xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_leaf_keys(cur, right, key);
	}

	/*
	 * Find the left block number by looking in the buffer.
	 * Adjust sibling pointers.
	 */
	xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);

	xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/*
	 * If there's a block to the new block's right, make that block
	 * point back to right instead of to left.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
		error = xfs_btree_read_buf_block(cur, &rrptr,
							0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the cursor is really in the right block, move it there.
	 * If it's just pointing past the last entry in left, then we'll
	 * insert there, so don't change anything in that case.
	 */
	if (cur->bc_ptrs[level] > lrecs + 1) {
		xfs_btree_setbuf(cur, level, rbp);
		cur->bc_ptrs[level] -= lrecs;
	}
	/*
	 * If there are more levels, we'll need another cursor which refers
	 * the right block, no matter where this cursor was.
	 */
	if (level + 1 < cur->bc_nlevels) {
		error = xfs_btree_dup_cursor(cur, curp);
		if (error)
			goto error0;
		(*curp)->bc_ptrs[level + 1]++;
	}
	*ptrp = rptr;
	*stat = 1;
	return 0;
out0:
	*stat = 0;
	return 0;

error0:
	return error;
}
2780
/*
 * Argument bundle for running __xfs_btree_split() on a workqueue, so the
 * split executes on a fresh worker stack (see xfs_btree_split()).
 */
struct xfs_btree_split_args {
	struct xfs_btree_cur	*cur;		/* cursor of block to split */
	int			level;		/* btree level to split */
	union xfs_btree_ptr	*ptrp;		/* out: new block pointer */
	union xfs_btree_key	*key;		/* out: key for the new block */
	struct xfs_btree_cur	**curp;		/* out: duplicated cursor */
	int			*stat;		/* out: success/failure */
	int			result;		/* error code from the split */
	bool			kswapd;		/* submitter ran in kswapd */
	struct completion	*done;		/* signalled when work is done */
	struct work_struct	work;		/* workqueue item */
};
2793
2794
2795
2796
/*
 * Stack switching interface for split: runs __xfs_btree_split() with the
 * arguments unpacked from the work item, then signals completion.
 */
static void
xfs_btree_split_worker(
	struct work_struct	*work)
{
	struct xfs_btree_split_args	*args = container_of(work,
						struct xfs_btree_split_args, work);
	unsigned long		pflags;
	unsigned long		new_pflags = PF_MEMALLOC_NOFS;

	/*
	 * we are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory reclaim
	 * in any way.
	 */
	if (args->kswapd)
		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	current_set_flags_nested(&pflags, new_pflags);

	args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
					 args->key, args->curp, args->stat);
	complete(args->done);

	current_restore_flags_nested(&pflags, new_pflags);
}
2823
2824
2825
2826
2827
2828
/*
 * BMBT split requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. For the
 * other btree types, just call directly to avoid the context switch
 * overhead as much as possible.
 */
STATIC int					/* error */
xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_split_args	args;
	DECLARE_COMPLETION_ONSTACK(done);

	if (cur->bc_btnum != XFS_BTNUM_BMAP)
		return __xfs_btree_split(cur, level, ptrp, key, curp, stat);

	args.cur = cur;
	args.level = level;
	args.ptrp = ptrp;
	args.key = key;
	args.curp = curp;
	args.stat = stat;
	args.done = &done;
	args.kswapd = current_is_kswapd();
	INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
	queue_work(xfs_alloc_wq, &args.work);
	/* Block until the worker has finished the split. */
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}
2858
2859
2860
2861
2862
2863
/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.
 */
int						/* error */
xfs_btree_new_iroot(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			*logflags,	/* logging flags for inode */
	int			*stat)		/* return status - 0 fail */
{
	struct xfs_buf		*cbp;		/* buffer for cblock */
	struct xfs_btree_block	*block;		/* btree block */
	struct xfs_btree_block	*cblock;	/* child btree block */
	union xfs_btree_key	*ckp;		/* child key pointer */
	union xfs_btree_ptr	*cpp;		/* child ptr pointer */
	union xfs_btree_key	*kp;		/* pointer to btree key */
	union xfs_btree_ptr	*pp;		/* pointer to block addr */
	union xfs_btree_ptr	nptr;		/* new block addr */
	int			level;		/* btree level */
	int			error;		/* error return code */
	int			i;		/* loop counter */

	XFS_BTREE_STATS_INC(cur, newroot);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);

	level = cur->bc_nlevels - 1;

	block = xfs_btree_get_iroot(cur);
	pp = xfs_btree_ptr_addr(cur, 1, block);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		return 0;

	XFS_BTREE_STATS_INC(cur, alloc);

	/* Copy the root into a real block. */
	error = xfs_btree_get_buf_block(cur, &nptr, &cblock, &cbp);
	if (error)
		goto error0;

	/*
	 * we can't just memcpy() the root in for CRC enabled btree blocks.
	 * In that case have to also ensure the blkno remains correct
	 */
	memcpy(cblock, block, xfs_btree_block_len(cur));
	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
		if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
			cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
		else
			cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
	}

	/* The root grows one level; point the inode root at the child. */
	be16_add_cpu(&block->bb_level, 1);
	xfs_btree_set_numrecs(block, 1);
	cur->bc_nlevels++;
	cur->bc_ptrs[level + 1] = 1;

	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));

	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
		error = xfs_btree_debug_check_ptr(cur, pp, i, level);
		if (error)
			goto error0;
	}

	xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));

	error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level);
	if (error)
		goto error0;

	xfs_btree_copy_ptrs(cur, pp, &nptr, 1);

	/* Shrink the inode fork to hold just the single child pointer. */
	xfs_iroot_realloc(cur->bc_private.b.ip,
			  1 - xfs_btree_get_numrecs(cblock),
			  cur->bc_private.b.whichfork);

	xfs_btree_setbuf(cur, level, cbp);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
	xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
	xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));

	*logflags |=
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
	*stat = 1;
	return 0;
error0:
	return error;
}
2962
2963
2964
2965
/*
 * Allocate a new root block, fill it in.
 */
STATIC int				/* error */
xfs_btree_new_root(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* one half of the old root block */
	struct xfs_buf		*bp;	/* buffer containing block */
	int			error;	/* error return value */
	struct xfs_buf		*lbp;	/* left buffer pointer */
	struct xfs_btree_block	*left;	/* left btree block */
	struct xfs_buf		*nbp;	/* new (root) buffer */
	struct xfs_btree_block	*new;	/* new (root) btree block */
	int			nptr;	/* new value for key index, 1 or 2 */
	struct xfs_buf		*rbp;	/* right buffer pointer */
	struct xfs_btree_block	*right;	/* right btree block */
	union xfs_btree_ptr	rptr;
	union xfs_btree_ptr	lptr;

	XFS_BTREE_STATS_INC(cur, newroot);

	/* initialise our start point from the cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &rptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block. */
	error = xfs_btree_get_buf_block(cur, &lptr, &new, &nbp);
	if (error)
		goto error0;

	/* Set the root in the holding structure  increasing the level by 1. */
	cur->bc_ops->set_root(cur, &lptr, 1);

	/*
	 * At the previous root level there are now two blocks: the old root,
	 * and the new block generated when it was split.  We don't know which
	 * one the cursor is pointing at, so we set up variables "left" and
	 * "right" for each case.
	 */
	block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
	if (error)
		goto error0;
#endif

	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/* Our block is left, pick up the right block. */
		lbp = bp;
		xfs_btree_buf_to_ptr(cur, lbp, &lptr);
		left = block;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;
		bp = rbp;
		nptr = 1;
	} else {
		/* Our block is right, pick up the left block. */
		rbp = bp;
		xfs_btree_buf_to_ptr(cur, rbp, &rptr);
		right = block;
		xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;
		bp = lbp;
		nptr = 2;
	}

	/* Fill in the new block's btree header and log it. */
	xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
	xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
	ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
			!xfs_btree_ptr_is_null(cur, &rptr));

	/* Fill in the key data in the new root. */
	if (xfs_btree_get_level(left) > 0) {
		/*
		 * Get the keys for the left block's keys and put them directly
		 * in the parent block.  Do the same for the right block.
		 */
		xfs_btree_get_node_keys(cur, left,
				xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_node_keys(cur, right,
				xfs_btree_key_addr(cur, 2, new));
	} else {
		/*
		 * Get the keys for the left block's records and put them
		 * directly in the parent block.  Do the same for the right
		 * block.
		 */
		xfs_btree_get_leaf_keys(cur, left,
			xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_leaf_keys(cur, right,
			xfs_btree_key_addr(cur, 2, new));
	}
	xfs_btree_log_keys(cur, nbp, 1, 2);

	/* Fill in the pointer data in the new root. */
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
	xfs_btree_log_ptrs(cur, nbp, 1, 2);

	/* Fix up the cursor. */
	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
	cur->bc_ptrs[cur->bc_nlevels] = nptr;
	cur->bc_nlevels++;
	*stat = 1;
	return 0;
error0:
	return error;
out0:
	*stat = 0;
	return 0;
}
3091
/*
 * Make room in the block at @level for a new entry: grow the incore root,
 * replace the root, shift entries to a sibling, or split the block.
 * On a split, *nptr/*key/*ncur describe the new sibling block for the
 * caller to insert into the parent; *oindex/*index track the cursor's
 * old/new positions.
 */
STATIC int
xfs_btree_make_block_unfull(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* btree level */
	int			numrecs,/* # of recs in block */
	int			*oindex,/* old tree index */
	int			*index,	/* new tree index */
	union xfs_btree_ptr	*nptr,	/* new btree ptr */
	struct xfs_btree_cur	**ncur,	/* new btree cursor */
	union xfs_btree_key	*key,	/* key of new block */
	int			*stat)
{
	int			error = 0;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_inode *ip = cur->bc_private.b.ip;

		if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
			/* A root block that can be made bigger. */
			xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
			*stat = 1;
		} else {
			/* A root block that needs replacing */
			int	logflags = 0;

			error = xfs_btree_new_iroot(cur, &logflags, stat);
			if (error || *stat == 0)
				return error;

			xfs_trans_log_inode(cur->bc_tp, ip, logflags);
		}

		return 0;
	}

	/* First, try shifting an entry to the right neighbor. */
	error = xfs_btree_rshift(cur, level, stat);
	if (error || *stat)
		return error;

	/* Next, try shifting an entry to the left neighbor. */
	error = xfs_btree_lshift(cur, level, stat);
	if (error)
		return error;

	if (*stat) {
		*oindex = *index = cur->bc_ptrs[level];
		return 0;
	}

	/*
	 * Next, try splitting the current block in half.
	 *
	 * If this works we have to re-set our variables because we
	 * could be in a different block now.
	 */
	error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
	if (error || *stat == 0)
		return error;

	/* Pick up the new cursor position after the split. */
	*index = cur->bc_ptrs[level];
	return 0;
}
3157
3158
3159
3160
3161
/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int
xfs_btree_insrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	union xfs_btree_ptr	*ptrp,	/* i/o: block number inserted */
	union xfs_btree_rec	*rec,	/* record to insert */
	union xfs_btree_key	*key,	/* i/o: block key for ptrp */
	struct xfs_btree_cur	**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* btree block */
	struct xfs_buf		*bp;	/* buffer for block */
	union xfs_btree_ptr	nptr;	/* new block ptr */
	struct xfs_btree_cur	*ncur;	/* new btree cursor */
	union xfs_btree_key	nkey;	/* new block key */
	union xfs_btree_key	*lkey;
	int			optr;	/* old key/record index */
	int			ptr;	/* key/record index */
	int			numrecs;/* number of records */
	int			error;	/* error return value */
	int			i;
	xfs_daddr_t		old_bn;	/* block's daddr before any split */

	ncur = NULL;
	lkey = &nkey;

	/*
	 * If we have an external root pointer, and we've made it to the
	 * root level, allocate a new root block and we're done.
	 */
	if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level >= cur->bc_nlevels)) {
		error = xfs_btree_new_root(cur, stat);
		xfs_btree_set_ptr_null(cur, ptrp);

		return error;
	}

	/* If we're off the left edge, return failure. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}

	optr = ptr;

	XFS_BTREE_STATS_INC(cur, insrec);

	/* Get pointers to the btree buffer and block. */
	block = xfs_btree_get_block(cur, level, &bp);
	old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;

	/* Check that the new entry is being inserted in the right place. */
	if (ptr <= numrecs) {
		if (level == 0) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rec,
				xfs_btree_rec_addr(cur, ptr, block)));
		} else {
			ASSERT(cur->bc_ops->keys_inorder(cur, key,
				xfs_btree_key_addr(cur, ptr, block)));
		}
	}
#endif

	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
	xfs_btree_set_ptr_null(cur, &nptr);
	if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
		error = xfs_btree_make_block_unfull(cur, level, numrecs,
					&optr, &ptr, &nptr, &ncur, lkey, stat);
		if (error || *stat == 0)
			goto error0;
	}

	/*
	 * The current block may have changed if the block was
	 * previously full and we have just made space in it.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		return error;
#endif

	/*
	 * At this point we know there's room for our new entry in the block
	 * we're pointing at.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);

	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*kp;
		union xfs_btree_ptr	*pp;

		kp = xfs_btree_key_addr(cur, ptr, block);
		pp = xfs_btree_ptr_addr(cur, ptr, block);

		for (i = numrecs - ptr; i >= 0; i--) {
			error = xfs_btree_debug_check_ptr(cur, pp, i, level);
			if (error)
				return error;
		}

		xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
		xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);

		error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level);
		if (error)
			goto error0;

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_keys(cur, kp, key, 1);
		xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
		numrecs++;
		xfs_btree_set_numrecs(block, numrecs);
		xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
		xfs_btree_log_keys(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->keys_inorder(cur, kp,
				xfs_btree_key_addr(cur, ptr + 1, block)));
		}
#endif
	} else {
		/* It's a leaf entry. Make a hole for the new record */
		union xfs_btree_rec	*rp;

		rp = xfs_btree_rec_addr(cur, ptr, block);

		xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_recs(cur, rp, rec, 1);
		xfs_btree_set_numrecs(block, ++numrecs);
		xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rp,
				xfs_btree_rec_addr(cur, ptr + 1, block)));
		}
#endif
	}

	/* Log the new number of records in the btree header. */
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we just inserted into a new tree block, we have to
	 * recalculate nkey here because nkey is out of date.
	 *
	 * Otherwise we're just updating an existing block (having shoved
	 * some records into the new tree block), so use the regular key
	 * update mechanism.
	 */
	if (bp && bp->b_bn != old_bn) {
		xfs_btree_get_keys(cur, block, lkey);
	} else if (xfs_btree_needs_key_update(cur, optr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					ptr, LASTREC_INSREC);
	}

	/*
	 * Return the new block number, if any.
	 * If there is one, give back a record value and a cursor too.
	 */
	*ptrp = nptr;
	if (!xfs_btree_ptr_is_null(cur, &nptr)) {
		xfs_btree_copy_keys(cur, key, lkey, 1);
		*curp = ncur;
	}

	*stat = 1;
	return 0;

error0:
	return error;
}
3362
3363
3364
3365
3366
3367
3368
3369
/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor.  All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 */
int
xfs_btree_insert(
	struct xfs_btree_cur	*cur,
	int			*stat)
{
	int			error;	/* error return value */
	int			i;	/* result value, 0 for failure */
	int			level;	/* current level number in btree */
	union xfs_btree_ptr	nptr;	/* new block number (split result) */
	struct xfs_btree_cur	*ncur;	/* new cursor (split result) */
	struct xfs_btree_cur	*pcur;	/* previous level's cursor */
	union xfs_btree_key	bkey;	/* key of block to insert */
	union xfs_btree_key	*key;
	union xfs_btree_rec	rec;	/* record to insert */

	level = 0;
	ncur = NULL;
	pcur = cur;
	key = &bkey;

	xfs_btree_set_ptr_null(cur, &nptr);

	/* Make a key out of the record data to be inserted, and save it. */
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(key, &rec);

	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split block, that must mean that
	 * the insert is finished with this level.
	 */
	do {
		/*
		 * Insert nrec/nptr into this level of the tree.
		 * Note if we fail, nptr will be null.
		 */
		error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
				&ncur, &i);
		if (error) {
			if (pcur != cur)
				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
			goto error0;
		}

		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		level++;

		/*
		 * See if the cursor we just used is trash.
		 * Can't trash the caller's cursor, but otherwise we should
		 * if ncur is a new cursor or we're about to be done.
		 */
		if (pcur != cur &&
		    (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
			/* Save the state from the cursor before we trash it */
			if (cur->bc_ops->update_cursor)
				cur->bc_ops->update_cursor(pcur, cur);
			cur->bc_nlevels = pcur->bc_nlevels;
			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
		}
		/* If we got a new cursor, switch to it. */
		if (ncur) {
			pcur = ncur;
			ncur = NULL;
		}
	} while (!xfs_btree_ptr_is_null(cur, &nptr));

	*stat = i;
	return 0;
error0:
	return error;
}
3445
3446
3447
3448
3449
3450
3451
3452
3453
/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the killroot names comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * inode we have to copy the single block it was pointing to into the
 * inode.
 */
STATIC int
xfs_btree_kill_iroot(
	struct xfs_btree_cur	*cur)
{
	int			whichfork = cur->bc_private.b.whichfork;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*cblock;
	union xfs_btree_key	*kp;
	union xfs_btree_key	*ckp;
	union xfs_btree_ptr	*pp;
	union xfs_btree_ptr	*cpp;
	struct xfs_buf		*cbp;
	int			level;
	int			index;
	int			numrecs;
	int			error;
#ifdef DEBUG
	union xfs_btree_ptr	ptr;
#endif
	int			i;

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
	ASSERT(cur->bc_nlevels > 1);

	/*
	 * Don't deal with the root block needs to be a leaf case.
	 * We're just going to turn the thing back into extents anyway.
	 */
	level = cur->bc_nlevels - 1;
	if (level == 1)
		goto out0;

	/*
	 * Give up if the root has multiple children.
	 */
	block = xfs_btree_get_iroot(cur);
	if (xfs_btree_get_numrecs(block) != 1)
		goto out0;

	cblock = xfs_btree_get_block(cur, level - 1, &cbp);
	numrecs = xfs_btree_get_numrecs(cblock);

	/*
	 * Only do this if the next level will fit.
	 * Then the data must be copied up to the inode,
	 * instead of freeing the root you free the next level.
	 */
	if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, killroot);

#ifdef DEBUG
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif

	/* Grow the incore root to hold the child's entries; note that the
	 * realloc can move the root, so re-read it from the fork. */
	index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
	if (index) {
		xfs_iroot_realloc(cur->bc_private.b.ip, index,
				  cur->bc_private.b.whichfork);
		block = ifp->if_broot;
	}

	be16_add_cpu(&block->bb_numrecs, index);
	ASSERT(block->bb_numrecs == cblock->bb_numrecs);

	/* Copy the child's keys and pointers up into the inode root. */
	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, kp, ckp, numrecs);

	pp = xfs_btree_ptr_addr(cur, 1, block);
	cpp = xfs_btree_ptr_addr(cur, 1, cblock);

	for (i = 0; i < numrecs; i++) {
		error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
		if (error)
			return error;
	}

	xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);

	/* Free the now-redundant child block and shrink the tree. */
	error = xfs_btree_free_block(cur, cbp);
	if (error)
		return error;

	cur->bc_bufs[level - 1] = NULL;
	be16_add_cpu(&block->bb_level, -1);
	xfs_trans_log_inode(cur->bc_tp, ip,
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	cur->bc_nlevels--;
out0:
	return 0;
}
3552
3553
3554
3555
/*
 * Kill the old root block after the last record has been moved up and free
 * the old btree root block.
 */
STATIC int
xfs_btree_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	union xfs_btree_ptr	*newroot)
{
	int			error;

	XFS_BTREE_STATS_INC(cur, killroot);

	/*
	 * Update the root pointer, decreasing the level by 1 and then
	 * free the old root.
	 */
	cur->bc_ops->set_root(cur, newroot, -1);

	error = xfs_btree_free_block(cur, bp);
	if (error)
		return error;

	/* Drop the cursor's state for the level that no longer exists. */
	cur->bc_bufs[level] = NULL;
	cur->bc_ra[level] = 0;
	cur->bc_nlevels--;

	return 0;
}
3583
3584STATIC int
3585xfs_btree_dec_cursor(
3586 struct xfs_btree_cur *cur,
3587 int level,
3588 int *stat)
3589{
3590 int error;
3591 int i;
3592
3593 if (level > 0) {
3594 error = xfs_btree_decrement(cur, level, &i);
3595 if (error)
3596 return error;
3597 }
3598
3599 *stat = 1;
3600 return 0;
3601}
3602
3603
3604
3605
3606
3607
3608
/*
 * Single level of the btree record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int					/* error */
xfs_btree_delrec(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			level,		/* level removing record from */
	int			*stat)		/* fail/done/go-on */
{
	struct xfs_btree_block	*block;		/* btree block */
	union xfs_btree_ptr	cptr;		/* current block ptr */
	struct xfs_buf		*bp;		/* buffer for block */
	int			error;		/* error return value */
	int			i;		/* temporary state */
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs = 0;	/* left record count */
	int			ptr;		/* key/record index */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	int			rrecs = 0;	/* right record count */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			numrecs;	/* temporary numrec count */

	tcur = NULL;

	/* Get the index of the entry being deleted, check for nothing there. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}

	/* Get the buffer & block containing the record or key/ptr. */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we're off the end of the block. */
	if (ptr > numrecs) {
		*stat = 0;
		return 0;
	}

	XFS_BTREE_STATS_INC(cur, delrec);
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);

	/* Excise the entries being deleted. */
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;

		lkp = xfs_btree_key_addr(cur, ptr + 1, block);
		lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);

		for (i = 0; i < numrecs - ptr; i++) {
			error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}

		if (ptr < numrecs) {
			xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
			xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
			xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
			xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
		}
	} else {
		/* It's a leaf. operate on records */
		if (ptr < numrecs) {
			xfs_btree_shift_recs(cur,
				xfs_btree_rec_addr(cur, ptr + 1, block),
				-1, numrecs - ptr);
			xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
		}
	}

	/*
	 * Decrement and log the number of entries in the block.
	 */
	xfs_btree_set_numrecs(block, --numrecs);
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, NULL,
					ptr, LASTREC_DELREC);
	}

	/*
	 * We're at the root level.  First, shrink the root block in-memory.
	 * Try to get rid of the next level down.  If we can't then there's
	 * nothing left to do.
	 */
	if (level == cur->bc_nlevels - 1) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			xfs_iroot_realloc(cur->bc_private.b.ip, -1,
					  cur->bc_private.b.whichfork);

			error = xfs_btree_kill_iroot(cur);
			if (error)
				goto error0;

			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			*stat = 1;
			return 0;
		}

		/*
		 * If this is the root level, and there's only one entry left,
		 * and it's NOT the leaf level, then we can get rid of this
		 * level.
		 */
		if (numrecs == 1 && level > 0) {
			union xfs_btree_ptr	*pp;
			/*
			 * pp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			pp = xfs_btree_ptr_addr(cur, 1, block);
			error = xfs_btree_kill_root(cur, bp, level, pp);
			if (error)
				goto error0;
		} else if (level > 0) {
			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
		}
		*stat = 1;
		return 0;
	}

	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
		/*
		 * One child of root, need to get a chance to copy its contents
		 * into the root and delete it. Can't go up to next level,
		 * there's nothing to delete there.
		 */
		if (xfs_btree_ptr_is_null(cur, &rptr) &&
		    xfs_btree_ptr_is_null(cur, &lptr) &&
		    level == cur->bc_nlevels - 2) {
			error = xfs_btree_kill_iroot(cur);
			if (!error)
				error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			return 0;
		}
	}

	ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
	       !xfs_btree_ptr_is_null(cur, &lptr));

	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;

	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		error = xfs_btree_increment(tcur, level, &i);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		i = xfs_btree_lastrec(tcur, level);
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		/* Grab a pointer to the block. */
		right = xfs_btree_get_block(tcur, level, &rbp);
#ifdef DEBUG
		error = xfs_btree_check_block(tcur, right, level, rbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);

		/*
		 * If right block is full enough so that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(right) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_lshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));

				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;

				error = xfs_btree_dec_cursor(cur, level, stat);
				if (error)
					goto error0;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = xfs_btree_get_numrecs(right);
		if (!xfs_btree_ptr_is_null(cur, &lptr)) {
			i = xfs_btree_firstrec(tcur, level);
			if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}

			error = xfs_btree_decrement(tcur, level, &i);
			if (error)
				goto error0;
			if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
		}
	}

	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &lptr)) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		/* Grab a pointer to the block. */
		left = xfs_btree_get_block(tcur, level, &lbp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, left, level, lbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);

		/*
		 * If left block is full enough so that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(left) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_rshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;
				if (level == 0)
					cur->bc_ptrs[0]++;

				*stat = 1;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in left for
		 * future reference.
		 */
		lrecs = xfs_btree_get_numrecs(left);
	}

	/* Delete the temp cursor, we're done with it. */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	tcur = NULL;

	/* If here, we need to do a join to keep the tree balanced. */
	ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));

	if (!xfs_btree_ptr_is_null(cur, &lptr) &&
	    lrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rptr = cptr;
		right = block;
		rbp = bp;
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;

	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
		   rrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lptr = cptr;
		left = block;
		lbp = bp;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;

	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	} else {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	rrecs = xfs_btree_get_numrecs(right);
	lrecs = xfs_btree_get_numrecs(left);

	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

		for (i = 1; i < rrecs; i++) {
			error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}

		xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
		xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);

		xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
		xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}

	XFS_BTREE_STATS_INC(cur, join);

	/*
	 * Fix up the number of records and right block pointer in the
	 * surviving block, and log it.  (NOTE: the trailing comma on the
	 * get_sibling line below is a comma operator, not a typo; it
	 * sequences the two calls just like a semicolon would.)
	 */
	xfs_btree_set_numrecs(left, lrecs + rrecs);
	xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB),
	xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/* If there is a right sibling, point it to the remaining block. */
	xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &cptr)) {
		error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Free the deleted block. */
	error = xfs_btree_free_block(cur, rbp);
	if (error)
		goto error0;

	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		cur->bc_bufs[level] = lbp;
		cur->bc_ptrs[level] += lrecs;
		cur->bc_ra[level] = 0;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
		 (level + 1 < cur->bc_nlevels)) {
		error = xfs_btree_increment(cur, level + 1, &i);
		if (error)
			goto error0;
	}

	/*
	 * Readjust the ptr at this level if it's not a leaf, since it's
	 * still pointing at the deletion point, which makes the cursor
	 * inconsistent.  If this makes the ptr 0, the caller fixes it up.
	 * We can't use decrement because it would change the next level up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;

	/*
	 * We combined blocks, so we have to update the parent keys if the
	 * btree supports overlapped intervals.  However, bc_ptrs[level + 1]
	 * points to the old block so that the caller knows which record to
	 * delete.  Therefore, the caller must be savvy enough to call updkeys
	 * for us if we return stat == 2.  The other exit points from this
	 * function don't require deletions further up the tree, so they can
	 * call updkeys directly.
	 */

	/* Return value means the next level up has something to do. */
	*stat = 2;
	return 0;

error0:
	if (tcur)
		xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
4127
4128
4129
4130
4131
4132
/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
int					/* error */
xfs_btree_delete(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			level;
	int			i;
	bool			joined = false;

	/*
	 * Go up the tree, starting at leaf level.
	 *
	 * If 2 is returned then a join was done; go to the next level.
	 * Otherwise we are done.
	 */
	for (level = 0, i = 2; i == 2; level++) {
		error = xfs_btree_delrec(cur, level, &i);
		if (error)
			goto error0;
		if (i == 2)
			joined = true;
	}

	/*
	 * If we combined blocks as part of deleting the record, delrec won't
	 * have updated the parent high keys so we have to do that here.
	 */
	if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
		error = xfs_btree_updkeys_force(cur, 0);
		if (error)
			goto error0;
	}

	/* If the cursor fell off the left edge, fix it up level by level. */
	if (i == 0) {
		for (level = 1; level < cur->bc_nlevels; level++) {
			if (cur->bc_ptrs[level] == 0) {
				error = xfs_btree_decrement(cur, level, &i);
				if (error)
					goto error0;
				break;
			}
		}
	}

	*stat = i;
	return 0;
error0:
	return error;
}
4183
4184
4185
4186
4187int
4188xfs_btree_get_rec(
4189 struct xfs_btree_cur *cur,
4190 union xfs_btree_rec **recp,
4191 int *stat)
4192{
4193 struct xfs_btree_block *block;
4194 struct xfs_buf *bp;
4195 int ptr;
4196#ifdef DEBUG
4197 int error;
4198#endif
4199
4200 ptr = cur->bc_ptrs[0];
4201 block = xfs_btree_get_block(cur, 0, &bp);
4202
4203#ifdef DEBUG
4204 error = xfs_btree_check_block(cur, block, 0, bp);
4205 if (error)
4206 return error;
4207#endif
4208
4209
4210
4211
4212 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
4213 *stat = 0;
4214 return 0;
4215 }
4216
4217
4218
4219
4220 *recp = xfs_btree_rec_addr(cur, ptr, block);
4221 *stat = 1;
4222 return 0;
4223}
4224
4225
4226STATIC int
4227xfs_btree_visit_block(
4228 struct xfs_btree_cur *cur,
4229 int level,
4230 xfs_btree_visit_blocks_fn fn,
4231 void *data)
4232{
4233 struct xfs_btree_block *block;
4234 struct xfs_buf *bp;
4235 union xfs_btree_ptr rptr;
4236 int error;
4237
4238
4239 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4240 block = xfs_btree_get_block(cur, level, &bp);
4241
4242
4243 error = fn(cur, level, data);
4244 if (error)
4245 return error;
4246
4247
4248 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4249 if (xfs_btree_ptr_is_null(cur, &rptr))
4250 return -ENOENT;
4251
4252 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4253}
4254
4255
4256
/* Visit every block in a btree. */
int
xfs_btree_visit_blocks(
	struct xfs_btree_cur		*cur,
	xfs_btree_visit_blocks_fn	fn,
	unsigned int			flags,
	void				*data)
{
	union xfs_btree_ptr		lptr;
	int				level;
	struct xfs_btree_block		*block = NULL;
	int				error = 0;

	/* Start at the root of the btree. */
	cur->bc_ops->init_ptr_from_cur(cur, &lptr);

	/* for each level */
	for (level = cur->bc_nlevels - 1; level >= 0; level--) {
		/* grab the left hand block */
		error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
		if (error)
			return error;

		/* readahead the left most block for the next level down */
		if (level > 0) {
			union xfs_btree_ptr	*ptr;

			ptr = xfs_btree_ptr_addr(cur, 1, block);
			xfs_btree_readahead_ptr(cur, ptr, 1);

			/* save for the next iteration of the loop */
			xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);

			if (!(flags & XFS_BTREE_VISIT_LEAVES))
				continue;
		} else if (!(flags & XFS_BTREE_VISIT_RECORDS)) {
			continue;
		}

		/*
		 * For each buffer in the level: xfs_btree_visit_block()
		 * returns -ENOENT when it runs off the right edge, which
		 * terminates this walk without being a real error.
		 */
		do {
			error = xfs_btree_visit_block(cur, level, fn, data);
		} while (!error);

		if (error != -ENOENT)
			return error;
	}

	return 0;
}
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
/* Context passed through xfs_btree_visit_blocks() for bulk owner changes. */
struct xfs_btree_block_change_owner_info {
	/* new value to write into each block's bb_owner field */
	uint64_t		new_owner;
	/* delwri queue used when there is no transaction context */
	struct list_head	*buffer_list;
};
4334
/* Visitor callback: change the owner of a single btree block. */
static int
xfs_btree_block_change_owner(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	struct xfs_btree_block_change_owner_info	*bbcoi = data;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	/* modify the owner */
	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
			return 0;
		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
	} else {
		if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner))
			return 0;
		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
	}

	/*
	 * If the block is a root block hosted in an inode, we might not have a
	 * buffer pointer here and we shouldn't attempt to log the change as the
	 * information is already held in the inode and discarded when the root
	 * block is formatted into the on-disk inode fork. We still change it,
	 * though, so everything is consistent in memory.
	 */
	if (!bp) {
		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
		ASSERT(level == cur->bc_nlevels - 1);
		return 0;
	}

	if (cur->bc_tp) {
		/* In a transaction the buffer must be ordered or we bail. */
		if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
			return -EAGAIN;
		}
	} else {
		/* No transaction: queue for delayed writeback. */
		xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
	}

	return 0;
}
4381
4382int
4383xfs_btree_change_owner(
4384 struct xfs_btree_cur *cur,
4385 uint64_t new_owner,
4386 struct list_head *buffer_list)
4387{
4388 struct xfs_btree_block_change_owner_info bbcoi;
4389
4390 bbcoi.new_owner = new_owner;
4391 bbcoi.buffer_list = buffer_list;
4392
4393 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
4394 XFS_BTREE_VISIT_ALL, &bbcoi);
4395}
4396
4397
4398xfs_failaddr_t
4399xfs_btree_lblock_v5hdr_verify(
4400 struct xfs_buf *bp,
4401 uint64_t owner)
4402{
4403 struct xfs_mount *mp = bp->b_mount;
4404 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4405
4406 if (!xfs_sb_version_hascrc(&mp->m_sb))
4407 return __this_address;
4408 if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
4409 return __this_address;
4410 if (block->bb_u.l.bb_blkno != cpu_to_be64(bp->b_bn))
4411 return __this_address;
4412 if (owner != XFS_RMAP_OWN_UNKNOWN &&
4413 be64_to_cpu(block->bb_u.l.bb_owner) != owner)
4414 return __this_address;
4415 return NULL;
4416}
4417
4418
4419xfs_failaddr_t
4420xfs_btree_lblock_verify(
4421 struct xfs_buf *bp,
4422 unsigned int max_recs)
4423{
4424 struct xfs_mount *mp = bp->b_mount;
4425 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4426
4427
4428 if (be16_to_cpu(block->bb_numrecs) > max_recs)
4429 return __this_address;
4430
4431
4432 if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
4433 !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))
4434 return __this_address;
4435 if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
4436 !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))
4437 return __this_address;
4438
4439 return NULL;
4440}
4441
4442
4443
4444
4445
4446
4447
4448xfs_failaddr_t
4449xfs_btree_sblock_v5hdr_verify(
4450 struct xfs_buf *bp)
4451{
4452 struct xfs_mount *mp = bp->b_mount;
4453 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4454 struct xfs_perag *pag = bp->b_pag;
4455
4456 if (!xfs_sb_version_hascrc(&mp->m_sb))
4457 return __this_address;
4458 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
4459 return __this_address;
4460 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
4461 return __this_address;
4462 if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
4463 return __this_address;
4464 return NULL;
4465}
4466
4467
4468
4469
4470
4471
4472
4473xfs_failaddr_t
4474xfs_btree_sblock_verify(
4475 struct xfs_buf *bp,
4476 unsigned int max_recs)
4477{
4478 struct xfs_mount *mp = bp->b_mount;
4479 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4480 xfs_agblock_t agno;
4481
4482
4483 if (be16_to_cpu(block->bb_numrecs) > max_recs)
4484 return __this_address;
4485
4486
4487 agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
4488 if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
4489 !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
4490 return __this_address;
4491 if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
4492 !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)))
4493 return __this_address;
4494
4495 return NULL;
4496}
4497
4498
4499
4500
4501
4502uint
4503xfs_btree_compute_maxlevels(
4504 uint *limits,
4505 unsigned long len)
4506{
4507 uint level;
4508 unsigned long maxblocks;
4509
4510 maxblocks = (len + limits[0] - 1) / limits[0];
4511 for (level = 1; maxblocks > 1; level++)
4512 maxblocks = (maxblocks + limits[1] - 1) / limits[1];
4513 return level;
4514}
4515
4516
4517
4518
4519
4520
/*
 * Query a regular btree for all records overlapping a given interval.
 * Start with a LE lookup of the key of the low record and return all
 * records until we find a record with a key greater than the key of the
 * high record.
 */
STATIC int
xfs_btree_simple_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*low_key,
	union xfs_btree_key		*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_rec		*recp;
	union xfs_btree_key		rec_key;
	int64_t				diff;
	int				stat;
	bool				firstrec = true;
	int				error;

	ASSERT(cur->bc_ops->init_high_key_from_rec);
	ASSERT(cur->bc_ops->diff_two_keys);

	/*
	 * Find the leftmost record.  The btree cursor must be set
	 * to the low record used to generate low_key.
	 */
	stat = 0;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
	if (error)
		goto out;

	/* Nothing?  See if there's anything to the right. */
	if (!stat) {
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			goto out;
	}

	while (stat) {
		/* Find the record. */
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || !stat)
			break;

		/* Skip if high_key(rec) < low_key. */
		if (firstrec) {
			cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
			firstrec = false;
			diff = cur->bc_ops->diff_two_keys(cur, low_key,
					&rec_key);
			if (diff > 0)
				goto advloop;
		}

		/* Stop if low_key(rec) > high_key. */
		cur->bc_ops->init_key_from_rec(&rec_key, recp);
		diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
		if (diff > 0)
			break;

		/* Callback */
		error = fn(cur, recp, priv);
		if (error)
			break;

advloop:
		/* Move on to the next record. */
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
	}

out:
	return error;
}
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
/*
 * Overlapped-interval range query.  In an overlapping btree each node
 * entry carries both a low and a high key, so a record or subtree
 * overlaps the query range [low_key, high_key] exactly when
 * entry's high key >= low_key AND entry's low key <= high_key.
 *
 * This is an explicit (non-recursive) depth-first walk that keeps its
 * per-level position in cur->bc_ptrs[]: descend into any child whose key
 * range overlaps the query, invoke fn() on each overlapping leaf record,
 * and pop back towards the root (label pop_up) when an entry lies wholly
 * past the query range.
 *
 * Returns 0, a negative errno from the btree walk, or whatever non-zero
 * value fn() returns (a non-zero fn() return also stops the walk).
 */
STATIC int
xfs_btree_overlapped_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_ptr		*low_key,
	union xfs_btree_key		*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_ptr		ptr;
	union xfs_btree_ptr		*pp;
	union xfs_btree_key		rec_key;	/* low key of a leaf record */
	union xfs_btree_key		rec_hkey;	/* high key of a leaf record */
	union xfs_btree_key		*lkp;
	union xfs_btree_key		*hkp;
	union xfs_btree_rec		*recp;
	struct xfs_btree_block		*block;
	int64_t				ldiff;
	int64_t				hdiff;
	int				level;
	struct xfs_buf			*bp;
	int				i;
	int				error;

	/* Load the root block of the btree and start at its first entry. */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
	if (error)
		return error;
	xfs_btree_get_block(cur, level, &bp);
	trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto out;
#endif
	cur->bc_ptrs[level] = 1;

	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		/* End of this block?  Pop up one level and advance. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
pop_up:
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		if (level == 0) {
			/* Leaf level: test the record against the range. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

			/* ldiff >= 0: record's high key is at/above low_key. */
			cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
			ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
					low_key);

			/* hdiff >= 0: record's low key is at/below high_key. */
			cur->bc_ops->init_key_from_rec(&rec_key, recp);
			hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
					&rec_key);

			/*
			 * Overlapping record: hand it to the callback.
			 * A record starting past the range (hdiff < 0) means
			 * nothing further right can overlap either, so pop
			 * up; otherwise just advance to the next record.
			 */
			if (ldiff >= 0 && hdiff >= 0) {
				error = fn(cur, recp, priv);
				if (error)
					break;
			} else if (hdiff < 0) {
				/* Record is past the query range; done here. */
				goto pop_up;
			}
			cur->bc_ptrs[level]++;
			continue;
		}

		/* Node level: test the child's key range the same way. */
		lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
		hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);

		ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
		hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);

		/*
		 * If the child's key range overlaps the query, descend into
		 * it and start at its first entry; if the child starts past
		 * the query range, pop up; otherwise skip to the next child.
		 */
		if (ldiff >= 0 && hdiff >= 0) {
			level--;
			error = xfs_btree_lookup_get_block(cur, level, pp,
					&block);
			if (error)
				goto out;
			xfs_btree_get_block(cur, level, &bp);
			trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
			error = xfs_btree_check_block(cur, block, level, bp);
			if (error)
				goto out;
#endif
			cur->bc_ptrs[level] = 1;
			continue;
		} else if (hdiff < 0) {
			/* Child subtree is past the query range; done here. */
			goto pop_up;
		}
		cur->bc_ptrs[level]++;
	}

out:
	/*
	 * If the walk did not end with the cursor pointing into a leaf
	 * block (bc_bufs[0] == NULL) -- e.g. a zero-result query or an
	 * error during the descent -- release any node-level buffers still
	 * held and reset the per-level cursor state so the buffers are not
	 * leaked.  NOTE(review): the leak rationale depends on cursor
	 * teardown behavior outside this file -- confirm against
	 * xfs_btree_del_cursor.
	 */
	if (cur->bc_bufs[0] == NULL) {
		for (i = 0; i < cur->bc_nlevels; i++) {
			if (cur->bc_bufs[i]) {
				xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
				cur->bc_bufs[i] = NULL;
				cur->bc_ptrs[i] = 0;
				cur->bc_ra[i] = 0;
			}
		}
	}

	return error;
}
4747
4748
4749
4750
4751
4752
4753
4754int
4755xfs_btree_query_range(
4756 struct xfs_btree_cur *cur,
4757 union xfs_btree_irec *low_rec,
4758 union xfs_btree_irec *high_rec,
4759 xfs_btree_query_range_fn fn,
4760 void *priv)
4761{
4762 union xfs_btree_rec rec;
4763 union xfs_btree_key low_key;
4764 union xfs_btree_key high_key;
4765
4766
4767 cur->bc_rec = *high_rec;
4768 cur->bc_ops->init_rec_from_cur(cur, &rec);
4769 cur->bc_ops->init_key_from_rec(&high_key, &rec);
4770
4771 cur->bc_rec = *low_rec;
4772 cur->bc_ops->init_rec_from_cur(cur, &rec);
4773 cur->bc_ops->init_key_from_rec(&low_key, &rec);
4774
4775
4776 if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
4777 return -EINVAL;
4778
4779 if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
4780 return xfs_btree_simple_query_range(cur, &low_key,
4781 &high_key, fn, priv);
4782 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
4783 fn, priv);
4784}
4785
4786
4787int
4788xfs_btree_query_all(
4789 struct xfs_btree_cur *cur,
4790 xfs_btree_query_range_fn fn,
4791 void *priv)
4792{
4793 union xfs_btree_key low_key;
4794 union xfs_btree_key high_key;
4795
4796 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
4797 memset(&low_key, 0, sizeof(low_key));
4798 memset(&high_key, 0xFF, sizeof(high_key));
4799
4800 return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv);
4801}
4802
4803
4804
4805
4806
4807unsigned long long
4808xfs_btree_calc_size(
4809 uint *limits,
4810 unsigned long long len)
4811{
4812 int level;
4813 int maxrecs;
4814 unsigned long long rval;
4815
4816 maxrecs = limits[0];
4817 for (level = 0, rval = 0; len > 1; level++) {
4818 len += maxrecs - 1;
4819 do_div(len, maxrecs);
4820 maxrecs = limits[1];
4821 rval += len;
4822 }
4823 return rval;
4824}
4825
4826static int
4827xfs_btree_count_blocks_helper(
4828 struct xfs_btree_cur *cur,
4829 int level,
4830 void *data)
4831{
4832 xfs_extlen_t *blocks = data;
4833 (*blocks)++;
4834
4835 return 0;
4836}
4837
4838
4839int
4840xfs_btree_count_blocks(
4841 struct xfs_btree_cur *cur,
4842 xfs_extlen_t *blocks)
4843{
4844 *blocks = 0;
4845 return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
4846 XFS_BTREE_VISIT_ALL, blocks);
4847}
4848
4849
4850int64_t
4851xfs_btree_diff_two_ptrs(
4852 struct xfs_btree_cur *cur,
4853 const union xfs_btree_ptr *a,
4854 const union xfs_btree_ptr *b)
4855{
4856 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
4857 return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
4858 return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
4859}
4860
4861
/*
 * Range-query callback for xfs_btree_has_record(): cancel the walk as
 * soon as any record is found.  The -ECANCELED return is the "record
 * exists" signal, decoded by the caller.
 */
STATIC int
xfs_btree_has_record_helper(
	struct xfs_btree_cur		*cur,
	union xfs_btree_rec		*rec,
	void				*priv)
{
	return -ECANCELED;
}
4870
4871
4872int
4873xfs_btree_has_record(
4874 struct xfs_btree_cur *cur,
4875 union xfs_btree_irec *low,
4876 union xfs_btree_irec *high,
4877 bool *exists)
4878{
4879 int error;
4880
4881 error = xfs_btree_query_range(cur, low, high,
4882 &xfs_btree_has_record_helper, NULL);
4883 if (error == -ECANCELED) {
4884 *exists = true;
4885 return 0;
4886 }
4887 *exists = false;
4888 return error;
4889}
4890
4891
4892bool
4893xfs_btree_has_more_records(
4894 struct xfs_btree_cur *cur)
4895{
4896 struct xfs_btree_block *block;
4897 struct xfs_buf *bp;
4898
4899 block = xfs_btree_get_block(cur, 0, &bp);
4900
4901
4902 if (cur->bc_ptrs[0] < xfs_btree_get_numrecs(block))
4903 return true;
4904
4905
4906 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
4907 return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK);
4908 else
4909 return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
4910}
4911