1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_bit.h"
25#include "xfs_mount.h"
26#include "xfs_defer.h"
27#include "xfs_inode.h"
28#include "xfs_trans.h"
29#include "xfs_inode_item.h"
30#include "xfs_buf_item.h"
31#include "xfs_btree.h"
32#include "xfs_error.h"
33#include "xfs_trace.h"
34#include "xfs_cksum.h"
35#include "xfs_alloc.h"
36#include "xfs_log.h"
37
38
39
40
/* Slab cache used to allocate and free struct xfs_btree_cur objects. */
kmem_zone_t	*xfs_btree_cur_zone;
42
43
44
45
46static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
47 { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
48 XFS_FIBT_MAGIC, 0 },
49 { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
50 XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC,
51 XFS_REFC_CRC_MAGIC }
52};
53#define xfs_btree_magic(cur) \
54 xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
55
/*
 * Sanity-check a long-format (64-bit sibling pointer) btree block header.
 *
 * Verifies magic number, level, and record count against the cursor's
 * expectations, and that both sibling pointers are non-zero and either
 * NULLFSBLOCK or a valid filesystem block number.  On CRC filesystems
 * the metadata UUID and the self-describing block number are checked
 * as well.
 *
 * Returns 0 if the block is sane, -EFSCORRUPTED otherwise (failure can
 * also be injected via XFS_ERRTAG_BTREE_CHECK_LBLOCK).
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer for block, if any */
{
	int			lblock_ok = 1;	/* block passes checks */
	struct xfs_mount	*mp;	/* file system mount point */

	mp = cur->bc_mp;

	/* v5 (CRC) blocks carry extra self-describing metadata. */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		lblock_ok = lblock_ok &&
			uuid_equal(&block->bb_u.l.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.l.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	/*
	 * Common header checks.  Note the siblings are also required to
	 * be non-zero: block 0 is never a valid btree block.
	 */
	lblock_ok = lblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		block->bb_u.l.bb_leftsib &&
		(block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
		block->bb_u.l.bb_rightsib &&
		(block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_rightsib)));

	if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_LBLOCK,
			XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
100
/*
 * Sanity-check a short-format (32-bit AG-relative sibling pointer)
 * btree block header.
 *
 * Like xfs_btree_check_lblock(), but sibling pointers are validated
 * against the AG length taken from the cursor's AGF buffer.  Returns
 * 0 if sane, -EFSCORRUPTED otherwise (failure can also be injected
 * via XFS_ERRTAG_BTREE_CHECK_SBLOCK).
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree short form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block */
{
	struct xfs_mount	*mp;	/* file system mount point */
	struct xfs_buf		*agbp;	/* buffer for ag. freespace struct */
	struct xfs_agf		*agf;	/* ag. freespace structure */
	xfs_agblock_t		agflen;	/* native ag. freespace length */
	int			sblock_ok = 1; /* block passes checks */

	mp = cur->bc_mp;
	agbp = cur->bc_private.a.agbp;
	agf = XFS_BUF_TO_AGF(agbp);
	agflen = be32_to_cpu(agf->agf_length);

	/* v5 (CRC) blocks carry extra self-describing metadata. */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		sblock_ok = sblock_ok &&
			uuid_equal(&block->bb_u.s.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.s.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	/*
	 * Common header checks.  Each sibling must be NULLAGBLOCK or lie
	 * within the AG, and must be non-zero (agbno 0 holds AG headers).
	 */
	sblock_ok = sblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		(block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
		block->bb_u.s.bb_leftsib &&
		(block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
		block->bb_u.s.bb_rightsib;

	if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_SBLOCK,
			XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
149
150
151
152
153int
154xfs_btree_check_block(
155 struct xfs_btree_cur *cur,
156 struct xfs_btree_block *block,
157 int level,
158 struct xfs_buf *bp)
159{
160 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
161 return xfs_btree_check_lblock(cur, block, level, bp);
162 else
163 return xfs_btree_check_sblock(cur, block, level, bp);
164}
165
166
167
168
/*
 * Check that a long-format (filesystem-wide) block pointer is sane:
 * it must belong to a non-leaf level (level > 0), not be NULLFSBLOCK,
 * and pass the filesystem block sanity check.
 *
 * Returns 0 if valid; otherwise XFS_WANT_CORRUPTED_RETURN reports
 * corruption and returns an error from this function.
 */
int					/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_fsblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLFSBLOCK &&
		XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
	return 0;
}
181
182#ifdef DEBUG
183
184
185
/*
 * Check that a short-format (AG-relative) block pointer is sane:
 * non-leaf level, not NULLAGBLOCK, not block 0 (AG headers live there),
 * and within the AG size from the superblock.
 *
 * Returns 0 if valid; XFS_WANT_CORRUPTED_RETURN reports corruption and
 * returns an error otherwise.  DEBUG builds only.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	xfs_agblock_t		agblocks = cur->bc_mp->m_sb.sb_agblocks;

	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLAGBLOCK &&
		bno != 0 &&
		bno < agblocks);
	return 0;
}
201
202
203
204
205STATIC int
206xfs_btree_check_ptr(
207 struct xfs_btree_cur *cur,
208 union xfs_btree_ptr *ptr,
209 int index,
210 int level)
211{
212 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
213 return xfs_btree_check_lptr(cur,
214 be64_to_cpu((&ptr->l)[index]), level);
215 } else {
216 return xfs_btree_check_sptr(cur,
217 be32_to_cpu((&ptr->s)[index]), level);
218 }
219}
220#endif
221
222
223
224
225
226
227
228
229
/*
 * Compute and store the CRC of a long-format btree block, stamping the
 * block with the last-modification LSN first if the buffer is logged.
 * No-op on non-CRC filesystems.
 */
void
xfs_btree_lblock_calc_crc(
	struct xfs_buf		*bp)	/* buffer containing the block */
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return;
	/* Record the LSN of the last transaction to touch this block. */
	if (bip)
		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
}
243
244bool
245xfs_btree_lblock_verify_crc(
246 struct xfs_buf *bp)
247{
248 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
249 struct xfs_mount *mp = bp->b_target->bt_mount;
250
251 if (xfs_sb_version_hascrc(&mp->m_sb)) {
252 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
253 return false;
254 return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
255 }
256
257 return true;
258}
259
260
261
262
263
264
265
266
267
/*
 * Compute and store the CRC of a short-format btree block, stamping the
 * block with the last-modification LSN first if the buffer is logged.
 * No-op on non-CRC filesystems.
 */
void
xfs_btree_sblock_calc_crc(
	struct xfs_buf		*bp)	/* buffer containing the block */
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return;
	/* Record the LSN of the last transaction to touch this block. */
	if (bip)
		block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}
281
282bool
283xfs_btree_sblock_verify_crc(
284 struct xfs_buf *bp)
285{
286 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
287 struct xfs_mount *mp = bp->b_target->bt_mount;
288
289 if (xfs_sb_version_hascrc(&mp->m_sb)) {
290 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
291 return false;
292 return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
293 }
294
295 return true;
296}
297
298static int
299xfs_btree_free_block(
300 struct xfs_btree_cur *cur,
301 struct xfs_buf *bp)
302{
303 int error;
304
305 error = cur->bc_ops->free_block(cur, bp);
306 if (!error) {
307 xfs_trans_binval(cur->bc_tp, bp);
308 XFS_BTREE_STATS_INC(cur, free);
309 }
310 return error;
311}
312
313
314
315
/*
 * Delete a btree cursor: release any buffers it still holds and free
 * the cursor memory.  'error' tells us whether the caller is tearing
 * down after a failure, which changes how much of the buffer array we
 * must scan.
 */
void
xfs_btree_del_cursor(
	xfs_btree_cur_t	*cur,		/* btree cursor to delete */
	int		error)		/* del because of error */
{
	int		i;		/* btree level */

	/*
	 * Release buffers at each level.  On the normal (non-error) path
	 * the buffers are populated bottom-up with no gaps, so the first
	 * NULL slot means there is nothing further to release and we can
	 * stop early.  On the error path intermediate slots may be NULL,
	 * so every level up to bc_nlevels is inspected.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_bufs[i])
			xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
		else if (!error)
			break;
	}

	/*
	 * A bmap cursor must not be thrown away with blocks still
	 * allocated to it (bc_private.b.allocated) — that would leak
	 * space.  Other btree types don't track this field.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);

	/* Return the cursor to its slab cache. */
	kmem_zone_free(xfs_btree_cur_zone, cur);
}
350
351
352
353
354
/*
 * Duplicate a btree cursor so an operation can be retried or a second
 * position tracked.  The per-btree dup_cursor op allocates the new
 * cursor; the current record, per-level positions, readahead state and
 * buffers are then copied across.  Buffers are re-read (lock recursion
 * through the transaction) rather than shared by pointer.
 *
 * On success *ncur is the new cursor and 0 is returned; on a read
 * failure the partially built cursor is destroyed, *ncur is set to
 * NULL and the error is returned.
 */
int					/* error */
xfs_btree_dup_cursor(
	xfs_btree_cur_t	*cur,		/* input cursor */
	xfs_btree_cur_t	**ncur)		/* output cursor */
{
	xfs_buf_t	*bp;		/* btree block's buffer pointer */
	int		error;		/* error return value */
	int		i;		/* level number of btree block */
	xfs_mount_t	*mp;		/* mount structure for filesystem */
	xfs_btree_cur_t	*new;		/* new cursor value */
	xfs_trans_t	*tp;		/* transaction pointer, can be NULL */

	tp = cur->bc_tp;
	mp = cur->bc_mp;

	/* Allocate a new cursor like the old one via the btree ops. */
	new = cur->bc_ops->dup_cursor(cur);

	/* Copy the record currently in the cursor. */
	new->bc_rec = cur->bc_rec;

	/* For each level, copy position and re-acquire the buffer. */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_ptrs[i] = cur->bc_ptrs[i];
		new->bc_ra[i] = cur->bc_ra[i];
		bp = cur->bc_bufs[i];
		if (bp) {
			error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
						   XFS_BUF_ADDR(bp), mp->m_bsize,
						   0, &bp,
						   cur->bc_ops->buf_ops);
			if (error) {
				/* Tear down what we built so far. */
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
		}
		new->bc_bufs[i] = bp;
	}
	*ncur = new;
	return 0;
}
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
482{
483 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
484 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
485 return XFS_BTREE_LBLOCK_CRC_LEN;
486 return XFS_BTREE_LBLOCK_LEN;
487 }
488 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
489 return XFS_BTREE_SBLOCK_CRC_LEN;
490 return XFS_BTREE_SBLOCK_LEN;
491}
492
493
494
495
496static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
497{
498 return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
499 sizeof(__be64) : sizeof(__be32);
500}
501
502
503
504
505STATIC size_t
506xfs_btree_rec_offset(
507 struct xfs_btree_cur *cur,
508 int n)
509{
510 return xfs_btree_block_len(cur) +
511 (n - 1) * cur->bc_ops->rec_len;
512}
513
514
515
516
517STATIC size_t
518xfs_btree_key_offset(
519 struct xfs_btree_cur *cur,
520 int n)
521{
522 return xfs_btree_block_len(cur) +
523 (n - 1) * cur->bc_ops->key_len;
524}
525
526
527
528
/*
 * Byte offset of the high key of the n'th key (1-based) in a btree
 * node block.  Overlapping btrees store a low/high key pair in each
 * key_len slot; the high key lives in the second half of the slot.
 */
STATIC size_t
xfs_btree_high_key_offset(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			n)	/* 1-based key index */
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
}
537
538
539
540
/*
 * Byte offset of the n'th block pointer (1-based) in a btree node
 * block.  Pointers follow the full key array, which is sized for the
 * maximum number of records at this level.
 */
STATIC size_t
xfs_btree_ptr_offset(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			n,	/* 1-based pointer index */
	int			level)	/* btree block level */
{
	return xfs_btree_block_len(cur) +
		cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
		(n - 1) * xfs_btree_ptr_len(cur);
}
551
552
553
554
555STATIC union xfs_btree_rec *
556xfs_btree_rec_addr(
557 struct xfs_btree_cur *cur,
558 int n,
559 struct xfs_btree_block *block)
560{
561 return (union xfs_btree_rec *)
562 ((char *)block + xfs_btree_rec_offset(cur, n));
563}
564
565
566
567
568STATIC union xfs_btree_key *
569xfs_btree_key_addr(
570 struct xfs_btree_cur *cur,
571 int n,
572 struct xfs_btree_block *block)
573{
574 return (union xfs_btree_key *)
575 ((char *)block + xfs_btree_key_offset(cur, n));
576}
577
578
579
580
581STATIC union xfs_btree_key *
582xfs_btree_high_key_addr(
583 struct xfs_btree_cur *cur,
584 int n,
585 struct xfs_btree_block *block)
586{
587 return (union xfs_btree_key *)
588 ((char *)block + xfs_btree_high_key_offset(cur, n));
589}
590
591
592
593
/*
 * Return the address of the n'th block pointer (1-based) in the given
 * btree node block.  Only node blocks (level > 0) contain pointers,
 * hence the assert.
 */
STATIC union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			n,	/* 1-based pointer index */
	struct xfs_btree_block	*block)	/* btree block */
{
	int			level = xfs_btree_get_level(block);

	/* Leaf blocks (level 0) have records, not pointers. */
	ASSERT(block->bb_level != 0);

	return (union xfs_btree_ptr *)
		((char *)block + xfs_btree_ptr_offset(cur, n, level));
}
607
608
609
610
611
612
613
/*
 * Return the root block of an inode-rooted btree (XFS_BTREE_ROOT_IN_INODE),
 * which lives in the broot area of the inode fork rather than in a buffer.
 */
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
	struct xfs_btree_cur	*cur)	/* btree cursor */
{
	struct xfs_ifork	*ifp;	/* inode fork holding the root */

	ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
	return (struct xfs_btree_block *)ifp->if_broot;
}
623
624
625
626
627
/*
 * Retrieve the block pointer from the cursor at the given level.
 * For inode-rooted btrees the topmost level is the root in the inode
 * fork, which has no buffer (*bpp is set to NULL); otherwise the block
 * comes from the cursor's buffer at that level.
 */
STATIC struct xfs_btree_block *	/* generic btree block pointer */
xfs_btree_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in btree */
	struct xfs_buf		**bpp)	/* buffer containing the block */
{
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*bpp = NULL;
		return xfs_btree_get_iroot(cur);
	}

	*bpp = cur->bc_bufs[level];
	return XFS_BUF_TO_BLOCK(*bpp);
}
643
644
645
646
647
/*
 * Get a buffer for the block identified by a long-form (filesystem-wide)
 * block number, without reading it from disk.
 */
xfs_buf_t *				/* buffer for fsbno */
xfs_btree_get_bufl(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_fsblock_t	fsbno,		/* file system block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t	d;		/* real disk block address */

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}
661
662
663
664
665
/*
 * Get a buffer for the block identified by a short-form (AG number +
 * AG-relative block) address, without reading it from disk.
 */
xfs_buf_t *				/* buffer for agno/agbno */
xfs_btree_get_bufs(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* allocation group block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t	d;		/* real disk block address */

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);
	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}
681
682
683
684
/*
 * Check for the cursor referring to the last block at the given level:
 * true iff the block at that level has a null right sibling.
 */
int					/* 1=is last block, 0=not last block */
xfs_btree_islastblock(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to check */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
	else
		return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}
700
701
702
703
704
/*
 * Point the cursor at the first record in the block at the given level.
 * Returns 1 on success, 0 if the block is empty.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_firstrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/* Get the block pointer for this level. */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);

	/*
	 * It's empty, there is no such record.  (Testing the big-endian
	 * field directly is fine: zero is zero in any byte order.)
	 */
	if (!block->bb_numrecs)
		return 0;

	/* Set the ptr value to 1, that's the first record/key. */
	cur->bc_ptrs[level] = 1;
	return 1;
}
729
730
731
732
733
/*
 * Point the cursor at the last record in the block at the given level.
 * Returns 1 on success, 0 if the block is empty.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_lastrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/* Get the block pointer for this level. */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);

	/*
	 * It's empty, there is no such record.  (Testing the big-endian
	 * field directly is fine: zero is zero in any byte order.)
	 */
	if (!block->bb_numrecs)
		return 0;

	/* Set the ptr value to numrecs, that's the last record/key. */
	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
	return 1;
}
758
759
760
761
762
/*
 * Compute first and last byte offsets for the fields given by a bit
 * mask.  'offsets' is a table of nbits + 1 entries: one byte offset per
 * field bit, plus a final sentinel holding the total structure size so
 * that the byte just before offsets[i + 1] is the last byte of field i.
 * 'fields' must be non-zero.
 */
void
xfs_btree_offsets(
	__int64_t	fields,		/* bitmask of fields */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of bits to inspect */
	int		*first,		/* output: first byte offset */
	int		*last)		/* output: last byte offset */
{
	int		i;		/* current bit number */
	__int64_t	imask;		/* mask for current bit number */

	ASSERT(fields != 0);

	/* Find the lowest set bit: its offset is the first byte. */
	for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
		if (imask & fields) {
			*first = offsets[i];
			break;
		}
	}

	/*
	 * Find the highest set bit: the byte before the next field's
	 * offset (or the sentinel) is the last byte.
	 */
	for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
		if (imask & fields) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}
794
795
796
797
798
/*
 * Get a buffer for the block identified by a long-form block number and
 * read it from disk, verifying with the supplied buffer ops.  On success
 * the buffer's reference class is set to 'refval' and *bpp is filled in.
 */
int					/* error */
xfs_btree_read_bufl(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_fsblock_t		fsbno,	/* file system block number */
	uint			lock,	/* lock flags for read_buf */
	struct xfs_buf		**bpp,	/* buffer for fsbno */
	int			refval,	/* ref count value for buffer */
	const struct xfs_buf_ops *ops)	/* verifier ops */
{
	struct xfs_buf		*bp;	/* return value */
	xfs_daddr_t		d;	/* real disk block address */
	int			error;

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
				   mp->m_bsize, lock, &bp, ops);
	if (error)
		return error;
	if (bp)
		xfs_buf_set_ref(bp, refval);
	*bpp = bp;
	return 0;
}
824
825
826
827
828
829
830void
831xfs_btree_reada_bufl(
832 struct xfs_mount *mp,
833 xfs_fsblock_t fsbno,
834 xfs_extlen_t count,
835 const struct xfs_buf_ops *ops)
836{
837 xfs_daddr_t d;
838
839 ASSERT(fsbno != NULLFSBLOCK);
840 d = XFS_FSB_TO_DADDR(mp, fsbno);
841 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
842}
843
844
845
846
847
848
849void
850xfs_btree_reada_bufs(
851 struct xfs_mount *mp,
852 xfs_agnumber_t agno,
853 xfs_agblock_t agbno,
854 xfs_extlen_t count,
855 const struct xfs_buf_ops *ops)
856{
857 xfs_daddr_t d;
858
859 ASSERT(agno != NULLAGNUMBER);
860 ASSERT(agbno != NULLAGBLOCK);
861 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
862 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
863}
864
/*
 * Issue sibling readahead for a long-pointer btree block.  'lr' selects
 * left and/or right siblings (XFS_BTCUR_LEFTRA/RIGHTRA); null siblings
 * are skipped.  Returns the number of readaheads issued.
 */
STATIC int
xfs_btree_readahead_lblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			lr,	/* left/right readahead flags */
	struct xfs_btree_block	*block)	/* block whose siblings to read */
{
	int			rval = 0;
	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, left, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, right, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}
889
/*
 * Issue sibling readahead for a short-pointer btree block, using the
 * cursor's AG number to form disk addresses.  Returns the number of
 * readaheads issued.
 */
STATIC int
xfs_btree_readahead_sblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			lr,	/* left/right readahead flags */
	struct xfs_btree_block	*block)	/* block whose siblings to read */
{
	int			rval = 0;
	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);


	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     left, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     right, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}
915
916
917
918
919
/*
 * Read-ahead btree blocks at the given level of the cursor, in the
 * direction(s) given by 'lr'.  Previously issued directions are
 * remembered in cur->bc_ra[lev] so each sibling is only read ahead
 * once.  Returns the number of readaheads issued (0 if nothing to do).
 */
STATIC int				/* readahead count */
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	int			lr)	/* left/right bits */
{
	struct xfs_btree_block	*block;

	/*
	 * The root of an inode-rooted btree lives in the inode fork, not
	 * in a buffer, so there is nothing to read ahead.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	/* Already issued readahead in all requested directions? */
	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}
946
/*
 * Convert a btree block pointer (long filesystem block number or short
 * AG-relative block number) to a raw disk address.  The pointer must
 * not be null; for short pointers the cursor must carry a valid AG.
 */
STATIC xfs_daddr_t
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr)	/* block pointer to convert */
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK));

		return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
	} else {
		ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
		ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));

		return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
					be32_to_cpu(ptr->s));
	}
}
964
965
966
967
968
969
970
/*
 * Issue readahead for 'count' blocks at the location named by a btree
 * block pointer, using the cursor's verifier ops.
 */
STATIC void
xfs_btree_readahead_ptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr,	/* block pointer to read ahead */
	xfs_extlen_t		count)	/* count of blocks */
{
	xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
			  xfs_btree_ptr_to_daddr(cur, ptr),
			  cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
}
981
982
983
984
985
/*
 * Set the buffer for level "lev" in the cursor to bp, releasing any
 * previous buffer held at that level.  Readahead state for the level
 * is reset; directions with a null sibling are marked done immediately
 * since there is nothing there to read ahead.
 */
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	xfs_buf_t		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}
1012
1013STATIC int
1014xfs_btree_ptr_is_null(
1015 struct xfs_btree_cur *cur,
1016 union xfs_btree_ptr *ptr)
1017{
1018 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1019 return ptr->l == cpu_to_be64(NULLFSBLOCK);
1020 else
1021 return ptr->s == cpu_to_be32(NULLAGBLOCK);
1022}
1023
1024STATIC void
1025xfs_btree_set_ptr_null(
1026 struct xfs_btree_cur *cur,
1027 union xfs_btree_ptr *ptr)
1028{
1029 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1030 ptr->l = cpu_to_be64(NULLFSBLOCK);
1031 else
1032 ptr->s = cpu_to_be32(NULLAGBLOCK);
1033}
1034
1035
1036
1037
/*
 * Get/set/init sibling pointers: copy the requested sibling pointer
 * (XFS_BB_LEFTSIB or XFS_BB_RIGHTSIB) out of a btree block into *ptr,
 * in the on-disk byte order appropriate to the cursor's pointer size.
 */
STATIC void
xfs_btree_get_sibling(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* block to read from */
	union xfs_btree_ptr	*ptr,	/* output sibling pointer */
	int			lr)	/* XFS_BB_LEFTSIB or XFS_BB_RIGHTSIB */
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->l = block->bb_u.l.bb_rightsib;
		else
			ptr->l = block->bb_u.l.bb_leftsib;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->s = block->bb_u.s.bb_rightsib;
		else
			ptr->s = block->bb_u.s.bb_leftsib;
	}
}
1059
/*
 * Store *ptr into the requested sibling pointer (XFS_BB_LEFTSIB or
 * XFS_BB_RIGHTSIB) of a btree block; the pointer is already in on-disk
 * byte order.
 */
STATIC void
xfs_btree_set_sibling(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* block to write to */
	union xfs_btree_ptr	*ptr,	/* new sibling pointer */
	int			lr)	/* XFS_BB_LEFTSIB or XFS_BB_RIGHTSIB */
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.l.bb_rightsib = ptr->l;
		else
			block->bb_u.l.bb_leftsib = ptr->l;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.s.bb_rightsib = ptr->s;
		else
			block->bb_u.s.bb_leftsib = ptr->s;
	}
}
1081
/*
 * Initialize an in-memory btree block header.  Sibling pointers are set
 * to the null sentinel for the chosen pointer size; on CRC-format
 * blocks (XFS_BTREE_CRC_BLOCKS in flags) the self-describing fields
 * (blkno, owner, uuid, lsn) are also filled in.  The CRC itself is not
 * computed here.
 */
void
xfs_btree_init_block_int(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_btree_block	*buf,	/* block header to initialize */
	xfs_daddr_t		blkno,	/* disk address, for CRC blocks */
	__u32			magic,	/* on-disk magic number */
	__u16			level,	/* btree block level */
	__u16			numrecs, /* initial record count */
	__u64			owner,	/* inode number or AG number */
	unsigned int		flags)	/* XFS_BTREE_* format flags */
{
	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (flags & XFS_BTREE_LONG_PTRS) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		/* Short-form owner field is 32 bits (an AG number). */
		__u32 __owner = (__u32)owner;

		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}
1121
/*
 * Initialize the btree block header held in a buffer, taking the disk
 * address from the buffer itself.  Thin wrapper around
 * xfs_btree_init_block_int().
 */
void
xfs_btree_init_block(
	struct xfs_mount *mp,		/* file system mount point */
	struct xfs_buf	*bp,		/* buffer containing the block */
	__u32		magic,		/* on-disk magic number */
	__u16		level,		/* btree block level */
	__u16		numrecs,	/* initial record count */
	__u64		owner,		/* inode number or AG number */
	unsigned int	flags)		/* XFS_BTREE_* format flags */
{
	xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 magic, level, numrecs, owner, flags);
}
1135
/*
 * Initialize a new btree block using the cursor to supply the magic
 * number, format flags and owner: the owning inode number for
 * long-pointer (inode-rooted/bmap) btrees, the AG number otherwise.
 */
STATIC void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing the block */
	int			level,	/* btree block level */
	int			numrecs) /* initial record count */
{
	__u64 owner;

	/*
	 * Long-pointer btrees hang off an inode, so the owner is the
	 * inode number; short-pointer btrees belong to an AG.
	 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 xfs_btree_magic(cur), level, numrecs,
				 owner, cur->bc_flags);
}
1160
1161
1162
1163
1164
1165
1166STATIC int
1167xfs_btree_is_lastrec(
1168 struct xfs_btree_cur *cur,
1169 struct xfs_btree_block *block,
1170 int level)
1171{
1172 union xfs_btree_ptr ptr;
1173
1174 if (level > 0)
1175 return 0;
1176 if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
1177 return 0;
1178
1179 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1180 if (!xfs_btree_ptr_is_null(cur, &ptr))
1181 return 0;
1182 return 1;
1183}
1184
1185STATIC void
1186xfs_btree_buf_to_ptr(
1187 struct xfs_btree_cur *cur,
1188 struct xfs_buf *bp,
1189 union xfs_btree_ptr *ptr)
1190{
1191 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1192 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
1193 XFS_BUF_ADDR(bp)));
1194 else {
1195 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
1196 XFS_BUF_ADDR(bp)));
1197 }
1198}
1199
/*
 * Set the buffer's LRU reference class according to the btree type so
 * the buffer cache can prioritize reclaim appropriately.  Unknown
 * btree types assert in DEBUG builds.
 */
STATIC void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp)	/* buffer to classify */
{
	switch (cur->bc_btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
		break;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
		break;
	case XFS_BTNUM_BMAP:
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		break;
	case XFS_BTNUM_RMAP:
		xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
		break;
	case XFS_BTNUM_REFC:
		xfs_buf_set_ref(bp, XFS_REFC_BTREE_REF);
		break;
	default:
		ASSERT(0);
	}
}
1227
/*
 * Get a buffer and block for the location named by a btree block
 * pointer WITHOUT reading from disk (for newly allocated blocks that
 * will be fully initialized).  Installs the cursor's verifier ops on
 * the buffer.  Returns -ENOMEM if no buffer could be obtained.
 */
STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr,	/* block pointer */
	int			flags,	/* get_buf flags */
	struct xfs_btree_block	**block, /* output block */
	struct xfs_buf		**bpp)	/* output buffer */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
				 mp->m_bsize, flags);

	if (!*bpp)
		return -ENOMEM;

	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1253
1254
1255
1256
1257
/*
 * Read in a buffer and block for the location named by a btree block
 * pointer, verifying with the cursor's buffer ops, and set the
 * buffer's LRU reference class for the btree type.
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr,	/* block pointer */
	int			flags,	/* read_buf flags */
	struct xfs_btree_block	**block, /* output block */
	struct xfs_buf		**bpp)	/* output buffer */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp,
				   cur->bc_ops->buf_ops);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1284
1285
1286
1287
1288STATIC void
1289xfs_btree_copy_keys(
1290 struct xfs_btree_cur *cur,
1291 union xfs_btree_key *dst_key,
1292 union xfs_btree_key *src_key,
1293 int numkeys)
1294{
1295 ASSERT(numkeys >= 0);
1296 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
1297}
1298
1299
1300
1301
1302STATIC void
1303xfs_btree_copy_recs(
1304 struct xfs_btree_cur *cur,
1305 union xfs_btree_rec *dst_rec,
1306 union xfs_btree_rec *src_rec,
1307 int numrecs)
1308{
1309 ASSERT(numrecs >= 0);
1310 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
1311}
1312
1313
1314
1315
1316STATIC void
1317xfs_btree_copy_ptrs(
1318 struct xfs_btree_cur *cur,
1319 union xfs_btree_ptr *dst_ptr,
1320 union xfs_btree_ptr *src_ptr,
1321 int numptrs)
1322{
1323 ASSERT(numptrs >= 0);
1324 memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
1325}
1326
1327
1328
1329
/*
 * Shift 'numkeys' keys one slot within a block.  'dir' is +1 to shift
 * toward higher indices, -1 toward lower; memmove handles the overlap.
 */
STATIC void
xfs_btree_shift_keys(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_key	*key,	/* first key to shift */
	int			dir,	/* +1 or -1 */
	int			numkeys) /* number of keys to shift */
{
	char			*dst_key;

	ASSERT(numkeys >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_key = (char *)key + (dir * cur->bc_ops->key_len);
	memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
}
1345
1346
1347
1348
/*
 * Shift 'numrecs' records one slot within a block.  'dir' is +1 to
 * shift toward higher indices, -1 toward lower; memmove handles the
 * overlap.
 */
STATIC void
xfs_btree_shift_recs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_rec	*rec,	/* first record to shift */
	int			dir,	/* +1 or -1 */
	int			numrecs) /* number of records to shift */
{
	char			*dst_rec;

	ASSERT(numrecs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
	memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
}
1364
1365
1366
1367
/*
 * Shift 'numptrs' block pointers one slot within a block.  'dir' is +1
 * to shift toward higher indices, -1 toward lower; memmove handles the
 * overlap.
 */
STATIC void
xfs_btree_shift_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr,	/* first pointer to shift */
	int			dir,	/* +1 or -1 */
	int			numptrs) /* number of pointers to shift */
{
	char			*dst_ptr;

	ASSERT(numptrs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
	memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
}
1383
1384
1385
1386
/*
 * Log key values from the btree block: records the byte range covering
 * keys [first, last] in the transaction.  For the inode-rooted root
 * (bp == NULL) the whole broot in the inode fork is logged instead.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer, or NULL for inode root */
	int			first,	/* first key index (1-based) */
	int			last)	/* last key index (1-based) */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1409
1410
1411
1412
/*
 * Log record values from the btree block: records the byte range
 * covering records [first, last] in the transaction.  Leaf records
 * never live in the inode root, so a buffer is always required.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing the block */
	int			first,	/* first record index (1-based) */
	int			last)	/* last record index (1-based) */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1430
1431
1432
1433
/*
 * Log block pointer fields from a btree block: records the byte range
 * covering pointers [first, last] in the transaction, using the block's
 * own level to locate the pointer array.  For the inode-rooted root
 * (bp == NULL) the whole broot in the inode fork is logged instead.
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer, or NULL for inode root */
	int			first,	/* first pointer index (1-based) */
	int			last)	/* last pointer index (1-based) */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1459
1460
1461
1462
/*
 * Log header fields from a btree block: translates the XFS_BB_* field
 * bitmask into a byte range via xfs_btree_offsets() and logs it in the
 * transaction.  The offset tables have one entry per field bit plus a
 * trailing total-length sentinel used to compute the last byte.  For
 * the inode-rooted root (bp == NULL) the whole broot is logged instead.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer, or NULL for inode root */
	int			fields)	/* mask of XFS_BB_* fields */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	/* Field offsets for short-form blocks + total-length sentinel. */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	/* Field offsets for long-form blocks + total-length sentinel. */
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBI(cur, bp, fields);

	if (bp) {
		int nbits;

		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
			/*
			 * CRC blocks have more fields; widen an "all
			 * fields" request to cover the CRC-only ones too.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1532
1533
1534
1535
1536
/*
 * Increment the cursor by one record at the given level.  On success
 * *stat is 1; if the cursor is already at the rightmost record of the
 * whole level, *stat is 0 (not an error).  A nonzero return is a real
 * error (e.g. -EFSCORRUPTED or a read failure).
 *
 * Algorithm: bump the position at 'level'; if that runs off the end of
 * the block, walk UP the tree to find the first ancestor with a next
 * entry, then walk back DOWN reading the leftmost path, resetting each
 * lower level's position to 1.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to increment at */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;	/* error return value */
	int			lev;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
1640
1641
1642
1643
1644
/*
 * Decrement the cursor by one record at the given level.
 * Moves to the left sibling block (and fixes up all parent levels)
 * when the current block is exhausted.  For nonzero levels the
 * leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success(1)/failure(0) */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;
		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers (last entry in each new block).
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
1738
/*
 * Get the block referenced by *pp for a lookup at the given level,
 * reusing the buffer already attached to the cursor when it matches.
 * Returns the block via *blkp; on success for a freshly read block the
 * buffer is attached to the cursor at this level.
 */
STATIC int				/* error */
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp)	/* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level for the disk address we are
	 * looking for re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	/* Check the inode owner since the verifiers don't. */
	if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
	    (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
	    be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
			cur->bc_private.b.ip->i_ino)
		goto out_bad;

	/* Did we get the level we were looking for? */
	if (be16_to_cpu((*blkp)->bb_level) != level)
		goto out_bad;

	/* Check that internal nodes actually have records. */
	if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0)
		goto out_bad;

	xfs_btree_setbuf(cur, level, bp);
	return 0;

out_bad:
	*blkp = NULL;
	xfs_trans_brelse(cur->bc_tp, bp);
	return -EFSCORRUPTED;
}
1795
1796
1797
1798
1799
1800
1801STATIC union xfs_btree_key *
1802xfs_lookup_get_search_key(
1803 struct xfs_btree_cur *cur,
1804 int level,
1805 int keyno,
1806 struct xfs_btree_block *block,
1807 union xfs_btree_key *kp)
1808{
1809 if (level == 0) {
1810 cur->bc_ops->init_key_from_rec(kp,
1811 xfs_btree_rec_addr(cur, keyno, block));
1812 return kp;
1813 }
1814
1815 return xfs_btree_key_addr(cur, keyno, block);
1816}
1817
1818
1819
1820
1821
/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if can't find any such record, 1 for success.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	__int64_t		diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, dir);

	XFS_BTREE_STATS_INC(cur, lookup);

	/* No such thing as a zero-level tree. */
	if (cur->bc_nlevels == 0)
		return -EFSCORRUPTED;

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level,
			 * we know we need to use the first entry in this
			 * block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				ASSERT(level == 0 && cur->bc_nlevels == 1);

				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

#ifdef DEBUG
			error = xfs_btree_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;
#endif
			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			cur->bc_ptrs[0] = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
			XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_ptrs[0] = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
1981
1982
1983STATIC union xfs_btree_key *
1984xfs_btree_high_key_from_key(
1985 struct xfs_btree_cur *cur,
1986 union xfs_btree_key *key)
1987{
1988 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
1989 return (union xfs_btree_key *)((char *)key +
1990 (cur->bc_ops->key_len / 2));
1991}
1992
1993
/* Determine the low (and high, if overlapped) keys of a leaf block */
STATIC void
xfs_btree_get_leaf_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	max_hkey;
	union xfs_btree_key	hkey;
	union xfs_btree_rec	*rec;
	union xfs_btree_key	*high;
	int			n;

	/* The low key is always derived from the first record. */
	rec = xfs_btree_rec_addr(cur, 1, block);
	cur->bc_ops->init_key_from_rec(key, rec);

	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		/*
		 * Record high keys are not sorted, so find the maximum
		 * by scanning every record in the block.
		 */
		cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			rec = xfs_btree_rec_addr(cur, n, block);
			cur->bc_ops->init_high_key_from_rec(&hkey, rec);
			if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
					> 0)
				max_hkey = hkey;
		}

		/* Stash the max high key in the second half of *key. */
		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
	}
}
2024
2025
/* Determine the low (and high, if overlapped) keys of a node block */
STATIC void
xfs_btree_get_node_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*max_hkey;
	union xfs_btree_key	*high;
	int			n;

	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		/* Low key is the first key's low half. */
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len / 2);

		/* High keys are not sorted; scan all of them for the max. */
		max_hkey = xfs_btree_high_key_addr(cur, 1, block);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			hkey = xfs_btree_high_key_addr(cur, n, block);
			if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
				max_hkey = hkey;
		}

		/* Stash the max high key in the second half of *key. */
		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
	} else {
		/* Non-overlapping: the first key alone describes the block. */
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len);
	}
}
2055
2056
2057STATIC void
2058xfs_btree_get_keys(
2059 struct xfs_btree_cur *cur,
2060 struct xfs_btree_block *block,
2061 union xfs_btree_key *key)
2062{
2063 if (be16_to_cpu(block->bb_level) == 0)
2064 xfs_btree_get_leaf_keys(cur, block, key);
2065 else
2066 xfs_btree_get_node_keys(cur, block, key);
2067}
2068
2069
2070
2071
2072
2073
2074
2075
2076static inline bool
2077xfs_btree_needs_key_update(
2078 struct xfs_btree_cur *cur,
2079 int ptr)
2080{
2081 return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
2082}
2083
2084
2085
2086
2087
2088
/*
 * Update parental low & high keys from some block all the way back to the
 * root of the btree.  Only used on overlapping btrees.  If @force_all is
 * false, stop as soon as a parent's keys already match.
 */
STATIC int
__xfs_btree_updkeys(
	struct xfs_btree_cur	*cur,
	int			level,
	struct xfs_btree_block	*block,
	struct xfs_buf		*bp0,
	bool			force_all)
{
	union xfs_btree_key	key;	/* keys from current level */
	union xfs_btree_key	*lkey;	/* keys from the next level up */
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*nlkey;	/* keys from the next level up */
	union xfs_btree_key	*nhkey;
	struct xfs_buf		*bp;
	int			ptr;

	ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);

	/* Exit if there aren't any parent levels to update. */
	if (level + 1 >= cur->bc_nlevels)
		return 0;

	trace_xfs_btree_updkeys(cur, level, bp0);

	/* key holds both halves; hkey points into its second half. */
	lkey = &key;
	hkey = xfs_btree_high_key_from_key(cur, lkey);
	xfs_btree_get_keys(cur, block, lkey);
	for (level++; level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
		trace_xfs_btree_updkeys(cur, level, bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
#endif
		ptr = cur->bc_ptrs[level];
		nlkey = xfs_btree_key_addr(cur, ptr, block);
		nhkey = xfs_btree_high_key_addr(cur, ptr, block);
		/* Stop unless forcing or either half of the key changed. */
		if (!force_all &&
		    !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
		      cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
			break;
		/* copy_keys copies the full key_len, i.e. both halves. */
		xfs_btree_copy_keys(cur, nlkey, lkey, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
		if (level + 1 >= cur->bc_nlevels)
			break;
		/* Recompute this level's keys for the next parent. */
		xfs_btree_get_node_keys(cur, block, lkey);
	}

	return 0;
}
2145
2146
2147STATIC int
2148xfs_btree_updkeys_force(
2149 struct xfs_btree_cur *cur,
2150 int level)
2151{
2152 struct xfs_buf *bp;
2153 struct xfs_btree_block *block;
2154
2155 block = xfs_btree_get_block(cur, level, &bp);
2156 return __xfs_btree_updkeys(cur, level, block, bp, true);
2157}
2158
2159
2160
2161
/*
 * Update the parent keys of the given level, progressing towards the root.
 */
STATIC int
xfs_btree_update_keys(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_key	*kp;
	union xfs_btree_key	key;
	int			ptr;

	ASSERT(level >= 0);

	block = xfs_btree_get_block(cur, level, &bp);
	/* Overlapping btrees update both low and high keys, separately. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
		return __xfs_btree_updkeys(cur, level, block, bp, false);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIK(cur, level, keyp);

	/*
	 * Go up the tree from this level toward the root.
	 * At each level, update the key value to the value input.
	 * Stop when we reach a level where the cursor isn't pointing
	 * at the first entry in the block.
	 */
	xfs_btree_get_keys(cur, block, &key);
	for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
#endif
		ptr = cur->bc_ptrs[level];
		kp = xfs_btree_key_addr(cur, ptr, block);
		xfs_btree_copy_keys(cur, kp, &key, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}
2210
2211
2212
2213
2214
2215
/*
 * Update the record referred to by cur to the value in the
 * given record. This either works (return 0) or gets an
 * EFSCORRUPTED error.
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;
	int			ptr;
	union xfs_btree_rec	*rp;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGR(cur, rec);

	/* Pick up the current block (cursor is at the leaf level 0). */
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		goto error0;
#endif

	/* Get the address of the rec to be updated. */
	ptr = cur->bc_ptrs[0];
	rp = xfs_btree_rec_addr(cur, ptr, block);

	/* Fill in the new contents and log them. */
	xfs_btree_copy_recs(cur, rp, rec, 1);
	xfs_btree_log_recs(cur, bp, ptr, ptr);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, 0)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_UPDATE);
	}

	/* Pass new key value up to our parent, if necessary. */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, 0);
		if (error)
			goto error0;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
2269
2270
2271
2272
2273
/*
 * Move 1 record left from cur/level if possible.
 * Update cur to reflect the new tree.
 */
STATIC int					/* error */
xfs_btree_lshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs;		/* left record count */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			rrecs;		/* right record count */
	union xfs_btree_ptr	lptr;		/* left btree pointer */
	union xfs_btree_key	*rkp = NULL;	/* right btree key */
	union xfs_btree_ptr	*rpp = NULL;	/* right address pointer */
	union xfs_btree_rec	*rrp = NULL;	/* right record pointer */
	int			error;		/* error return value */
	int			i;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* The root in an inode has no left sibling to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1)
		goto out0;

	/* Set up variables for this block as "right". */
	right = xfs_btree_get_block(cur, level, &rbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, right, level, rbp);
	if (error)
		goto error0;
#endif

	/* If we've got no left sibling then we can't shift an entry left. */
	xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &lptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	if (cur->bc_ptrs[level] <= 1)
		goto out0;

	/* Set up the left neighbor as "left". */
	error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	lrecs = xfs_btree_get_numrecs(left);
	if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	rrecs = xfs_btree_get_numrecs(right);

	/*
	 * We add one entry to the left side and remove one for the right side.
	 * Account for it here, the changes will be updated on disk and logged
	 * later.
	 */
	lrecs++;
	rrecs--;

	XFS_BTREE_STATS_INC(cur, lshift);
	XFS_BTREE_STATS_ADD(cur, moves, 1);

	/*
	 * If non-leaf, copy a key and a ptr to the left block.
	 * Log the changes to the left block.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);

		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, rpp, 0, level);
		if (error)
			goto error0;
#endif
		xfs_btree_copy_keys(cur, lkp, rkp, 1);
		xfs_btree_copy_ptrs(cur, lpp, rpp, 1);

		xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->keys_inorder(cur,
			xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, 1);
		xfs_btree_log_recs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->recs_inorder(cur,
			xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
	}

	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Slide the contents of right down one entry.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
#ifdef DEBUG
		int			i;	/* loop index */

		for (i = 0; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
			if (error)
				goto error0;
		}
#endif
		xfs_btree_shift_keys(cur,
				xfs_btree_key_addr(cur, 2, right),
				-1, rrecs);
		xfs_btree_shift_ptrs(cur,
				xfs_btree_ptr_addr(cur, 2, right),
				-1, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
	} else {
		/* It's a leaf. operate on records */
		xfs_btree_shift_recs(cur,
			xfs_btree_rec_addr(cur, 2, right),
			-1, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);
	}

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the left.
	 *
	 * NOTE(review): the XFS_WANT_CORRUPTED_GOTO below targets error0,
	 * which does not free tcur; on that (never-expected) corruption
	 * path the duplicated cursor would leak -- confirm against upstream.
	 */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error1;

		/* Update the parent high keys of the left block, if needed. */
		error = xfs_btree_update_keys(tcur, level);
		if (error)
			goto error1;

		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(cur, level);
	if (error)
		goto error0;

	/* Slide the cursor value left one. */
	cur->bc_ptrs[level]--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;

error1:
	XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
2473
2474
2475
2476
2477
/*
 * Move 1 record right from cur/level if possible.
 * Update cur to reflect the new tree.
 */
STATIC int					/* error */
xfs_btree_rshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	union xfs_btree_ptr	rptr;		/* right block pointer */
	union xfs_btree_key	*rkp;		/* right btree key */
	int			rrecs;		/* right record count */
	int			lrecs;		/* left record count */
	int			error;		/* error return value */
	int			i;		/* loop counter */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* The root in an inode has no right sibling to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1))
		goto out0;

	/* Set up variables for this block as "left". */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	/* If we've got no right sibling then we can't shift an entry right. */
	xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &rptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	if (cur->bc_ptrs[level] >= lrecs)
		goto out0;

	/* Set up the right neighbor as "right". */
	error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	rrecs = xfs_btree_get_numrecs(right);
	if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, rshift);
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/*
	 * Make a hole at the start of the right neighbor block, then
	 * copy the last left block entry to the hole.
	 */
	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;
		union xfs_btree_ptr	*rpp;

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

#ifdef DEBUG
		for (i = rrecs - 1; i >= 0; i--) {
			error = xfs_btree_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}
#endif

		xfs_btree_shift_keys(cur, rkp, 1, rrecs);
		xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);

#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, lpp, 0, level);
		if (error)
			goto error0;
#endif

		/* Now put the new data in, and log it. */
		xfs_btree_copy_keys(cur, rkp, lkp, 1);
		xfs_btree_copy_ptrs(cur, rpp, lpp, 1);

		xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);

		ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
			xfs_btree_key_addr(cur, 2, right)));
	} else {
		/* It's a leaf. make a hole in the records */
		union xfs_btree_rec	*lrp;
		union xfs_btree_rec	*rrp;

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_shift_recs(cur, rrp, 1, rrecs);

		/* Now put the new data in, and log it. */
		xfs_btree_copy_recs(cur, rrp, lrp, 1);
		xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
	}

	/*
	 * Decrement and log left's numrecs, bump and log right's numrecs.
	 */
	xfs_btree_set_numrecs(left, --lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, ++rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the right.
	 *
	 * NOTE(review): the XFS_WANT_CORRUPTED_GOTO below targets error0,
	 * which does not free tcur; on that (never-expected) corruption
	 * path the duplicated cursor would leak -- confirm against upstream.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;
	i = xfs_btree_lastrec(tcur, level);
	XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

	error = xfs_btree_increment(tcur, level, &i);
	if (error)
		goto error1;

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error1;
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(tcur, level);
	if (error)
		goto error1;

	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;

error1:
	XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
2649
2650
2651
2652
2653
2654
/*
 * Split cur/level block in half.
 * Return new block number and the key to its first
 * record (to be inserted into parent).
 */
STATIC int					/* error */
__xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,	/* in: left block; out: new right block */
	union xfs_btree_key	*key,	/* out: key of new right block */
	struct xfs_btree_cur	**curp,	/* out: cursor for parent insertion */
	int			*stat)	/* success/failure */
{
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	union xfs_btree_ptr	rrptr;		/* right-right sibling ptr */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	int			lrecs;
	int			rrecs;
	int			src_index;
	int			error;		/* error return value */
#ifdef DEBUG
	int			i;
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);

	XFS_BTREE_STATS_INC(cur, split);

	/* Set up left block (current one). */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	xfs_btree_buf_to_ptr(cur, lbp, &lptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block as "right". */
	error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* Fill in the btree header for the new right block. */
	xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);

	/*
	 * Split the entries between the old and the new block evenly.
	 * Make sure that if there's an odd number of entries now, that
	 * each new block will have the same number of entries.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	rrecs = lrecs / 2;
	if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
		rrecs++;
	src_index = (lrecs - rrecs + 1);

	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/* Adjust numrecs for the later get_*_keys() calls. */
	lrecs -= rrecs;
	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);

	/*
	 * Copy btree block entries from the left block over to the
	 * new block, the right. Update the right block and log the
	 * changes.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, src_index, left);
		lpp = xfs_btree_ptr_addr(cur, src_index, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

#ifdef DEBUG
		for (i = src_index; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}
#endif

		/* Copy the keys & pointers to the new block. */
		xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
		xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_node_keys(cur, right, key);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, src_index, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		/* Copy records to the new block. */
		xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_leaf_keys(cur, right, key);
	}

	/*
	 * Find the left block number by looking in the buffer.
	 * Adjust sibling pointers.
	 */
	xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);

	xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/*
	 * If there's a block to the new block's right, make that block
	 * point back to right instead of to left.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
		error = xfs_btree_read_buf_block(cur, &rrptr,
							0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the cursor is really in the right block, move it there.
	 * If it's just pointing past the last entry in left, then we'll
	 * insert there, so don't change anything in that case.
	 */
	if (cur->bc_ptrs[level] > lrecs + 1) {
		xfs_btree_setbuf(cur, level, rbp);
		cur->bc_ptrs[level] -= lrecs;
	}
	/*
	 * If there are more levels, we'll need another cursor which refers
	 * the right block, no matter where this cursor was.
	 */
	if (level + 1 < cur->bc_nlevels) {
		error = xfs_btree_dup_cursor(cur, curp);
		if (error)
			goto error0;
		(*curp)->bc_ptrs[level + 1]++;
	}
	*ptrp = rptr;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
2845
/*
 * Argument marshalling for handing a btree split off to a worker thread
 * (see xfs_btree_split_worker).  The caller waits on @done; the split's
 * return value comes back in @result.
 */
struct xfs_btree_split_args {
	struct xfs_btree_cur	*cur;
	int			level;
	union xfs_btree_ptr	*ptrp;
	union xfs_btree_key	*key;
	struct xfs_btree_cur	**curp;
	int			*stat;
	int			result;		/* __xfs_btree_split() return value */
	bool			kswapd;		/* submitting task is kswapd */
	struct completion	*done;
	struct work_struct	work;
};
2858
2859
2860
2861
/*
 * Stack switching interface for allocation: run the btree split on a
 * workqueue thread, which has a full stack available.
 */
static void
xfs_btree_split_worker(
	struct work_struct	*work)
{
	struct xfs_btree_split_args	*args = container_of(work,
						struct xfs_btree_split_args, work);
	unsigned long		pflags;
	unsigned long		new_pflags = PF_FSTRANS;

	/*
	 * we are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory
	 * reclaim in any way.
	 */
	if (args->kswapd)
		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	current_set_flags_nested(&pflags, new_pflags);

	args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
					 args->key, args->curp, args->stat);
	complete(args->done);

	current_restore_flags_nested(&pflags, new_pflags);
}
2888
2889
2890
2891
2892
2893
/*
 * BMBT split requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. For the other
 * btree types, just call directly to avoid the context switch overhead here.
 */
STATIC int					/* error */
xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_split_args	args;
	DECLARE_COMPLETION_ONSTACK(done);

	/* Only the bmap btree needs the deep-stack detour. */
	if (cur->bc_btnum != XFS_BTNUM_BMAP)
		return __xfs_btree_split(cur, level, ptrp, key, curp, stat);

	args.cur = cur;
	args.level = level;
	args.ptrp = ptrp;
	args.key = key;
	args.curp = curp;
	args.stat = stat;
	args.done = &done;
	args.kswapd = current_is_kswapd();
	INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
	queue_work(xfs_alloc_wq, &args.work);
	/* Synchronous: wait for the worker to finish the split. */
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}
2923
2924
2925
2926
2927
2928
/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.
 */
int						/* error */
xfs_btree_new_iroot(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			*logflags,	/* logging flags for inode */
	int			*stat)		/* return status - 0 fail */
{
	struct xfs_buf		*cbp;		/* buffer for cblock */
	struct xfs_btree_block	*block;		/* btree block */
	struct xfs_btree_block	*cblock;	/* child btree block */
	union xfs_btree_key	*ckp;		/* child key pointer */
	union xfs_btree_ptr	*cpp;		/* child ptr pointer */
	union xfs_btree_key	*kp;		/* pointer to btree key */
	union xfs_btree_ptr	*pp;		/* pointer to block addr */
	union xfs_btree_ptr	nptr;		/* new block addr */
	int			level;		/* btree level */
	int			error;		/* error return code */
#ifdef DEBUG
	int			i;		/* loop counter */
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, newroot);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);

	level = cur->bc_nlevels - 1;

	block = xfs_btree_get_iroot(cur);
	pp = xfs_btree_ptr_addr(cur, 1, block);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
	if (error)
		goto error0;
	if (*stat == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		return 0;
	}
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Copy the root into a real block. */
	error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
	if (error)
		goto error0;

	/*
	 * we can't just memcpy() the root in for CRC enabled btree blocks.
	 * In that case have to also ensure the blkno remains correct
	 */
	memcpy(cblock, block, xfs_btree_block_len(cur));
	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
		if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
			cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
		else
			cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
	}

	/* The tree grows a level; the root now has exactly one entry. */
	be16_add_cpu(&block->bb_level, 1);
	xfs_btree_set_numrecs(block, 1);
	cur->bc_nlevels++;
	cur->bc_ptrs[level + 1] = 1;

	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));

	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
		error = xfs_btree_check_ptr(cur, pp, i, level);
		if (error)
			goto error0;
	}
#endif
	xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));

#ifdef DEBUG
	error = xfs_btree_check_ptr(cur, &nptr, 0, level);
	if (error)
		goto error0;
#endif
	/* The root's single pointer now refers to the new child block. */
	xfs_btree_copy_ptrs(cur, pp, &nptr, 1);

	/* Shrink the inode root to hold just that one entry. */
	xfs_iroot_realloc(cur->bc_private.b.ip,
			  1 - xfs_btree_get_numrecs(cblock),
			  cur->bc_private.b.whichfork);

	xfs_btree_setbuf(cur, level, cbp);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
	xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
	xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));

	*logflags |=
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
	*stat = 1;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
3035
3036
3037
3038
/*
 * Allocate a new root block, fill it in with pointers to the two blocks
 * at the old root level, and bump the height of the tree by one.  Used
 * for trees whose root lives in a disk block (not in the inode fork).
 */
STATIC int				/* error */
xfs_btree_new_root(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* one half of the old root block */
	struct xfs_buf		*bp;	/* buffer containing block */
	int			error;	/* error return value */
	struct xfs_buf		*lbp;	/* left buffer pointer */
	struct xfs_btree_block	*left;	/* left btree block */
	struct xfs_buf		*nbp;	/* new (root) buffer */
	struct xfs_btree_block	*new;	/* new (root) btree block */
	int			nptr;	/* new value for key index, 1 or 2 */
	struct xfs_buf		*rbp;	/* right buffer pointer */
	struct xfs_btree_block	*right;	/* right btree block */
	union xfs_btree_ptr	rptr;
	union xfs_btree_ptr	lptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, newroot);

	/* initialise our start point from the cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &rptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block. */
	error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
	if (error)
		goto error0;

	/* Set the root in the holding structure, increasing the level by 1. */
	cur->bc_ops->set_root(cur, &lptr, 1);

	/*
	 * At the previous root level there are now two blocks: the old root,
	 * and the new block generated when it was split.  We don't know which
	 * one the cursor is pointing at, so we set up variables "left" and
	 * "right" for each case.
	 */
	block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
	if (error)
		goto error0;
#endif

	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/* Our block is left, pick up the right block. */
		lbp = bp;
		xfs_btree_buf_to_ptr(cur, lbp, &lptr);
		left = block;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;
		bp = rbp;
		nptr = 1;
	} else {
		/* Our block is right, pick up the left block. */
		rbp = bp;
		xfs_btree_buf_to_ptr(cur, rbp, &rptr);
		right = block;
		xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;
		bp = lbp;
		nptr = 2;
	}

	/* Fill in the new block's btree header and log it. */
	xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
	xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
	ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
			!xfs_btree_ptr_is_null(cur, &rptr));

	/* Fill in the key data in the new root. */
	if (xfs_btree_get_level(left) > 0) {
		/*
		 * Get the keys from the existing node blocks.
		 */
		xfs_btree_get_node_keys(cur, left,
				xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_node_keys(cur, right,
				xfs_btree_key_addr(cur, 2, new));
	} else {
		/*
		 * The keys for the root block come from the records in
		 * the leaf blocks below it.
		 */
		xfs_btree_get_leaf_keys(cur, left,
			xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_leaf_keys(cur, right,
			xfs_btree_key_addr(cur, 2, new));
	}
	xfs_btree_log_keys(cur, nbp, 1, 2);

	/* Fill in the pointer data in the new root. */
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
	xfs_btree_log_ptrs(cur, nbp, 1, 2);

	/* Fix up the cursor: point it at the new root block. */
	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
	cur->bc_ptrs[cur->bc_nlevels] = nptr;
	cur->bc_nlevels++;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;
}
3168
/*
 * Make room in the block the cursor points to at the given level so that
 * a new entry can be inserted.  Tries, in order: growing an inode-rooted
 * root block, replacing the inode root, shifting to the right sibling,
 * shifting to the left sibling, and finally splitting the block.
 */
STATIC int
xfs_btree_make_block_unfull(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* btree level */
	int			numrecs,/* # of recs in block */
	int			*oindex,/* old tree index */
	int			*index,	/* new tree index */
	union xfs_btree_ptr	*nptr,	/* new btree ptr */
	struct xfs_btree_cur	**ncur,	/* new btree cursor */
	union xfs_btree_key	*key,	/* key of new block */
	int			*stat)	/* success/failure */
{
	int			error = 0;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_inode *ip = cur->bc_private.b.ip;

		if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
			/* A root block that can be made bigger. */
			xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
			*stat = 1;
		} else {
			/* A root block that needs replacing */
			int logflags = 0;

			error = xfs_btree_new_iroot(cur, &logflags, stat);
			if (error || *stat == 0)
				return error;

			xfs_trans_log_inode(cur->bc_tp, ip, logflags);
		}

		return 0;
	}

	/* First, try shifting an entry to the right neighbor. */
	error = xfs_btree_rshift(cur, level, stat);
	if (error || *stat)
		return error;

	/* Next, try shifting an entry to the left neighbor. */
	error = xfs_btree_lshift(cur, level, stat);
	if (error)
		return error;

	if (*stat) {
		/* lshift moved our entry: re-read the insertion index. */
		*oindex = *index = cur->bc_ptrs[level];
		return 0;
	}

	/*
	 * Next, try splitting the current block in half.
	 *
	 * If this works we have to re-set our variables because we
	 * could be in a different block now.
	 */
	error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
	if (error || *stat == 0)
		return error;

	/* The split may have moved the cursor to the new block. */
	*index = cur->bc_ptrs[level];
	return 0;
}
3234
3235
3236
3237
3238
/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int
xfs_btree_insrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	union xfs_btree_ptr	*ptrp,	/* i/o: block number inserted */
	union xfs_btree_rec	*rec,	/* record to insert */
	union xfs_btree_key	*key,	/* i/o: block key for ptrp */
	struct xfs_btree_cur	**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* btree block */
	struct xfs_buf		*bp;	/* buffer for block */
	union xfs_btree_ptr	nptr;	/* new block ptr */
	struct xfs_btree_cur	*ncur;	/* new btree cursor */
	union xfs_btree_key	nkey;	/* new block key */
	union xfs_btree_key	*lkey;
	int			optr;	/* old key/record index */
	int			ptr;	/* key/record index */
	int			numrecs;/* number of records */
	int			error;	/* error return value */
#ifdef DEBUG
	int			i;
#endif
	xfs_daddr_t		old_bn;	/* block # before the un-full dance */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, &rec);

	ncur = NULL;
	lkey = &nkey;

	/*
	 * If we have an external root pointer, and we've made it to the
	 * root level of the tree, allocate a new root block and we're done.
	 */
	if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level >= cur->bc_nlevels)) {
		error = xfs_btree_new_root(cur, stat);
		xfs_btree_set_ptr_null(cur, ptrp);

		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		return error;
	}

	/* If we're off the left edge, return failure. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	optr = ptr;

	XFS_BTREE_STATS_INC(cur, insrec);

	/* Get pointers to the btree buffer and block. */
	block = xfs_btree_get_block(cur, level, &bp);
	old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;

	/* Check that the new entry is being inserted in the right place. */
	if (ptr <= numrecs) {
		if (level == 0) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rec,
				xfs_btree_rec_addr(cur, ptr, block)));
		} else {
			ASSERT(cur->bc_ops->keys_inorder(cur, key,
				xfs_btree_key_addr(cur, ptr, block)));
		}
	}
#endif

	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
	xfs_btree_set_ptr_null(cur, &nptr);
	if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
		error = xfs_btree_make_block_unfull(cur, level, numrecs,
					&optr, &ptr, &nptr, &ncur, lkey, stat);
		if (error || *stat == 0)
			goto error0;
	}

	/*
	 * The current block may have changed if the block was
	 * previously full and we have just made space in it.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		return error;
#endif

	/*
	 * At this point we know there's room for our new entry in the block
	 * we're pointing at.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);

	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*kp;
		union xfs_btree_ptr	*pp;

		kp = xfs_btree_key_addr(cur, ptr, block);
		pp = xfs_btree_ptr_addr(cur, ptr, block);

#ifdef DEBUG
		for (i = numrecs - ptr; i >= 0; i--) {
			error = xfs_btree_check_ptr(cur, pp, i, level);
			if (error)
				return error;
		}
#endif

		xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
		xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);

#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, ptrp, 0, level);
		if (error)
			goto error0;
#endif

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_keys(cur, kp, key, 1);
		xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
		numrecs++;
		xfs_btree_set_numrecs(block, numrecs);
		xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
		xfs_btree_log_keys(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->keys_inorder(cur, kp,
				xfs_btree_key_addr(cur, ptr + 1, block)));
		}
#endif
	} else {
		/* It's a leaf. make a hole in the records */
		union xfs_btree_rec	*rp;

		rp = xfs_btree_rec_addr(cur, ptr, block);

		xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_recs(cur, rp, rec, 1);
		xfs_btree_set_numrecs(block, ++numrecs);
		xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rp,
				xfs_btree_rec_addr(cur, ptr + 1, block)));
		}
#endif
	}

	/* Log the new number of records in the btree header. */
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we moved to a different block (old_bn changed, which can
	 * happen if make_block_unfull shifted or split the block), the
	 * parent key must be recomputed from this block's contents.
	 * Otherwise, if we inserted into the leftmost slot, just push
	 * the updated keys up through the usual mechanism.
	 */
	if (bp && bp->b_bn != old_bn) {
		xfs_btree_get_keys(cur, block, lkey);
	} else if (xfs_btree_needs_key_update(cur, optr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					ptr, LASTREC_INSREC);
	}

	/*
	 * Return the new block number, if any.
	 * If there is one, give back a record value and a cursor too.
	 */
	*ptrp = nptr;
	if (!xfs_btree_ptr_is_null(cur, &nptr)) {
		xfs_btree_copy_keys(cur, key, lkey, 1);
		*curp = ncur;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
3452
3453
3454
3455
3456
3457
3458
3459
/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor.  All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 */
int					/* error */
xfs_btree_insert(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			i;	/* result value, 0 for failure */
	int			level;	/* current level number in btree */
	union xfs_btree_ptr	nptr;	/* new block number (split result) */
	struct xfs_btree_cur	*ncur;	/* new cursor (split result) */
	struct xfs_btree_cur	*pcur;	/* previous level's cursor */
	union xfs_btree_key	bkey;	/* key of block to insert */
	union xfs_btree_key	*key;
	union xfs_btree_rec	rec;	/* record to insert */

	level = 0;
	ncur = NULL;
	pcur = cur;
	key = &bkey;

	xfs_btree_set_ptr_null(cur, &nptr);

	/* Make a key out of the record data to be inserted, and save it. */
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(key, &rec);

	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split block, that must mean that
	 * the insert is finished with this level.
	 */
	do {
		/*
		 * Insert nrec/nptr into this level of the tree.
		 * Note if we fail, nptr will be null.
		 */
		error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
				&ncur, &i);
		if (error) {
			if (pcur != cur)
				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
			goto error0;
		}

		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		level++;

		/*
		 * See if the cursor we just used is trash.
		 * Can't trash it if we're releasing the block it points to or
		 * if it's a new cursor (a split gave us one in ncur).
		 */
		if (pcur != cur &&
		    (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
			/* Save the state from the cursor before we trash it */
			if (cur->bc_ops->update_cursor)
				cur->bc_ops->update_cursor(pcur, cur);
			cur->bc_nlevels = pcur->bc_nlevels;
			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
		}
		/* If we got a new cursor, switch to it. */
		if (ncur) {
			pcur = ncur;
			ncur = NULL;
		}
	} while (!xfs_btree_ptr_is_null(cur, &nptr));

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = i;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
3534
3535
3536
3537
3538
3539
3540
3541
3542
/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the killroot names comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * inode we have to copy the single block it was pointing to into the
 * inode.
 */
STATIC int
xfs_btree_kill_iroot(
	struct xfs_btree_cur	*cur)
{
	int			whichfork = cur->bc_private.b.whichfork;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*cblock;	/* child block being absorbed */
	union xfs_btree_key	*kp;
	union xfs_btree_key	*ckp;
	union xfs_btree_ptr	*pp;
	union xfs_btree_ptr	*cpp;
	struct xfs_buf		*cbp;
	int			level;
	int			index;
	int			numrecs;
	int			error;
#ifdef DEBUG
	union xfs_btree_ptr	ptr;
	int			i;
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
	ASSERT(cur->bc_nlevels > 1);

	/*
	 * Don't deal with the root block needs to be a leaf case.
	 * We're just going to turn the thing back into extents anyway.
	 */
	level = cur->bc_nlevels - 1;
	if (level == 1)
		goto out0;

	/*
	 * Give up if the root has multiple children.
	 */
	block = xfs_btree_get_iroot(cur);
	if (xfs_btree_get_numrecs(block) != 1)
		goto out0;

	cblock = xfs_btree_get_block(cur, level - 1, &cbp);
	numrecs = xfs_btree_get_numrecs(cblock);

	/*
	 * Only do this if the next level will fit in the inode fork.
	 * Then the data must be copied up to the inode root and the
	 * child block freed, instead of freeing the root.
	 */
	if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, killroot);

#ifdef DEBUG
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif

	/* Grow the inode root so the child's entries all fit. */
	index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
	if (index) {
		xfs_iroot_realloc(cur->bc_private.b.ip, index,
				  cur->bc_private.b.whichfork);
		block = ifp->if_broot;
	}

	be16_add_cpu(&block->bb_numrecs, index);
	ASSERT(block->bb_numrecs == cblock->bb_numrecs);

	/* Copy the child's keys and pointers up into the root. */
	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, kp, ckp, numrecs);

	pp = xfs_btree_ptr_addr(cur, 1, block);
	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
	for (i = 0; i < numrecs; i++) {
		error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
	}
#endif
	xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);

	/* The absorbed child block is no longer needed; free it. */
	error = xfs_btree_free_block(cur, cbp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	cur->bc_bufs[level - 1] = NULL;
	be16_add_cpu(&block->bb_level, -1);
	xfs_trans_log_inode(cur->bc_tp, ip,
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	cur->bc_nlevels--;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}
3648
3649
3650
3651
3652STATIC int
3653xfs_btree_kill_root(
3654 struct xfs_btree_cur *cur,
3655 struct xfs_buf *bp,
3656 int level,
3657 union xfs_btree_ptr *newroot)
3658{
3659 int error;
3660
3661 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
3662 XFS_BTREE_STATS_INC(cur, killroot);
3663
3664
3665
3666
3667
3668 cur->bc_ops->set_root(cur, newroot, -1);
3669
3670 error = xfs_btree_free_block(cur, bp);
3671 if (error) {
3672 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3673 return error;
3674 }
3675
3676 cur->bc_bufs[level] = NULL;
3677 cur->bc_ra[level] = 0;
3678 cur->bc_nlevels--;
3679
3680 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3681 return 0;
3682}
3683
3684STATIC int
3685xfs_btree_dec_cursor(
3686 struct xfs_btree_cur *cur,
3687 int level,
3688 int *stat)
3689{
3690 int error;
3691 int i;
3692
3693 if (level > 0) {
3694 error = xfs_btree_decrement(cur, level, &i);
3695 if (error)
3696 return error;
3697 }
3698
3699 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3700 *stat = 1;
3701 return 0;
3702}
3703
3704
3705
3706
3707
3708
3709
/*
 * Single level of the btree record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int					/* error */
xfs_btree_delrec(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			level,		/* level removing record from */
	int			*stat)		/* fail/done/go-on */
{
	struct xfs_btree_block	*block;		/* btree block */
	union xfs_btree_ptr	cptr;		/* current block ptr */
	struct xfs_buf		*bp;		/* buffer for block */
	int			error;		/* error return value */
	int			i;		/* loop counter */
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs = 0;	/* left record count */
	int			ptr;		/* key/record index */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	int			rrecs = 0;	/* right record count */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			numrecs;	/* temporary numrec count */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	tcur = NULL;

	/* Get the index of the entry being deleted, check for nothing there. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	/* Get the buffer & block containing the record or key/ptr. */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we're off the end of the block. */
	if (ptr > numrecs) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	XFS_BTREE_STATS_INC(cur, delrec);
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);

	/* Excise the entries being deleted. */
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;

		lkp = xfs_btree_key_addr(cur, ptr + 1, block);
		lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);

#ifdef DEBUG
		for (i = 0; i < numrecs - ptr; i++) {
			error = xfs_btree_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}
#endif

		if (ptr < numrecs) {
			xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
			xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
			xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
			xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
		}
	} else {
		/* It's a leaf. operate on records */
		if (ptr < numrecs) {
			xfs_btree_shift_recs(cur,
				xfs_btree_rec_addr(cur, ptr + 1, block),
				-1, numrecs - ptr);
			xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
		}
	}

	/*
	 * Decrement and log the number of entries in the block.
	 */
	xfs_btree_set_numrecs(block, --numrecs);
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, NULL,
					ptr, LASTREC_DELREC);
	}

	/*
	 * We're at the root level.  First, shrink the root block in-memory.
	 * Try to get rid of the next level down.  If we can't then there's
	 * nothing left to do.
	 */
	if (level == cur->bc_nlevels - 1) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			xfs_iroot_realloc(cur->bc_private.b.ip, -1,
					  cur->bc_private.b.whichfork);

			error = xfs_btree_kill_iroot(cur);
			if (error)
				goto error0;

			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			*stat = 1;
			return 0;
		}

		/*
		 * If this is the root level, and there's only one entry left,
		 * and it's NOT the leaf level, then we can get rid of this
		 * level.
		 */
		if (numrecs == 1 && level > 0) {
			union xfs_btree_ptr	*pp;
			/*
			 * pp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			pp = xfs_btree_ptr_addr(cur, 1, block);
			error = xfs_btree_kill_root(cur, bp, level, pp);
			if (error)
				goto error0;
		} else if (level > 0) {
			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
		}
		*stat = 1;
		return 0;
	}

	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
		/*
		 * One child of root, need to get a chance to copy its contents
		 * into the root and delete it. Can't go up to next level,
		 * there's nothing to delete there.
		 */
		if (xfs_btree_ptr_is_null(cur, &rptr) &&
		    xfs_btree_ptr_is_null(cur, &lptr) &&
		    level == cur->bc_nlevels - 2) {
			error = xfs_btree_kill_iroot(cur);
			if (!error)
				error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			return 0;
		}
	}

	ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
	       !xfs_btree_ptr_is_null(cur, &lptr));

	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;

	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_increment(tcur, level, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		right = xfs_btree_get_block(tcur, level, &rbp);
#ifdef DEBUG
		error = xfs_btree_check_block(tcur, right, level, rbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);

		/*
		 * If right block is full enough so that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(right) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_lshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));

				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;

				error = xfs_btree_dec_cursor(cur, level, stat);
				if (error)
					goto error0;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = xfs_btree_get_numrecs(right);
		if (!xfs_btree_ptr_is_null(cur, &lptr)) {
			i = xfs_btree_firstrec(tcur, level);
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

			error = xfs_btree_decrement(tcur, level, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		}
	}

	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &lptr)) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		left = xfs_btree_get_block(tcur, level, &lbp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, left, level, lbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);

		/*
		 * If left block is full enough so that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(left) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_rshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;
				if (level == 0)
					cur->bc_ptrs[0]++;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 1;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in left for
		 * future reference.
		 */
		lrecs = xfs_btree_get_numrecs(left);
	}

	/* Delete the temp cursor, we're done with it. */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	tcur = NULL;

	/* If here, we need to do a join to keep the tree balanced. */
	ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));

	if (!xfs_btree_ptr_is_null(cur, &lptr) &&
	    lrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rptr = cptr;
		right = block;
		rbp = bp;
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;

	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
		   rrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lptr = cptr;
		left = block;
		lbp = bp;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;

	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	} else {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	rrecs = xfs_btree_get_numrecs(right);
	lrecs = xfs_btree_get_numrecs(left);

	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
		for (i = 1; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}
#endif
		xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
		xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);

		xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
		xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}

	XFS_BTREE_STATS_INC(cur, join);

	/*
	 * Fix up the number of records and right block pointer in the
	 * surviving block, and log it.
	 * NOTE(review): the trailing comma after xfs_btree_get_sibling() is a
	 * comma operator, not a typo with effect — both calls still execute;
	 * should probably be a semicolon for clarity.
	 */
	xfs_btree_set_numrecs(left, lrecs + rrecs);
	xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB),
	xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/* If there is a right sibling, point it to the remaining block. */
	xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &cptr)) {
		error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Free the deleted block. */
	error = xfs_btree_free_block(cur, rbp);
	if (error)
		goto error0;

	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		cur->bc_bufs[level] = lbp;
		cur->bc_ptrs[level] += lrecs;
		cur->bc_ra[level] = 0;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
		   (level + 1 < cur->bc_nlevels)) {
		error = xfs_btree_increment(cur, level + 1, &i);
		if (error)
			goto error0;
	}

	/*
	 * Readjust the ptr at this level if it's not a leaf, since it's
	 * still pointing at the deletion point, which makes the cursor
	 * inconsistent.  If this makes the ptr 0, the caller fixes it up.
	 * We can't use decrement because it would change the next level up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;

	/*
	 * We combined blocks, so we have to update the parent keys if the
	 * btree supports overlapped intervals.  However, bc_ptrs[level + 1]
	 * still points to the old block so that the caller knows which
	 * record to delete.  The caller is responsible for the key update
	 * when we return stat == 2 (see xfs_btree_delete).
	 */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	/* Return value of 2 means the next level up has work to do. */
	*stat = 2;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	if (tcur)
		xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
4216
4217
4218
4219
4220
4221
/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
int					/* error */
xfs_btree_delete(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			level;
	int			i;
	bool			joined = false;	/* any level did a block join */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/*
	 * Go up the tree, starting at leaf level.
	 *
	 * If 2 is returned then a join was done; go to the next level.
	 * Otherwise we are done.
	 */
	for (level = 0, i = 2; i == 2; level++) {
		error = xfs_btree_delrec(cur, level, &i);
		if (error)
			goto error0;
		if (i == 2)
			joined = true;
	}

	/*
	 * If we combined blocks as part of deleting the record, delrec won't
	 * have updated the parent high keys so we have to do that here.
	 */
	if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
		error = xfs_btree_updkeys_force(cur, 0);
		if (error)
			goto error0;
	}

	/* On failure, fix up any levels left pointing before the block. */
	if (i == 0) {
		for (level = 1; level < cur->bc_nlevels; level++) {
			if (cur->bc_ptrs[level] == 0) {
				error = xfs_btree_decrement(cur, level, &i);
				if (error)
					goto error0;
				break;
			}
		}
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = i;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
4276
4277
4278
4279
4280int
4281xfs_btree_get_rec(
4282 struct xfs_btree_cur *cur,
4283 union xfs_btree_rec **recp,
4284 int *stat)
4285{
4286 struct xfs_btree_block *block;
4287 struct xfs_buf *bp;
4288 int ptr;
4289#ifdef DEBUG
4290 int error;
4291#endif
4292
4293 ptr = cur->bc_ptrs[0];
4294 block = xfs_btree_get_block(cur, 0, &bp);
4295
4296#ifdef DEBUG
4297 error = xfs_btree_check_block(cur, block, 0, bp);
4298 if (error)
4299 return error;
4300#endif
4301
4302
4303
4304
4305 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
4306 *stat = 0;
4307 return 0;
4308 }
4309
4310
4311
4312
4313 *recp = xfs_btree_rec_addr(cur, ptr, block);
4314 *stat = 1;
4315 return 0;
4316}
4317
4318
4319STATIC int
4320xfs_btree_visit_block(
4321 struct xfs_btree_cur *cur,
4322 int level,
4323 xfs_btree_visit_blocks_fn fn,
4324 void *data)
4325{
4326 struct xfs_btree_block *block;
4327 struct xfs_buf *bp;
4328 union xfs_btree_ptr rptr;
4329 int error;
4330
4331
4332 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4333 block = xfs_btree_get_block(cur, level, &bp);
4334
4335
4336 error = fn(cur, level, data);
4337 if (error)
4338 return error;
4339
4340
4341 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4342 if (xfs_btree_ptr_is_null(cur, &rptr))
4343 return -ENOENT;
4344
4345 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4346}
4347
4348
4349
/*
 * Visit every block in a btree, one level at a time starting from the
 * root.  For each level we remember the leftmost pointer of the level
 * below, walk right across the level via xfs_btree_visit_block() until
 * it returns -ENOENT, then descend.
 */
int
xfs_btree_visit_blocks(
	struct xfs_btree_cur	*cur,
	xfs_btree_visit_blocks_fn	fn,
	void			*data)
{
	union xfs_btree_ptr	lptr;	/* leftmost block of next level down */
	int			level;
	struct xfs_btree_block	*block = NULL;
	int			error = 0;

	cur->bc_ops->init_ptr_from_cur(cur, &lptr);

	/* for each level of the tree ... */
	for (level = cur->bc_nlevels - 1; level >= 0; level--) {
		/* grab the left hand block */
		error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
		if (error)
			return error;

		/* readahead the left most block for the next level down */
		if (level > 0) {
			union xfs_btree_ptr     *ptr;

			ptr = xfs_btree_ptr_addr(cur, 1, block);
			xfs_btree_readahead_ptr(cur, ptr, 1);

			/* save for the next iteration of the loop */
			lptr = *ptr;
		}

		/* for each buffer in the level */
		do {
			error = xfs_btree_visit_block(cur, level, fn, data);
		} while (!error);

		/* -ENOENT just means we walked off the right edge */
		if (error != -ENOENT)
			return error;
	}

	return 0;
}
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
/* Context passed through xfs_btree_visit_blocks() for owner changes. */
struct xfs_btree_block_change_owner_info {
	__uint64_t		new_owner;	/* owner value stamped into each block */
	struct list_head	*buffer_list;	/* delwri list used when not in a transaction */
};
4421
/* Stamp the new owner into one btree block; callback for the block walk. */
static int
xfs_btree_block_change_owner(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	struct xfs_btree_block_change_owner_info	*bbcoi = data;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	/* modify the owner */
	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
	else
		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);

	/*
	 * If the block is a root block hosted in an inode, we might not have a
	 * buffer pointer here and we shouldn't attempt to log the change.
	 * Otherwise, log the owner change through the transaction if we have
	 * one (as an ordered buffer), or queue the buffer for delayed write
	 * when operating outside a transaction.
	 */
	if (bp) {
		if (cur->bc_tp) {
			xfs_trans_ordered_buf(cur->bc_tp, bp);
			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
		} else {
			xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
		}
	} else {
		/* only an inode-fork root may legitimately lack a buffer */
		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
		ASSERT(level == cur->bc_nlevels - 1);
	}

	return 0;
}
4460
4461int
4462xfs_btree_change_owner(
4463 struct xfs_btree_cur *cur,
4464 __uint64_t new_owner,
4465 struct list_head *buffer_list)
4466{
4467 struct xfs_btree_block_change_owner_info bbcoi;
4468
4469 bbcoi.new_owner = new_owner;
4470 bbcoi.buffer_list = buffer_list;
4471
4472 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
4473 &bbcoi);
4474}
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484bool
4485xfs_btree_sblock_v5hdr_verify(
4486 struct xfs_buf *bp)
4487{
4488 struct xfs_mount *mp = bp->b_target->bt_mount;
4489 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4490 struct xfs_perag *pag = bp->b_pag;
4491
4492 if (!xfs_sb_version_hascrc(&mp->m_sb))
4493 return false;
4494 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
4495 return false;
4496 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
4497 return false;
4498 if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
4499 return false;
4500 return true;
4501}
4502
4503
4504
4505
4506
4507
4508
4509bool
4510xfs_btree_sblock_verify(
4511 struct xfs_buf *bp,
4512 unsigned int max_recs)
4513{
4514 struct xfs_mount *mp = bp->b_target->bt_mount;
4515 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4516
4517
4518 if (be16_to_cpu(block->bb_numrecs) > max_recs)
4519 return false;
4520
4521
4522 if (!block->bb_u.s.bb_leftsib ||
4523 (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
4524 block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
4525 return false;
4526 if (!block->bb_u.s.bb_rightsib ||
4527 (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
4528 block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
4529 return false;
4530
4531 return true;
4532}
4533
4534
4535
4536
4537
/*
 * Calculate the number of btree levels needed to store a given number
 * of records.
 *
 * @mp:		mount point (unused here; kept for interface symmetry)
 * @limits:	limits[0] is the max records per leaf block, limits[1]
 *		the max records per node block
 * @len:	number of records the tree must hold
 */
uint
xfs_btree_compute_maxlevels(
	struct xfs_mount	*mp,
	uint			*limits,
	unsigned long		len)
{
	unsigned long		nblocks;
	uint			height = 1;

	/* Round up to the number of leaf blocks needed for @len records. */
	nblocks = (len + limits[0] - 1) / limits[0];

	/* Each further level fans in by limits[1] until one block is left. */
	while (nblocks > 1) {
		nblocks = (nblocks + limits[1] - 1) / limits[1];
		height++;
	}
	return height;
}
4552
4553
4554
4555
4556
4557
/*
 * Query a regular (non-overlapping) btree for all records overlapping
 * a given interval of keys.  Position the cursor at the first record
 * less than or equal to the low key, then visit records in key order
 * until a record's low key exceeds the query's high key, invoking @fn
 * on each in-range record.  @fn may return a negative errno or
 * XFS_BTREE_QUERY_RANGE_ABORT to terminate the iteration; either value
 * is passed back to the caller unchanged.
 */
STATIC int
xfs_btree_simple_query_range(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*low_key,
	union xfs_btree_key	*high_key,
	xfs_btree_query_range_fn fn,
	void			*priv)
{
	union xfs_btree_rec	*recp;
	union xfs_btree_key	rec_key;
	__int64_t		diff;
	int			stat;
	bool			firstrec = true;	/* first record needs an extra low-key check */
	int			error;

	ASSERT(cur->bc_ops->init_high_key_from_rec);
	ASSERT(cur->bc_ops->diff_two_keys);

	/*
	 * Find the leftmost candidate: the last record whose key is <=
	 * the low query key (the caller primed cur->bc_rec with the low
	 * record before calling us, which xfs_btree_lookup uses).
	 */
	stat = 0;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
	if (error)
		goto out;

	/* Nothing at or below the low key?  Try the record to the right. */
	if (!stat) {
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			goto out;
	}

	while (stat) {
		/* Read the record under the cursor. */
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || !stat)
			break;

		/*
		 * The LE lookup may have landed on a record entirely
		 * below the query range; skip it if its high key is
		 * still less than the query's low key.  Only the first
		 * record can need this.
		 */
		if (firstrec) {
			cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
			firstrec = false;
			diff = cur->bc_ops->diff_two_keys(cur, low_key,
					&rec_key);
			if (diff > 0)
				goto advloop;
		}

		/* Stop once the record's low key passes the query's high key. */
		cur->bc_ops->init_key_from_rec(&rec_key, recp);
		diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
		if (diff > 0)
			break;

		/* Record overlaps the query range; hand it to the callback. */
		error = fn(cur, recp, priv);
		if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
			break;

advloop:
		/* Move on to the next record. */
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
	}

out:
	return error;
}
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
/*
 * Query an overlapped-interval btree for all records overlapping a
 * given interval of keys.  Internal nodes of an overlapping btree
 * carry both a low and a high key per pointer, bounding the keyspace
 * of the entire subtree below; we walk the tree depth-first from the
 * root, descending into any child whose [low, high] keyspace overlaps
 * the query range and invoking @fn on every overlapping leaf record.
 * cur->bc_ptrs[level] tracks the 1-based slot being examined at each
 * level of the traversal.
 */
STATIC int
xfs_btree_overlapped_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*low_key,
	union xfs_btree_key		*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_ptr		ptr;
	union xfs_btree_ptr		*pp;
	union xfs_btree_key		rec_key;
	union xfs_btree_key		rec_hkey;
	union xfs_btree_key		*lkp;
	union xfs_btree_key		*hkp;
	union xfs_btree_rec		*recp;
	struct xfs_btree_block		*block;
	__int64_t			ldiff;
	__int64_t			hdiff;
	int				level;
	struct xfs_buf			*bp;
	int				i;
	int				error;

	/* Load the root block of the btree and start at its first slot. */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
	if (error)
		return error;
	xfs_btree_get_block(cur, level, &bp);
	trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto out;
#endif
	cur->bc_ptrs[level] = 1;

	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		/* End of this node; pop back towards the root. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
pop_up:
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		if (level == 0) {
			/* Leaf level: examine the record in this slot. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

			cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
			ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
					low_key);

			cur->bc_ops->init_key_from_rec(&rec_key, recp);
			hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
					&rec_key);

			/*
			 * If (record's high key >= query's low key) and
			 *    (query's high key >= record's low key), then
			 * this record overlaps the query range; invoke the
			 * callback on it.
			 */
			if (ldiff >= 0 && hdiff >= 0) {
				error = fn(cur, recp, priv);
				if (error < 0 ||
				    error == XFS_BTREE_QUERY_RANGE_ABORT)
					break;
			} else if (hdiff < 0) {
				/* Record is past the high key; pop up. */
				goto pop_up;
			}
			cur->bc_ptrs[level]++;
			continue;
		}

		/* Internal node: examine the keys/pointer in this slot. */
		lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
		hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);

		ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
		hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);

		/*
		 * If (pointer's high key >= query's low key) and
		 *    (query's high key >= pointer's low key), then the
		 * subtree may contain overlapping records; descend into it.
		 */
		if (ldiff >= 0 && hdiff >= 0) {
			level--;
			error = xfs_btree_lookup_get_block(cur, level, pp,
					&block);
			if (error)
				goto out;
			xfs_btree_get_block(cur, level, &bp);
			trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
			error = xfs_btree_check_block(cur, block, level, bp);
			if (error)
				goto out;
#endif
			cur->bc_ptrs[level] = 1;
			continue;
		} else if (hdiff < 0) {
			/* Subtree lies entirely past the high key; pop up. */
			goto pop_up;
		}
		cur->bc_ptrs[level]++;
	}

out:
	/*
	 * If we don't end this function with the cursor pointing at a
	 * record block (bc_bufs[0] set), a subsequent non-error cursor
	 * teardown would not release the node-level buffers, leaking
	 * them.  That is quite possible with a zero-result range query,
	 * so release the held buffers here if we aren't done with the
	 * cursor.
	 */
	if (cur->bc_bufs[0] == NULL) {
		for (i = 0; i < cur->bc_nlevels; i++) {
			if (cur->bc_bufs[i]) {
				xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
				cur->bc_bufs[i] = NULL;
				cur->bc_ptrs[i] = 0;
				cur->bc_ra[i] = 0;
			}
		}
	}

	return error;
}
4785
4786
4787
4788
4789
4790
4791
4792
4793int
4794xfs_btree_query_range(
4795 struct xfs_btree_cur *cur,
4796 union xfs_btree_irec *low_rec,
4797 union xfs_btree_irec *high_rec,
4798 xfs_btree_query_range_fn fn,
4799 void *priv)
4800{
4801 union xfs_btree_rec rec;
4802 union xfs_btree_key low_key;
4803 union xfs_btree_key high_key;
4804
4805
4806 cur->bc_rec = *high_rec;
4807 cur->bc_ops->init_rec_from_cur(cur, &rec);
4808 cur->bc_ops->init_key_from_rec(&high_key, &rec);
4809
4810 cur->bc_rec = *low_rec;
4811 cur->bc_ops->init_rec_from_cur(cur, &rec);
4812 cur->bc_ops->init_key_from_rec(&low_key, &rec);
4813
4814
4815 if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
4816 return -EINVAL;
4817
4818 if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
4819 return xfs_btree_simple_query_range(cur, &low_key,
4820 &high_key, fn, priv);
4821 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
4822 fn, priv);
4823}
4824
4825
4826
4827
4828
4829xfs_extlen_t
4830xfs_btree_calc_size(
4831 struct xfs_mount *mp,
4832 uint *limits,
4833 unsigned long long len)
4834{
4835 int level;
4836 int maxrecs;
4837 xfs_extlen_t rval;
4838
4839 maxrecs = limits[0];
4840 for (level = 0, rval = 0; len > 1; level++) {
4841 len += maxrecs - 1;
4842 do_div(len, maxrecs);
4843 maxrecs = limits[1];
4844 rval += len;
4845 }
4846 return rval;
4847}
4848
4849static int
4850xfs_btree_count_blocks_helper(
4851 struct xfs_btree_cur *cur,
4852 int level,
4853 void *data)
4854{
4855 xfs_extlen_t *blocks = data;
4856 (*blocks)++;
4857
4858 return 0;
4859}
4860
4861
4862int
4863xfs_btree_count_blocks(
4864 struct xfs_btree_cur *cur,
4865 xfs_extlen_t *blocks)
4866{
4867 *blocks = 0;
4868 return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
4869 blocks);
4870}
4871