1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_bit.h"
25#include "xfs_mount.h"
26#include "xfs_defer.h"
27#include "xfs_inode.h"
28#include "xfs_trans.h"
29#include "xfs_inode_item.h"
30#include "xfs_buf_item.h"
31#include "xfs_btree.h"
32#include "xfs_error.h"
33#include "xfs_trace.h"
34#include "xfs_cksum.h"
35#include "xfs_alloc.h"
36#include "xfs_log.h"
37
38
39
40
/* Slab cache for btree cursors, shared by all btree types. */
kmem_zone_t *xfs_btree_cur_zone;
42
43
44
45
46static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
47 { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
48 XFS_FIBT_MAGIC, 0 },
49 { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
50 XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC,
51 XFS_REFC_CRC_MAGIC }
52};
53#define xfs_btree_magic(cur) \
54 xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
55
/*
 * Validate a long-format (64-bit pointer) btree block header.
 *
 * Checks the magic number, level, record count and sibling pointers; on
 * v5 (CRC) filesystems the metadata uuid and self-describing block number
 * are verified as well.  @bp may be NULL for an incore root block, which
 * has no disk address.
 *
 * Returns 0 if the block passes, -EFSCORRUPTED otherwise.  Failure can
 * also be injected via the XFS_ERRTAG_BTREE_CHECK_LBLOCK error tag.
 */
STATIC int
xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer for block, if any */
{
	int			lblock_ok = 1; /* block passes checks */
	struct xfs_mount	*mp;	/* file system mount point */

	mp = cur->bc_mp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		/* CRC-format-only fields: uuid and self-describing blkno */
		lblock_ok = lblock_ok &&
			uuid_equal(&block->bb_u.l.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.l.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	/*
	 * Common header checks.  Each sibling pointer must be non-zero and
	 * either NULLFSBLOCK or a block address within the filesystem.
	 */
	lblock_ok = lblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		block->bb_u.l.bb_leftsib &&
		(block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
		block->bb_u.l.bb_rightsib &&
		(block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_rightsib)));

	if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_LBLOCK,
			XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
100
/*
 * Validate a short-format (32-bit pointer) btree block header.
 *
 * Like xfs_btree_check_lblock(), but sibling pointers are AG-relative and
 * are additionally bounds-checked against the AG length taken from the
 * AGF attached to the cursor.
 *
 * Returns 0 if the block passes, -EFSCORRUPTED otherwise.  Failure can
 * also be injected via the XFS_ERRTAG_BTREE_CHECK_SBLOCK error tag.
 */
STATIC int
xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree short form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block */
{
	struct xfs_mount	*mp;	/* file system mount point */
	struct xfs_buf		*agbp;	/* buffer for ag. freespace struct */
	struct xfs_agf		*agf;	/* ag. freespace structure */
	xfs_agblock_t		agflen;	/* native ag. freespace length */
	int			sblock_ok = 1; /* block passes checks */

	mp = cur->bc_mp;
	agbp = cur->bc_private.a.agbp;
	agf = XFS_BUF_TO_AGF(agbp);
	agflen = be32_to_cpu(agf->agf_length);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		/* CRC-format-only fields: uuid and self-describing blkno */
		sblock_ok = sblock_ok &&
			uuid_equal(&block->bb_u.s.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.s.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	/*
	 * Common header checks.  Each sibling must be NULLAGBLOCK or inside
	 * the AG, and non-zero (agbno 0 holds the superblock, never a btree
	 * block).
	 */
	sblock_ok = sblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		(block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
		block->bb_u.s.bb_leftsib &&
		(block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
		block->bb_u.s.bb_rightsib;

	if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_SBLOCK,
			XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
149
150
151
152
153int
154xfs_btree_check_block(
155 struct xfs_btree_cur *cur,
156 struct xfs_btree_block *block,
157 int level,
158 struct xfs_buf *bp)
159{
160 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
161 return xfs_btree_check_lblock(cur, block, level, bp);
162 else
163 return xfs_btree_check_sblock(cur, block, level, bp);
164}
165
166
167
168
/*
 * Sanity-check a long-format block pointer destined for @level.
 * Fails (returning -EFSCORRUPTED via XFS_WANT_CORRUPTED_RETURN) if the
 * pointer is used at the leaf level, is null, or lies outside the
 * filesystem.
 */
int
xfs_btree_check_lptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_fsblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLFSBLOCK &&
		XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
	return 0;
}
181
182#ifdef DEBUG
183
184
185
/*
 * Sanity-check a short-format block pointer destined for @level.
 * The block must be a real AG block: not null, not agbno 0 (the
 * superblock), and below the AG size.
 * NOTE(review): compares against sb_agblocks, so a pointer beyond the
 * real size of a runt last AG is not caught here — confirm intended.
 */
STATIC int
xfs_btree_check_sptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	xfs_agblock_t		agblocks = cur->bc_mp->m_sb.sb_agblocks;

	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLAGBLOCK &&
		bno != 0 &&
		bno < agblocks);
	return 0;
}
201
202
203
204
205STATIC int
206xfs_btree_check_ptr(
207 struct xfs_btree_cur *cur,
208 union xfs_btree_ptr *ptr,
209 int index,
210 int level)
211{
212 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
213 return xfs_btree_check_lptr(cur,
214 be64_to_cpu((&ptr->l)[index]), level);
215 } else {
216 return xfs_btree_check_sptr(cur,
217 be32_to_cpu((&ptr->s)[index]), level);
218 }
219}
220#endif
221
222
223
224
225
226
227
228
229
230void
231xfs_btree_lblock_calc_crc(
232 struct xfs_buf *bp)
233{
234 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
235 struct xfs_buf_log_item *bip = bp->b_fspriv;
236
237 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
238 return;
239 if (bip)
240 block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
241 xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
242}
243
244bool
245xfs_btree_lblock_verify_crc(
246 struct xfs_buf *bp)
247{
248 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
249 struct xfs_mount *mp = bp->b_target->bt_mount;
250
251 if (xfs_sb_version_hascrc(&mp->m_sb)) {
252 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
253 return false;
254 return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
255 }
256
257 return true;
258}
259
260
261
262
263
264
265
266
267
268void
269xfs_btree_sblock_calc_crc(
270 struct xfs_buf *bp)
271{
272 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
273 struct xfs_buf_log_item *bip = bp->b_fspriv;
274
275 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
276 return;
277 if (bip)
278 block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
279 xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
280}
281
282bool
283xfs_btree_sblock_verify_crc(
284 struct xfs_buf *bp)
285{
286 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
287 struct xfs_mount *mp = bp->b_target->bt_mount;
288
289 if (xfs_sb_version_hascrc(&mp->m_sb)) {
290 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
291 return false;
292 return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
293 }
294
295 return true;
296}
297
298static int
299xfs_btree_free_block(
300 struct xfs_btree_cur *cur,
301 struct xfs_buf *bp)
302{
303 int error;
304
305 error = cur->bc_ops->free_block(cur, bp);
306 if (!error) {
307 xfs_trans_binval(cur->bc_tp, bp);
308 XFS_BTREE_STATS_INC(cur, free);
309 }
310 return error;
311}
312
313
314
315
/*
 * Delete the btree cursor, releasing any buffers it still holds.
 *
 * @error is nonzero when the cursor is being torn down after a failure.
 */
void
xfs_btree_del_cursor(
	xfs_btree_cur_t	*cur,		/* btree cursor */
	int		error)		/* deleting because of error? */
{
	int		i;		/* btree level */

	/*
	 * Release the buffers.  On a normal delete the bc_bufs array is
	 * dense, so we can stop at the first empty slot; after an error
	 * some levels may not have been filled in, so every entry must be
	 * inspected for buffers to be unlocked.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_bufs[i])
			xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
		else if (!error)
			break;
	}
	/*
	 * A bmap btree cursor must not still have unaccounted allocated
	 * blocks when it is freed.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);
	/*
	 * Free the cursor back to the cursor zone.
	 */
	kmem_zone_free(xfs_btree_cur_zone, cur);
}
350
351
352
353
354
/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 * On failure the partially built cursor is torn down and *ncur is NULL.
 */
int					/* error */
xfs_btree_dup_cursor(
	xfs_btree_cur_t	*cur,		/* input cursor */
	xfs_btree_cur_t	**ncur)		/* output cursor */
{
	xfs_buf_t	*bp;		/* btree block's buffer pointer */
	int		error;		/* error return value */
	int		i;		/* level number of btree block */
	xfs_mount_t	*mp;		/* mount structure for filesystem */
	xfs_btree_cur_t	*new;		/* new cursor value */
	xfs_trans_t	*tp;		/* transaction pointer, can be NULL */

	tp = cur->bc_tp;
	mp = cur->bc_mp;

	/*
	 * Allocate a new cursor like the old one.
	 */
	new = cur->bc_ops->dup_cursor(cur);

	/*
	 * Copy the record currently in the cursor.
	 */
	new->bc_rec = cur->bc_rec;

	/*
	 * For each level current, re-get the buffer and copy the ptr value.
	 */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_ptrs[i] = cur->bc_ptrs[i];
		new->bc_ra[i] = cur->bc_ra[i];
		bp = cur->bc_bufs[i];
		if (bp) {
			error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   XFS_BUF_ADDR(bp), mp->m_bsize,
					   0, &bp,
					   cur->bc_ops->buf_ops);
			if (error) {
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
		}
		new->bc_bufs[i] = bp;
	}
	*ncur = new;
	return 0;
}
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
482{
483 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
484 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
485 return XFS_BTREE_LBLOCK_CRC_LEN;
486 return XFS_BTREE_LBLOCK_LEN;
487 }
488 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
489 return XFS_BTREE_SBLOCK_CRC_LEN;
490 return XFS_BTREE_SBLOCK_LEN;
491}
492
493
494
495
496static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
497{
498 return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
499 sizeof(__be64) : sizeof(__be32);
500}
501
502
503
504
505STATIC size_t
506xfs_btree_rec_offset(
507 struct xfs_btree_cur *cur,
508 int n)
509{
510 return xfs_btree_block_len(cur) +
511 (n - 1) * cur->bc_ops->rec_len;
512}
513
514
515
516
517STATIC size_t
518xfs_btree_key_offset(
519 struct xfs_btree_cur *cur,
520 int n)
521{
522 return xfs_btree_block_len(cur) +
523 (n - 1) * cur->bc_ops->key_len;
524}
525
526
527
528
529STATIC size_t
530xfs_btree_high_key_offset(
531 struct xfs_btree_cur *cur,
532 int n)
533{
534 return xfs_btree_block_len(cur) +
535 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
536}
537
538
539
540
541STATIC size_t
542xfs_btree_ptr_offset(
543 struct xfs_btree_cur *cur,
544 int n,
545 int level)
546{
547 return xfs_btree_block_len(cur) +
548 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
549 (n - 1) * xfs_btree_ptr_len(cur);
550}
551
552
553
554
555STATIC union xfs_btree_rec *
556xfs_btree_rec_addr(
557 struct xfs_btree_cur *cur,
558 int n,
559 struct xfs_btree_block *block)
560{
561 return (union xfs_btree_rec *)
562 ((char *)block + xfs_btree_rec_offset(cur, n));
563}
564
565
566
567
568STATIC union xfs_btree_key *
569xfs_btree_key_addr(
570 struct xfs_btree_cur *cur,
571 int n,
572 struct xfs_btree_block *block)
573{
574 return (union xfs_btree_key *)
575 ((char *)block + xfs_btree_key_offset(cur, n));
576}
577
578
579
580
581STATIC union xfs_btree_key *
582xfs_btree_high_key_addr(
583 struct xfs_btree_cur *cur,
584 int n,
585 struct xfs_btree_block *block)
586{
587 return (union xfs_btree_key *)
588 ((char *)block + xfs_btree_high_key_offset(cur, n));
589}
590
591
592
593
/*
 * Return the address of block pointer @n within @block.
 * Block pointers only exist in interior (level > 0) nodes.
 */
STATIC union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	int			level = xfs_btree_get_level(block);

	/* raw big-endian compare is fine: only testing for non-zero */
	ASSERT(block->bb_level != 0);

	return (union xfs_btree_ptr *)
		((char *)block + xfs_btree_ptr_offset(cur, n, level));
}
607
608
609
610
611
612
613
614STATIC struct xfs_btree_block *
615xfs_btree_get_iroot(
616 struct xfs_btree_cur *cur)
617{
618 struct xfs_ifork *ifp;
619
620 ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
621 return (struct xfs_btree_block *)ifp->if_broot;
622}
623
624
625
626
627
628STATIC struct xfs_btree_block *
629xfs_btree_get_block(
630 struct xfs_btree_cur *cur,
631 int level,
632 struct xfs_buf **bpp)
633{
634 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
635 (level == cur->bc_nlevels - 1)) {
636 *bpp = NULL;
637 return xfs_btree_get_iroot(cur);
638 }
639
640 *bpp = cur->bc_bufs[level];
641 return XFS_BUF_TO_BLOCK(*bpp);
642}
643
644
645
646
647
648xfs_buf_t *
649xfs_btree_get_bufl(
650 xfs_mount_t *mp,
651 xfs_trans_t *tp,
652 xfs_fsblock_t fsbno,
653 uint lock)
654{
655 xfs_daddr_t d;
656
657 ASSERT(fsbno != NULLFSBLOCK);
658 d = XFS_FSB_TO_DADDR(mp, fsbno);
659 return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
660}
661
662
663
664
665
666xfs_buf_t *
667xfs_btree_get_bufs(
668 xfs_mount_t *mp,
669 xfs_trans_t *tp,
670 xfs_agnumber_t agno,
671 xfs_agblock_t agbno,
672 uint lock)
673{
674 xfs_daddr_t d;
675
676 ASSERT(agno != NULLAGNUMBER);
677 ASSERT(agbno != NULLAGBLOCK);
678 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
679 return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
680}
681
682
683
684
/*
 * Return 1 if the cursor's block at @level has no right sibling,
 * i.e. it is the last block at that level.
 */
int					/* 1=is last block, 0=not last block */
xfs_btree_islastblock(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to check */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	block = xfs_btree_get_block(cur, level, &bp);
	/* NOTE(review): check result deliberately ignored here */
	xfs_btree_check_block(cur, block, level, bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
	else
		return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}
700
701
702
703
704
/*
 * Point the cursor at the first record in the block at @level.
 * Other levels in the cursor are unaffected.
 */
STATIC int				/* 1=success, 0=block is empty */
xfs_btree_firstrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	/*
	 * It's empty, there is no such record.  (Raw big-endian test for
	 * zero is endian-safe.)
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to 1, that's the first record/key.
	 */
	cur->bc_ptrs[level] = 1;
	return 1;
}
729
730
731
732
733
/*
 * Point the cursor at the last record in the block at @level.
 * Other levels in the cursor are unaffected.
 */
STATIC int				/* 1=success, 0=block is empty */
xfs_btree_lastrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	/*
	 * It's empty, there is no such record.  (Raw big-endian test for
	 * zero is endian-safe.)
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to numrecs, that's the last record/key.
	 */
	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
	return 1;
}
758
759
760
761
762
/*
 * Compute the first and last byte offsets covered by the given field
 * bitmask, using a table of per-field struct offsets.  The table is
 * expected to have nbits+1 entries so that offsets[i + 1] is the byte
 * just past field i.
 */
void
xfs_btree_offsets(
	__int64_t	fields,		/* bitmask of fields present */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of defined bits */
	int		*first,		/* out: first byte offset */
	int		*last)		/* out: last byte offset */
{
	int		i;		/* current bit number */
	__int64_t	imask;		/* mask for current bit number */

	ASSERT(fields != 0);
	/*
	 * Find the lowest set bit, giving the first byte offset.
	 */
	for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
		if (imask & fields) {
			*first = offsets[i];
			break;
		}
	}
	/*
	 * Find the highest set bit; the last byte of that field is one
	 * before the start of the next field.
	 */
	for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
		if (imask & fields) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}
794
795
796
797
798
799int
800xfs_btree_read_bufl(
801 struct xfs_mount *mp,
802 struct xfs_trans *tp,
803 xfs_fsblock_t fsbno,
804 uint lock,
805 struct xfs_buf **bpp,
806 int refval,
807 const struct xfs_buf_ops *ops)
808{
809 struct xfs_buf *bp;
810 xfs_daddr_t d;
811 int error;
812
813 ASSERT(fsbno != NULLFSBLOCK);
814 d = XFS_FSB_TO_DADDR(mp, fsbno);
815 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
816 mp->m_bsize, lock, &bp, ops);
817 if (error)
818 return error;
819 if (bp)
820 xfs_buf_set_ref(bp, refval);
821 *bpp = bp;
822 return 0;
823}
824
825
826
827
828
829
830void
831xfs_btree_reada_bufl(
832 struct xfs_mount *mp,
833 xfs_fsblock_t fsbno,
834 xfs_extlen_t count,
835 const struct xfs_buf_ops *ops)
836{
837 xfs_daddr_t d;
838
839 ASSERT(fsbno != NULLFSBLOCK);
840 d = XFS_FSB_TO_DADDR(mp, fsbno);
841 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
842}
843
844
845
846
847
848
849void
850xfs_btree_reada_bufs(
851 struct xfs_mount *mp,
852 xfs_agnumber_t agno,
853 xfs_agblock_t agbno,
854 xfs_extlen_t count,
855 const struct xfs_buf_ops *ops)
856{
857 xfs_daddr_t d;
858
859 ASSERT(agno != NULLAGNUMBER);
860 ASSERT(agbno != NULLAGBLOCK);
861 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
862 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
863}
864
865STATIC int
866xfs_btree_readahead_lblock(
867 struct xfs_btree_cur *cur,
868 int lr,
869 struct xfs_btree_block *block)
870{
871 int rval = 0;
872 xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib);
873 xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib);
874
875 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
876 xfs_btree_reada_bufl(cur->bc_mp, left, 1,
877 cur->bc_ops->buf_ops);
878 rval++;
879 }
880
881 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
882 xfs_btree_reada_bufl(cur->bc_mp, right, 1,
883 cur->bc_ops->buf_ops);
884 rval++;
885 }
886
887 return rval;
888}
889
890STATIC int
891xfs_btree_readahead_sblock(
892 struct xfs_btree_cur *cur,
893 int lr,
894 struct xfs_btree_block *block)
895{
896 int rval = 0;
897 xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib);
898 xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib);
899
900
901 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
902 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
903 left, 1, cur->bc_ops->buf_ops);
904 rval++;
905 }
906
907 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
908 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
909 right, 1, cur->bc_ops->buf_ops);
910 rval++;
911 }
912
913 return rval;
914}
915
916
917
918
919
/*
 * Initiate readahead for the siblings of the cursor's block at level
 * @lev, in the direction(s) requested by @lr, recording in bc_ra which
 * directions have already been read ahead so work isn't repeated.
 * Returns the number of readaheads issued.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right direction bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the
	 * btree root is stored in the inode.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	/* Nothing to do if these directions were already read ahead. */
	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}
946
947STATIC xfs_daddr_t
948xfs_btree_ptr_to_daddr(
949 struct xfs_btree_cur *cur,
950 union xfs_btree_ptr *ptr)
951{
952 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
953 ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK));
954
955 return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
956 } else {
957 ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
958 ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));
959
960 return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
961 be32_to_cpu(ptr->s));
962 }
963}
964
965
966
967
968
969
970
971STATIC void
972xfs_btree_readahead_ptr(
973 struct xfs_btree_cur *cur,
974 union xfs_btree_ptr *ptr,
975 xfs_extlen_t count)
976{
977 xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
978 xfs_btree_ptr_to_daddr(cur, ptr),
979 cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
980}
981
982
983
984
985
/*
 * Set the buffer for level @lev in the cursor to @bp, releasing any
 * buffer previously held there.  Pre-set the sibling readahead bits for
 * NULL siblings so we never try to read ahead past the edge of the tree.
 */
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	xfs_buf_t		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}
1012
1013STATIC int
1014xfs_btree_ptr_is_null(
1015 struct xfs_btree_cur *cur,
1016 union xfs_btree_ptr *ptr)
1017{
1018 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1019 return ptr->l == cpu_to_be64(NULLFSBLOCK);
1020 else
1021 return ptr->s == cpu_to_be32(NULLAGBLOCK);
1022}
1023
1024STATIC void
1025xfs_btree_set_ptr_null(
1026 struct xfs_btree_cur *cur,
1027 union xfs_btree_ptr *ptr)
1028{
1029 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1030 ptr->l = cpu_to_be64(NULLFSBLOCK);
1031 else
1032 ptr->s = cpu_to_be32(NULLAGBLOCK);
1033}
1034
1035
1036
1037
1038STATIC void
1039xfs_btree_get_sibling(
1040 struct xfs_btree_cur *cur,
1041 struct xfs_btree_block *block,
1042 union xfs_btree_ptr *ptr,
1043 int lr)
1044{
1045 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1046
1047 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1048 if (lr == XFS_BB_RIGHTSIB)
1049 ptr->l = block->bb_u.l.bb_rightsib;
1050 else
1051 ptr->l = block->bb_u.l.bb_leftsib;
1052 } else {
1053 if (lr == XFS_BB_RIGHTSIB)
1054 ptr->s = block->bb_u.s.bb_rightsib;
1055 else
1056 ptr->s = block->bb_u.s.bb_leftsib;
1057 }
1058}
1059
1060STATIC void
1061xfs_btree_set_sibling(
1062 struct xfs_btree_cur *cur,
1063 struct xfs_btree_block *block,
1064 union xfs_btree_ptr *ptr,
1065 int lr)
1066{
1067 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1068
1069 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1070 if (lr == XFS_BB_RIGHTSIB)
1071 block->bb_u.l.bb_rightsib = ptr->l;
1072 else
1073 block->bb_u.l.bb_leftsib = ptr->l;
1074 } else {
1075 if (lr == XFS_BB_RIGHTSIB)
1076 block->bb_u.s.bb_rightsib = ptr->s;
1077 else
1078 block->bb_u.s.bb_leftsib = ptr->s;
1079 }
1080}
1081
/*
 * Initialise an in-memory btree block header.
 *
 * Sibling pointers start out null.  @blkno, @owner, the uuid and lsn are
 * only written into the CRC (v5) variants of the header; short-format
 * blocks truncate @owner to the 32-bit AG number.
 */
void
xfs_btree_init_block_int(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	xfs_daddr_t		blkno,
	__u32			magic,
	__u16			level,
	__u16			numrecs,
	__u64			owner,
	unsigned int		flags)
{
	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (flags & XFS_BTREE_LONG_PTRS) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		/* owner is a 32 bit value on short blocks */
		__u32 __owner = (__u32)owner;

		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}
1121
/*
 * Initialise the btree block held in buffer @bp, using the buffer's own
 * disk address as the self-describing block number.
 */
void
xfs_btree_init_block(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	__u32		magic,
	__u16		level,
	__u16		numrecs,
	__u64		owner,
	unsigned int	flags)
{
	xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 magic, level, numrecs, owner, flags);
}
1135
/*
 * Initialise a new btree block for this cursor's btree, deriving the
 * magic number and flags from the cursor.
 */
STATIC void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{
	__u64			owner;

	/*
	 * Work out the block owner: inode-rooted (long pointer) btrees are
	 * owned by the inode, AG btrees by the allocation group.  The
	 * value is only actually written out for CRC-enabled blocks, but
	 * computing it unconditionally keeps this simple.
	 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 xfs_btree_magic(cur), level, numrecs,
				 owner, cur->bc_flags);
}
1160
1161
1162
1163
1164
1165
1166STATIC int
1167xfs_btree_is_lastrec(
1168 struct xfs_btree_cur *cur,
1169 struct xfs_btree_block *block,
1170 int level)
1171{
1172 union xfs_btree_ptr ptr;
1173
1174 if (level > 0)
1175 return 0;
1176 if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
1177 return 0;
1178
1179 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1180 if (!xfs_btree_ptr_is_null(cur, &ptr))
1181 return 0;
1182 return 1;
1183}
1184
1185STATIC void
1186xfs_btree_buf_to_ptr(
1187 struct xfs_btree_cur *cur,
1188 struct xfs_buf *bp,
1189 union xfs_btree_ptr *ptr)
1190{
1191 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1192 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
1193 XFS_BUF_ADDR(bp)));
1194 else {
1195 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
1196 XFS_BUF_ADDR(bp)));
1197 }
1198}
1199
/*
 * Set the buffer's LRU reference count according to the btree type, so
 * the most frequently used btree metadata stays cached longest.
 */
STATIC void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	switch (cur->bc_btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
		break;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
		break;
	case XFS_BTNUM_BMAP:
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		break;
	case XFS_BTNUM_RMAP:
		xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
		break;
	case XFS_BTNUM_REFC:
		xfs_buf_set_ref(bp, XFS_REFC_BTREE_REF);
		break;
	default:
		/* unknown btree type: programming error */
		ASSERT(0);
	}
}
1227
/*
 * Get a buffer for the block pointed to by @ptr without reading it from
 * disk (the caller will initialise the contents), and attach the
 * cursor's verifier ops to it.  Returns the buffer in *bpp and the block
 * within it in *block.
 */
STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
				 mp->m_bsize, flags);

	if (!*bpp)
		return -ENOMEM;

	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1253
1254
1255
1256
1257
/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointed to within it.  The buffer is verified by the
 * cursor's ops and given an LRU reference appropriate to its btree type.
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp,
				   cur->bc_ops->buf_ops);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}
1284
1285
1286
1287
1288STATIC void
1289xfs_btree_copy_keys(
1290 struct xfs_btree_cur *cur,
1291 union xfs_btree_key *dst_key,
1292 union xfs_btree_key *src_key,
1293 int numkeys)
1294{
1295 ASSERT(numkeys >= 0);
1296 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
1297}
1298
1299
1300
1301
1302STATIC void
1303xfs_btree_copy_recs(
1304 struct xfs_btree_cur *cur,
1305 union xfs_btree_rec *dst_rec,
1306 union xfs_btree_rec *src_rec,
1307 int numrecs)
1308{
1309 ASSERT(numrecs >= 0);
1310 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
1311}
1312
1313
1314
1315
1316STATIC void
1317xfs_btree_copy_ptrs(
1318 struct xfs_btree_cur *cur,
1319 union xfs_btree_ptr *dst_ptr,
1320 union xfs_btree_ptr *src_ptr,
1321 int numptrs)
1322{
1323 ASSERT(numptrs >= 0);
1324 memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
1325}
1326
1327
1328
1329
1330STATIC void
1331xfs_btree_shift_keys(
1332 struct xfs_btree_cur *cur,
1333 union xfs_btree_key *key,
1334 int dir,
1335 int numkeys)
1336{
1337 char *dst_key;
1338
1339 ASSERT(numkeys >= 0);
1340 ASSERT(dir == 1 || dir == -1);
1341
1342 dst_key = (char *)key + (dir * cur->bc_ops->key_len);
1343 memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
1344}
1345
1346
1347
1348
1349STATIC void
1350xfs_btree_shift_recs(
1351 struct xfs_btree_cur *cur,
1352 union xfs_btree_rec *rec,
1353 int dir,
1354 int numrecs)
1355{
1356 char *dst_rec;
1357
1358 ASSERT(numrecs >= 0);
1359 ASSERT(dir == 1 || dir == -1);
1360
1361 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
1362 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
1363}
1364
1365
1366
1367
1368STATIC void
1369xfs_btree_shift_ptrs(
1370 struct xfs_btree_cur *cur,
1371 union xfs_btree_ptr *ptr,
1372 int dir,
1373 int numptrs)
1374{
1375 char *dst_ptr;
1376
1377 ASSERT(numptrs >= 0);
1378 ASSERT(dir == 1 || dir == -1);
1379
1380 dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
1381 memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
1382}
1383
1384
1385
1386
/*
 * Log key values from the btree block, for keys @first through @last
 * (1-based).  Blocks rooted in an inode fork (bp == NULL) log the whole
 * broot instead.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1409
1410
1411
1412
/*
 * Log record values from the btree block, for records @first through
 * @last (1-based).  Records only exist in leaf blocks, which always live
 * in buffers, so there is no inode-root case here.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1430
1431
1432
1433
/*
 * Log block pointer fields from a btree block (nonleaf), for pointers
 * @first through @last (1-based).  Blocks rooted in an inode fork
 * (bp == NULL) log the whole broot instead.
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1459
1460
1461
1462
/*
 * Log fields of the btree block header selected by the @fields bitmask.
 * The per-field offset tables (short and long header layouts) end with
 * the total header length so xfs_btree_offsets() can compute the last
 * byte of the highest field.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBI(cur, bp, fields);

	if (bp) {
		int nbits;

		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
			/*
			 * We don't log the CRC when updating a btree block
			 * but instead recreate it during log recovery.  As
			 * the log buffers have checksums of their own this
			 * is safe and avoids logging a crc update in a lot
			 * of places.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}
1532
1533
1534
1535
1536
/*
 * Increment cursor by one record at the given level.
 * For nonzero levels the leaf-ward information is untouched.
 *
 * Returns 0 with *stat = 1 on success, 0 with *stat = 0 when there is no
 * next record (right edge of the tree), or a negative error.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers (start at record 1 in each new block).
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
1640
1641
1642
1643
1644
/*
 * Decrement the cursor by one record at the given level.
 * Mirror image of xfs_btree_increment: when we cross a block boundary,
 * walk up until a decrement stays in-block, then walk back down taking
 * the rightmost record of each new child.
 * *stat is set to 1 on success, 0 when we ran off the left edge.
 */
int
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;
		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers (rightmost record of each new child).
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
1738
/*
 * Get the block pointed to by pp for a lookup at the given level,
 * returning it in *blkp.  Reuses the cursor's cached buffer for this
 * level when it already maps the right disk address, so repeated
 * lookups avoid re-reading (and re-locking) the same block.
 */
STATIC int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp) /* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level for the disk address we are
	 * looking for re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	xfs_btree_setbuf(cur, level, bp);
	return 0;
}
1775
1776
1777
1778
1779
1780
1781STATIC union xfs_btree_key *
1782xfs_lookup_get_search_key(
1783 struct xfs_btree_cur *cur,
1784 int level,
1785 int keyno,
1786 struct xfs_btree_block *block,
1787 union xfs_btree_key *kp)
1788{
1789 if (level == 0) {
1790 cur->bc_ops->init_key_from_rec(kp,
1791 xfs_btree_rec_addr(cur, keyno, block));
1792 return kp;
1793 }
1794
1795 return xfs_btree_key_addr(cur, keyno, block);
1796}
1797
1798
1799
1800
1801
/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if can't find any such record, 1 for success.
 * Descends from the root doing a binary search at each level; dir
 * selects LE/EQ/GE semantics for where the cursor lands at the leaf.
 */
int
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	__int64_t		diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, dir);

	XFS_BTREE_STATS_INC(cur, lookup);

	/* No such thing as a zero-level tree. */
	if (cur->bc_nlevels == 0)
		return -EFSCORRUPTED;

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level, we
			 * know we need to use the first entry in this block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				ASSERT(level == 0 && cur->bc_nlevels == 1);

				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

#ifdef DEBUG
			error = xfs_btree_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;
#endif
			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			cur->bc_ptrs[0] = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
			XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_ptrs[0] = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
1961
1962
1963STATIC union xfs_btree_key *
1964xfs_btree_high_key_from_key(
1965 struct xfs_btree_cur *cur,
1966 union xfs_btree_key *key)
1967{
1968 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
1969 return (union xfs_btree_key *)((char *)key +
1970 (cur->bc_ops->key_len / 2));
1971}
1972
1973
1974STATIC void
1975xfs_btree_get_leaf_keys(
1976 struct xfs_btree_cur *cur,
1977 struct xfs_btree_block *block,
1978 union xfs_btree_key *key)
1979{
1980 union xfs_btree_key max_hkey;
1981 union xfs_btree_key hkey;
1982 union xfs_btree_rec *rec;
1983 union xfs_btree_key *high;
1984 int n;
1985
1986 rec = xfs_btree_rec_addr(cur, 1, block);
1987 cur->bc_ops->init_key_from_rec(key, rec);
1988
1989 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
1990
1991 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
1992 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
1993 rec = xfs_btree_rec_addr(cur, n, block);
1994 cur->bc_ops->init_high_key_from_rec(&hkey, rec);
1995 if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
1996 > 0)
1997 max_hkey = hkey;
1998 }
1999
2000 high = xfs_btree_high_key_from_key(cur, key);
2001 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
2002 }
2003}
2004
2005
2006STATIC void
2007xfs_btree_get_node_keys(
2008 struct xfs_btree_cur *cur,
2009 struct xfs_btree_block *block,
2010 union xfs_btree_key *key)
2011{
2012 union xfs_btree_key *hkey;
2013 union xfs_btree_key *max_hkey;
2014 union xfs_btree_key *high;
2015 int n;
2016
2017 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2018 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2019 cur->bc_ops->key_len / 2);
2020
2021 max_hkey = xfs_btree_high_key_addr(cur, 1, block);
2022 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
2023 hkey = xfs_btree_high_key_addr(cur, n, block);
2024 if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
2025 max_hkey = hkey;
2026 }
2027
2028 high = xfs_btree_high_key_from_key(cur, key);
2029 memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
2030 } else {
2031 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2032 cur->bc_ops->key_len);
2033 }
2034}
2035
2036
2037STATIC void
2038xfs_btree_get_keys(
2039 struct xfs_btree_cur *cur,
2040 struct xfs_btree_block *block,
2041 union xfs_btree_key *key)
2042{
2043 if (be16_to_cpu(block->bb_level) == 0)
2044 xfs_btree_get_leaf_keys(cur, block, key);
2045 else
2046 xfs_btree_get_node_keys(cur, block, key);
2047}
2048
2049
2050
2051
2052
2053
2054
2055
2056static inline bool
2057xfs_btree_needs_key_update(
2058 struct xfs_btree_cur *cur,
2059 int ptr)
2060{
2061 return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
2062}
2063
2064
2065
2066
2067
2068
/*
 * Update the low and high parent keys of the given level, progressing
 * towards the root.  Only used for overlapping btrees.  Stops early when
 * the parent's keys already match (unless force_all), since nothing
 * further up can have changed either.
 */
STATIC int
__xfs_btree_updkeys(
	struct xfs_btree_cur	*cur,
	int			level,
	struct xfs_btree_block	*block,
	struct xfs_buf		*bp0,
	bool			force_all)
{
	union xfs_btree_key	key;	/* keys from current level */
	union xfs_btree_key	*lkey;	/* keys from the next level up */
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*nlkey;	/* keys from the next level up */
	union xfs_btree_key	*nhkey;
	struct xfs_buf		*bp;
	int			ptr;

	ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);

	/* Exit if there aren't any parent levels to update. */
	if (level + 1 >= cur->bc_nlevels)
		return 0;

	trace_xfs_btree_updkeys(cur, level, bp0);

	/* key holds low key in its first half, high key in its second. */
	lkey = &key;
	hkey = xfs_btree_high_key_from_key(cur, lkey);
	xfs_btree_get_keys(cur, block, lkey);
	for (level++; level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
		trace_xfs_btree_updkeys(cur, level, bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
#endif
		ptr = cur->bc_ptrs[level];
		nlkey = xfs_btree_key_addr(cur, ptr, block);
		nhkey = xfs_btree_high_key_addr(cur, ptr, block);
		/* Stop if the parent already has the right keys. */
		if (!force_all &&
		    !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
		      cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
			break;
		/* Copy the composite (low+high) key into the parent slot. */
		xfs_btree_copy_keys(cur, nlkey, lkey, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
		if (level + 1 >= cur->bc_nlevels)
			break;
		/* Re-derive this level's keys for the next parent up. */
		xfs_btree_get_node_keys(cur, block, lkey);
	}

	return 0;
}
2125
2126
2127STATIC int
2128xfs_btree_updkeys_force(
2129 struct xfs_btree_cur *cur,
2130 int level)
2131{
2132 struct xfs_buf *bp;
2133 struct xfs_btree_block *block;
2134
2135 block = xfs_btree_get_block(cur, level, &bp);
2136 return __xfs_btree_updkeys(cur, level, block, bp, true);
2137}
2138
2139
2140
2141
/*
 * Update the parent keys of the given level, progressing towards the
 * root.  Overlapping btrees delegate to __xfs_btree_updkeys; for
 * ordinary btrees only the first-entry (low) keys need propagating,
 * and we stop at the first level where our entry isn't number 1.
 */
STATIC int
xfs_btree_update_keys(
	struct xfs_btree_cur	*cur,
	int			level)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_key	*kp;
	union xfs_btree_key	key;
	int			ptr;

	ASSERT(level >= 0);

	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
		return __xfs_btree_updkeys(cur, level, block, bp, false);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIK(cur, level, keyp);

	/*
	 * Go up the tree from this level toward the root.
	 * At each level, update the key value to the value input.
	 * Stop when we reach a level where the cursor isn't pointing
	 * at the first entry in the block.
	 */
	xfs_btree_get_keys(cur, block, &key);
	for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
		int		error;
#endif
		block = xfs_btree_get_block(cur, level, &bp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, level, bp);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
#endif
		ptr = cur->bc_ptrs[level];
		kp = xfs_btree_key_addr(cur, ptr, block);
		xfs_btree_copy_keys(cur, kp, &key, 1);
		xfs_btree_log_keys(cur, bp, ptr, ptr);
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}
2190
2191
2192
2193
2194
2195
/*
 * Update the record referred to by cur to the value in the given record.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
int
xfs_btree_update(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	int			error;
	int			ptr;
	union xfs_btree_rec	*rp;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGR(cur, rec);

	/* Pick up the current block. */
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		goto error0;
#endif

	/* Get the address of the rec to be updated. */
	ptr = cur->bc_ptrs[0];
	rp = xfs_btree_rec_addr(cur, ptr, block);

	/* Fill in the new contents and log them. */
	xfs_btree_copy_recs(cur, rp, rec, 1);
	xfs_btree_log_recs(cur, bp, ptr, ptr);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, 0)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_UPDATE);
	}

	/* Pass new key value up to our parent. */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, 0);
		if (error)
			goto error0;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
2249
2250
2251
2252
2253
/*
 * Move 1 record left from cur/level if possible.
 * Update cur to reflect the new path.
 * *stat is 1 if a record was moved, 0 if the shift wasn't possible.
 */
STATIC int					/* error */
xfs_btree_lshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs;		/* left record count */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			rrecs;		/* right record count */
	union xfs_btree_ptr	lptr;		/* left btree pointer */
	union xfs_btree_key	*rkp = NULL;	/* right btree key */
	union xfs_btree_ptr	*rpp = NULL;	/* right address pointer */
	union xfs_btree_rec	*rrp = NULL;	/* right record pointer */
	int			error;		/* error return value */
	int			i;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* The root in an inode has no left sibling to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1)
		goto out0;

	/* Set up variables for this block as "right". */
	right = xfs_btree_get_block(cur, level, &rbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, right, level, rbp);
	if (error)
		goto error0;
#endif

	/* If we've got no left sibling then we can't shift an entry left. */
	xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &lptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	if (cur->bc_ptrs[level] <= 1)
		goto out0;

	/* Set up the left neighbor as "left". */
	error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	lrecs = xfs_btree_get_numrecs(left);
	if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	rrecs = xfs_btree_get_numrecs(right);

	/*
	 * We add one entry to the left side and remove one for the right side.
	 * Account for it here, the changes will be updated on disk and logged
	 * later.
	 */
	lrecs++;
	rrecs--;

	XFS_BTREE_STATS_INC(cur, lshift);
	XFS_BTREE_STATS_ADD(cur, moves, 1);

	/*
	 * If non-leaf, copy a key and a ptr to the left block.
	 * Log the changes to the left block.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);

		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, rpp, 0, level);
		if (error)
			goto error0;
#endif
		xfs_btree_copy_keys(cur, lkp, rkp, 1);
		xfs_btree_copy_ptrs(cur, lpp, rpp, 1);

		xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->keys_inorder(cur,
			xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, 1);
		xfs_btree_log_recs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->recs_inorder(cur,
			xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
	}

	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Slide the contents of the right block one entry left,
	 * filling the hole we just made.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
	if (level > 0) {
		/* It's a non-leaf.  operate on keys and ptrs */
#ifdef DEBUG
		int			i;	/* loop index */

		for (i = 0; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
			if (error)
				goto error0;
		}
#endif
		xfs_btree_shift_keys(cur,
				xfs_btree_key_addr(cur, 2, right),
				-1, rrecs);
		xfs_btree_shift_ptrs(cur,
				xfs_btree_ptr_addr(cur, 2, right),
				-1, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
	} else {
		/* It's a leaf.  operate on records */
		xfs_btree_shift_recs(cur,
			xfs_btree_rec_addr(cur, 2, right),
			-1, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);
	}

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the left.
	 */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error1;

		/* Update the parent high keys of the left block, if needed. */
		error = xfs_btree_update_keys(tcur, level);
		if (error)
			goto error1;

		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(cur, level);
	if (error)
		goto error0;

	/* Slide the cursor value left one. */
	cur->bc_ptrs[level]--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;

error1:
	XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
2453
2454
2455
2456
2457
/*
 * Move 1 record right from cur/level if possible.
 * Update cur to reflect the new path.
 * *stat is 1 if a record was moved, 0 if the shift wasn't possible.
 */
STATIC int					/* error */
xfs_btree_rshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	union xfs_btree_ptr	rptr;		/* right block pointer */
	union xfs_btree_key	*rkp;		/* right btree key */
	int			rrecs;		/* right record count */
	int			lrecs;		/* left record count */
	int			error;		/* error return value */
	int			i;		/* loop counter */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* The root in an inode has no right sibling to shift into. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1))
		goto out0;

	/* Set up variables for this block as "left". */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	/* If we've got no right sibling then we can't shift an entry right. */
	xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &rptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	if (cur->bc_ptrs[level] >= lrecs)
		goto out0;

	/* Set up the right neighbor as "right". */
	error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	rrecs = xfs_btree_get_numrecs(right);
	if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, rshift);
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/*
	 * Make a hole at the start of the right neighbor block, then
	 * copy the last left block entry to the hole.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

#ifdef DEBUG
		for (i = rrecs - 1; i >= 0; i--) {
			error = xfs_btree_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}
#endif

		xfs_btree_shift_keys(cur, rkp, 1, rrecs);
		xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);

#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, lpp, 0, level);
		if (error)
			goto error0;
#endif

		/* Now put the new data in, and log it. */
		xfs_btree_copy_keys(cur, rkp, lkp, 1);
		xfs_btree_copy_ptrs(cur, rpp, lpp, 1);

		xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);

		ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
			xfs_btree_key_addr(cur, 2, right)));
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_shift_recs(cur, rrp, 1, rrecs);

		/* Now put the new data in, and log it. */
		xfs_btree_copy_recs(cur, rrp, lrp, 1);
		xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
	}

	/*
	 * Decrement and log left's numrecs, bump and log right's numrecs.
	 */
	xfs_btree_set_numrecs(left, --lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, ++rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the right.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;
	i = xfs_btree_lastrec(tcur, level);
	XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

	error = xfs_btree_increment(tcur, level, &i);
	if (error)
		goto error1;

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error1;
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(tcur, level);
	if (error)
		goto error1;

	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;

error1:
	XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
2629
2630
2631
2632
2633
2634
/*
 * Split cur/level block in half.
 * Return new block number and the key to its first record.
 * Return 0 if can't split any more.
 */
STATIC int					/* error */
__xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,	/* out: ptr to the new (right) block */
	union xfs_btree_key	*key,	/* out: first key of the new block */
	struct xfs_btree_cur	**curp,	/* out: cursor for the parent level */
	int			*stat)		/* success/failure */
{
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	union xfs_btree_ptr	rrptr;		/* right-right sibling ptr */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	int			lrecs;
	int			rrecs;
	int			src_index;
	int			error;		/* error return value */
#ifdef DEBUG
	int			i;
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);

	XFS_BTREE_STATS_INC(cur, split);

	/* Set up left block (current one). */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	xfs_btree_buf_to_ptr(cur, lbp, &lptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block as "right". */
	error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* Fill in the btree header for the new right block. */
	xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);

	/*
	 * Split the entries between the old and the new block evenly.
	 * Make sure that if there's an odd number of entries now, that
	 * each new block will have the same number of entries.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	rrecs = lrecs / 2;
	if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
		rrecs++;
	src_index = (lrecs - rrecs + 1);

	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/* Adjust numrecs for the later get_*_keys() calls. */
	lrecs -= rrecs;
	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);

	/*
	 * Copy btree block entries from the left block over to the
	 * new block, the right. Update the right block and log the
	 * changes.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, src_index, left);
		lpp = xfs_btree_ptr_addr(cur, src_index, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

#ifdef DEBUG
		for (i = src_index; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}
#endif

		/* Copy the keys & pointers to the new block. */
		xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
		xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_node_keys(cur, right, key);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, src_index, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		/* Copy records to the new block. */
		xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_leaf_keys(cur, right, key);
	}

	/*
	 * Find the left block number by looking in the buffer.
	 * Adjust sibling pointers: old-right becomes right-right,
	 * new block goes between left and old-right.
	 */
	xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);

	xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/*
	 * If there's a block to the new block's right, make that block
	 * point back to right instead of to left.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
		error = xfs_btree_read_buf_block(cur, &rrptr,
							0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the cursor is really in the right block, move it there.
	 * If it's just pointing past the last entry in left, then we'll
	 * insert there, so don't change anything in that case.
	 */
	if (cur->bc_ptrs[level] > lrecs + 1) {
		xfs_btree_setbuf(cur, level, rbp);
		cur->bc_ptrs[level] -= lrecs;
	}
	/*
	 * If there are more levels, we'll need another cursor which refers
	 * the right block, no matter where this cursor was.
	 */
	if (level + 1 < cur->bc_nlevels) {
		error = xfs_btree_dup_cursor(cur, curp);
		if (error)
			goto error0;
		(*curp)->bc_ptrs[level + 1]++;
	}
	*ptrp = rptr;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
2825
/*
 * Argument bundle for deferring a btree split to a workqueue worker
 * (see xfs_btree_split/xfs_btree_split_worker).  Mirrors the
 * parameters of __xfs_btree_split plus completion plumbing.
 */
struct xfs_btree_split_args {
	struct xfs_btree_cur	*cur;		/* cursor to split at */
	int			level;		/* level being split */
	union xfs_btree_ptr	*ptrp;		/* out: new block ptr */
	union xfs_btree_key	*key;		/* out: new block's first key */
	struct xfs_btree_cur	**curp;		/* out: parent-level cursor */
	int			*stat;		/* out: success/failure */
	int			result;		/* error from __xfs_btree_split */
	bool			kswapd;	/* allocation in kswapd context */
	struct completion	*done;		/* signalled when split finishes */
	struct work_struct	work;		/* workqueue item */
};
2838
2839
2840
2841
/*
 * Stack switching interface for the btree split: runs __xfs_btree_split
 * from a workqueue worker thread and signals completion back to the
 * waiting caller.
 */
static void
xfs_btree_split_worker(
	struct work_struct	*work)
{
	struct xfs_btree_split_args	*args = container_of(work,
						struct xfs_btree_split_args, work);
	unsigned long		pflags;
	unsigned long		new_pflags = PF_FSTRANS;

	/*
	 * we are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory reclaim
	 * in any way.
	 */
	if (args->kswapd)
		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	current_set_flags_nested(&pflags, new_pflags);

	args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
					 args->key, args->curp, args->stat);
	complete(args->done);

	current_restore_flags_nested(&pflags, new_pflags);
}
2868
2869
2870
2871
2872
2873
/*
 * BMBT split requests often come in with little stack to work on, so
 * push the work off to a kernel thread to avoid stack overflows.
 * Other btree types split directly on the caller's stack.
 */
STATIC int					/* error */
xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_split_args	args;
	DECLARE_COMPLETION_ONSTACK(done);

	/* Only bmap btree splits are deferred to a worker. */
	if (cur->bc_btnum != XFS_BTNUM_BMAP)
		return __xfs_btree_split(cur, level, ptrp, key, curp, stat);

	args.cur = cur;
	args.level = level;
	args.ptrp = ptrp;
	args.key = key;
	args.curp = curp;
	args.stat = stat;
	args.done = &done;
	args.kswapd = current_is_kswapd();
	INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
	queue_work(xfs_alloc_wq, &args.work);
	/* Synchronous: args and done live on this stack frame. */
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}
2903
2904
2905
2906
2907
2908
/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.  Grows an inode-rooted btree by one level:
 * the iroot keeps a single key/ptr pair pointing at the new child.
 */
int						/* error */
xfs_btree_new_iroot(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			*logflags,	/* logging flags for inode */
	int			*stat)		/* return status - 0 fail */
{
	struct xfs_buf		*cbp;		/* buffer for cblock */
	struct xfs_btree_block	*block;		/* btree block */
	struct xfs_btree_block	*cblock;	/* child btree block */
	union xfs_btree_key	*ckp;		/* child key pointer */
	union xfs_btree_ptr	*cpp;		/* child ptr pointer */
	union xfs_btree_key	*kp;		/* pointer to btree key */
	union xfs_btree_ptr	*pp;		/* pointer to block addr */
	union xfs_btree_ptr	nptr;		/* new block addr */
	int			level;		/* btree level */
	int			error;		/* error return code */
#ifdef DEBUG
	int			i;		/* loop counter */
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, newroot);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);

	level = cur->bc_nlevels - 1;

	block = xfs_btree_get_iroot(cur);
	pp = xfs_btree_ptr_addr(cur, 1, block);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
	if (error)
		goto error0;
	if (*stat == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		return 0;
	}
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Copy the root into a real block. */
	error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
	if (error)
		goto error0;

	/*
	 * we can't just memcpy() the root in for CRC enabled btree blocks.
	 * In that case have to get the real block number into the header.
	 */
	memcpy(cblock, block, xfs_btree_block_len(cur));
	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
		if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
			cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
		else
			cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
	}

	/* The tree grew a level; root now has exactly one entry. */
	be16_add_cpu(&block->bb_level, 1);
	xfs_btree_set_numrecs(block, 1);
	cur->bc_nlevels++;
	cur->bc_ptrs[level + 1] = 1;

	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));

	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
		error = xfs_btree_check_ptr(cur, pp, i, level);
		if (error)
			goto error0;
	}
#endif
	xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));

#ifdef DEBUG
	error = xfs_btree_check_ptr(cur, &nptr, 0, level);
	if (error)
		goto error0;
#endif
	/* Point the iroot's single entry at the new child block. */
	xfs_btree_copy_ptrs(cur, pp, &nptr, 1);

	/* Shrink the inode fork root down to one key/ptr pair. */
	xfs_iroot_realloc(cur->bc_private.b.ip,
			  1 - xfs_btree_get_numrecs(cblock),
			  cur->bc_private.b.whichfork);

	xfs_btree_setbuf(cur, level, cbp);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
	xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
	xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));

	*logflags |=
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
	*stat = 1;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
3015
3016
3017
3018
/*
 * Allocate a new root block, fill it in.
 */
STATIC int				/* error */
xfs_btree_new_root(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* one half of the old root block */
	struct xfs_buf		*bp;	/* buffer containing block */
	int			error;	/* error return value */
	struct xfs_buf		*lbp;	/* left buffer pointer */
	struct xfs_btree_block	*left;	/* left btree block */
	struct xfs_buf		*nbp;	/* new (root) buffer */
	struct xfs_btree_block	*new;	/* new (root) btree block */
	int			nptr;	/* new value for key index, 1 or 2 */
	struct xfs_buf		*rbp;	/* right buffer pointer */
	struct xfs_btree_block	*right;	/* right btree block */
	union xfs_btree_ptr	rptr;
	union xfs_btree_ptr	lptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, newroot);

	/* initialise our start point from the cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &rptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block. */
	error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
	if (error)
		goto error0;

	/* Set the root in the holding structure, increasing the level by 1. */
	cur->bc_ops->set_root(cur, &lptr, 1);

	/*
	 * At the previous root level there are now two blocks: the old root,
	 * and the new block generated when it was split.  We don't know which
	 * one the cursor is pointing at, so we set up variables "left" and
	 * "right" for each case.
	 */
	block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
	if (error)
		goto error0;
#endif

	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/* Our block is left, pick up the right block. */
		lbp = bp;
		xfs_btree_buf_to_ptr(cur, lbp, &lptr);
		left = block;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;
		bp = rbp;
		nptr = 1;
	} else {
		/* Our block is right, pick up the left block. */
		rbp = bp;
		xfs_btree_buf_to_ptr(cur, rbp, &rptr);
		right = block;
		xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;
		bp = lbp;
		nptr = 2;
	}

	/* Fill in the new block's btree header and log it. */
	xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
	xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
	ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
			!xfs_btree_ptr_is_null(cur, &rptr));

	/* Fill in the key data in the new root. */
	if (xfs_btree_get_level(left) > 0) {
		/*
		 * Get the keys for the left block's keys and put them directly
		 * in the parent block.  Do the same for the right block.
		 */
		xfs_btree_get_node_keys(cur, left,
				xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_node_keys(cur, right,
				xfs_btree_key_addr(cur, 2, new));
	} else {
		/*
		 * Get the keys for the left block's records and put them
		 * directly in the parent block.  Do the same for the right
		 * block.
		 */
		xfs_btree_get_leaf_keys(cur, left,
			xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_leaf_keys(cur, right,
			xfs_btree_key_addr(cur, 2, new));
	}
	xfs_btree_log_keys(cur, nbp, 1, 2);

	/* Fill in the pointer data in the new root. */
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
	xfs_btree_log_ptrs(cur, nbp, 1, 2);

	/* Fix up the cursor. */
	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
	cur->bc_ptrs[cur->bc_nlevels] = nptr;
	cur->bc_nlevels++;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;
}
3148
/*
 * Make sure the cursor's target block at @level has room for one more
 * entry.  In order of preference: grow an in-inode root, replace the
 * in-inode root with a real block, shift an entry right, shift left,
 * and finally split the block.  On success *stat is 1; the out
 * parameters describe where the insert should now happen (a split hands
 * back a new block pointer, key and cursor).
 */
STATIC int
xfs_btree_make_block_unfull(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* btree level */
	int			numrecs,/* # of recs in block */
	int			*oindex,/* old tree index */
	int			*index,	/* new tree index */
	union xfs_btree_ptr	*nptr,	/* new btree ptr */
	struct xfs_btree_cur	**ncur,	/* new btree cursor */
	union xfs_btree_key	*key,	/* key of new block */
	int			*stat)
{
	int			error = 0;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_inode *ip = cur->bc_private.b.ip;

		if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
			/* A root block that can be made bigger. */
			xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
			*stat = 1;
		} else {
			/* A root block that needs replacing */
			int	logflags = 0;

			error = xfs_btree_new_iroot(cur, &logflags, stat);
			if (error || *stat == 0)
				return error;

			xfs_trans_log_inode(cur->bc_tp, ip, logflags);
		}

		return 0;
	}

	/* First, try shifting an entry to the right neighbor. */
	error = xfs_btree_rshift(cur, level, stat);
	if (error || *stat)
		return error;

	/* Next, try shifting an entry to the left neighbor. */
	error = xfs_btree_lshift(cur, level, stat);
	if (error)
		return error;

	if (*stat) {
		*oindex = *index = cur->bc_ptrs[level];
		return 0;
	}

	/*
	 * Next, try splitting the current block in half.
	 *
	 * If this works we have to re-set our variables because we
	 * could be in a different block now.
	 */
	error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
	if (error || *stat == 0)
		return error;

	/* The split succeeded; pick up the possibly-moved insert index. */
	*index = cur->bc_ptrs[level];
	return 0;
}
3214
3215
3216
3217
3218
/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int
xfs_btree_insrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	union xfs_btree_ptr	*ptrp,	/* i/o: block number inserted */
	union xfs_btree_rec	*rec,	/* record to insert */
	union xfs_btree_key	*key,	/* i/o: block key for ptrp */
	struct xfs_btree_cur	**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* btree block */
	struct xfs_buf		*bp;	/* buffer for block */
	union xfs_btree_ptr	nptr;	/* new block ptr */
	struct xfs_btree_cur	*ncur;	/* new btree cursor */
	union xfs_btree_key	nkey;	/* new block key */
	union xfs_btree_key	*lkey;
	int			optr;	/* old key/record index */
	int			ptr;	/* key/record index */
	int			numrecs;/* number of records */
	int			error;	/* error return value */
#ifdef DEBUG
	int			i;
#endif
	xfs_daddr_t		old_bn;	/* block number before any split */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, &rec);

	ncur = NULL;
	lkey = &nkey;

	/*
	 * If we have an external root pointer, and we've made it to the
	 * root level, allocate a new root block and we're done.
	 */
	if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level >= cur->bc_nlevels)) {
		error = xfs_btree_new_root(cur, stat);
		xfs_btree_set_ptr_null(cur, ptrp);

		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		return error;
	}

	/* If we're off the left edge, return failure. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	optr = ptr;

	XFS_BTREE_STATS_INC(cur, insrec);

	/* Get pointers to the btree buffer and block. */
	block = xfs_btree_get_block(cur, level, &bp);
	old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;

	/* Check that the new entry is being inserted in the right place. */
	if (ptr <= numrecs) {
		if (level == 0) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rec,
				xfs_btree_rec_addr(cur, ptr, block)));
		} else {
			ASSERT(cur->bc_ops->keys_inorder(cur, key,
				xfs_btree_key_addr(cur, ptr, block)));
		}
	}
#endif

	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
	xfs_btree_set_ptr_null(cur, &nptr);
	if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
		error = xfs_btree_make_block_unfull(cur, level, numrecs,
					&optr, &ptr, &nptr, &ncur, lkey, stat);
		if (error || *stat == 0)
			goto error0;
	}

	/*
	 * The current block may have changed if the block was
	 * previously full and we have just made space in it.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		return error;
#endif

	/*
	 * At this point we know there's room for our new entry in the block
	 * we're pointing at.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);

	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*kp;
		union xfs_btree_ptr	*pp;

		kp = xfs_btree_key_addr(cur, ptr, block);
		pp = xfs_btree_ptr_addr(cur, ptr, block);

#ifdef DEBUG
		for (i = numrecs - ptr; i >= 0; i--) {
			error = xfs_btree_check_ptr(cur, pp, i, level);
			if (error)
				return error;
		}
#endif

		xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
		xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);

#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, ptrp, 0, level);
		if (error)
			goto error0;
#endif

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_keys(cur, kp, key, 1);
		xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
		numrecs++;
		xfs_btree_set_numrecs(block, numrecs);
		xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
		xfs_btree_log_keys(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->keys_inorder(cur, kp,
				xfs_btree_key_addr(cur, ptr + 1, block)));
		}
#endif
	} else {
		/* It's a leaf entry. Make a hole for the new record. */
		union xfs_btree_rec	*rp;

		rp = xfs_btree_rec_addr(cur, ptr, block);

		xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_recs(cur, rp, rec, 1);
		xfs_btree_set_numrecs(block, ++numrecs);
		xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rp,
				xfs_btree_rec_addr(cur, ptr + 1, block)));
		}
#endif
	}

	/* Log the new number of records in the btree header. */
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we just inserted into a new tree block, we have to
	 * recalculate nkey here because nkey is out of date.
	 *
	 * Otherwise we're just updating an existing block (having shoved
	 * some records into the new tree block), so use the regular key
	 * update mechanism.
	 */
	if (bp && bp->b_bn != old_bn) {
		xfs_btree_get_keys(cur, block, lkey);
	} else if (xfs_btree_needs_key_update(cur, optr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					ptr, LASTREC_INSREC);
	}

	/*
	 * Return the new block number, if any.
	 * If there is one, give back a record value and a cursor too.
	 */
	*ptrp = nptr;
	if (!xfs_btree_ptr_is_null(cur, &nptr)) {
		xfs_btree_copy_keys(cur, key, lkey, 1);
		*curp = ncur;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
3432
3433
3434
3435
3436
3437
3438
3439
/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor.  All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 */
int
xfs_btree_insert(
	struct xfs_btree_cur	*cur,
	int			*stat)
{
	int			error;	/* error return value */
	int			i;	/* result value, 0 for failure */
	int			level;	/* current level number in btree */
	union xfs_btree_ptr	nptr;	/* new block number (split result) */
	struct xfs_btree_cur	*ncur;	/* new cursor (split result) */
	struct xfs_btree_cur	*pcur;	/* previous level's cursor */
	union xfs_btree_key	bkey;	/* key of block to insert */
	union xfs_btree_key	*key;
	union xfs_btree_rec	rec;	/* record to insert */

	level = 0;
	ncur = NULL;
	pcur = cur;
	key = &bkey;

	xfs_btree_set_ptr_null(cur, &nptr);

	/* Make a key out of the record data to be inserted, and save it. */
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(key, &rec);

	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split block, that must mean that
	 * the insert is finished with this level.
	 */
	do {
		/*
		 * Insert nrec/nptr into this level of the tree.
		 * Note if we fail, nptr will be null.
		 */
		error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
				&ncur, &i);
		if (error) {
			if (pcur != cur)
				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
			goto error0;
		}

		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		level++;

		/*
		 * See if the cursor we just used is trash.
		 * Can't trash the caller's cursor, but otherwise we should
		 * if ncur is a new cursor or we're about to be done.
		 */
		if (pcur != cur &&
		    (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
			/* Save the state from the cursor before we trash it */
			if (cur->bc_ops->update_cursor)
				cur->bc_ops->update_cursor(pcur, cur);
			cur->bc_nlevels = pcur->bc_nlevels;
			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
		}
		/* If we got a new cursor, switch to it. */
		if (ncur) {
			pcur = ncur;
			ncur = NULL;
		}
	} while (!xfs_btree_ptr_is_null(cur, &nptr));

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = i;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
3514
3515
3516
3517
3518
3519
3520
3521
3522
/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the killroot name comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * inode we have to copy the single block it was pointing to into the
 * inode.
 */
STATIC int
xfs_btree_kill_iroot(
	struct xfs_btree_cur	*cur)
{
	int			whichfork = cur->bc_private.b.whichfork;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*cblock;
	union xfs_btree_key	*kp;
	union xfs_btree_key	*ckp;
	union xfs_btree_ptr	*pp;
	union xfs_btree_ptr	*cpp;
	struct xfs_buf		*cbp;
	int			level;
	int			index;
	int			numrecs;
	int			error;
#ifdef DEBUG
	union xfs_btree_ptr	ptr;
	int			i;
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
	ASSERT(cur->bc_nlevels > 1);

	/*
	 * Don't deal with the root block needs to be a leaf case.
	 * We're just going to turn the thing back into extents anyway.
	 */
	level = cur->bc_nlevels - 1;
	if (level == 1)
		goto out0;

	/*
	 * Give up if the root has multiple children.
	 */
	block = xfs_btree_get_iroot(cur);
	if (xfs_btree_get_numrecs(block) != 1)
		goto out0;

	cblock = xfs_btree_get_block(cur, level - 1, &cbp);
	numrecs = xfs_btree_get_numrecs(cblock);

	/*
	 * Only do this if the next level will fit.
	 * Then the data must be copied up to the inode,
	 * instead of freeing the root you free the next level.
	 */
	if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, killroot);

#ifdef DEBUG
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif

	/* Grow the root to hold the child's entries if necessary. */
	index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
	if (index) {
		xfs_iroot_realloc(cur->bc_private.b.ip, index,
				  cur->bc_private.b.whichfork);
		block = ifp->if_broot;
	}

	be16_add_cpu(&block->bb_numrecs, index);
	ASSERT(block->bb_numrecs == cblock->bb_numrecs);

	/* Copy the child's keys and pointers up into the root. */
	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, kp, ckp, numrecs);

	pp = xfs_btree_ptr_addr(cur, 1, block);
	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
	for (i = 0; i < numrecs; i++) {
		error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
	}
#endif
	xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);

	/* Free the now-merged child block. */
	error = xfs_btree_free_block(cur, cbp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	cur->bc_bufs[level - 1] = NULL;
	be16_add_cpu(&block->bb_level, -1);
	xfs_trans_log_inode(cur->bc_tp, ip,
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	cur->bc_nlevels--;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}
3628
3629
3630
3631
/*
 * Kill the old root node for an overflowed btree.
 */
STATIC int
xfs_btree_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	union xfs_btree_ptr	*newroot)
{
	int			error;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, killroot);

	/*
	 * Update the root pointer, decreasing the level by 1 and then
	 * free the old root.
	 */
	cur->bc_ops->set_root(cur, newroot, -1);

	error = xfs_btree_free_block(cur, bp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	/* The cursor no longer references the freed block. */
	cur->bc_bufs[level] = NULL;
	cur->bc_ra[level] = 0;
	cur->bc_nlevels--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}
3663
3664STATIC int
3665xfs_btree_dec_cursor(
3666 struct xfs_btree_cur *cur,
3667 int level,
3668 int *stat)
3669{
3670 int error;
3671 int i;
3672
3673 if (level > 0) {
3674 error = xfs_btree_decrement(cur, level, &i);
3675 if (error)
3676 return error;
3677 }
3678
3679 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3680 *stat = 1;
3681 return 0;
3682}
3683
3684
3685
3686
3687
3688
3689
/*
 * Single level of the btree record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int					/* error */
xfs_btree_delrec(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			level,		/* level removing record from */
	int			*stat)		/* fail/done/go-on */
{
	struct xfs_btree_block	*block;		/* btree block */
	union xfs_btree_ptr	cptr;		/* current block ptr */
	struct xfs_buf		*bp;		/* buffer for block */
	int			error;		/* error return value */
	int			i;		/* loop counter */
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs = 0;	/* left record count */
	int			ptr;		/* key/record index */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	int			rrecs = 0;	/* right record count */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			numrecs;	/* temporary numrec count */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	tcur = NULL;

	/* Get the index of the entry being deleted, check for nothing there. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	/* Get the buffer & block containing the record or key/ptr. */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we're off the end of the block. */
	if (ptr > numrecs) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	XFS_BTREE_STATS_INC(cur, delrec);
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);

	/* Excise the entries being deleted. */
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;

		lkp = xfs_btree_key_addr(cur, ptr + 1, block);
		lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);

#ifdef DEBUG
		for (i = 0; i < numrecs - ptr; i++) {
			error = xfs_btree_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}
#endif

		if (ptr < numrecs) {
			xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
			xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
			xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
			xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
		}
	} else {
		/* It's a leaf. operate on records */
		if (ptr < numrecs) {
			xfs_btree_shift_recs(cur,
				xfs_btree_rec_addr(cur, ptr + 1, block),
				-1, numrecs - ptr);
			xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
		}
	}

	/*
	 * Decrement and log the number of entries in the block.
	 */
	xfs_btree_set_numrecs(block, --numrecs);
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, NULL,
					ptr, LASTREC_DELREC);
	}

	/*
	 * We're at the root level.  First, shrink the root block in-memory.
	 * Try to get rid of the next level down.  If we can't then there's
	 * nothing left to do.
	 */
	if (level == cur->bc_nlevels - 1) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			xfs_iroot_realloc(cur->bc_private.b.ip, -1,
					  cur->bc_private.b.whichfork);

			error = xfs_btree_kill_iroot(cur);
			if (error)
				goto error0;

			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			*stat = 1;
			return 0;
		}

		/*
		 * If this is the root level, and there's only one entry left,
		 * and it's NOT the leaf level, then we can get rid of this
		 * level.
		 */
		if (numrecs == 1 && level > 0) {
			union xfs_btree_ptr	*pp;
			/*
			 * pp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			pp = xfs_btree_ptr_addr(cur, 1, block);
			error = xfs_btree_kill_root(cur, bp, level, pp);
			if (error)
				goto error0;
		} else if (level > 0) {
			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
		}
		*stat = 1;
		return 0;
	}

	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
		/*
		 * One child of root, need to get a chance to copy its contents
		 * into the root and delete it. Can't go up to next level,
		 * there's nothing to delete there.
		 */
		if (xfs_btree_ptr_is_null(cur, &rptr) &&
		    xfs_btree_ptr_is_null(cur, &lptr) &&
		    level == cur->bc_nlevels - 2) {
			error = xfs_btree_kill_iroot(cur);
			if (!error)
				error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			return 0;
		}
	}

	ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
	       !xfs_btree_ptr_is_null(cur, &lptr));

	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;

	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_increment(tcur, level, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		right = xfs_btree_get_block(tcur, level, &rbp);
#ifdef DEBUG
		error = xfs_btree_check_block(tcur, right, level, rbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);

		/*
		 * If right block is full enough so that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(right) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_lshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));

				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;

				error = xfs_btree_dec_cursor(cur, level, stat);
				if (error)
					goto error0;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = xfs_btree_get_numrecs(right);
		if (!xfs_btree_ptr_is_null(cur, &lptr)) {
			i = xfs_btree_firstrec(tcur, level);
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

			error = xfs_btree_decrement(tcur, level, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		}
	}

	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &lptr)) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		left = xfs_btree_get_block(tcur, level, &lbp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, left, level, lbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);

		/*
		 * If left block is full enough so that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(left) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_rshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;
				if (level == 0)
					cur->bc_ptrs[0]++;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 1;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in left for
		 * future reference.
		 */
		lrecs = xfs_btree_get_numrecs(left);
	}

	/* Delete the temp cursor, we're done with it. */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	tcur = NULL;

	/* If here, we need to do a join to keep the tree balanced. */
	ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));

	if (!xfs_btree_ptr_is_null(cur, &lptr) &&
	    lrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rptr = cptr;
		right = block;
		rbp = bp;
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;

	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
		   rrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lptr = cptr;
		left = block;
		lbp = bp;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;

	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	} else {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	rrecs = xfs_btree_get_numrecs(right);
	lrecs = xfs_btree_get_numrecs(left);

	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
		for (i = 1; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}
#endif
		xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
		xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);

		xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
		xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}

	XFS_BTREE_STATS_INC(cur, join);

	/*
	 * Fix up the number of records and right block pointer in the
	 * surviving block, and log it.
	 */
	xfs_btree_set_numrecs(left, lrecs + rrecs);
	xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB),
	xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/* If there is a right sibling, point it to the remaining block. */
	xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &cptr)) {
		error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Free the deleted block. */
	error = xfs_btree_free_block(cur, rbp);
	if (error)
		goto error0;

	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		cur->bc_bufs[level] = lbp;
		cur->bc_ptrs[level] += lrecs;
		cur->bc_ra[level] = 0;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
		   (level + 1 < cur->bc_nlevels)) {
		error = xfs_btree_increment(cur, level + 1, &i);
		if (error)
			goto error0;
	}

	/*
	 * Readjust the ptr at this level if it's not a leaf, since it's
	 * still pointing at the deletion point, which makes the cursor
	 * inconsistent.  If this makes the ptr 0, the caller fixes it up.
	 * We can't use decrement because it would change the next level up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;

	/*
	 * We combined blocks, so we have to update the parent keys if the
	 * btree supports overlapped intervals.  However, bc_ptrs[level + 1]
	 * still points to the old block so that the caller knows which
	 * record to delete.  Therefore, the caller must be savvy enough to
	 * call updkeys for us if we return stat == 2.  The other exit points
	 * from this function don't require deletions further up the tree,
	 * so they can call updkeys directly.
	 */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	/* Return value means the next level up has work to do. */
	*stat = 2;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	if (tcur)
		xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}
4196
4197
4198
4199
4200
4201
/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
int					/* error */
xfs_btree_delete(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			level;	/* btree level being deleted from */
	int			i;	/* delrec result: 0 fail/1 done/2 go up */
	bool			joined = false;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/*
	 * Go up the tree, starting at leaf level.
	 *
	 * If 2 is returned then a join was done; go to the next level.
	 * Otherwise we are done.
	 */
	for (level = 0, i = 2; i == 2; level++) {
		error = xfs_btree_delrec(cur, level, &i);
		if (error)
			goto error0;
		if (i == 2)
			joined = true;
	}

	/*
	 * If we combined blocks as part of deleting the record, delrec won't
	 * have updated the parent high keys so we have to do that here.
	 */
	if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
		error = xfs_btree_updkeys_force(cur, 0);
		if (error)
			goto error0;
	}

	/* If the cursor fell off the left edge, fix it up per level. */
	if (i == 0) {
		for (level = 1; level < cur->bc_nlevels; level++) {
			if (cur->bc_ptrs[level] == 0) {
				error = xfs_btree_decrement(cur, level, &i);
				if (error)
					goto error0;
				break;
			}
		}
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = i;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
4256
4257
4258
4259
4260int
4261xfs_btree_get_rec(
4262 struct xfs_btree_cur *cur,
4263 union xfs_btree_rec **recp,
4264 int *stat)
4265{
4266 struct xfs_btree_block *block;
4267 struct xfs_buf *bp;
4268 int ptr;
4269#ifdef DEBUG
4270 int error;
4271#endif
4272
4273 ptr = cur->bc_ptrs[0];
4274 block = xfs_btree_get_block(cur, 0, &bp);
4275
4276#ifdef DEBUG
4277 error = xfs_btree_check_block(cur, block, 0, bp);
4278 if (error)
4279 return error;
4280#endif
4281
4282
4283
4284
4285 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
4286 *stat = 0;
4287 return 0;
4288 }
4289
4290
4291
4292
4293 *recp = xfs_btree_rec_addr(cur, ptr, block);
4294 *stat = 1;
4295 return 0;
4296}
4297
4298
4299STATIC int
4300xfs_btree_visit_block(
4301 struct xfs_btree_cur *cur,
4302 int level,
4303 xfs_btree_visit_blocks_fn fn,
4304 void *data)
4305{
4306 struct xfs_btree_block *block;
4307 struct xfs_buf *bp;
4308 union xfs_btree_ptr rptr;
4309 int error;
4310
4311
4312 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4313 block = xfs_btree_get_block(cur, level, &bp);
4314
4315
4316 error = fn(cur, level, data);
4317 if (error)
4318 return error;
4319
4320
4321 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4322 if (xfs_btree_ptr_is_null(cur, &rptr))
4323 return -ENOENT;
4324
4325 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4326}
4327
4328
4329
4330int
4331xfs_btree_visit_blocks(
4332 struct xfs_btree_cur *cur,
4333 xfs_btree_visit_blocks_fn fn,
4334 void *data)
4335{
4336 union xfs_btree_ptr lptr;
4337 int level;
4338 struct xfs_btree_block *block = NULL;
4339 int error = 0;
4340
4341 cur->bc_ops->init_ptr_from_cur(cur, &lptr);
4342
4343
4344 for (level = cur->bc_nlevels - 1; level >= 0; level--) {
4345
4346 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
4347 if (error)
4348 return error;
4349
4350
4351 if (level > 0) {
4352 union xfs_btree_ptr *ptr;
4353
4354 ptr = xfs_btree_ptr_addr(cur, 1, block);
4355 xfs_btree_readahead_ptr(cur, ptr, 1);
4356
4357
4358 lptr = *ptr;
4359 }
4360
4361
4362 do {
4363 error = xfs_btree_visit_block(cur, level, fn, data);
4364 } while (!error);
4365
4366 if (error != -ENOENT)
4367 return error;
4368 }
4369
4370 return 0;
4371}
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
/* Context handed to xfs_btree_block_change_owner() via the block walker. */
struct xfs_btree_block_change_owner_info {
	__uint64_t		new_owner;	/* owner value stamped into each block */
	struct list_head	*buffer_list;	/* delwri queue used when there is no transaction */
};
4401
/*
 * Per-block callback for xfs_btree_change_owner(): stamp the new owner
 * into the block at the cursor position and arrange for it to reach disk
 * (logged in the transaction if there is one, delwri-queued otherwise).
 */
static int
xfs_btree_block_change_owner(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	struct xfs_btree_block_change_owner_info	*bbcoi = data;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	/* modify the owner */
	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
	else
		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);

	/*
	 * If the block is a root block hosted in an inode, we might not have a
	 * buffer pointer here and we shouldn't attempt to log the change as
	 * the information is already held in the inode and discarded when the
	 * root block is formatted into the on-disk inode fork.  We still
	 * change it, though, so everything is consistent in memory.
	 */
	if (bp) {
		if (cur->bc_tp) {
			xfs_trans_ordered_buf(cur->bc_tp, bp);
			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
		} else {
			xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
		}
	} else {
		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
		ASSERT(level == cur->bc_nlevels - 1);
	}

	return 0;
}
4440
4441int
4442xfs_btree_change_owner(
4443 struct xfs_btree_cur *cur,
4444 __uint64_t new_owner,
4445 struct list_head *buffer_list)
4446{
4447 struct xfs_btree_block_change_owner_info bbcoi;
4448
4449 bbcoi.new_owner = new_owner;
4450 bbcoi.buffer_list = buffer_list;
4451
4452 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
4453 &bbcoi);
4454}
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464bool
4465xfs_btree_sblock_v5hdr_verify(
4466 struct xfs_buf *bp)
4467{
4468 struct xfs_mount *mp = bp->b_target->bt_mount;
4469 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4470 struct xfs_perag *pag = bp->b_pag;
4471
4472 if (!xfs_sb_version_hascrc(&mp->m_sb))
4473 return false;
4474 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
4475 return false;
4476 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
4477 return false;
4478 if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
4479 return false;
4480 return true;
4481}
4482
4483
4484
4485
4486
4487
4488
4489bool
4490xfs_btree_sblock_verify(
4491 struct xfs_buf *bp,
4492 unsigned int max_recs)
4493{
4494 struct xfs_mount *mp = bp->b_target->bt_mount;
4495 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4496
4497
4498 if (be16_to_cpu(block->bb_numrecs) > max_recs)
4499 return false;
4500
4501
4502 if (!block->bb_u.s.bb_leftsib ||
4503 (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
4504 block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
4505 return false;
4506 if (!block->bb_u.s.bb_rightsib ||
4507 (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
4508 block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
4509 return false;
4510
4511 return true;
4512}
4513
4514
4515
4516
4517
4518uint
4519xfs_btree_compute_maxlevels(
4520 struct xfs_mount *mp,
4521 uint *limits,
4522 unsigned long len)
4523{
4524 uint level;
4525 unsigned long maxblocks;
4526
4527 maxblocks = (len + limits[0] - 1) / limits[0];
4528 for (level = 1; maxblocks > 1; level++)
4529 maxblocks = (maxblocks + limits[1] - 1) / limits[1];
4530 return level;
4531}
4532
4533
4534
4535
4536
4537
/*
 * Query a regular btree for all records overlapping a given interval.
 * Start with a LE lookup of the interval's low key and return all records
 * that fall within the interval, invoking @fn on each one.
 */
STATIC int
xfs_btree_simple_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*low_key,
	union xfs_btree_key		*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_rec		*recp;
	union xfs_btree_key		rec_key;
	__int64_t			diff;
	int				stat;
	bool				firstrec = true;
	int				error;

	ASSERT(cur->bc_ops->init_high_key_from_rec);
	ASSERT(cur->bc_ops->diff_two_keys);

	/*
	 * Find the leftmost record.  The query_range function expects us to
	 * start from the last record that could possibly overlap the query
	 * range.
	 */
	stat = 0;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
	if (error)
		goto out;

	/* Nothing?  See if there's anything to the right. */
	if (!stat) {
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			goto out;
	}

	while (stat) {
		/* Find the record. */
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || !stat)
			break;

		/* Skip if high_key(rec) < low_key. */
		if (firstrec) {
			cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
			firstrec = false;
			diff = cur->bc_ops->diff_two_keys(cur, low_key,
					&rec_key);
			if (diff > 0)
				goto advloop;
		}

		/* Stop if low_key(rec) > high_key. */
		cur->bc_ops->init_key_from_rec(&rec_key, recp);
		diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
		if (diff > 0)
			break;

		/* Callback */
		error = fn(cur, recp, priv);
		if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
			break;

advloop:
		/* Move on to the next record. */
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
	}

out:
	return error;
}
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
/*
 * Query an overlapped-interval btree for all records intersecting the range
 * [low_key, high_key], invoking @fn on each match.  Because intervals at one
 * level may overlap, a simple in-order walk cannot prune correctly; instead
 * we do an explicit depth-first descent from the root, following every child
 * whose [low key, high key] window overlaps the query range.
 *
 * A record/keyptr overlaps the query iff its high key >= low_key (ldiff >= 0)
 * and high_key >= its low key (hdiff >= 0).  If hdiff < 0 the entry starts
 * beyond the query range and, since entries are sorted by low key, so does
 * everything after it at this level — pop back up to the parent.
 *
 * Returns a negative error, XFS_BTREE_QUERY_RANGE_ABORT if the callback
 * stopped the walk early, or 0.
 */
STATIC int
xfs_btree_overlapped_query_range(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*low_key,
	union xfs_btree_key	*high_key,
	xfs_btree_query_range_fn fn,
	void			*priv)
{
	union xfs_btree_ptr	ptr;
	union xfs_btree_ptr	*pp;
	union xfs_btree_key	rec_key;
	union xfs_btree_key	rec_hkey;
	union xfs_btree_key	*lkp;
	union xfs_btree_key	*hkp;
	union xfs_btree_rec	*recp;
	struct xfs_btree_block	*block;
	__int64_t		ldiff;
	__int64_t		hdiff;
	int			level;
	struct xfs_buf		*bp;
	int			i;
	int			error;

	/* Load the root of the btree and start the walk at its first entry. */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
	if (error)
		return error;
	xfs_btree_get_block(cur, level, &bp);
	trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto out;
#endif
	cur->bc_ptrs[level] = 1;	/* btree slots are 1-based */

	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		/* End of this block?  Pop up one level and advance. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
pop_up:
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		if (level == 0) {
			/* Leaf level: compare the record against the range. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

			cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
			ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
					low_key);

			cur->bc_ops->init_key_from_rec(&rec_key, recp);
			hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
					&rec_key);

			/*
			 * ldiff >= 0 && hdiff >= 0: record overlaps the
			 * query range, so hand it to the callback.
			 * hdiff < 0: record (and all later siblings) starts
			 * past the range — pop up.  Otherwise keep scanning.
			 */
			if (ldiff >= 0 && hdiff >= 0) {
				error = fn(cur, recp, priv);
				if (error < 0 ||
				    error == XFS_BTREE_QUERY_RANGE_ABORT)
					break;
			} else if (hdiff < 0) {
				/* Record is larger than high key; pop. */
				goto pop_up;
			}
			cur->bc_ptrs[level]++;
			continue;
		}

		/* Node level: compare this keyptr's window to the range. */
		lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
		hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);

		ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
		hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);

		/*
		 * Overlapping keyptr: descend into the child block and
		 * restart the scan at its first entry.  hdiff < 0: keyptr
		 * (and all later siblings) starts past the range — pop up.
		 * Otherwise advance to the next keyptr in this block.
		 */
		if (ldiff >= 0 && hdiff >= 0) {
			level--;
			error = xfs_btree_lookup_get_block(cur, level, pp,
					&block);
			if (error)
				goto out;
			xfs_btree_get_block(cur, level, &bp);
			trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
			error = xfs_btree_check_block(cur, block, level, bp);
			if (error)
				goto out;
#endif
			cur->bc_ptrs[level] = 1;
			continue;
		} else if (hdiff < 0) {
			/* The low key is larger than the upper range; pop. */
			goto pop_up;
		}
		cur->bc_ptrs[level]++;
	}

out:
	/*
	 * If we don't end this walk at the leaf level, releasing the cursor
	 * could leave stale buffers pinned at the upper levels.  When the
	 * leaf buffer slot is empty, drop every held buffer and reset the
	 * per-level cursor state so the cursor can be reused or freed safely.
	 */
	if (cur->bc_bufs[0] == NULL) {
		for (i = 0; i < cur->bc_nlevels; i++) {
			if (cur->bc_bufs[i]) {
				xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
				cur->bc_bufs[i] = NULL;
				cur->bc_ptrs[i] = 0;
				cur->bc_ra[i] = 0;
			}
		}
	}

	return error;
}
4765
4766
4767
4768
4769
4770
4771
4772
4773int
4774xfs_btree_query_range(
4775 struct xfs_btree_cur *cur,
4776 union xfs_btree_irec *low_rec,
4777 union xfs_btree_irec *high_rec,
4778 xfs_btree_query_range_fn fn,
4779 void *priv)
4780{
4781 union xfs_btree_rec rec;
4782 union xfs_btree_key low_key;
4783 union xfs_btree_key high_key;
4784
4785
4786 cur->bc_rec = *high_rec;
4787 cur->bc_ops->init_rec_from_cur(cur, &rec);
4788 cur->bc_ops->init_key_from_rec(&high_key, &rec);
4789
4790 cur->bc_rec = *low_rec;
4791 cur->bc_ops->init_rec_from_cur(cur, &rec);
4792 cur->bc_ops->init_key_from_rec(&low_key, &rec);
4793
4794
4795 if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
4796 return -EINVAL;
4797
4798 if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
4799 return xfs_btree_simple_query_range(cur, &low_key,
4800 &high_key, fn, priv);
4801 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
4802 fn, priv);
4803}
4804
4805
4806
4807
4808
4809xfs_extlen_t
4810xfs_btree_calc_size(
4811 struct xfs_mount *mp,
4812 uint *limits,
4813 unsigned long long len)
4814{
4815 int level;
4816 int maxrecs;
4817 xfs_extlen_t rval;
4818
4819 maxrecs = limits[0];
4820 for (level = 0, rval = 0; len > 1; level++) {
4821 len += maxrecs - 1;
4822 do_div(len, maxrecs);
4823 maxrecs = limits[1];
4824 rval += len;
4825 }
4826 return rval;
4827}
4828
4829static int
4830xfs_btree_count_blocks_helper(
4831 struct xfs_btree_cur *cur,
4832 int level,
4833 void *data)
4834{
4835 xfs_extlen_t *blocks = data;
4836 (*blocks)++;
4837
4838 return 0;
4839}
4840
4841
4842int
4843xfs_btree_count_blocks(
4844 struct xfs_btree_cur *cur,
4845 xfs_extlen_t *blocks)
4846{
4847 *blocks = 0;
4848 return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
4849 blocks);
4850}
4851