1
2
3
4
5
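/*
 * Core b+tree infrastructure shared by the on-disk XFS btrees (free space
 * by block and by size, inode, free inode, bmap, rmap and refcount).  The
 * per-btree differences are abstracted behind the xfs_btree_ops vector
 * carried in each cursor.
 */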
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_mount.h"
14#include "xfs_inode.h"
15#include "xfs_trans.h"
16#include "xfs_buf_item.h"
17#include "xfs_btree.h"
18#include "xfs_errortag.h"
19#include "xfs_error.h"
20#include "xfs_trace.h"
21#include "xfs_alloc.h"
22#include "xfs_log.h"
23#include "xfs_btree_staging.h"
24#include "xfs_ag.h"
25
26
27
28
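/* Cursor allocation zone. */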
29kmem_zone_t *xfs_btree_cur_zone;
30
31
32
33
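/* Btree magic numbers, indexed by [crc][btnum]. */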
34static const uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
35 { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
36 XFS_FIBT_MAGIC, 0 },
37 { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
38 XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC,
39 XFS_REFC_CRC_MAGIC }
40};
41
42uint32_t
43xfs_btree_magic(
44 int crc,
45 xfs_btnum_t btnum)
46{
47 uint32_t magic = xfs_magics[crc][btnum];
48
49
50 ASSERT(magic != 0);
51 return magic;
52}
53
54
55
56
57
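/*
 * Check a long (64 bit block pointer) btree block header.  Return the
 * address of the failing check, or NULL if everything is ok.
 */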
58xfs_failaddr_t
59__xfs_btree_check_lblock(
60 struct xfs_btree_cur *cur,
61 struct xfs_btree_block *block,
62 int level,
63 struct xfs_buf *bp)
64{
65 struct xfs_mount *mp = cur->bc_mp;
66 xfs_btnum_t btnum = cur->bc_btnum;
67 int crc = xfs_sb_version_hascrc(&mp->m_sb);
68
69 if (crc) {
70 if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
71 return __this_address;
72 if (block->bb_u.l.bb_blkno !=
73 cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
74 return __this_address;
75 if (block->bb_u.l.bb_pad != cpu_to_be32(0))
76 return __this_address;
77 }
78
79 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
80 return __this_address;
81 if (be16_to_cpu(block->bb_level) != level)
82 return __this_address;
83 if (be16_to_cpu(block->bb_numrecs) >
84 cur->bc_ops->get_maxrecs(cur, level))
85 return __this_address;
86 if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
87 !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib),
88 level + 1))
89 return __this_address;
90 if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
91 !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib),
92 level + 1))
93 return __this_address;
94
95 return NULL;
96}
97
98
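/* Check a long btree block header. */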
99static int
100xfs_btree_check_lblock(
101 struct xfs_btree_cur *cur,
102 struct xfs_btree_block *block,
103 int level,
104 struct xfs_buf *bp)
105{
106 struct xfs_mount *mp = cur->bc_mp;
107 xfs_failaddr_t fa;
108
109 fa = __xfs_btree_check_lblock(cur, block, level, bp);
110 if (XFS_IS_CORRUPT(mp, fa != NULL) ||
111 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK)) {
112 if (bp)
113 trace_xfs_btree_corrupt(bp, _RET_IP_);
114 return -EFSCORRUPTED;
115 }
116 return 0;
117}
118
119
120
121
122
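/*
 * Check a short (32 bit block pointer) btree block header.  Return the
 * address of the failing check, or NULL if everything is ok.
 */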
123xfs_failaddr_t
124__xfs_btree_check_sblock(
125 struct xfs_btree_cur *cur,
126 struct xfs_btree_block *block,
127 int level,
128 struct xfs_buf *bp)
129{
130 struct xfs_mount *mp = cur->bc_mp;
131 xfs_btnum_t btnum = cur->bc_btnum;
132 int crc = xfs_sb_version_hascrc(&mp->m_sb);
133
134 if (crc) {
135 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
136 return __this_address;
137 if (block->bb_u.s.bb_blkno !=
138 cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
139 return __this_address;
140 }
141
142 if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
143 return __this_address;
144 if (be16_to_cpu(block->bb_level) != level)
145 return __this_address;
146 if (be16_to_cpu(block->bb_numrecs) >
147 cur->bc_ops->get_maxrecs(cur, level))
148 return __this_address;
149 if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
150 !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib),
151 level + 1))
152 return __this_address;
153 if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
154 !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib),
155 level + 1))
156 return __this_address;
157
158 return NULL;
159}
160
161
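/* Check a short btree block header. */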
162STATIC int
163xfs_btree_check_sblock(
164 struct xfs_btree_cur *cur,
165 struct xfs_btree_block *block,
166 int level,
167 struct xfs_buf *bp)
168{
169 struct xfs_mount *mp = cur->bc_mp;
170 xfs_failaddr_t fa;
171
172 fa = __xfs_btree_check_sblock(cur, block, level, bp);
173 if (XFS_IS_CORRUPT(mp, fa != NULL) ||
174 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_SBLOCK)) {
175 if (bp)
176 trace_xfs_btree_corrupt(bp, _RET_IP_);
177 return -EFSCORRUPTED;
178 }
179 return 0;
180}
181
182
183
184
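/*
 * Debug routine: check that the block header is ok.
 */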
185int
186xfs_btree_check_block(
187 struct xfs_btree_cur *cur,
188 struct xfs_btree_block *block,
189 int level,
190 struct xfs_buf *bp)
191{
192 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
193 return xfs_btree_check_lblock(cur, block, level, bp);
194 else
195 return xfs_btree_check_sblock(cur, block, level, bp);
196}
197
198
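/* Check that a long-form block pointer is within the filesystem. */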
199bool
200xfs_btree_check_lptr(
201 struct xfs_btree_cur *cur,
202 xfs_fsblock_t fsbno,
203 int level)
204{
205 if (level <= 0)
206 return false;
207 return xfs_verify_fsbno(cur->bc_mp, fsbno);
208}
209
210
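/* Check that a short-form block pointer is within the allocation group. */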
211bool
212xfs_btree_check_sptr(
213 struct xfs_btree_cur *cur,
214 xfs_agblock_t agbno,
215 int level)
216{
217 if (level <= 0)
218 return false;
219 return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.pag->pag_agno, agbno);
220}
221
222
223
224
225
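/*
 * Check that a given (indexed) btree pointer at a certain level of a
 * btree is valid and doesn't point past where it should.
 */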
226static int
227xfs_btree_check_ptr(
228 struct xfs_btree_cur *cur,
229 union xfs_btree_ptr *ptr,
230 int index,
231 int level)
232{
233 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
234 if (xfs_btree_check_lptr(cur, be64_to_cpu((&ptr->l)[index]),
235 level))
236 return 0;
237 xfs_err(cur->bc_mp,
238"Inode %llu fork %d: Corrupt btree %d pointer at level %d index %d.",
239 cur->bc_ino.ip->i_ino,
240 cur->bc_ino.whichfork, cur->bc_btnum,
241 level, index);
242 } else {
243 if (xfs_btree_check_sptr(cur, be32_to_cpu((&ptr->s)[index]),
244 level))
245 return 0;
246 xfs_err(cur->bc_mp,
247"AG %u: Corrupt btree %d pointer at level %d index %d.",
248 cur->bc_ag.pag->pag_agno, cur->bc_btnum,
249 level, index);
250 }
251
252 return -EFSCORRUPTED;
253}
254
255#ifdef DEBUG
256# define xfs_btree_debug_check_ptr xfs_btree_check_ptr
257#else
258# define xfs_btree_debug_check_ptr(...) (0)
259#endif
260
261
262
263
264
265
266
267
268
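/*
 * Calculate the CRC for the whole btree block and stuff it into the
 * long-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer, so that we later know which version of the LSN was
 * in place when the CRC was calculated.
 */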
269void
270xfs_btree_lblock_calc_crc(
271 struct xfs_buf *bp)
272{
273 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
274 struct xfs_buf_log_item *bip = bp->b_log_item;
275
276 if (!xfs_sb_version_hascrc(&bp->b_mount->m_sb))
277 return;
278 if (bip)
279 block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
280 xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
281}
282
283bool
284xfs_btree_lblock_verify_crc(
285 struct xfs_buf *bp)
286{
287 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
288 struct xfs_mount *mp = bp->b_mount;
289
290 if (xfs_sb_version_hascrc(&mp->m_sb)) {
291 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
292 return false;
293 return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
294 }
295
296 return true;
297}
298
299
300
301
302
303
304
305
306
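/*
 * Calculate the CRC for the whole btree block and stuff it into the
 * short-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer, so that we later know which version of the LSN was
 * in place when the CRC was calculated.
 */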
307void
308xfs_btree_sblock_calc_crc(
309 struct xfs_buf *bp)
310{
311 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
312 struct xfs_buf_log_item *bip = bp->b_log_item;
313
314 if (!xfs_sb_version_hascrc(&bp->b_mount->m_sb))
315 return;
316 if (bip)
317 block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
318 xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
319}
320
321bool
322xfs_btree_sblock_verify_crc(
323 struct xfs_buf *bp)
324{
325 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
326 struct xfs_mount *mp = bp->b_mount;
327
328 if (xfs_sb_version_hascrc(&mp->m_sb)) {
329 if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
330 return false;
331 return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
332 }
333
334 return true;
335}
336
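/* Free a btree block via the cursor's free_block method and invalidate its buffer. */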
337static int
338xfs_btree_free_block(
339 struct xfs_btree_cur *cur,
340 struct xfs_buf *bp)
341{
342 int error;
343
344 error = cur->bc_ops->free_block(cur, bp);
345 if (!error) {
346 xfs_trans_binval(cur->bc_tp, bp);
347 XFS_BTREE_STATS_INC(cur, free);
348 }
349 return error;
350}
351
352
353
354
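/*
 * Delete the btree cursor, releasing any buffers it still holds.
 */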
355void
356xfs_btree_del_cursor(
357 struct xfs_btree_cur *cur,
358 int error)
359{
360 int i;
361
362
363
364
365
366
367
368
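 /*
 * Clear the buffer pointers and release the buffers.  On the error path
 * every level is inspected, because a partial tree walk may have left a
 * buffer attached at any level; on success the scan stops at the first
 * empty slot.
 */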
369 for (i = 0; i < cur->bc_nlevels; i++) {
370 if (cur->bc_bufs[i])
371 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
372 else if (!error)
373 break;
374 }
375
376 ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
377 XFS_FORCED_SHUTDOWN(cur->bc_mp));
378 if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
379 kmem_free(cur->bc_ops);
380 if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
381 xfs_perag_put(cur->bc_ag.pag);
382 kmem_cache_free(xfs_btree_cur_zone, cur);
383}
384
385
386
387
388
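/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */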
389int
390xfs_btree_dup_cursor(
391 xfs_btree_cur_t *cur,
392 xfs_btree_cur_t **ncur)
393{
394 struct xfs_buf *bp;
395 int error;
396 int i;
397 xfs_mount_t *mp;
398 xfs_btree_cur_t *new;
399 xfs_trans_t *tp;
400
401 tp = cur->bc_tp;
402 mp = cur->bc_mp;
403
404
405
406
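 /* Allocate a new cursor like the old one. */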
407 new = cur->bc_ops->dup_cursor(cur);
408
409
410
411
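 /* Copy the record currently in the cursor. */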
412 new->bc_rec = cur->bc_rec;
413
414
415
416
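 /* For each level current, re-get the buffer and copy the ptr value. */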
417 for (i = 0; i < new->bc_nlevels; i++) {
418 new->bc_ptrs[i] = cur->bc_ptrs[i];
419 new->bc_ra[i] = cur->bc_ra[i];
420 bp = cur->bc_bufs[i];
421 if (bp) {
422 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
423 XFS_BUF_ADDR(bp), mp->m_bsize,
424 0, &bp,
425 cur->bc_ops->buf_ops);
426 if (error) {
427 xfs_btree_del_cursor(new, error);
428 *ncur = NULL;
429 return error;
430 }
431 }
432 new->bc_bufs[i] = bp;
433 }
434 *ncur = new;
435 return 0;
436}
437
514
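/*
 * XFS btree block layout and addressing.
 *
 * There are two forms of btree block header: the short form used by the
 * per-AG btrees with 32 bit sibling/block pointers, and the long form used
 * by inode-rooted btrees with 64 bit pointers.  On CRC-enabled filesystems
 * both forms carry additional checksum, LSN, UUID, owner and block number
 * fields, which is why four header lengths are distinguished below.
 *
 * Leaf blocks hold an array of records immediately after the header; node
 * blocks hold all of the keys first, followed by all of the block pointers,
 * so the key array can be searched without touching the pointers.  The
 * offset and address helpers that follow encode exactly this layout.
 */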
515static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
516{
517 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
518 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
519 return XFS_BTREE_LBLOCK_CRC_LEN;
520 return XFS_BTREE_LBLOCK_LEN;
521 }
522 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
523 return XFS_BTREE_SBLOCK_CRC_LEN;
524 return XFS_BTREE_SBLOCK_LEN;
525}
526
527
528
529
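/* Return the size of the btree block pointers for this btree instance. */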
530static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
531{
532 return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
533 sizeof(__be64) : sizeof(__be32);
534}
535
536
537
538
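/* Calculate offset of the n-th record in a btree block. */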
539STATIC size_t
540xfs_btree_rec_offset(
541 struct xfs_btree_cur *cur,
542 int n)
543{
544 return xfs_btree_block_len(cur) +
545 (n - 1) * cur->bc_ops->rec_len;
546}
547
548
549
550
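/* Calculate offset of the n-th key in a btree block. */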
551STATIC size_t
552xfs_btree_key_offset(
553 struct xfs_btree_cur *cur,
554 int n)
555{
556 return xfs_btree_block_len(cur) +
557 (n - 1) * cur->bc_ops->key_len;
558}
559
560
561
562
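/* Calculate offset of the n-th high key in a btree block. */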
563STATIC size_t
564xfs_btree_high_key_offset(
565 struct xfs_btree_cur *cur,
566 int n)
567{
568 return xfs_btree_block_len(cur) +
569 (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
570}
571
572
573
574
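/* Calculate offset of the n-th block pointer in a btree block. */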
575STATIC size_t
576xfs_btree_ptr_offset(
577 struct xfs_btree_cur *cur,
578 int n,
579 int level)
580{
581 return xfs_btree_block_len(cur) +
582 cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
583 (n - 1) * xfs_btree_ptr_len(cur);
584}
585
586
587
588
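/* Return a pointer to the n-th record in the btree block. */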
589union xfs_btree_rec *
590xfs_btree_rec_addr(
591 struct xfs_btree_cur *cur,
592 int n,
593 struct xfs_btree_block *block)
594{
595 return (union xfs_btree_rec *)
596 ((char *)block + xfs_btree_rec_offset(cur, n));
597}
598
599
600
601
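/* Return a pointer to the n-th key in the btree block. */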
602union xfs_btree_key *
603xfs_btree_key_addr(
604 struct xfs_btree_cur *cur,
605 int n,
606 struct xfs_btree_block *block)
607{
608 return (union xfs_btree_key *)
609 ((char *)block + xfs_btree_key_offset(cur, n));
610}
611
612
613
614
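/* Return a pointer to the n-th high key in the btree block. */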
615union xfs_btree_key *
616xfs_btree_high_key_addr(
617 struct xfs_btree_cur *cur,
618 int n,
619 struct xfs_btree_block *block)
620{
621 return (union xfs_btree_key *)
622 ((char *)block + xfs_btree_high_key_offset(cur, n));
623}
624
625
626
627
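/* Return a pointer to the n-th block pointer in the btree block. */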
628union xfs_btree_ptr *
629xfs_btree_ptr_addr(
630 struct xfs_btree_cur *cur,
631 int n,
632 struct xfs_btree_block *block)
633{
634 int level = xfs_btree_get_level(block);
635
636 ASSERT(block->bb_level != 0);
637
638 return (union xfs_btree_ptr *)
639 ((char *)block + xfs_btree_ptr_offset(cur, n, level));
640}
641
642struct xfs_ifork *
643xfs_btree_ifork_ptr(
644 struct xfs_btree_cur *cur)
645{
646 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
647
648 if (cur->bc_flags & XFS_BTREE_STAGING)
649 return cur->bc_ino.ifake->if_fork;
650 return XFS_IFORK_PTR(cur->bc_ino.ip, cur->bc_ino.whichfork);
651}
652
653
654
655
656
657
658
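/*
 * Get the root block, which is stored in the inode fork's if_broot field
 * (or in the fake fork while a staged btree rebuild is in progress).
 */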
659STATIC struct xfs_btree_block *
660xfs_btree_get_iroot(
661 struct xfs_btree_cur *cur)
662{
663 struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
664
665 return (struct xfs_btree_block *)ifp->if_broot;
666}
667
668
669
670
671
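/*
 * Retrieve the block pointer from the cursor at the given level.
 * This may be an inode btree root or from a buffer.
 */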
672struct xfs_btree_block *
673xfs_btree_get_block(
674 struct xfs_btree_cur *cur,
675 int level,
676 struct xfs_buf **bpp)
677{
678 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
679 (level == cur->bc_nlevels - 1)) {
680 *bpp = NULL;
681 return xfs_btree_get_iroot(cur);
682 }
683
684 *bpp = cur->bc_bufs[level];
685 return XFS_BUF_TO_BLOCK(*bpp);
686}
687
688
689
690
691
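/*
 * Change the cursor to point to the first record at the given level.
 * Other levels are unaffected.
 */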
692STATIC int
693xfs_btree_firstrec(
694 xfs_btree_cur_t *cur,
695 int level)
696{
697 struct xfs_btree_block *block;
698 struct xfs_buf *bp;
699
700
701
702
703 block = xfs_btree_get_block(cur, level, &bp);
704 if (xfs_btree_check_block(cur, block, level, bp))
705 return 0;
706
707
708
709 if (!block->bb_numrecs)
710 return 0;
711
712
713
714 cur->bc_ptrs[level] = 1;
715 return 1;
716}
717
718
719
720
721
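/*
 * Change the cursor to point to the last record at the given level.
 * Other levels are unaffected.
 */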
722STATIC int
723xfs_btree_lastrec(
724 xfs_btree_cur_t *cur,
725 int level)
726{
727 struct xfs_btree_block *block;
728 struct xfs_buf *bp;
729
730
731
732
733 block = xfs_btree_get_block(cur, level, &bp);
734 if (xfs_btree_check_block(cur, block, level, bp))
735 return 0;
736
737
738
739 if (!block->bb_numrecs)
740 return 0;
741
742
743
744 cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
745 return 1;
746}
747
748
749
750
751
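/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */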
752void
753xfs_btree_offsets(
754 int64_t fields,
755 const short *offsets,
756 int nbits,
757 int *first,
758 int *last)
759{
760 int i;
761 int64_t imask;
762
763 ASSERT(fields != 0);
764
765
766
767 for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
768 if (imask & fields) {
769 *first = offsets[i];
770 break;
771 }
772 }
773
774
775
776 for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
777 if (imask & fields) {
778 *last = offsets[i + 1] - 1;
779 break;
780 }
781 }
782}
783
784
785
786
787
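/*
 * Get a buffer for the block, return it read in.
 * Long-form addressing.
 */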
788int
789xfs_btree_read_bufl(
790 struct xfs_mount *mp,
791 struct xfs_trans *tp,
792 xfs_fsblock_t fsbno,
793 struct xfs_buf **bpp,
794 int refval,
795 const struct xfs_buf_ops *ops)
796{
797 struct xfs_buf *bp;
798 xfs_daddr_t d;
799 int error;
800
801 if (!xfs_verify_fsbno(mp, fsbno))
802 return -EFSCORRUPTED;
803 d = XFS_FSB_TO_DADDR(mp, fsbno);
804 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
805 mp->m_bsize, 0, &bp, ops);
806 if (error)
807 return error;
808 if (bp)
809 xfs_buf_set_ref(bp, refval);
810 *bpp = bp;
811 return 0;
812}
813
814
815
816
817
818
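/*
 * Read-ahead the block, don't wait for it, don't return a buffer.
 * Long-form addressing.
 */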
819void
820xfs_btree_reada_bufl(
821 struct xfs_mount *mp,
822 xfs_fsblock_t fsbno,
823 xfs_extlen_t count,
824 const struct xfs_buf_ops *ops)
825{
826 xfs_daddr_t d;
827
828 ASSERT(fsbno != NULLFSBLOCK);
829 d = XFS_FSB_TO_DADDR(mp, fsbno);
830 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
831}
832
833
834
835
836
837
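/*
 * Read-ahead the block, don't wait for it, don't return a buffer.
 * Short-form addressing.
 */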
838void
839xfs_btree_reada_bufs(
840 struct xfs_mount *mp,
841 xfs_agnumber_t agno,
842 xfs_agblock_t agbno,
843 xfs_extlen_t count,
844 const struct xfs_buf_ops *ops)
845{
846 xfs_daddr_t d;
847
848 ASSERT(agno != NULLAGNUMBER);
849 ASSERT(agbno != NULLAGBLOCK);
850 d = XFS_AGB_TO_DADDR(mp, agno, agbno);
851 xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
852}
853
854STATIC int
855xfs_btree_readahead_lblock(
856 struct xfs_btree_cur *cur,
857 int lr,
858 struct xfs_btree_block *block)
859{
860 int rval = 0;
861 xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib);
862 xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib);
863
864 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
865 xfs_btree_reada_bufl(cur->bc_mp, left, 1,
866 cur->bc_ops->buf_ops);
867 rval++;
868 }
869
870 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
871 xfs_btree_reada_bufl(cur->bc_mp, right, 1,
872 cur->bc_ops->buf_ops);
873 rval++;
874 }
875
876 return rval;
877}
878
879STATIC int
880xfs_btree_readahead_sblock(
881 struct xfs_btree_cur *cur,
882 int lr,
883 struct xfs_btree_block *block)
884{
885 int rval = 0;
886 xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib);
887 xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib);
888
889
890 if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
891 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.pag->pag_agno,
892 left, 1, cur->bc_ops->buf_ops);
893 rval++;
894 }
895
896 if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
897 xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.pag->pag_agno,
898 right, 1, cur->bc_ops->buf_ops);
899 rval++;
900 }
901
902 return rval;
903}
904
905
906
907
908
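/*
 * Read-ahead btree blocks, at the given level.
 * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
 */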
909STATIC int
910xfs_btree_readahead(
911 struct xfs_btree_cur *cur,
912 int lev,
913 int lr)
914{
915 struct xfs_btree_block *block;
916
917
918
919
920
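 /*
 * No readahead for the root block of an inode-rooted btree: it is not
 * held in a buffer.
 */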
921 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
922 (lev == cur->bc_nlevels - 1))
923 return 0;
924
925 if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
926 return 0;
927
928 cur->bc_ra[lev] |= lr;
929 block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);
930
931 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
932 return xfs_btree_readahead_lblock(cur, lr, block);
933 return xfs_btree_readahead_sblock(cur, lr, block);
934}
935
936STATIC int
937xfs_btree_ptr_to_daddr(
938 struct xfs_btree_cur *cur,
939 union xfs_btree_ptr *ptr,
940 xfs_daddr_t *daddr)
941{
942 xfs_fsblock_t fsbno;
943 xfs_agblock_t agbno;
944 int error;
945
946 error = xfs_btree_check_ptr(cur, ptr, 0, 1);
947 if (error)
948 return error;
949
950 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
951 fsbno = be64_to_cpu(ptr->l);
952 *daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
953 } else {
954 agbno = be32_to_cpu(ptr->s);
955 *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.pag->pag_agno,
956 agbno);
957 }
958
959 return 0;
960}
961
962
963
964
965
966
967
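/*
 * Read-ahead "count" btree blocks at the given pointer location.  Works for
 * both long and short form pointers via xfs_btree_ptr_to_daddr().
 */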
968STATIC void
969xfs_btree_readahead_ptr(
970 struct xfs_btree_cur *cur,
971 union xfs_btree_ptr *ptr,
972 xfs_extlen_t count)
973{
974 xfs_daddr_t daddr;
975
976 if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr))
977 return;
978 xfs_buf_readahead(cur->bc_mp->m_ddev_targp, daddr,
979 cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
980}
981
982
983
984
985
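/*
 * Set the buffer for level "lev" in the cursor to bp, releasing
 * any previous buffer.
 */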
986STATIC void
987xfs_btree_setbuf(
988 xfs_btree_cur_t *cur,
989 int lev,
990 struct xfs_buf *bp)
991{
992 struct xfs_btree_block *b;
993
994 if (cur->bc_bufs[lev])
995 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
996 cur->bc_bufs[lev] = bp;
997 cur->bc_ra[lev] = 0;
998
999 b = XFS_BUF_TO_BLOCK(bp);
1000 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1001 if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
1002 cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
1003 if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
1004 cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
1005 } else {
1006 if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
1007 cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
1008 if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
1009 cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
1010 }
1011}
1012
1013bool
1014xfs_btree_ptr_is_null(
1015 struct xfs_btree_cur *cur,
1016 union xfs_btree_ptr *ptr)
1017{
1018 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1019 return ptr->l == cpu_to_be64(NULLFSBLOCK);
1020 else
1021 return ptr->s == cpu_to_be32(NULLAGBLOCK);
1022}
1023
1024void
1025xfs_btree_set_ptr_null(
1026 struct xfs_btree_cur *cur,
1027 union xfs_btree_ptr *ptr)
1028{
1029 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1030 ptr->l = cpu_to_be64(NULLFSBLOCK);
1031 else
1032 ptr->s = cpu_to_be32(NULLAGBLOCK);
1033}
1034
1035
1036
1037
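/*
 * Get the left or right sibling pointer out of a btree block header.
 */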
1038void
1039xfs_btree_get_sibling(
1040 struct xfs_btree_cur *cur,
1041 struct xfs_btree_block *block,
1042 union xfs_btree_ptr *ptr,
1043 int lr)
1044{
1045 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1046
1047 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1048 if (lr == XFS_BB_RIGHTSIB)
1049 ptr->l = block->bb_u.l.bb_rightsib;
1050 else
1051 ptr->l = block->bb_u.l.bb_leftsib;
1052 } else {
1053 if (lr == XFS_BB_RIGHTSIB)
1054 ptr->s = block->bb_u.s.bb_rightsib;
1055 else
1056 ptr->s = block->bb_u.s.bb_leftsib;
1057 }
1058}
1059
1060void
1061xfs_btree_set_sibling(
1062 struct xfs_btree_cur *cur,
1063 struct xfs_btree_block *block,
1064 union xfs_btree_ptr *ptr,
1065 int lr)
1066{
1067 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
1068
1069 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1070 if (lr == XFS_BB_RIGHTSIB)
1071 block->bb_u.l.bb_rightsib = ptr->l;
1072 else
1073 block->bb_u.l.bb_leftsib = ptr->l;
1074 } else {
1075 if (lr == XFS_BB_RIGHTSIB)
1076 block->bb_u.s.bb_rightsib = ptr->s;
1077 else
1078 block->bb_u.s.bb_leftsib = ptr->s;
1079 }
1080}
1081
1082void
1083xfs_btree_init_block_int(
1084 struct xfs_mount *mp,
1085 struct xfs_btree_block *buf,
1086 xfs_daddr_t blkno,
1087 xfs_btnum_t btnum,
1088 __u16 level,
1089 __u16 numrecs,
1090 __u64 owner,
1091 unsigned int flags)
1092{
1093 int crc = xfs_sb_version_hascrc(&mp->m_sb);
1094 __u32 magic = xfs_btree_magic(crc, btnum);
1095
1096 buf->bb_magic = cpu_to_be32(magic);
1097 buf->bb_level = cpu_to_be16(level);
1098 buf->bb_numrecs = cpu_to_be16(numrecs);
1099
1100 if (flags & XFS_BTREE_LONG_PTRS) {
1101 buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
1102 buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
1103 if (crc) {
1104 buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
1105 buf->bb_u.l.bb_owner = cpu_to_be64(owner);
1106 uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
1107 buf->bb_u.l.bb_pad = 0;
1108 buf->bb_u.l.bb_lsn = 0;
1109 }
1110 } else {
1111
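 /* owner is a 32 bit value on short blocks */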
1112 __u32 __owner = (__u32)owner;
1113
1114 buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
1115 buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
1116 if (crc) {
1117 buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
1118 buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
1119 uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
1120 buf->bb_u.s.bb_lsn = 0;
1121 }
1122 }
1123}
1124
1125void
1126xfs_btree_init_block(
1127 struct xfs_mount *mp,
1128 struct xfs_buf *bp,
1129 xfs_btnum_t btnum,
1130 __u16 level,
1131 __u16 numrecs,
1132 __u64 owner)
1133{
1134 xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
1135 btnum, level, numrecs, owner, 0);
1136}
1137
1138void
1139xfs_btree_init_block_cur(
1140 struct xfs_btree_cur *cur,
1141 struct xfs_buf *bp,
1142 int level,
1143 int numrecs)
1144{
1145 __u64 owner;
1146
1147
1148
1149
1150
1151
1152
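 /*
 * The owner can be pulled straight from the cursor: for long pointer
 * (inode-rooted) btrees it is the inode number, for per-AG btrees it is
 * the AG number.
 */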
1153 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1154 owner = cur->bc_ino.ip->i_ino;
1155 else
1156 owner = cur->bc_ag.pag->pag_agno;
1157
1158 xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
1159 cur->bc_btnum, level, numrecs,
1160 owner, cur->bc_flags);
1161}
1162
1163
1164
1165
1166
1167
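/*
 * Return true if ptr is the last record in the btree and we need to track
 * updates to this record (the XFS_BTREE_LASTREC_UPDATE flag is set).
 */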
1168STATIC int
1169xfs_btree_is_lastrec(
1170 struct xfs_btree_cur *cur,
1171 struct xfs_btree_block *block,
1172 int level)
1173{
1174 union xfs_btree_ptr ptr;
1175
1176 if (level > 0)
1177 return 0;
1178 if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
1179 return 0;
1180
1181 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1182 if (!xfs_btree_ptr_is_null(cur, &ptr))
1183 return 0;
1184 return 1;
1185}
1186
1187STATIC void
1188xfs_btree_buf_to_ptr(
1189 struct xfs_btree_cur *cur,
1190 struct xfs_buf *bp,
1191 union xfs_btree_ptr *ptr)
1192{
1193 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1194 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
1195 XFS_BUF_ADDR(bp)));
1196 else {
1197 ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
1198 XFS_BUF_ADDR(bp)));
1199 }
1200}
1201
1202STATIC void
1203xfs_btree_set_refs(
1204 struct xfs_btree_cur *cur,
1205 struct xfs_buf *bp)
1206{
1207 switch (cur->bc_btnum) {
1208 case XFS_BTNUM_BNO:
1209 case XFS_BTNUM_CNT:
1210 xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
1211 break;
1212 case XFS_BTNUM_INO:
1213 case XFS_BTNUM_FINO:
1214 xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
1215 break;
1216 case XFS_BTNUM_BMAP:
1217 xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
1218 break;
1219 case XFS_BTNUM_RMAP:
1220 xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
1221 break;
1222 case XFS_BTNUM_REFC:
1223 xfs_buf_set_ref(bp, XFS_REFC_BTREE_REF);
1224 break;
1225 default:
1226 ASSERT(0);
1227 }
1228}
1229
1230int
1231xfs_btree_get_buf_block(
1232 struct xfs_btree_cur *cur,
1233 union xfs_btree_ptr *ptr,
1234 struct xfs_btree_block **block,
1235 struct xfs_buf **bpp)
1236{
1237 struct xfs_mount *mp = cur->bc_mp;
1238 xfs_daddr_t d;
1239 int error;
1240
1241 error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
1242 if (error)
1243 return error;
1244 error = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, mp->m_bsize,
1245 0, bpp);
1246 if (error)
1247 return error;
1248
1249 (*bpp)->b_ops = cur->bc_ops->buf_ops;
1250 *block = XFS_BUF_TO_BLOCK(*bpp);
1251 return 0;
1252}
1253
1254
1255
1256
1257
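/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointed to by it.
 */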
1258STATIC int
1259xfs_btree_read_buf_block(
1260 struct xfs_btree_cur *cur,
1261 union xfs_btree_ptr *ptr,
1262 int flags,
1263 struct xfs_btree_block **block,
1264 struct xfs_buf **bpp)
1265{
1266 struct xfs_mount *mp = cur->bc_mp;
1267 xfs_daddr_t d;
1268 int error;
1269
1270
1271 ASSERT(!(flags & XBF_TRYLOCK));
1272
1273 error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
1274 if (error)
1275 return error;
1276 error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
1277 mp->m_bsize, flags, bpp,
1278 cur->bc_ops->buf_ops);
1279 if (error)
1280 return error;
1281
1282 xfs_btree_set_refs(cur, *bpp);
1283 *block = XFS_BUF_TO_BLOCK(*bpp);
1284 return 0;
1285}
1286
1287
1288
1289
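/* Copy keys from one btree block to another. */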
1290void
1291xfs_btree_copy_keys(
1292 struct xfs_btree_cur *cur,
1293 union xfs_btree_key *dst_key,
1294 union xfs_btree_key *src_key,
1295 int numkeys)
1296{
1297 ASSERT(numkeys >= 0);
1298 memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
1299}
1300
1301
1302
1303
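/* Copy records from one btree block to another. */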
1304STATIC void
1305xfs_btree_copy_recs(
1306 struct xfs_btree_cur *cur,
1307 union xfs_btree_rec *dst_rec,
1308 union xfs_btree_rec *src_rec,
1309 int numrecs)
1310{
1311 ASSERT(numrecs >= 0);
1312 memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
1313}
1314
1315
1316
1317
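/* Copy block pointers from one btree block to another. */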
1318void
1319xfs_btree_copy_ptrs(
1320 struct xfs_btree_cur *cur,
1321 union xfs_btree_ptr *dst_ptr,
1322 const union xfs_btree_ptr *src_ptr,
1323 int numptrs)
1324{
1325 ASSERT(numptrs >= 0);
1326 memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
1327}
1328
1329
1330
1331
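/* Shift keys one index left/right inside a single btree block. */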
1332STATIC void
1333xfs_btree_shift_keys(
1334 struct xfs_btree_cur *cur,
1335 union xfs_btree_key *key,
1336 int dir,
1337 int numkeys)
1338{
1339 char *dst_key;
1340
1341 ASSERT(numkeys >= 0);
1342 ASSERT(dir == 1 || dir == -1);
1343
1344 dst_key = (char *)key + (dir * cur->bc_ops->key_len);
1345 memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
1346}
1347
1348
1349
1350
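/* Shift records one index left/right inside a single btree block. */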
1351STATIC void
1352xfs_btree_shift_recs(
1353 struct xfs_btree_cur *cur,
1354 union xfs_btree_rec *rec,
1355 int dir,
1356 int numrecs)
1357{
1358 char *dst_rec;
1359
1360 ASSERT(numrecs >= 0);
1361 ASSERT(dir == 1 || dir == -1);
1362
1363 dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
1364 memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
1365}
1366
1367
1368
1369
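/* Shift block pointers one index left/right inside a single btree block. */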
1370STATIC void
1371xfs_btree_shift_ptrs(
1372 struct xfs_btree_cur *cur,
1373 union xfs_btree_ptr *ptr,
1374 int dir,
1375 int numptrs)
1376{
1377 char *dst_ptr;
1378
1379 ASSERT(numptrs >= 0);
1380 ASSERT(dir == 1 || dir == -1);
1381
1382 dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
1383 memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
1384}
1385
1386
1387
1388
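/* Log key values from the btree block. */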
1389STATIC void
1390xfs_btree_log_keys(
1391 struct xfs_btree_cur *cur,
1392 struct xfs_buf *bp,
1393 int first,
1394 int last)
1395{
1396
1397 if (bp) {
1398 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1399 xfs_trans_log_buf(cur->bc_tp, bp,
1400 xfs_btree_key_offset(cur, first),
1401 xfs_btree_key_offset(cur, last + 1) - 1);
1402 } else {
1403 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
1404 xfs_ilog_fbroot(cur->bc_ino.whichfork));
1405 }
1406}
1407
1408
1409
1410
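/* Log record values from the btree block. */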
1411void
1412xfs_btree_log_recs(
1413 struct xfs_btree_cur *cur,
1414 struct xfs_buf *bp,
1415 int first,
1416 int last)
1417{
1418
1419 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1420 xfs_trans_log_buf(cur->bc_tp, bp,
1421 xfs_btree_rec_offset(cur, first),
1422 xfs_btree_rec_offset(cur, last + 1) - 1);
1423
1424}
1425
1426
1427
1428
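/* Log block pointer fields from a btree block (nonleaf). */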
1429STATIC void
1430xfs_btree_log_ptrs(
1431 struct xfs_btree_cur *cur,
1432 struct xfs_buf *bp,
1433 int first,
1434 int last)
1435{
1436
1437 if (bp) {
1438 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
1439 int level = xfs_btree_get_level(block);
1440
1441 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1442 xfs_trans_log_buf(cur->bc_tp, bp,
1443 xfs_btree_ptr_offset(cur, first, level),
1444 xfs_btree_ptr_offset(cur, last + 1, level) - 1);
1445 } else {
1446 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
1447 xfs_ilog_fbroot(cur->bc_ino.whichfork));
1448 }
1449
1450}
1451
1452
1453
1454
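/* Log fields from a btree block header. */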
1455void
1456xfs_btree_log_block(
1457 struct xfs_btree_cur *cur,
1458 struct xfs_buf *bp,
1459 int fields)
1460{
1461 int first;
1462 int last;
1463 static const short soffsets[] = {
1464 offsetof(struct xfs_btree_block, bb_magic),
1465 offsetof(struct xfs_btree_block, bb_level),
1466 offsetof(struct xfs_btree_block, bb_numrecs),
1467 offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
1468 offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
1469 offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
1470 offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
1471 offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
1472 offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
1473 offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
1474 XFS_BTREE_SBLOCK_CRC_LEN
1475 };
1476 static const short loffsets[] = {
1477 offsetof(struct xfs_btree_block, bb_magic),
1478 offsetof(struct xfs_btree_block, bb_level),
1479 offsetof(struct xfs_btree_block, bb_numrecs),
1480 offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
1481 offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
1482 offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
1483 offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
1484 offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
1485 offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
1486 offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
1487 offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
1488 XFS_BTREE_LBLOCK_CRC_LEN
1489 };
1490
1491 if (bp) {
1492 int nbits;
1493
1494 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
1495
1496
1497
1498
1499
1500
1501
1502 if (fields == XFS_BB_ALL_BITS)
1503 fields = XFS_BB_ALL_BITS_CRC;
1504 nbits = XFS_BB_NUM_BITS_CRC;
1505 } else {
1506 nbits = XFS_BB_NUM_BITS;
1507 }
1508 xfs_btree_offsets(fields,
1509 (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
1510 loffsets : soffsets,
1511 nbits, &first, &last);
1512 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
1513 xfs_trans_log_buf(cur->bc_tp, bp, first, last);
1514 } else {
1515 xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
1516 xfs_ilog_fbroot(cur->bc_ino.whichfork));
1517 }
1518}
1519
1520
1521
1522
1523
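/*
 * Increment the cursor by one record at the given level.
 * For nonzero levels the leaf-ward information is untouched.
 */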
1524int
1525xfs_btree_increment(
1526 struct xfs_btree_cur *cur,
1527 int level,
1528 int *stat)
1529{
1530 struct xfs_btree_block *block;
1531 union xfs_btree_ptr ptr;
1532 struct xfs_buf *bp;
1533 int error;
1534 int lev;
1535
1536 ASSERT(level < cur->bc_nlevels);
1537
1538
1539 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
1540
1541
1542 block = xfs_btree_get_block(cur, level, &bp);
1543
1544#ifdef DEBUG
1545 error = xfs_btree_check_block(cur, block, level, bp);
1546 if (error)
1547 goto error0;
1548#endif
1549
1550
1551 if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
1552 goto out1;
1553
1554
1555 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1556 if (xfs_btree_ptr_is_null(cur, &ptr))
1557 goto out0;
1558
1559 XFS_BTREE_STATS_INC(cur, increment);
1560
1561
1562
1563
1564
1565 for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
1566 block = xfs_btree_get_block(cur, lev, &bp);
1567
1568#ifdef DEBUG
1569 error = xfs_btree_check_block(cur, block, lev, bp);
1570 if (error)
1571 goto error0;
1572#endif
1573
1574 if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
1575 break;
1576
1577
1578 xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
1579 }
1580
1581
1582
1583
1584
1585 if (lev == cur->bc_nlevels) {
1586 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
1587 goto out0;
1588 ASSERT(0);
1589 error = -EFSCORRUPTED;
1590 goto error0;
1591 }
1592 ASSERT(lev < cur->bc_nlevels);
1593
1594
1595
1596
1597
1598 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
1599 union xfs_btree_ptr *ptrp;
1600
1601 ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
1602 --lev;
1603 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
1604 if (error)
1605 goto error0;
1606
1607 xfs_btree_setbuf(cur, lev, bp);
1608 cur->bc_ptrs[lev] = 1;
1609 }
1610out1:
1611 *stat = 1;
1612 return 0;
1613
1614out0:
1615 *stat = 0;
1616 return 0;
1617
1618error0:
1619 return error;
1620}
1621
1622
1623
1624
1625
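/*
 * Decrement the cursor by one record at the given level.
 * For nonzero levels the leaf-ward information is untouched.
 */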
1626int
1627xfs_btree_decrement(
1628 struct xfs_btree_cur *cur,
1629 int level,
1630 int *stat)
1631{
1632 struct xfs_btree_block *block;
1633 struct xfs_buf *bp;
1634 int error;
1635 int lev;
1636 union xfs_btree_ptr ptr;
1637
1638 ASSERT(level < cur->bc_nlevels);
1639
1640
1641 xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
1642
1643
1644 if (--cur->bc_ptrs[level] > 0)
1645 goto out1;
1646
1647
1648 block = xfs_btree_get_block(cur, level, &bp);
1649
1650#ifdef DEBUG
1651 error = xfs_btree_check_block(cur, block, level, bp);
1652 if (error)
1653 goto error0;
1654#endif
1655
1656
1657 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
1658 if (xfs_btree_ptr_is_null(cur, &ptr))
1659 goto out0;
1660
1661 XFS_BTREE_STATS_INC(cur, decrement);
1662
1663
1664
1665
1666
1667 for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
1668 if (--cur->bc_ptrs[lev] > 0)
1669 break;
1670
1671 xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
1672 }
1673
1674
1675
1676
1677
1678 if (lev == cur->bc_nlevels) {
1679 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
1680 goto out0;
1681 ASSERT(0);
1682 error = -EFSCORRUPTED;
1683 goto error0;
1684 }
1685 ASSERT(lev < cur->bc_nlevels);
1686
1687
1688
1689
1690
1691 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
1692 union xfs_btree_ptr *ptrp;
1693
1694 ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
1695 --lev;
1696 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
1697 if (error)
1698 goto error0;
1699 xfs_btree_setbuf(cur, lev, bp);
1700 cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
1701 }
1702out1:
1703 *stat = 1;
1704 return 0;
1705
1706out0:
1707 *stat = 0;
1708 return 0;
1709
1710error0:
1711 return error;
1712}
1713
1714int
1715xfs_btree_lookup_get_block(
1716 struct xfs_btree_cur *cur,
1717 int level,
1718 union xfs_btree_ptr *pp,
1719 struct xfs_btree_block **blkp)
1720{
1721 struct xfs_buf *bp;
1722 xfs_daddr_t daddr;
1723 int error = 0;
1724
1725
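 /* special case the root block if in an inode */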
1726 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
1727 (level == cur->bc_nlevels - 1)) {
1728 *blkp = xfs_btree_get_iroot(cur);
1729 return 0;
1730 }
1731
1732
1733
1734
1735
1736
1737
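 /*
 * If the old buffer at this level for the requested disk address is
 * still attached to the cursor, re-use it; otherwise read a new one.
 */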
1738 bp = cur->bc_bufs[level];
1739 error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
1740 if (error)
1741 return error;
1742 if (bp && XFS_BUF_ADDR(bp) == daddr) {
1743 *blkp = XFS_BUF_TO_BLOCK(bp);
1744 return 0;
1745 }
1746
1747 error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
1748 if (error)
1749 return error;
1750
1751
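 /* Check the inode owner, since the buffer verifiers cannot. */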
1752 if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
1753 !(cur->bc_ino.flags & XFS_BTCUR_BMBT_INVALID_OWNER) &&
1754 (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
1755 be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
1756 cur->bc_ino.ip->i_ino)
1757 goto out_bad;
1758
1759
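 /* Did we get the level we were looking for? */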
1760 if (be16_to_cpu((*blkp)->bb_level) != level)
1761 goto out_bad;
1762
1763
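 /* Check that internal nodes contain at least one record. */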
1764 if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0)
1765 goto out_bad;
1766
1767 xfs_btree_setbuf(cur, level, bp);
1768 return 0;
1769
1770out_bad:
1771 *blkp = NULL;
1772 xfs_buf_mark_corrupt(bp);
1773 xfs_trans_brelse(cur->bc_tp, bp);
1774 return -EFSCORRUPTED;
1775}
1776
1777
1778
1779
1780
1781
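/*
 * Get current search key.  For level 0 we don't actually have a key
 * structure so we make one up from the record.  For all other levels
 * we just return the right key in the block.
 */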
1782STATIC union xfs_btree_key *
1783xfs_lookup_get_search_key(
1784 struct xfs_btree_cur *cur,
1785 int level,
1786 int keyno,
1787 struct xfs_btree_block *block,
1788 union xfs_btree_key *kp)
1789{
1790 if (level == 0) {
1791 cur->bc_ops->init_key_from_rec(kp,
1792 xfs_btree_rec_addr(cur, keyno, block));
1793 return kp;
1794 }
1795
1796 return xfs_btree_key_addr(cur, keyno, block);
1797}
1798
1799
1800
1801
1802
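/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if can't find any such record, 1 for success.
 */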
1803int
1804xfs_btree_lookup(
1805 struct xfs_btree_cur *cur,
1806 xfs_lookup_t dir,
1807 int *stat)
1808{
1809 struct xfs_btree_block *block;
1810 int64_t diff;
1811 int error;
1812 int keyno;
1813 int level;
1814 union xfs_btree_ptr *pp;
1815 union xfs_btree_ptr ptr;
1816
1817 XFS_BTREE_STATS_INC(cur, lookup);
1818
1819
1820 if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0))
1821 return -EFSCORRUPTED;
1822
1823 block = NULL;
1824 keyno = 0;
1825
1826
1827 cur->bc_ops->init_ptr_from_cur(cur, &ptr);
1828 pp = &ptr;
1829
1830
1831
1832
1833
1834
1835
1836 for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
1837
1838 error = xfs_btree_lookup_get_block(cur, level, pp, &block);
1839 if (error)
1840 goto error0;
1841
1842 if (diff == 0) {
1843
1844
1845
1846
1847 keyno = 1;
1848 } else {
1849
1850
1851 int high;
1852 int low;
1853
1854
1855 low = 1;
1856 high = xfs_btree_get_numrecs(block);
1857 if (!high) {
1858
1859 if (level != 0 || cur->bc_nlevels != 1) {
1860 XFS_CORRUPTION_ERROR(__func__,
1861 XFS_ERRLEVEL_LOW,
1862 cur->bc_mp, block,
1863 sizeof(*block));
1864 return -EFSCORRUPTED;
1865 }
1866
1867 cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
1868 *stat = 0;
1869 return 0;
1870 }
1871
1872
1873 while (low <= high) {
1874 union xfs_btree_key key;
1875 union xfs_btree_key *kp;
1876
1877 XFS_BTREE_STATS_INC(cur, compare);
1878
1879
1880 keyno = (low + high) >> 1;
1881
1882
1883 kp = xfs_lookup_get_search_key(cur, level,
1884 keyno, block, &key);
1885
1886
1887
1888
1889
1890
1891
1892 diff = cur->bc_ops->key_diff(cur, kp);
1893 if (diff < 0)
1894 low = keyno + 1;
1895 else if (diff > 0)
1896 high = keyno - 1;
1897 else
1898 break;
1899 }
1900 }
1901
1902
1903
1904
1905
1906 if (level > 0) {
1907
1908
1909
1910
1911 if (diff > 0 && --keyno < 1)
1912 keyno = 1;
1913 pp = xfs_btree_ptr_addr(cur, keyno, block);
1914
1915 error = xfs_btree_debug_check_ptr(cur, pp, 0, level);
1916 if (error)
1917 goto error0;
1918
1919 cur->bc_ptrs[level] = keyno;
1920 }
1921 }
1922
1923
1924 if (dir != XFS_LOOKUP_LE && diff < 0) {
1925 keyno++;
1926
1927
1928
1929
1930 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1931 if (dir == XFS_LOOKUP_GE &&
1932 keyno > xfs_btree_get_numrecs(block) &&
1933 !xfs_btree_ptr_is_null(cur, &ptr)) {
1934 int i;
1935
1936 cur->bc_ptrs[0] = keyno;
1937 error = xfs_btree_increment(cur, 0, &i);
1938 if (error)
1939 goto error0;
1940 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1941 return -EFSCORRUPTED;
1942 *stat = 1;
1943 return 0;
1944 }
1945 } else if (dir == XFS_LOOKUP_LE && diff > 0)
1946 keyno--;
1947 cur->bc_ptrs[0] = keyno;
1948
1949
1950 if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
1951 *stat = 0;
1952 else if (dir != XFS_LOOKUP_EQ || diff == 0)
1953 *stat = 1;
1954 else
1955 *stat = 0;
1956 return 0;
1957
1958error0:
1959 return error;
1960}
1961
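/*
 * Illustrative caller pattern for xfs_btree_lookup() (a sketch, not part of
 * the original file): check the error first, then "stat"; stat == 0 with no
 * error simply means no matching record was found, while on-disk corruption
 * is reported as -EFSCORRUPTED.
 *
 *	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
 *	if (error || !stat)
 *		goto not_found_or_error;
 */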
1962
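/* Find the high key storage area from a regular key. */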
1963union xfs_btree_key *
1964xfs_btree_high_key_from_key(
1965 struct xfs_btree_cur *cur,
1966 union xfs_btree_key *key)
1967{
1968 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
1969 return (union xfs_btree_key *)((char *)key +
1970 (cur->bc_ops->key_len / 2));
1971}
1972
1973
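/* Determine the low (and high if overlapped) keys of a leaf block. */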
1974STATIC void
1975xfs_btree_get_leaf_keys(
1976 struct xfs_btree_cur *cur,
1977 struct xfs_btree_block *block,
1978 union xfs_btree_key *key)
1979{
1980 union xfs_btree_key max_hkey;
1981 union xfs_btree_key hkey;
1982 union xfs_btree_rec *rec;
1983 union xfs_btree_key *high;
1984 int n;
1985
1986 rec = xfs_btree_rec_addr(cur, 1, block);
1987 cur->bc_ops->init_key_from_rec(key, rec);
1988
1989 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
1990
1991 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
1992 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
1993 rec = xfs_btree_rec_addr(cur, n, block);
1994 cur->bc_ops->init_high_key_from_rec(&hkey, rec);
1995 if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
1996 > 0)
1997 max_hkey = hkey;
1998 }
1999
2000 high = xfs_btree_high_key_from_key(cur, key);
2001 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
2002 }
2003}
2004
2005
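/* Determine the low (and high if overlapped) keys of a node block. */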
2006STATIC void
2007xfs_btree_get_node_keys(
2008 struct xfs_btree_cur *cur,
2009 struct xfs_btree_block *block,
2010 union xfs_btree_key *key)
2011{
2012 union xfs_btree_key *hkey;
2013 union xfs_btree_key *max_hkey;
2014 union xfs_btree_key *high;
2015 int n;
2016
2017 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2018 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2019 cur->bc_ops->key_len / 2);
2020
2021 max_hkey = xfs_btree_high_key_addr(cur, 1, block);
2022 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
2023 hkey = xfs_btree_high_key_addr(cur, n, block);
2024 if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
2025 max_hkey = hkey;
2026 }
2027
2028 high = xfs_btree_high_key_from_key(cur, key);
2029 memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
2030 } else {
2031 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2032 cur->bc_ops->key_len);
2033 }
2034}
2035
2036
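/* Derive the keys for any btree block. */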
2037void
2038xfs_btree_get_keys(
2039 struct xfs_btree_cur *cur,
2040 struct xfs_btree_block *block,
2041 union xfs_btree_key *key)
2042{
2043 if (be16_to_cpu(block->bb_level) == 0)
2044 xfs_btree_get_leaf_keys(cur, block, key);
2045 else
2046 xfs_btree_get_node_keys(cur, block, key);
2047}
2048
2049
2050
2051
2052
2053
2054
2055
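/*
 * Decide whether the parent keys need updating after a change at the given
 * slot: always for overlapping btrees (the high key may have moved),
 * otherwise only when the first record or key in the block changed.
 */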
2056static inline bool
2057xfs_btree_needs_key_update(
2058 struct xfs_btree_cur *cur,
2059 int ptr)
2060{
2061 return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
2062}
2063
2064
2065
2066
2067
2068
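/*
 * Update the low and high parent keys of the given level, progressing
 * towards the root.  If force_all is false, stop at the first parent
 * whose keys already match.
 */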
2069STATIC int
2070__xfs_btree_updkeys(
2071 struct xfs_btree_cur *cur,
2072 int level,
2073 struct xfs_btree_block *block,
2074 struct xfs_buf *bp0,
2075 bool force_all)
2076{
2077 union xfs_btree_key key;
2078 union xfs_btree_key *lkey;
2079 union xfs_btree_key *hkey;
2080 union xfs_btree_key *nlkey;
2081 union xfs_btree_key *nhkey;
2082 struct xfs_buf *bp;
2083 int ptr;
2084
2085 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
2086
2087
2088 if (level + 1 >= cur->bc_nlevels)
2089 return 0;
2090
2091 trace_xfs_btree_updkeys(cur, level, bp0);
2092
2093 lkey = &key;
2094 hkey = xfs_btree_high_key_from_key(cur, lkey);
2095 xfs_btree_get_keys(cur, block, lkey);
2096 for (level++; level < cur->bc_nlevels; level++) {
2097#ifdef DEBUG
2098 int error;
2099#endif
2100 block = xfs_btree_get_block(cur, level, &bp);
2101 trace_xfs_btree_updkeys(cur, level, bp);
2102#ifdef DEBUG
2103 error = xfs_btree_check_block(cur, block, level, bp);
2104 if (error)
2105 return error;
2106#endif
2107 ptr = cur->bc_ptrs[level];
2108 nlkey = xfs_btree_key_addr(cur, ptr, block);
2109 nhkey = xfs_btree_high_key_addr(cur, ptr, block);
2110 if (!force_all &&
2111 !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
2112 cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
2113 break;
2114 xfs_btree_copy_keys(cur, nlkey, lkey, 1);
2115 xfs_btree_log_keys(cur, bp, ptr, ptr);
2116 if (level + 1 >= cur->bc_nlevels)
2117 break;
2118 xfs_btree_get_node_keys(cur, block, lkey);
2119 }
2120
2121 return 0;
2122}
2123
2124
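/* Update all the keys from some level in cursor back to the root. */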
2125STATIC int
2126xfs_btree_updkeys_force(
2127 struct xfs_btree_cur *cur,
2128 int level)
2129{
2130 struct xfs_buf *bp;
2131 struct xfs_btree_block *block;
2132
2133 block = xfs_btree_get_block(cur, level, &bp);
2134 return __xfs_btree_updkeys(cur, level, block, bp, true);
2135}
2136
2137
2138
2139
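/*
 * Update the parent keys of the given level, progressing towards the root.
 */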
2140STATIC int
2141xfs_btree_update_keys(
2142 struct xfs_btree_cur *cur,
2143 int level)
2144{
2145 struct xfs_btree_block *block;
2146 struct xfs_buf *bp;
2147 union xfs_btree_key *kp;
2148 union xfs_btree_key key;
2149 int ptr;
2150
2151 ASSERT(level >= 0);
2152
2153 block = xfs_btree_get_block(cur, level, &bp);
2154 if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
2155 return __xfs_btree_updkeys(cur, level, block, bp, false);
2156
2157
2158
2159
2160
2161
2162
2163 xfs_btree_get_keys(cur, block, &key);
2164 for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
2165#ifdef DEBUG
2166 int error;
2167#endif
2168 block = xfs_btree_get_block(cur, level, &bp);
2169#ifdef DEBUG
2170 error = xfs_btree_check_block(cur, block, level, bp);
2171 if (error)
2172 return error;
2173#endif
2174 ptr = cur->bc_ptrs[level];
2175 kp = xfs_btree_key_addr(cur, ptr, block);
2176 xfs_btree_copy_keys(cur, kp, &key, 1);
2177 xfs_btree_log_keys(cur, bp, ptr, ptr);
2178 }
2179
2180 return 0;
2181}
2182
2183
2184
2185
2186
2187
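/*
 * Update the record referred to by cur to the value in the given record.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */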
2188int
2189xfs_btree_update(
2190 struct xfs_btree_cur *cur,
2191 union xfs_btree_rec *rec)
2192{
2193 struct xfs_btree_block *block;
2194 struct xfs_buf *bp;
2195 int error;
2196 int ptr;
2197 union xfs_btree_rec *rp;
2198
2199
2200 block = xfs_btree_get_block(cur, 0, &bp);
2201
2202#ifdef DEBUG
2203 error = xfs_btree_check_block(cur, block, 0, bp);
2204 if (error)
2205 goto error0;
2206#endif
2207
2208 ptr = cur->bc_ptrs[0];
2209 rp = xfs_btree_rec_addr(cur, ptr, block);
2210
2211
2212 xfs_btree_copy_recs(cur, rp, rec, 1);
2213 xfs_btree_log_recs(cur, bp, ptr, ptr);
2214
2215
2216
2217
2218
2219 if (xfs_btree_is_lastrec(cur, block, 0)) {
2220 cur->bc_ops->update_lastrec(cur, block, rec,
2221 ptr, LASTREC_UPDATE);
2222 }
2223
2224
2225 if (xfs_btree_needs_key_update(cur, ptr)) {
2226 error = xfs_btree_update_keys(cur, 0);
2227 if (error)
2228 goto error0;
2229 }
2230
2231 return 0;
2232
2233error0:
2234 return error;
2235}
2236
2237
2238
2239
2240
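/*
 * Move 1 record left from cur/level if possible.
 * Update cur to reflect the new path.
 */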
2241STATIC int
2242xfs_btree_lshift(
2243 struct xfs_btree_cur *cur,
2244 int level,
2245 int *stat)
2246{
2247 struct xfs_buf *lbp;
2248 struct xfs_btree_block *left;
2249 int lrecs;
2250 struct xfs_buf *rbp;
2251 struct xfs_btree_block *right;
2252 struct xfs_btree_cur *tcur;
2253 int rrecs;
2254 union xfs_btree_ptr lptr;
2255 union xfs_btree_key *rkp = NULL;
2256 union xfs_btree_ptr *rpp = NULL;
2257 union xfs_btree_rec *rrp = NULL;
2258 int error;
2259 int i;
2260
2261 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2262 level == cur->bc_nlevels - 1)
2263 goto out0;
2264
2265
2266 right = xfs_btree_get_block(cur, level, &rbp);
2267
2268#ifdef DEBUG
2269 error = xfs_btree_check_block(cur, right, level, rbp);
2270 if (error)
2271 goto error0;
2272#endif
2273
2274
2275 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2276 if (xfs_btree_ptr_is_null(cur, &lptr))
2277 goto out0;
2278
2279
2280
2281
2282
2283 if (cur->bc_ptrs[level] <= 1)
2284 goto out0;
2285
2286
2287 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
2288 if (error)
2289 goto error0;
2290
2291
2292 lrecs = xfs_btree_get_numrecs(left);
2293 if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
2294 goto out0;
2295
2296 rrecs = xfs_btree_get_numrecs(right);
2297
2298
2299
2300
2301
2302
2303 lrecs++;
2304 rrecs--;
2305
2306 XFS_BTREE_STATS_INC(cur, lshift);
2307 XFS_BTREE_STATS_ADD(cur, moves, 1);
2308
2309
2310
2311
2312
2313 if (level > 0) {
2314
2315 union xfs_btree_key *lkp;
2316 union xfs_btree_ptr *lpp;
2317
2318 lkp = xfs_btree_key_addr(cur, lrecs, left);
2319 rkp = xfs_btree_key_addr(cur, 1, right);
2320
2321 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2322 rpp = xfs_btree_ptr_addr(cur, 1, right);
2323
2324 error = xfs_btree_debug_check_ptr(cur, rpp, 0, level);
2325 if (error)
2326 goto error0;
2327
2328 xfs_btree_copy_keys(cur, lkp, rkp, 1);
2329 xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
2330
2331 xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
2332 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
2333
2334 ASSERT(cur->bc_ops->keys_inorder(cur,
2335 xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
2336 } else {
2337
2338 union xfs_btree_rec *lrp;
2339
2340 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2341 rrp = xfs_btree_rec_addr(cur, 1, right);
2342
2343 xfs_btree_copy_recs(cur, lrp, rrp, 1);
2344 xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
2345
2346 ASSERT(cur->bc_ops->recs_inorder(cur,
2347 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
2348 }
2349
2350 xfs_btree_set_numrecs(left, lrecs);
2351 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2352
2353 xfs_btree_set_numrecs(right, rrecs);
2354 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2355
2356
2357
2358
2359 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
2360 if (level > 0) {
2361
2362 for (i = 0; i < rrecs; i++) {
2363 error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
2364 if (error)
2365 goto error0;
2366 }
2367
2368 xfs_btree_shift_keys(cur,
2369 xfs_btree_key_addr(cur, 2, right),
2370 -1, rrecs);
2371 xfs_btree_shift_ptrs(cur,
2372 xfs_btree_ptr_addr(cur, 2, right),
2373 -1, rrecs);
2374
2375 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2376 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2377 } else {
2378
2379 xfs_btree_shift_recs(cur,
2380 xfs_btree_rec_addr(cur, 2, right),
2381 -1, rrecs);
2382 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2383 }
2384
2385
2386
2387
2388
2389 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2390 error = xfs_btree_dup_cursor(cur, &tcur);
2391 if (error)
2392 goto error0;
2393 i = xfs_btree_firstrec(tcur, level);
2394 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
2395 error = -EFSCORRUPTED;
2396 goto error0;
2397 }
2398
2399 error = xfs_btree_decrement(tcur, level, &i);
2400 if (error)
2401 goto error1;
2402
2403
2404 error = xfs_btree_update_keys(tcur, level);
2405 if (error)
2406 goto error1;
2407
2408 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
2409 }
2410
2411
2412 error = xfs_btree_update_keys(cur, level);
2413 if (error)
2414 goto error0;
2415
2416
2417 cur->bc_ptrs[level]--;
2418
2419 *stat = 1;
2420 return 0;
2421
2422out0:
2423 *stat = 0;
2424 return 0;
2425
2426error0:
2427 return error;
2428
2429error1:
2430 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2431 return error;
2432}
2433
2434
2435
2436
2437
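/*
 * Move 1 record right from cur/level if possible.
 * Update cur to reflect the new path.
 */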
2438STATIC int
2439xfs_btree_rshift(
2440 struct xfs_btree_cur *cur,
2441 int level,
2442 int *stat)
2443{
2444 struct xfs_buf *lbp;
2445 struct xfs_btree_block *left;
2446 struct xfs_buf *rbp;
2447 struct xfs_btree_block *right;
2448 struct xfs_btree_cur *tcur;
2449 union xfs_btree_ptr rptr;
2450 union xfs_btree_key *rkp;
2451 int rrecs;
2452 int lrecs;
2453 int error;
2454 int i;
2455
2456 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2457 (level == cur->bc_nlevels - 1))
2458 goto out0;
2459
2460
2461 left = xfs_btree_get_block(cur, level, &lbp);
2462
2463#ifdef DEBUG
2464 error = xfs_btree_check_block(cur, left, level, lbp);
2465 if (error)
2466 goto error0;
2467#endif
2468
2469
2470 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2471 if (xfs_btree_ptr_is_null(cur, &rptr))
2472 goto out0;
2473
2474
2475
2476
2477
2478 lrecs = xfs_btree_get_numrecs(left);
2479 if (cur->bc_ptrs[level] >= lrecs)
2480 goto out0;
2481
2482
2483 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
2484 if (error)
2485 goto error0;
2486
2487
2488 rrecs = xfs_btree_get_numrecs(right);
2489 if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
2490 goto out0;
2491
2492 XFS_BTREE_STATS_INC(cur, rshift);
2493 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2494
2495
2496
2497
2498
2499 if (level > 0) {
2500
2501 union xfs_btree_key *lkp;
2502 union xfs_btree_ptr *lpp;
2503 union xfs_btree_ptr *rpp;
2504
2505 lkp = xfs_btree_key_addr(cur, lrecs, left);
2506 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2507 rkp = xfs_btree_key_addr(cur, 1, right);
2508 rpp = xfs_btree_ptr_addr(cur, 1, right);
2509
2510 for (i = rrecs - 1; i >= 0; i--) {
2511 error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
2512 if (error)
2513 goto error0;
2514 }
2515
2516 xfs_btree_shift_keys(cur, rkp, 1, rrecs);
2517 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
2518
2519 error = xfs_btree_debug_check_ptr(cur, lpp, 0, level);
2520 if (error)
2521 goto error0;
2522
2523
2524 xfs_btree_copy_keys(cur, rkp, lkp, 1);
2525 xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
2526
2527 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
2528 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
2529
2530 ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
2531 xfs_btree_key_addr(cur, 2, right)));
2532 } else {
2533
2534 union xfs_btree_rec *lrp;
2535 union xfs_btree_rec *rrp;
2536
2537 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2538 rrp = xfs_btree_rec_addr(cur, 1, right);
2539
2540 xfs_btree_shift_recs(cur, rrp, 1, rrecs);
2541
2542
2543 xfs_btree_copy_recs(cur, rrp, lrp, 1);
2544 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
2545 }
2546
2547
2548
2549
2550 xfs_btree_set_numrecs(left, --lrecs);
2551 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2552
2553 xfs_btree_set_numrecs(right, ++rrecs);
2554 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2555
2556
2557
2558
2559
2560 error = xfs_btree_dup_cursor(cur, &tcur);
2561 if (error)
2562 goto error0;
2563 i = xfs_btree_lastrec(tcur, level);
2564 if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
2565 error = -EFSCORRUPTED;
2566 goto error0;
2567 }
2568
2569 error = xfs_btree_increment(tcur, level, &i);
2570 if (error)
2571 goto error1;
2572
2573
2574 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2575 error = xfs_btree_update_keys(cur, level);
2576 if (error)
2577 goto error1;
2578 }
2579
2580
2581 error = xfs_btree_update_keys(tcur, level);
2582 if (error)
2583 goto error1;
2584
2585 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
2586
2587 *stat = 1;
2588 return 0;
2589
2590out0:
2591 *stat = 0;
2592 return 0;
2593
2594error0:
2595 return error;
2596
2597error1:
2598 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2599 return error;
2600}
2601
2602
2603
2604
2605
2606
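/*
 * Split cur/level block in half.
 * Return new block number and the key to its first record.
 * If we can't allocate a new block, *stat is set to 0.
 */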
2607STATIC int
2608__xfs_btree_split(
2609 struct xfs_btree_cur *cur,
2610 int level,
2611 union xfs_btree_ptr *ptrp,
2612 union xfs_btree_key *key,
2613 struct xfs_btree_cur **curp,
2614 int *stat)
2615{
2616 union xfs_btree_ptr lptr;
2617 struct xfs_buf *lbp;
2618 struct xfs_btree_block *left;
2619 union xfs_btree_ptr rptr;
2620 struct xfs_buf *rbp;
2621 struct xfs_btree_block *right;
2622 union xfs_btree_ptr rrptr;
2623 struct xfs_buf *rrbp;
2624 struct xfs_btree_block *rrblock;
2625 int lrecs;
2626 int rrecs;
2627 int src_index;
2628 int error;
2629 int i;
2630
2631 XFS_BTREE_STATS_INC(cur, split);
2632
2633
2634 left = xfs_btree_get_block(cur, level, &lbp);
2635
2636#ifdef DEBUG
2637 error = xfs_btree_check_block(cur, left, level, lbp);
2638 if (error)
2639 goto error0;
2640#endif
2641
2642 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
2643
2644
2645 error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
2646 if (error)
2647 goto error0;
2648 if (*stat == 0)
2649 goto out0;
2650 XFS_BTREE_STATS_INC(cur, alloc);
2651
2652
2653 error = xfs_btree_get_buf_block(cur, &rptr, &right, &rbp);
2654 if (error)
2655 goto error0;
2656
2657
2658 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
2659
2660
2661
2662
2663
2664
2665 lrecs = xfs_btree_get_numrecs(left);
2666 rrecs = lrecs / 2;
2667 if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
2668 rrecs++;
2669 src_index = (lrecs - rrecs + 1);
2670
2671 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2672
2673
2674 lrecs -= rrecs;
2675 xfs_btree_set_numrecs(left, lrecs);
2676 xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
2677
2678
2679
2680
2681
2682
2683 if (level > 0) {
2684
2685 union xfs_btree_key *lkp;
2686 union xfs_btree_ptr *lpp;
2687 union xfs_btree_key *rkp;
2688 union xfs_btree_ptr *rpp;
2689
2690 lkp = xfs_btree_key_addr(cur, src_index, left);
2691 lpp = xfs_btree_ptr_addr(cur, src_index, left);
2692 rkp = xfs_btree_key_addr(cur, 1, right);
2693 rpp = xfs_btree_ptr_addr(cur, 1, right);
2694
2695 for (i = src_index; i < rrecs; i++) {
2696 error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
2697 if (error)
2698 goto error0;
2699 }
2700
2701
2702 xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
2703 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
2704
2705 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2706 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2707
2708
2709 xfs_btree_get_node_keys(cur, right, key);
2710 } else {
2711
2712 union xfs_btree_rec *lrp;
2713 union xfs_btree_rec *rrp;
2714
2715 lrp = xfs_btree_rec_addr(cur, src_index, left);
2716 rrp = xfs_btree_rec_addr(cur, 1, right);
2717
2718
2719 xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
2720 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2721
2722
2723 xfs_btree_get_leaf_keys(cur, right, key);
2724 }
2725
2726
2727
2728
2729
2730 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
2731 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
2732 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2733 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2734
2735 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
2736 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
2737
2738
2739
2740
2741
2742 if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
2743 error = xfs_btree_read_buf_block(cur, &rrptr,
2744 0, &rrblock, &rrbp);
2745 if (error)
2746 goto error0;
2747 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
2748 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
2749 }
2750
2751
2752 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2753 error = xfs_btree_update_keys(cur, level);
2754 if (error)
2755 goto error0;
2756 }
2757
	/*
	 * If the cursor is really in the right block, move it there.
	 * If it's just pointing past the last entry in left, then we'll
	 * keep it there.
	 */
2763 if (cur->bc_ptrs[level] > lrecs + 1) {
2764 xfs_btree_setbuf(cur, level, rbp);
2765 cur->bc_ptrs[level] -= lrecs;
2766 }
2767
2768
2769
2770
2771 if (level + 1 < cur->bc_nlevels) {
2772 error = xfs_btree_dup_cursor(cur, curp);
2773 if (error)
2774 goto error0;
2775 (*curp)->bc_ptrs[level + 1]++;
2776 }
2777 *ptrp = rptr;
2778 *stat = 1;
2779 return 0;
2780out0:
2781 *stat = 0;
2782 return 0;
2783
2784error0:
2785 return error;
2786}
2787
2788struct xfs_btree_split_args {
2789 struct xfs_btree_cur *cur;
2790 int level;
2791 union xfs_btree_ptr *ptrp;
2792 union xfs_btree_key *key;
2793 struct xfs_btree_cur **curp;
2794 int *stat;
2795 int result;
2796 bool kswapd;
2797 struct completion *done;
2798 struct work_struct work;
2799};
2800
/*
 * Stack switching interfaces for allocation: bmbt splits are pushed off to
 * a worker thread so that they get a full stack to run on.
 */
2804static void
2805xfs_btree_split_worker(
2806 struct work_struct *work)
2807{
2808 struct xfs_btree_split_args *args = container_of(work,
2809 struct xfs_btree_split_args, work);
2810 unsigned long pflags;
2811 unsigned long new_pflags = 0;
2812
	/*
	 * We are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory
	 * reclaim in any way.
	 */
2819 if (args->kswapd)
2820 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2821
2822 current_set_flags_nested(&pflags, new_pflags);
2823 xfs_trans_set_context(args->cur->bc_tp);
2824
2825 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
2826 args->key, args->curp, args->stat);
2827
2828 xfs_trans_clear_context(args->cur->bc_tp);
2829 current_restore_flags_nested(&pflags, new_pflags);
2830
	/*
	 * Do not access args after complete() has run here - we don't own
	 * args and the owner may run and free args before we return here.
	 */
2835 complete(args->done);
2836
2837}
2838
/*
 * BMBT split requests often come in with very little stack left to work on,
 * so push the split off to a worker thread with a fresh stack.  The other
 * btree types use little enough stack that they can split directly.
 */
2844STATIC int
2845xfs_btree_split(
2846 struct xfs_btree_cur *cur,
2847 int level,
2848 union xfs_btree_ptr *ptrp,
2849 union xfs_btree_key *key,
2850 struct xfs_btree_cur **curp,
2851 int *stat)
2852{
2853 struct xfs_btree_split_args args;
2854 DECLARE_COMPLETION_ONSTACK(done);
2855
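	/*
	 * Only bmbt splits need the deep-stack worker; split all other
	 * btree types directly on this stack.
	 */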
2856 if (cur->bc_btnum != XFS_BTNUM_BMAP)
2857 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
2858
2859 args.cur = cur;
2860 args.level = level;
2861 args.ptrp = ptrp;
2862 args.key = key;
2863 args.curp = curp;
2864 args.stat = stat;
2865 args.done = &done;
2866 args.kswapd = current_is_kswapd();
2867 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
2868 queue_work(xfs_alloc_wq, &args.work);
2869 wait_for_completion(&done);
2870 destroy_work_on_stack(&args.work);
2871 return args.result;
2872}
2873
/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.
 */
2879int
2880xfs_btree_new_iroot(
2881 struct xfs_btree_cur *cur,
2882 int *logflags,
2883 int *stat)
2884{
2885 struct xfs_buf *cbp;
2886 struct xfs_btree_block *block;
2887 struct xfs_btree_block *cblock;
2888 union xfs_btree_key *ckp;
2889 union xfs_btree_ptr *cpp;
2890 union xfs_btree_key *kp;
2891 union xfs_btree_ptr *pp;
2892 union xfs_btree_ptr nptr;
2893 int level;
2894 int error;
2895 int i;
2896
2897 XFS_BTREE_STATS_INC(cur, newroot);
2898
2899 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
2900
2901 level = cur->bc_nlevels - 1;
2902
2903 block = xfs_btree_get_iroot(cur);
2904 pp = xfs_btree_ptr_addr(cur, 1, block);
2905
2906
2907 error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
2908 if (error)
2909 goto error0;
2910 if (*stat == 0)
2911 return 0;
2912
2913 XFS_BTREE_STATS_INC(cur, alloc);
2914
2915
2916 error = xfs_btree_get_buf_block(cur, &nptr, &cblock, &cbp);
2917 if (error)
2918 goto error0;
2919
	/*
	 * We can't just memcpy() the root in for CRC enabled btree blocks.
	 * In that case we have to also ensure the blkno remains correct.
	 */
2924 memcpy(cblock, block, xfs_btree_block_len(cur));
2925 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
2926 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
2927 cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
2928 else
2929 cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
2930 }
2931
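	/*
	 * The root grows one level: it now holds a single key/ptr entry
	 * that points at the new child block.
	 */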
2932 be16_add_cpu(&block->bb_level, 1);
2933 xfs_btree_set_numrecs(block, 1);
2934 cur->bc_nlevels++;
2935 cur->bc_ptrs[level + 1] = 1;
2936
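	/* Copy the root's keys and block pointers down into the child. */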
2937 kp = xfs_btree_key_addr(cur, 1, block);
2938 ckp = xfs_btree_key_addr(cur, 1, cblock);
2939 xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
2940
2941 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
2942 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
2943 error = xfs_btree_debug_check_ptr(cur, pp, i, level);
2944 if (error)
2945 goto error0;
2946 }
2947
2948 xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
2949
2950 error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level);
2951 if (error)
2952 goto error0;
2953
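	/* Make the root's single pointer point at the new child block. */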
2954 xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
2955
2956 xfs_iroot_realloc(cur->bc_ino.ip,
2957 1 - xfs_btree_get_numrecs(cblock),
2958 cur->bc_ino.whichfork);
2959
2960 xfs_btree_setbuf(cur, level, cbp);
2961
2962
2963
2964
2965
2966 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
2967 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
2968 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
2969
2970 *logflags |=
2971 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
2972 *stat = 1;
2973 return 0;
2974error0:
2975 return error;
2976}
2977
/*
 * Allocate a new root block, fill it in.
 */
2981STATIC int
2982xfs_btree_new_root(
2983 struct xfs_btree_cur *cur,
2984 int *stat)
2985{
2986 struct xfs_btree_block *block;
2987 struct xfs_buf *bp;
2988 int error;
2989 struct xfs_buf *lbp;
2990 struct xfs_btree_block *left;
2991 struct xfs_buf *nbp;
2992 struct xfs_btree_block *new;
2993 int nptr;
2994 struct xfs_buf *rbp;
2995 struct xfs_btree_block *right;
2996 union xfs_btree_ptr rptr;
2997 union xfs_btree_ptr lptr;
2998
2999 XFS_BTREE_STATS_INC(cur, newroot);
3000
3001
3002 cur->bc_ops->init_ptr_from_cur(cur, &rptr);
3003
3004
3005 error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
3006 if (error)
3007 goto error0;
3008 if (*stat == 0)
3009 goto out0;
3010 XFS_BTREE_STATS_INC(cur, alloc);
3011
3012
3013 error = xfs_btree_get_buf_block(cur, &lptr, &new, &nbp);
3014 if (error)
3015 goto error0;
3016
3017
3018 cur->bc_ops->set_root(cur, &lptr, 1);
3019
	/*
	 * At the previous root level there are now two blocks: the old root,
	 * and the new block generated when it was split.  We don't know which
	 * one the cursor is pointing at, so we set up variables "left" and
	 * "right" for each case.
	 */
3026 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
3027
3028#ifdef DEBUG
3029 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
3030 if (error)
3031 goto error0;
3032#endif
3033
3034 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
3035 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
3036
3037 lbp = bp;
3038 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
3039 left = block;
3040 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
3041 if (error)
3042 goto error0;
3043 bp = rbp;
3044 nptr = 1;
3045 } else {
3046
3047 rbp = bp;
3048 xfs_btree_buf_to_ptr(cur, rbp, &rptr);
3049 right = block;
3050 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
3051 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
3052 if (error)
3053 goto error0;
3054 bp = lbp;
3055 nptr = 2;
3056 }
3057
3058
3059 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
3060 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
3061 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
3062 !xfs_btree_ptr_is_null(cur, &rptr));
3063
3064
3065 if (xfs_btree_get_level(left) > 0) {
3066
3067
3068
3069
3070 xfs_btree_get_node_keys(cur, left,
3071 xfs_btree_key_addr(cur, 1, new));
3072 xfs_btree_get_node_keys(cur, right,
3073 xfs_btree_key_addr(cur, 2, new));
3074 } else {
3075
3076
3077
3078
3079
3080 xfs_btree_get_leaf_keys(cur, left,
3081 xfs_btree_key_addr(cur, 1, new));
3082 xfs_btree_get_leaf_keys(cur, right,
3083 xfs_btree_key_addr(cur, 2, new));
3084 }
3085 xfs_btree_log_keys(cur, nbp, 1, 2);
3086
3087
3088 xfs_btree_copy_ptrs(cur,
3089 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
3090 xfs_btree_copy_ptrs(cur,
3091 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
3092 xfs_btree_log_ptrs(cur, nbp, 1, 2);
3093
3094
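	/*
	 * Fix up the cursor: put the new root block at the top of the cursor,
	 * pointing at whichever child (1 = left, 2 = right) the cursor's old
	 * block became.
	 */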
3095 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
3096 cur->bc_ptrs[cur->bc_nlevels] = nptr;
3097 cur->bc_nlevels++;
3098 *stat = 1;
3099 return 0;
3100error0:
3101 return error;
3102out0:
3103 *stat = 0;
3104 return 0;
3105}
3106
3107STATIC int
3108xfs_btree_make_block_unfull(
3109 struct xfs_btree_cur *cur,
3110 int level,
3111 int numrecs,
3112 int *oindex,
3113 int *index,
3114 union xfs_btree_ptr *nptr,
3115 struct xfs_btree_cur **ncur,
3116 union xfs_btree_key *key,
3117 int *stat)
3118{
3119 int error = 0;
3120
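	/*
	 * For an inode-rooted btree at the root level, either grow the root
	 * in place or push its contents out to a new child block; every other
	 * case is handled by shifting into a sibling or splitting.
	 */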
3121 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
3122 level == cur->bc_nlevels - 1) {
3123 struct xfs_inode *ip = cur->bc_ino.ip;
3124
3125 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
3126
3127 xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork);
3128 *stat = 1;
3129 } else {
3130
3131 int logflags = 0;
3132
3133 error = xfs_btree_new_iroot(cur, &logflags, stat);
3134 if (error || *stat == 0)
3135 return error;
3136
3137 xfs_trans_log_inode(cur->bc_tp, ip, logflags);
3138 }
3139
3140 return 0;
3141 }
3142
3143
3144 error = xfs_btree_rshift(cur, level, stat);
3145 if (error || *stat)
3146 return error;
3147
3148
3149 error = xfs_btree_lshift(cur, level, stat);
3150 if (error)
3151 return error;
3152
3153 if (*stat) {
3154 *oindex = *index = cur->bc_ptrs[level];
3155 return 0;
3156 }
3157
	/*
	 * Next, try splitting the current block in half.
	 *
	 * If this works we have to re-set our variables because we
	 * could be in a different block now.
	 */
3164 error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
3165 if (error || *stat == 0)
3166 return error;
3167
3168
3169 *index = cur->bc_ptrs[level];
3170 return 0;
3171}
3172
/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
3177STATIC int
3178xfs_btree_insrec(
3179 struct xfs_btree_cur *cur,
3180 int level,
3181 union xfs_btree_ptr *ptrp,
3182 union xfs_btree_rec *rec,
3183 union xfs_btree_key *key,
3184 struct xfs_btree_cur **curp,
3185 int *stat)
3186{
3187 struct xfs_btree_block *block;
3188 struct xfs_buf *bp;
3189 union xfs_btree_ptr nptr;
3190 struct xfs_btree_cur *ncur;
3191 union xfs_btree_key nkey;
3192 union xfs_btree_key *lkey;
3193 int optr;
3194 int ptr;
3195 int numrecs;
3196 int error;
3197 int i;
3198 xfs_daddr_t old_bn;
3199
3200 ncur = NULL;
3201 lkey = &nkey;
3202
	/*
	 * If we have an external root pointer, and we've made it to the
	 * root level, allocate a new root block and we're done.
	 */
3207 if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
3208 (level >= cur->bc_nlevels)) {
3209 error = xfs_btree_new_root(cur, stat);
3210 xfs_btree_set_ptr_null(cur, ptrp);
3211
3212 return error;
3213 }
3214
3215
3216 ptr = cur->bc_ptrs[level];
3217 if (ptr == 0) {
3218 *stat = 0;
3219 return 0;
3220 }
3221
3222 optr = ptr;
3223
3224 XFS_BTREE_STATS_INC(cur, insrec);
3225
3226
3227 block = xfs_btree_get_block(cur, level, &bp);
3228 old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
3229 numrecs = xfs_btree_get_numrecs(block);
3230
3231#ifdef DEBUG
3232 error = xfs_btree_check_block(cur, block, level, bp);
3233 if (error)
3234 goto error0;
3235
3236
3237 if (ptr <= numrecs) {
3238 if (level == 0) {
3239 ASSERT(cur->bc_ops->recs_inorder(cur, rec,
3240 xfs_btree_rec_addr(cur, ptr, block)));
3241 } else {
3242 ASSERT(cur->bc_ops->keys_inorder(cur, key,
3243 xfs_btree_key_addr(cur, ptr, block)));
3244 }
3245 }
3246#endif
3247
	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
3252 xfs_btree_set_ptr_null(cur, &nptr);
3253 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
3254 error = xfs_btree_make_block_unfull(cur, level, numrecs,
3255 &optr, &ptr, &nptr, &ncur, lkey, stat);
3256 if (error || *stat == 0)
3257 goto error0;
3258 }
3259
	/*
	 * The current block may have changed if the block was
	 * previously full and we have just made space in it.
	 */
3264 block = xfs_btree_get_block(cur, level, &bp);
3265 numrecs = xfs_btree_get_numrecs(block);
3266
3267#ifdef DEBUG
3268 error = xfs_btree_check_block(cur, block, level, bp);
3269 if (error)
3270 return error;
3271#endif
3272
3273
3274
3275
3276
3277 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
3278
3279 if (level > 0) {
3280
3281 union xfs_btree_key *kp;
3282 union xfs_btree_ptr *pp;
3283
3284 kp = xfs_btree_key_addr(cur, ptr, block);
3285 pp = xfs_btree_ptr_addr(cur, ptr, block);
3286
3287 for (i = numrecs - ptr; i >= 0; i--) {
3288 error = xfs_btree_debug_check_ptr(cur, pp, i, level);
3289 if (error)
3290 return error;
3291 }
3292
3293 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
3294 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
3295
3296 error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level);
3297 if (error)
3298 goto error0;
3299
3300
3301 xfs_btree_copy_keys(cur, kp, key, 1);
3302 xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
3303 numrecs++;
3304 xfs_btree_set_numrecs(block, numrecs);
3305 xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
3306 xfs_btree_log_keys(cur, bp, ptr, numrecs);
3307#ifdef DEBUG
3308 if (ptr < numrecs) {
3309 ASSERT(cur->bc_ops->keys_inorder(cur, kp,
3310 xfs_btree_key_addr(cur, ptr + 1, block)));
3311 }
3312#endif
3313 } else {
3314
3315 union xfs_btree_rec *rp;
3316
3317 rp = xfs_btree_rec_addr(cur, ptr, block);
3318
3319 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
3320
3321
3322 xfs_btree_copy_recs(cur, rp, rec, 1);
3323 xfs_btree_set_numrecs(block, ++numrecs);
3324 xfs_btree_log_recs(cur, bp, ptr, numrecs);
3325#ifdef DEBUG
3326 if (ptr < numrecs) {
3327 ASSERT(cur->bc_ops->recs_inorder(cur, rp,
3328 xfs_btree_rec_addr(cur, ptr + 1, block)));
3329 }
3330#endif
3331 }
3332
3333
3334 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3335
	/*
	 * If the record just went into a freshly split block (the buffer
	 * under the cursor is no longer the one we started with), the key
	 * the split handed back for the new block is now stale, so
	 * regenerate it from the block contents.  Otherwise, if the record
	 * went in at the first slot, push the key change up to the parent
	 * levels.
	 */
3344 if (bp && bp->b_bn != old_bn) {
3345 xfs_btree_get_keys(cur, block, lkey);
3346 } else if (xfs_btree_needs_key_update(cur, optr)) {
3347 error = xfs_btree_update_keys(cur, level);
3348 if (error)
3349 goto error0;
3350 }
3351
3352
3353
3354
3355
3356 if (xfs_btree_is_lastrec(cur, block, level)) {
3357 cur->bc_ops->update_lastrec(cur, block, rec,
3358 ptr, LASTREC_INSREC);
3359 }
3360
3361
3362
3363
3364
3365 *ptrp = nptr;
3366 if (!xfs_btree_ptr_is_null(cur, &nptr)) {
3367 xfs_btree_copy_keys(cur, key, lkey, 1);
3368 *curp = ncur;
3369 }
3370
3371 *stat = 1;
3372 return 0;
3373
3374error0:
3375 return error;
3376}
3377
/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor.  All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 */
3385int
3386xfs_btree_insert(
3387 struct xfs_btree_cur *cur,
3388 int *stat)
3389{
3390 int error;
3391 int i;
3392 int level;
3393 union xfs_btree_ptr nptr;
3394 struct xfs_btree_cur *ncur;
3395 struct xfs_btree_cur *pcur;
3396 union xfs_btree_key bkey;
3397 union xfs_btree_key *key;
3398 union xfs_btree_rec rec;
3399
3400 level = 0;
3401 ncur = NULL;
3402 pcur = cur;
3403 key = &bkey;
3404
3405 xfs_btree_set_ptr_null(cur, &nptr);
3406
3407
3408 cur->bc_ops->init_rec_from_cur(cur, &rec);
3409 cur->bc_ops->init_key_from_rec(key, &rec);
3410
	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split block; that must mean the
	 * insert is finished with this level.
	 */
3416 do {
3417
3418
3419
3420
3421 error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
3422 &ncur, &i);
3423 if (error) {
3424 if (pcur != cur)
3425 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
3426 goto error0;
3427 }
3428
3429 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3430 error = -EFSCORRUPTED;
3431 goto error0;
3432 }
3433 level++;
3434
		/*
		 * See if the cursor we just used is trash.
		 * Can't trash the caller's cursor, but otherwise we should
		 * if ncur is a new cursor or we're about to be done.
		 */
3440 if (pcur != cur &&
3441 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
3442
3443 if (cur->bc_ops->update_cursor)
3444 cur->bc_ops->update_cursor(pcur, cur);
3445 cur->bc_nlevels = pcur->bc_nlevels;
3446 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
3447 }
3448
3449 if (ncur) {
3450 pcur = ncur;
3451 ncur = NULL;
3452 }
3453 } while (!xfs_btree_ptr_is_null(cur, &nptr));
3454
3455 *stat = i;
3456 return 0;
3457error0:
3458 return error;
3459}
3460
/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the killroot name comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * inode we have to copy the single block it was pointing to into the
 * inode.
 */
3469STATIC int
3470xfs_btree_kill_iroot(
3471 struct xfs_btree_cur *cur)
3472{
3473 int whichfork = cur->bc_ino.whichfork;
3474 struct xfs_inode *ip = cur->bc_ino.ip;
3475 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3476 struct xfs_btree_block *block;
3477 struct xfs_btree_block *cblock;
3478 union xfs_btree_key *kp;
3479 union xfs_btree_key *ckp;
3480 union xfs_btree_ptr *pp;
3481 union xfs_btree_ptr *cpp;
3482 struct xfs_buf *cbp;
3483 int level;
3484 int index;
3485 int numrecs;
3486 int error;
3487#ifdef DEBUG
3488 union xfs_btree_ptr ptr;
3489#endif
3490 int i;
3491
3492 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
3493 ASSERT(cur->bc_nlevels > 1);
3494
3495
3496
3497
3498
3499 level = cur->bc_nlevels - 1;
3500 if (level == 1)
3501 goto out0;
3502
3503
3504
3505
3506 block = xfs_btree_get_iroot(cur);
3507 if (xfs_btree_get_numrecs(block) != 1)
3508 goto out0;
3509
3510 cblock = xfs_btree_get_block(cur, level - 1, &cbp);
3511 numrecs = xfs_btree_get_numrecs(cblock);
3512
	/*
	 * Only do this if the child block's entries will fit in the inode
	 * root, since they have to be copied up into it.
	 */
3518 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
3519 goto out0;
3520
3521 XFS_BTREE_STATS_INC(cur, killroot);
3522
3523#ifdef DEBUG
3524 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
3525 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3526 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
3527 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3528#endif
3529
3530 index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
3531 if (index) {
3532 xfs_iroot_realloc(cur->bc_ino.ip, index,
3533 cur->bc_ino.whichfork);
3534 block = ifp->if_broot;
3535 }
3536
3537 be16_add_cpu(&block->bb_numrecs, index);
3538 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
3539
3540 kp = xfs_btree_key_addr(cur, 1, block);
3541 ckp = xfs_btree_key_addr(cur, 1, cblock);
3542 xfs_btree_copy_keys(cur, kp, ckp, numrecs);
3543
3544 pp = xfs_btree_ptr_addr(cur, 1, block);
3545 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
3546
3547 for (i = 0; i < numrecs; i++) {
3548 error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
3549 if (error)
3550 return error;
3551 }
3552
3553 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
3554
3555 error = xfs_btree_free_block(cur, cbp);
3556 if (error)
3557 return error;
3558
3559 cur->bc_bufs[level - 1] = NULL;
3560 be16_add_cpu(&block->bb_level, -1);
3561 xfs_trans_log_inode(cur->bc_tp, ip,
3562 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork));
3563 cur->bc_nlevels--;
3564out0:
3565 return 0;
3566}
3567
/*
 * Kill the old root.
 */
3571STATIC int
3572xfs_btree_kill_root(
3573 struct xfs_btree_cur *cur,
3574 struct xfs_buf *bp,
3575 int level,
3576 union xfs_btree_ptr *newroot)
3577{
3578 int error;
3579
3580 XFS_BTREE_STATS_INC(cur, killroot);
3581
3582
3583
3584
3585
3586 cur->bc_ops->set_root(cur, newroot, -1);
3587
3588 error = xfs_btree_free_block(cur, bp);
3589 if (error)
3590 return error;
3591
3592 cur->bc_bufs[level] = NULL;
3593 cur->bc_ra[level] = 0;
3594 cur->bc_nlevels--;
3595
3596 return 0;
3597}
3598
3599STATIC int
3600xfs_btree_dec_cursor(
3601 struct xfs_btree_cur *cur,
3602 int level,
3603 int *stat)
3604{
3605 int error;
3606 int i;
3607
3608 if (level > 0) {
3609 error = xfs_btree_decrement(cur, level, &i);
3610 if (error)
3611 return error;
3612 }
3613
3614 *stat = 1;
3615 return 0;
3616}
3617
/*
 * Single level of the btree record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * On exit *stat is 0 for nothing done, 1 for done, 2 to go on to the
 * next level up.
 */
3624STATIC int
3625xfs_btree_delrec(
3626 struct xfs_btree_cur *cur,
3627 int level,
3628 int *stat)
3629{
3630 struct xfs_btree_block *block;
3631 union xfs_btree_ptr cptr;
3632 struct xfs_buf *bp;
3633 int error;
3634 int i;
3635 union xfs_btree_ptr lptr;
3636 struct xfs_buf *lbp;
3637 struct xfs_btree_block *left;
3638 int lrecs = 0;
3639 int ptr;
3640 union xfs_btree_ptr rptr;
3641 struct xfs_buf *rbp;
3642 struct xfs_btree_block *right;
3643 struct xfs_btree_block *rrblock;
3644 struct xfs_buf *rrbp;
3645 int rrecs = 0;
3646 struct xfs_btree_cur *tcur;
3647 int numrecs;
3648
3649 tcur = NULL;
3650
3651
3652 ptr = cur->bc_ptrs[level];
3653 if (ptr == 0) {
3654 *stat = 0;
3655 return 0;
3656 }
3657
3658
3659 block = xfs_btree_get_block(cur, level, &bp);
3660 numrecs = xfs_btree_get_numrecs(block);
3661
3662#ifdef DEBUG
3663 error = xfs_btree_check_block(cur, block, level, bp);
3664 if (error)
3665 goto error0;
3666#endif
3667
3668
3669 if (ptr > numrecs) {
3670 *stat = 0;
3671 return 0;
3672 }
3673
3674 XFS_BTREE_STATS_INC(cur, delrec);
3675 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
3676
3677
3678 if (level > 0) {
3679
3680 union xfs_btree_key *lkp;
3681 union xfs_btree_ptr *lpp;
3682
3683 lkp = xfs_btree_key_addr(cur, ptr + 1, block);
3684 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
3685
3686 for (i = 0; i < numrecs - ptr; i++) {
3687 error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
3688 if (error)
3689 goto error0;
3690 }
3691
3692 if (ptr < numrecs) {
3693 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
3694 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
3695 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
3696 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
3697 }
3698 } else {
3699
3700 if (ptr < numrecs) {
3701 xfs_btree_shift_recs(cur,
3702 xfs_btree_rec_addr(cur, ptr + 1, block),
3703 -1, numrecs - ptr);
3704 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
3705 }
3706 }
3707
3708
3709
3710
3711 xfs_btree_set_numrecs(block, --numrecs);
3712 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3713
3714
3715
3716
3717
3718 if (xfs_btree_is_lastrec(cur, block, level)) {
3719 cur->bc_ops->update_lastrec(cur, block, NULL,
3720 ptr, LASTREC_DELREC);
3721 }
3722
	/*
	 * We're at the root level.  First, shrink the root block in-memory.
	 * Try to get rid of the next level down.  If we can't then there's
	 * nothing left to do.
	 */
3728 if (level == cur->bc_nlevels - 1) {
3729 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
3730 xfs_iroot_realloc(cur->bc_ino.ip, -1,
3731 cur->bc_ino.whichfork);
3732
3733 error = xfs_btree_kill_iroot(cur);
3734 if (error)
3735 goto error0;
3736
3737 error = xfs_btree_dec_cursor(cur, level, stat);
3738 if (error)
3739 goto error0;
3740 *stat = 1;
3741 return 0;
3742 }
3743
		/*
		 * If this is the root level, and there's only one entry left,
		 * and it's NOT the leaf level, then we can get rid of this
		 * level.
		 */
3749 if (numrecs == 1 && level > 0) {
3750 union xfs_btree_ptr *pp;
3751
3752
3753
3754
3755 pp = xfs_btree_ptr_addr(cur, 1, block);
3756 error = xfs_btree_kill_root(cur, bp, level, pp);
3757 if (error)
3758 goto error0;
3759 } else if (level > 0) {
3760 error = xfs_btree_dec_cursor(cur, level, stat);
3761 if (error)
3762 goto error0;
3763 }
3764 *stat = 1;
3765 return 0;
3766 }
3767
3768
3769
3770
3771
3772 if (xfs_btree_needs_key_update(cur, ptr)) {
3773 error = xfs_btree_update_keys(cur, level);
3774 if (error)
3775 goto error0;
3776 }
3777
3778
3779
3780
3781
3782 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
3783 error = xfs_btree_dec_cursor(cur, level, stat);
3784 if (error)
3785 goto error0;
3786 return 0;
3787 }
3788
	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
3794 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
3795 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
3796
3797 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
3798
3799
3800
3801
3802
3803 if (xfs_btree_ptr_is_null(cur, &rptr) &&
3804 xfs_btree_ptr_is_null(cur, &lptr) &&
3805 level == cur->bc_nlevels - 2) {
3806 error = xfs_btree_kill_iroot(cur);
3807 if (!error)
3808 error = xfs_btree_dec_cursor(cur, level, stat);
3809 if (error)
3810 goto error0;
3811 return 0;
3812 }
3813 }
3814
3815 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
3816 !xfs_btree_ptr_is_null(cur, &lptr));
3817
	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disturb the next level up.
	 */
3822 error = xfs_btree_dup_cursor(cur, &tcur);
3823 if (error)
3824 goto error0;
3825
3826
3827
3828
3829
3830 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
3831
3832
3833
3834
3835 i = xfs_btree_lastrec(tcur, level);
3836 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3837 error = -EFSCORRUPTED;
3838 goto error0;
3839 }
3840
3841 error = xfs_btree_increment(tcur, level, &i);
3842 if (error)
3843 goto error0;
3844 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3845 error = -EFSCORRUPTED;
3846 goto error0;
3847 }
3848
3849 i = xfs_btree_lastrec(tcur, level);
3850 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3851 error = -EFSCORRUPTED;
3852 goto error0;
3853 }
3854
3855
3856 right = xfs_btree_get_block(tcur, level, &rbp);
3857#ifdef DEBUG
3858 error = xfs_btree_check_block(tcur, right, level, rbp);
3859 if (error)
3860 goto error0;
3861#endif
3862
3863 xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);
3864
		/*
		 * If the right block is full enough that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
3870 if (xfs_btree_get_numrecs(right) - 1 >=
3871 cur->bc_ops->get_minrecs(tcur, level)) {
3872 error = xfs_btree_lshift(tcur, level, &i);
3873 if (error)
3874 goto error0;
3875 if (i) {
3876 ASSERT(xfs_btree_get_numrecs(block) >=
3877 cur->bc_ops->get_minrecs(tcur, level));
3878
3879 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
3880 tcur = NULL;
3881
3882 error = xfs_btree_dec_cursor(cur, level, stat);
3883 if (error)
3884 goto error0;
3885 return 0;
3886 }
3887 }
3888
3889
3890
3891
3892
3893
3894 rrecs = xfs_btree_get_numrecs(right);
3895 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
3896 i = xfs_btree_firstrec(tcur, level);
3897 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3898 error = -EFSCORRUPTED;
3899 goto error0;
3900 }
3901
3902 error = xfs_btree_decrement(tcur, level, &i);
3903 if (error)
3904 goto error0;
3905 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3906 error = -EFSCORRUPTED;
3907 goto error0;
3908 }
3909 }
3910 }
3911
3912
3913
3914
3915
3916 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
3917
3918
3919
3920
3921 i = xfs_btree_firstrec(tcur, level);
3922 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3923 error = -EFSCORRUPTED;
3924 goto error0;
3925 }
3926
3927 error = xfs_btree_decrement(tcur, level, &i);
3928 if (error)
3929 goto error0;
3930 i = xfs_btree_firstrec(tcur, level);
3931 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
3932 error = -EFSCORRUPTED;
3933 goto error0;
3934 }
3935
3936
3937 left = xfs_btree_get_block(tcur, level, &lbp);
3938#ifdef DEBUG
3939 error = xfs_btree_check_block(cur, left, level, lbp);
3940 if (error)
3941 goto error0;
3942#endif
3943
3944 xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);
3945
		/*
		 * If the left block is full enough that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
3951 if (xfs_btree_get_numrecs(left) - 1 >=
3952 cur->bc_ops->get_minrecs(tcur, level)) {
3953 error = xfs_btree_rshift(tcur, level, &i);
3954 if (error)
3955 goto error0;
3956 if (i) {
3957 ASSERT(xfs_btree_get_numrecs(block) >=
3958 cur->bc_ops->get_minrecs(tcur, level));
3959 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
3960 tcur = NULL;
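				/*
				 * The right shift moved our entries over by
				 * one; at the leaf level keep the cursor
				 * pointing at the record being deleted.
				 */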
3961 if (level == 0)
3962 cur->bc_ptrs[0]++;
3963
3964 *stat = 1;
3965 return 0;
3966 }
3967 }
3968
3969
3970
3971
3972
3973 lrecs = xfs_btree_get_numrecs(left);
3974 }
3975
3976
3977 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
3978 tcur = NULL;
3979
3980
3981 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
3982
3983 if (!xfs_btree_ptr_is_null(cur, &lptr) &&
3984 lrecs + xfs_btree_get_numrecs(block) <=
3985 cur->bc_ops->get_maxrecs(cur, level)) {
3986
3987
3988
3989
3990 rptr = cptr;
3991 right = block;
3992 rbp = bp;
3993 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
3994 if (error)
3995 goto error0;
3996
3997
3998
3999
4000 } else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
4001 rrecs + xfs_btree_get_numrecs(block) <=
4002 cur->bc_ops->get_maxrecs(cur, level)) {
4003
4004
4005
4006
4007 lptr = cptr;
4008 left = block;
4009 lbp = bp;
4010 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
4011 if (error)
4012 goto error0;
4013
4014
4015
4016
4017
4018 } else {
4019 error = xfs_btree_dec_cursor(cur, level, stat);
4020 if (error)
4021 goto error0;
4022 return 0;
4023 }
4024
4025 rrecs = xfs_btree_get_numrecs(right);
4026 lrecs = xfs_btree_get_numrecs(left);
4027
	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
4032 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
4033 if (level > 0) {
4034
4035 union xfs_btree_key *lkp;
4036 union xfs_btree_ptr *lpp;
4037 union xfs_btree_key *rkp;
4038 union xfs_btree_ptr *rpp;
4039
4040 lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
4041 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
4042 rkp = xfs_btree_key_addr(cur, 1, right);
4043 rpp = xfs_btree_ptr_addr(cur, 1, right);
4044
4045 for (i = 1; i < rrecs; i++) {
4046 error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
4047 if (error)
4048 goto error0;
4049 }
4050
4051 xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
4052 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
4053
4054 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
4055 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
4056 } else {
4057
4058 union xfs_btree_rec *lrp;
4059 union xfs_btree_rec *rrp;
4060
4061 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
4062 rrp = xfs_btree_rec_addr(cur, 1, right);
4063
4064 xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
4065 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
4066 }
4067
4068 XFS_BTREE_STATS_INC(cur, join);
4069
4070
4071
4072
4073
4074 xfs_btree_set_numrecs(left, lrecs + rrecs);
4075 xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
4076 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4077 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
4078
4079
4080 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4081 if (!xfs_btree_ptr_is_null(cur, &cptr)) {
4082 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
4083 if (error)
4084 goto error0;
4085 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
4086 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
4087 }
4088
4089
4090 error = xfs_btree_free_block(cur, rbp);
4091 if (error)
4092 goto error0;
4093
4094
4095
4096
4097
4098 if (bp != lbp) {
4099 cur->bc_bufs[level] = lbp;
4100 cur->bc_ptrs[level] += lrecs;
4101 cur->bc_ra[level] = 0;
4102 }
4103
4104
4105
4106
4107 else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
4108 (level + 1 < cur->bc_nlevels)) {
4109 error = xfs_btree_increment(cur, level + 1, &i);
4110 if (error)
4111 goto error0;
4112 }
4113
	/*
	 * Readjust the ptr at this level if it's not a leaf, since it's
	 * still pointing at the deletion point, which makes the cursor
	 * inconsistent.  If this makes the ptr 0, the caller fixes it up.
	 * We can't use decrement because it would change the next level up.
	 */
4120 if (level > 0)
4121 cur->bc_ptrs[level]--;
4122
	/*
	 * We combined blocks, so we have to update the parent keys if the
	 * btree supports overlapped intervals.  However, bc_ptrs[level + 1]
	 * points to the old block so that the caller knows which record to
	 * delete.  Therefore, the caller must be savvy enough to call updkeys
	 * for us if we return stat == 2.  The other exit points from this
	 * function don't require deletions further up the tree, so they can
	 * call updkeys directly.
	 */

	/* Return value means the next level up has something to do. */
4134 *stat = 2;
4135 return 0;
4136
4137error0:
4138 if (tcur)
4139 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
4140 return error;
4141}
4142
/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
4148int
4149xfs_btree_delete(
4150 struct xfs_btree_cur *cur,
4151 int *stat)
4152{
4153 int error;
4154 int level;
4155 int i;
4156 bool joined = false;
4157
	/*
	 * Go up the tree, as long as we need to.
	 *
	 * delrec sets *stat to 2 when it joined two blocks; in that case the
	 * level above has an entry that must be removed as well.
	 */
4164 for (level = 0, i = 2; i == 2; level++) {
4165 error = xfs_btree_delrec(cur, level, &i);
4166 if (error)
4167 goto error0;
4168 if (i == 2)
4169 joined = true;
4170 }
4171
	/*
	 * If we combined blocks as part of deleting the record, delrec won't
	 * have updated the parent high keys so we have to do that here.
	 */
4176 if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
4177 error = xfs_btree_updkeys_force(cur, 0);
4178 if (error)
4179 goto error0;
4180 }
4181
4182 if (i == 0) {
4183 for (level = 1; level < cur->bc_nlevels; level++) {
4184 if (cur->bc_ptrs[level] == 0) {
4185 error = xfs_btree_decrement(cur, level, &i);
4186 if (error)
4187 goto error0;
4188 break;
4189 }
4190 }
4191 }
4192
4193 *stat = i;
4194 return 0;
4195error0:
4196 return error;
4197}
4198
/*
 * Get the data from the pointed-to record.
 */
4202int
4203xfs_btree_get_rec(
4204 struct xfs_btree_cur *cur,
4205 union xfs_btree_rec **recp,
4206 int *stat)
4207{
4208 struct xfs_btree_block *block;
4209 struct xfs_buf *bp;
4210 int ptr;
4211#ifdef DEBUG
4212 int error;
4213#endif
4214
4215 ptr = cur->bc_ptrs[0];
4216 block = xfs_btree_get_block(cur, 0, &bp);
4217
4218#ifdef DEBUG
4219 error = xfs_btree_check_block(cur, block, 0, bp);
4220 if (error)
4221 return error;
4222#endif
4223
4224
4225
4226
4227 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
4228 *stat = 0;
4229 return 0;
4230 }
4231
4232
4233
4234
4235 *recp = xfs_btree_rec_addr(cur, ptr, block);
4236 *stat = 1;
4237 return 0;
4238}
4239
/* Visit a block in a btree. */
4241STATIC int
4242xfs_btree_visit_block(
4243 struct xfs_btree_cur *cur,
4244 int level,
4245 xfs_btree_visit_blocks_fn fn,
4246 void *data)
4247{
4248 struct xfs_btree_block *block;
4249 struct xfs_buf *bp;
4250 union xfs_btree_ptr rptr;
4251 int error;
4252
4253
4254 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4255 block = xfs_btree_get_block(cur, level, &bp);
4256
4257
4258 error = fn(cur, level, data);
4259 if (error)
4260 return error;
4261
4262
4263 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4264 if (xfs_btree_ptr_is_null(cur, &rptr))
4265 return -ENOENT;
4266
4267 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4268}
4269
/* Visit every block in a btree. */
4272int
4273xfs_btree_visit_blocks(
4274 struct xfs_btree_cur *cur,
4275 xfs_btree_visit_blocks_fn fn,
4276 unsigned int flags,
4277 void *data)
4278{
4279 union xfs_btree_ptr lptr;
4280 int level;
4281 struct xfs_btree_block *block = NULL;
4282 int error = 0;
4283
4284 cur->bc_ops->init_ptr_from_cur(cur, &lptr);
4285
4286
4287 for (level = cur->bc_nlevels - 1; level >= 0; level--) {
4288
4289 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
4290 if (error)
4291 return error;
4292
4293
4294 if (level > 0) {
4295 union xfs_btree_ptr *ptr;
4296
4297 ptr = xfs_btree_ptr_addr(cur, 1, block);
4298 xfs_btree_readahead_ptr(cur, ptr, 1);
4299
4300
4301 xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
4302
4303 if (!(flags & XFS_BTREE_VISIT_LEAVES))
4304 continue;
4305 } else if (!(flags & XFS_BTREE_VISIT_RECORDS)) {
4306 continue;
4307 }
4308
4309
4310 do {
4311 error = xfs_btree_visit_block(cur, level, fn, data);
4312 } while (!error);
4313
4314 if (error != -ENOENT)
4315 return error;
4316 }
4317
4318 return 0;
4319}
4320
/*
 * Change the owner of a btree.
 *
 * The mechanism we use here is ordered buffer logging.  Because we don't know
 * how many buffers we are going to need to modify, we don't really want to
 * have to make transaction reservations for the worst case of every buffer in
 * a full size btree, as that may be more space than we can fit in the log.
 * Instead, each modified buffer is attached to the transaction as an ordered
 * buffer so that it is written back without its contents being copied into
 * the log.  If a buffer can't be ordered (it is already dirty in the
 * transaction), the owner field alone is logged and -EAGAIN is returned so
 * the caller can roll the transaction and try again.
 *
 * When there is no transaction (e.g. during log recovery), modified buffers
 * are queued on the caller-supplied delwri list for later writeback.
 */
4345struct xfs_btree_block_change_owner_info {
4346 uint64_t new_owner;
4347 struct list_head *buffer_list;
4348};
4349
4350static int
4351xfs_btree_block_change_owner(
4352 struct xfs_btree_cur *cur,
4353 int level,
4354 void *data)
4355{
4356 struct xfs_btree_block_change_owner_info *bbcoi = data;
4357 struct xfs_btree_block *block;
4358 struct xfs_buf *bp;
4359
4360
4361 block = xfs_btree_get_block(cur, level, &bp);
4362 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
4363 if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
4364 return 0;
4365 block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
4366 } else {
4367 if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner))
4368 return 0;
4369 block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
4370 }
4371
	/*
	 * If the block is a root block hosted in an inode, we might not have a
	 * buffer pointer here and we shouldn't attempt to log the change as
	 * the information is already held in the inode and discarded when the
	 * root block is formatted into the on-disk inode fork.  We still
	 * change it, though, so everything is consistent in memory.
	 */
4379 if (!bp) {
4380 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
4381 ASSERT(level == cur->bc_nlevels - 1);
4382 return 0;
4383 }
4384
4385 if (cur->bc_tp) {
4386 if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
4387 xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
4388 return -EAGAIN;
4389 }
4390 } else {
4391 xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
4392 }
4393
4394 return 0;
4395}
4396
4397int
4398xfs_btree_change_owner(
4399 struct xfs_btree_cur *cur,
4400 uint64_t new_owner,
4401 struct list_head *buffer_list)
4402{
4403 struct xfs_btree_block_change_owner_info bbcoi;
4404
4405 bbcoi.new_owner = new_owner;
4406 bbcoi.buffer_list = buffer_list;
4407
4408 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
4409 XFS_BTREE_VISIT_ALL, &bbcoi);
4410}
4411
/* Verify the v5 fields of a long-format btree block. */
4413xfs_failaddr_t
4414xfs_btree_lblock_v5hdr_verify(
4415 struct xfs_buf *bp,
4416 uint64_t owner)
4417{
4418 struct xfs_mount *mp = bp->b_mount;
4419 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4420
4421 if (!xfs_sb_version_hascrc(&mp->m_sb))
4422 return __this_address;
4423 if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
4424 return __this_address;
4425 if (block->bb_u.l.bb_blkno != cpu_to_be64(bp->b_bn))
4426 return __this_address;
4427 if (owner != XFS_RMAP_OWN_UNKNOWN &&
4428 be64_to_cpu(block->bb_u.l.bb_owner) != owner)
4429 return __this_address;
4430 return NULL;
4431}
4432
/* Verify a long-format btree block. */
4434xfs_failaddr_t
4435xfs_btree_lblock_verify(
4436 struct xfs_buf *bp,
4437 unsigned int max_recs)
4438{
4439 struct xfs_mount *mp = bp->b_mount;
4440 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4441
4442
4443 if (be16_to_cpu(block->bb_numrecs) > max_recs)
4444 return __this_address;
4445
4446
4447 if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
4448 !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))
4449 return __this_address;
4450 if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
4451 !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))
4452 return __this_address;
4453
4454 return NULL;
4455}
4456
/*
 * xfs_btree_sblock_v5hdr_verify() -- verify the v5 fields of a short-format
 * btree block
 *
 * @bp: buffer containing the btree block
 */
4463xfs_failaddr_t
4464xfs_btree_sblock_v5hdr_verify(
4465 struct xfs_buf *bp)
4466{
4467 struct xfs_mount *mp = bp->b_mount;
4468 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4469 struct xfs_perag *pag = bp->b_pag;
4470
4471 if (!xfs_sb_version_hascrc(&mp->m_sb))
4472 return __this_address;
4473 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
4474 return __this_address;
4475 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
4476 return __this_address;
4477 if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
4478 return __this_address;
4479 return NULL;
4480}
4481
/*
 * xfs_btree_sblock_verify() -- verify a short-format btree block
 *
 * @bp: buffer containing the btree block
 * @max_recs: maximum records allowed in this btree node
 */
4488xfs_failaddr_t
4489xfs_btree_sblock_verify(
4490 struct xfs_buf *bp,
4491 unsigned int max_recs)
4492{
4493 struct xfs_mount *mp = bp->b_mount;
4494 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4495 xfs_agblock_t agno;
4496
4497
4498 if (be16_to_cpu(block->bb_numrecs) > max_recs)
4499 return __this_address;
4500
4501
4502 agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
4503 if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
4504 !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
4505 return __this_address;
4506 if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
4507 !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)))
4508 return __this_address;
4509
4510 return NULL;
4511}
4512
/*
 * Calculate the number of btree levels needed to store a given number of
 * records in a short-form btree.
 */
4517uint
4518xfs_btree_compute_maxlevels(
4519 uint *limits,
4520 unsigned long len)
4521{
4522 uint level;
4523 unsigned long maxblocks;
4524
4525 maxblocks = (len + limits[0] - 1) / limits[0];
4526 for (level = 1; maxblocks > 1; level++)
4527 maxblocks = (maxblocks + limits[1] - 1) / limits[1];
4528 return level;
4529}
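
/*
 * Example (illustrative numbers only): with limits[] = { 336, 672 } for the
 * leaf and node maxrecs and len = 1,000,000 records, the loop above computes
 * ceil(1000000 / 336) = 2977 leaf blocks, then ceil(2977 / 672) = 5 node
 * blocks, then ceil(5 / 672) = 1 root block, and returns 3 levels.
 */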
4530
/*
 * Query a regular (non-overlapping) btree for all records overlapping a given
 * interval.  Start with an LE lookup of the low key and return every record
 * until we find one whose key is greater than the high key.
 */
4536STATIC int
4537xfs_btree_simple_query_range(
4538 struct xfs_btree_cur *cur,
4539 union xfs_btree_key *low_key,
4540 union xfs_btree_key *high_key,
4541 xfs_btree_query_range_fn fn,
4542 void *priv)
4543{
4544 union xfs_btree_rec *recp;
4545 union xfs_btree_key rec_key;
4546 int64_t diff;
4547 int stat;
4548 bool firstrec = true;
4549 int error;
4550
4551 ASSERT(cur->bc_ops->init_high_key_from_rec);
4552 ASSERT(cur->bc_ops->diff_two_keys);
4553
	/*
	 * Find the leftmost record.  Do an LE lookup on the low key and, if
	 * that lands before the first record in the tree, step right so we
	 * start at the lowest record that could overlap the query range.
	 */
4558 stat = 0;
4559 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
4560 if (error)
4561 goto out;
4562
4563
4564 if (!stat) {
4565 error = xfs_btree_increment(cur, 0, &stat);
4566 if (error)
4567 goto out;
4568 }
4569
4570 while (stat) {
4571
4572 error = xfs_btree_get_rec(cur, &recp, &stat);
4573 if (error || !stat)
4574 break;
4575
4576
4577 if (firstrec) {
4578 cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
4579 firstrec = false;
4580 diff = cur->bc_ops->diff_two_keys(cur, low_key,
4581 &rec_key);
4582 if (diff > 0)
4583 goto advloop;
4584 }
4585
4586
4587 cur->bc_ops->init_key_from_rec(&rec_key, recp);
4588 diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
4589 if (diff > 0)
4590 break;
4591
4592
4593 error = fn(cur, recp, priv);
4594 if (error)
4595 break;
4596
4597advloop:
4598
4599 error = xfs_btree_increment(cur, 0, &stat);
4600 if (error)
4601 break;
4602 }
4603
4604out:
4605 return error;
4606}
4607
/*
 * Query an overlapped-interval btree for all records overlapping a given
 * interval.  This roughly follows the algorithm given in "Interval Trees"
 * of _Introduction to Algorithms_ (section 14.3 in the 2nd and 3rd
 * editions).
 *
 * First, generate keys for the low and high records passed in.
 *
 * For any leaf node, check the low and high keys of each record against
 * the query low/high keys; if they overlap, pass the record to the
 * iterator function.
 *
 * For any internal node, compare the low and high keys of each pointer
 * against the query low/high keys; if there's an overlap, follow the
 * pointer down a level.
 *
 * As an optimization, we stop scanning a block when we find a low key
 * that is greater than the query's high key.
 */
4627STATIC int
4628xfs_btree_overlapped_query_range(
4629 struct xfs_btree_cur *cur,
4630 union xfs_btree_key *low_key,
4631 union xfs_btree_key *high_key,
4632 xfs_btree_query_range_fn fn,
4633 void *priv)
4634{
4635 union xfs_btree_ptr ptr;
4636 union xfs_btree_ptr *pp;
4637 union xfs_btree_key rec_key;
4638 union xfs_btree_key rec_hkey;
4639 union xfs_btree_key *lkp;
4640 union xfs_btree_key *hkp;
4641 union xfs_btree_rec *recp;
4642 struct xfs_btree_block *block;
4643 int64_t ldiff;
4644 int64_t hdiff;
4645 int level;
4646 struct xfs_buf *bp;
4647 int i;
4648 int error;
4649
4650
4651 level = cur->bc_nlevels - 1;
4652 cur->bc_ops->init_ptr_from_cur(cur, &ptr);
4653 error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
4654 if (error)
4655 return error;
4656 xfs_btree_get_block(cur, level, &bp);
4657 trace_xfs_btree_overlapped_query_range(cur, level, bp);
4658#ifdef DEBUG
4659 error = xfs_btree_check_block(cur, block, level, bp);
4660 if (error)
4661 goto out;
4662#endif
4663 cur->bc_ptrs[level] = 1;
4664
4665 while (level < cur->bc_nlevels) {
4666 block = xfs_btree_get_block(cur, level, &bp);
4667
4668
4669 if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
4670pop_up:
4671 if (level < cur->bc_nlevels - 1)
4672 cur->bc_ptrs[level + 1]++;
4673 level++;
4674 continue;
4675 }
4676
4677 if (level == 0) {
4678
4679 recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
4680
4681 cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
4682 ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
4683 low_key);
4684
4685 cur->bc_ops->init_key_from_rec(&rec_key, recp);
4686 hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
4687 &rec_key);
4688
			/*
			 * If (record's high key >= query's low key) and
			 *    (query's high key >= record's low key), then
			 * this record overlaps the query range; callback.
			 */
4694 if (ldiff >= 0 && hdiff >= 0) {
4695 error = fn(cur, recp, priv);
4696 if (error)
4697 break;
4698 } else if (hdiff < 0) {
4699
4700 goto pop_up;
4701 }
4702 cur->bc_ptrs[level]++;
4703 continue;
4704 }
4705
4706
4707 lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
4708 hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
4709 pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
4710
4711 ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
4712 hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);
4713
		/*
		 * If (pointer's high key >= query's low key) and
		 *    (query's high key >= pointer's low key), then
		 * this node block overlaps the query range; follow the
		 * pointer down a level.
		 */
4719 if (ldiff >= 0 && hdiff >= 0) {
4720 level--;
4721 error = xfs_btree_lookup_get_block(cur, level, pp,
4722 &block);
4723 if (error)
4724 goto out;
4725 xfs_btree_get_block(cur, level, &bp);
4726 trace_xfs_btree_overlapped_query_range(cur, level, bp);
4727#ifdef DEBUG
4728 error = xfs_btree_check_block(cur, block, level, bp);
4729 if (error)
4730 goto out;
4731#endif
4732 cur->bc_ptrs[level] = 1;
4733 continue;
4734 } else if (hdiff < 0) {
4735
4736 goto pop_up;
4737 }
4738 cur->bc_ptrs[level]++;
4739 }
4740
4741out:
4742
	/*
	 * If we don't end this function with the cursor pointing at a record
	 * block, a subsequent non-error cursor deletion will not release
	 * node-level buffers, causing a buffer leak.  This is quite possible
	 * with a zero-results range query, so release the buffers if we
	 * aren't done with the cursor.
	 */
4749 if (cur->bc_bufs[0] == NULL) {
4750 for (i = 0; i < cur->bc_nlevels; i++) {
4751 if (cur->bc_bufs[i]) {
4752 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
4753 cur->bc_bufs[i] = NULL;
4754 cur->bc_ptrs[i] = 0;
4755 cur->bc_ra[i] = 0;
4756 }
4757 }
4758 }
4759
4760 return error;
4761}
4762
/*
 * Query a btree for all records overlapping a given interval of keys.  The
 * supplied function is called for each record found; it may return zero to
 * keep going, -ECANCELED to stop early, or a negative error code.  This
 * function returns -EINVAL if the keys are out of order, otherwise zero or
 * whatever the callback or btree walk returned.
 */
4769int
4770xfs_btree_query_range(
4771 struct xfs_btree_cur *cur,
4772 union xfs_btree_irec *low_rec,
4773 union xfs_btree_irec *high_rec,
4774 xfs_btree_query_range_fn fn,
4775 void *priv)
4776{
4777 union xfs_btree_rec rec;
4778 union xfs_btree_key low_key;
4779 union xfs_btree_key high_key;
4780
4781
4782 cur->bc_rec = *high_rec;
4783 cur->bc_ops->init_rec_from_cur(cur, &rec);
4784 cur->bc_ops->init_key_from_rec(&high_key, &rec);
4785
4786 cur->bc_rec = *low_rec;
4787 cur->bc_ops->init_rec_from_cur(cur, &rec);
4788 cur->bc_ops->init_key_from_rec(&low_key, &rec);
4789
4790
4791 if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
4792 return -EINVAL;
4793
4794 if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
4795 return xfs_btree_simple_query_range(cur, &low_key,
4796 &high_key, fn, priv);
4797 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
4798 fn, priv);
4799}
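
/*
 * Illustrative use (hypothetical caller): count every record overlapping a
 * key range by passing a callback with the xfs_btree_query_range_fn
 * signature:
 *
 *	static int count_recs_fn(struct xfs_btree_cur *cur,
 *				 union xfs_btree_rec *rec, void *priv)
 *	{
 *		(*(unsigned long long *)priv)++;
 *		return 0;
 *	}
 *
 *	unsigned long long	nr = 0;
 *
 *	error = xfs_btree_query_range(cur, &low, &high, count_recs_fn, &nr);
 */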
4800
/* Query a btree for all records. */
4802int
4803xfs_btree_query_all(
4804 struct xfs_btree_cur *cur,
4805 xfs_btree_query_range_fn fn,
4806 void *priv)
4807{
4808 union xfs_btree_key low_key;
4809 union xfs_btree_key high_key;
4810
4811 memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
4812 memset(&low_key, 0, sizeof(low_key));
4813 memset(&high_key, 0xFF, sizeof(high_key));
4814
4815 return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv);
4816}
4817
/*
 * Calculate the number of blocks needed to store a given number of records
 * in a short-form btree.
 */
4822unsigned long long
4823xfs_btree_calc_size(
4824 uint *limits,
4825 unsigned long long len)
4826{
4827 int level;
4828 int maxrecs;
4829 unsigned long long rval;
4830
4831 maxrecs = limits[0];
4832 for (level = 0, rval = 0; len > 1; level++) {
4833 len += maxrecs - 1;
4834 do_div(len, maxrecs);
4835 maxrecs = limits[1];
4836 rval += len;
4837 }
4838 return rval;
4839}
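
/*
 * Example (illustrative numbers only): with limits[] = { 336, 672 } and
 * len = 1,000,000 records, the loop above sums ceil(1000000 / 336) = 2977
 * leaf blocks, ceil(2977 / 672) = 5 node blocks and ceil(5 / 672) = 1 root
 * block, returning 2983 blocks in total.
 */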
4840
4841static int
4842xfs_btree_count_blocks_helper(
4843 struct xfs_btree_cur *cur,
4844 int level,
4845 void *data)
4846{
4847 xfs_extlen_t *blocks = data;
4848 (*blocks)++;
4849
4850 return 0;
4851}
4852
/* Count the blocks in a btree and return the result in *blocks. */
4854int
4855xfs_btree_count_blocks(
4856 struct xfs_btree_cur *cur,
4857 xfs_extlen_t *blocks)
4858{
4859 *blocks = 0;
4860 return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
4861 XFS_BTREE_VISIT_ALL, blocks);
4862}
4863
/* Compare two btree pointers. */
4865int64_t
4866xfs_btree_diff_two_ptrs(
4867 struct xfs_btree_cur *cur,
4868 const union xfs_btree_ptr *a,
4869 const union xfs_btree_ptr *b)
4870{
4871 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
4872 return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
4873 return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
4874}
4875
4876
4877STATIC int
4878xfs_btree_has_record_helper(
4879 struct xfs_btree_cur *cur,
4880 union xfs_btree_rec *rec,
4881 void *priv)
4882{
4883 return -ECANCELED;
4884}
4885
/* Is there a record covering a given range of keys? */
4887int
4888xfs_btree_has_record(
4889 struct xfs_btree_cur *cur,
4890 union xfs_btree_irec *low,
4891 union xfs_btree_irec *high,
4892 bool *exists)
4893{
4894 int error;
4895
4896 error = xfs_btree_query_range(cur, low, high,
4897 &xfs_btree_has_record_helper, NULL);
4898 if (error == -ECANCELED) {
4899 *exists = true;
4900 return 0;
4901 }
4902 *exists = false;
4903 return error;
4904}
4905
/* Are there more records in this btree? */
4907bool
4908xfs_btree_has_more_records(
4909 struct xfs_btree_cur *cur)
4910{
4911 struct xfs_btree_block *block;
4912 struct xfs_buf *bp;
4913
4914 block = xfs_btree_get_block(cur, 0, &bp);
4915
4916
4917 if (cur->bc_ptrs[0] < xfs_btree_get_numrecs(block))
4918 return true;
4919
4920
4921 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
4922 return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK);
4923 else
4924 return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
4925}
4926