#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"

struct workqueue_struct *xfs_alloc_wq;

#define	XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

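/*
 * Block number within the AG of the refcount btree root: the first block
 * past whichever other btree roots are present on this filesystem.
 */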
unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

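/*
 * The first AG block available for allocation in a freshly made AG, i.e.
 * the block past all of the fixed per-AG btree roots.
 */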
xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

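/*
 * Blocks withheld from the incore free space counters: for each AG, the
 * AGFL reserve plus four extra blocks, so that refilling the AGFL can
 * never be caught short by btree splits in a nearly full AG.
 */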
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}

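/*
 * When limiting an allocation to a single AG, not every block in the AG is
 * available: the sector-sized AG headers, the AGFL reserve, and the root
 * blocks of whichever btrees this filesystem carries are permanently in
 * use by metadata.  Return the usable remainder.
 */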
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4));
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;

	return mp->m_sb.sb_agblocks - blocks;
}

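/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */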
STATIC int
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

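/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */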
int
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

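/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */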
int
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

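/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */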
STATIC int
xfs_alloc_update(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

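/*
 * Get the data from the pointed-to record.
 */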
int
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	int			*stat)
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}

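/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */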
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,
	xfs_agblock_t	foundbno,
	xfs_extlen_t	foundlen,
	xfs_agblock_t	*resbno,
	xfs_extlen_t	*reslen,
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}

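/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */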
STATIC xfs_extlen_t
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,
	xfs_extlen_t	wantlen,
	xfs_extlen_t	alignment,
	int		datatype,
	xfs_agblock_t	freebno,
	xfs_extlen_t	freelen,
	xfs_agblock_t	*newbnop)
{
	xfs_agblock_t	freeend;
	xfs_agblock_t	newbno1;
	xfs_agblock_t	newbno2;
	xfs_extlen_t	newlen1 = 0;
	xfs_extlen_t	newlen2 = 0;
	xfs_agblock_t	wantend;
	bool		userdata = xfs_alloc_is_userdata(datatype);

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;

	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}

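/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.  If len is too small it is
 * returned unchanged.
 */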
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);

	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}

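/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */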
STATIC int
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,
	xfs_btree_cur_t	*bno_cur,
	xfs_agblock_t	fbno,
	xfs_extlen_t	flen,
	xfs_agblock_t	rbno,
	xfs_extlen_t	rlen,
	int		flags)
{
	int		error;
	int		i;
	xfs_agblock_t	nfbno1;
	xfs_agblock_t	nfbno2;
	xfs_extlen_t	nflen1 = 0;
	xfs_extlen_t	nflen2 = 0;
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(mp,
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}

	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

	if (nfbno1 == NULLAGBLOCK) {
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	} else {
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	return 0;
}

static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	int		i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return NULL;

	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
		return __this_address;

	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};

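/*
 * Read in the allocation group free block array.
 */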
int
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_agnumber_t	agno,
	xfs_buf_t	**bpp)
{
	xfs_buf_t	*bp;
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

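/*
 * Apply a delta of "len" blocks to the AG's free space counters, both the
 * on-disk AGF and the incore perag, and log the AGF change.
 */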
STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return -EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

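/*
 * Allocation group level functions.
 *
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the AG the extent will start.
 * The extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting AG block, or NULLAGBLOCK if we can't do it.
 */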
STATIC int
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)
{
	int		error = 0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);

	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
	ASSERT(args->agbno % args->alignment == 0);

	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
				       args->agbno, args->len, &args->oinfo);
		if (error)
			return error;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					       args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(args->mp, xs_allocx);
	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
	return error;
}

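/*
 * Allocate a variable extent at exactly agno/bno.
 * The extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting AG block (bno), or NULLAGBLOCK if we can't do it.
 */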
STATIC int
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)
{
	xfs_btree_cur_t	*bno_cur;
	xfs_btree_cur_t	*cnt_cur;
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	xfs_agblock_t	tbno;
	xfs_extlen_t	tlen;
	xfs_agblock_t	tend;
	int		i;
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	ASSERT(fbno <= args->agbno);

	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

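/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */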
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,
	struct xfs_btree_cur	**gcur,
	struct xfs_btree_cur	**scur,
	xfs_agblock_t		gdiff,
	xfs_agblock_t		*sbno,
	xfs_extlen_t		*slen,
	xfs_agblock_t		*sbnoa,
	xfs_extlen_t		*slena,
	int			dir)
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;
	unsigned		busy_gen;

	if (!gdiff)
		goto out_use_good;

	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen,
				sbnoa, slena, &busy_gen);

		if (!dir) {
			if (*sbnoa > args->max_agbno)
				goto out_use_good;
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa < args->min_agbno)
				goto out_use_good;
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->datatype, *sbnoa,
						       *slena, &new);

			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	return error;
}

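/*
 * Allocate a variable extent near bno in the allocation group agno.
 * The extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting AG block, or NULLAGBLOCK if we can't do it.
 */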
STATIC int
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)
{
	xfs_btree_cur_t	*bno_cur_gt;
	xfs_btree_cur_t	*bno_cur_lt;
	xfs_btree_cur_t	*cnt_cur;
	xfs_agblock_t	gtbno;
	xfs_agblock_t	gtbnoa;
	xfs_extlen_t	gtdiff;
	xfs_extlen_t	gtlen;
	xfs_extlen_t	gtlena;
	xfs_agblock_t	gtnew;
	int		error;
	int		i;
	int		j;
	xfs_agblock_t	ltbno;
	xfs_agblock_t	ltbnoa;
	xfs_extlen_t	ltdiff;
	xfs_extlen_t	ltlen;
	xfs_extlen_t	ltlena;
	xfs_agblock_t	ltnew;
	xfs_extlen_t	rlen;
	bool		busy;
	unsigned	busy_gen;
#ifdef DEBUG
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;

	dofirst = prandom_u32() & 1;
#endif

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;
	busy = false;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti = 0;
		xfs_extlen_t	blen = 0;
		xfs_agblock_t	bnew = 0;

#ifdef DEBUG
		if (dofirst)
			break;
#endif

		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {

			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena < args->minlen)
				continue;
			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}

		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;

		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;

		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);

		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);

		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}

	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup in the bno btree for the entry at or below our target.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the gt
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;

	/*
	 * Increment the gt cursor, so we will point at the entry just
	 * to the right of where the lt cursor points, if any.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}

	/*
	 * Loop going left with the leftward cursor, right with the rightward
	 * cursor, until either both directions give up or we find an entry
	 * at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
					&ltbnoa, &ltlena, &busy_gen);
			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i || ltbnoa < args->min_agbno) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
					&gtbnoa, &gtlena, &busy_gen);
			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i || gtbnoa > args->max_agbno) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, gtbnoa,
				gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

		if (busy) {
			trace_xfs_alloc_near_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     args->datatype, ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

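/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * The extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting AG block, or NULLAGBLOCK if we can't do it.
 */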
STATIC int
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)
{
	xfs_btree_cur_t	*bno_cur;
	xfs_btree_cur_t	*cnt_cur;
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;
	xfs_agblock_t	rbno;
	xfs_extlen_t	rlen;
	bool		busy;
	unsigned	busy_gen;

restart:
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;
	busy = false;

	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {

		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {

				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				xfs_extent_busy_flush(args->mp,
							args->pag, busy_gen);
				goto restart;
			}
		}
	}

	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;

	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);

	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(args->mp,
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}

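/*
 * Deal with the case where only small freespaces remain.  Either return
 * the contents of the last freespace record, or allocate space from the
 * freelist if there is nothing in the tree.
 */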
STATIC int
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,
	xfs_btree_cur_t	*ccur,
	xfs_agblock_t	*fbnop,
	xfs_extlen_t	*flenp,
	int		*stat)
{
	struct xfs_owner_info	oinfo;
	struct xfs_perag	*pag;
	int			error;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 &&
		 args->resv != XFS_AG_RESV_AGFL &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
			      xfs_alloc_allow_busy_reuse(args->datatype));

			if (xfs_alloc_is_userdata(args->datatype)) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				if (!bp) {
					error = -EFSCORRUPTED;
					goto error0;
				}
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(args->mp,
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);

			/*
			 * If we're feeding an AGFL block to something that
			 * doesn't live in the free space, we need to clear
			 * out the OWN_AG rmap and release the per-AG
			 * reservation that backed it.
			 */
			xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
					fbno, 1, &oinfo);
			if (error)
				goto error0;
			pag = xfs_perag_get(args->mp, args->agno);
			xfs_ag_resv_free_extent(pag, XFS_AG_RESV_AGFL,
						args->tp, 1);
			xfs_perag_put(pag);

			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}

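/*
 * Free the extent starting at agno/bno for length.
 */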
STATIC int
xfs_free_ag_extent(
	xfs_trans_t		*tp,
	xfs_buf_t		*agbp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type	type)
{
	xfs_btree_cur_t	*bno_cur;
	xfs_btree_cur_t	*cnt_cur;
	int		error;
	xfs_agblock_t	gtbno;
	xfs_extlen_t	gtlen;
	int		haveleft;
	int		haveright;
	int		i;
	xfs_agblock_t	ltbno;
	xfs_extlen_t	ltlen;
	xfs_mount_t	*mp;
	xfs_agblock_t	nbno;
	xfs_extlen_t	nlen;
	xfs_perag_t	*pag;

	bno_cur = cnt_cur = NULL;
	mp = tp->t_mountp;

	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
		if (error)
			goto error0;
	}

	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);

	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {

		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {

			XFS_WANT_CORRUPTED_GOTO(mp,
				ltbno + ltlen <= bno, error0);
		}
	}

	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {

		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		if (bno + len < gtbno)
			haveright = 0;
		else {

			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
		}
	}

	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);

	if (haveleft && haveright) {

		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
#ifdef DEBUG

		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(mp,
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif

		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}

	else if (haveleft) {

		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}

	else if (haveright) {

		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}

	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;

	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_update_counters(tp, pag, agbp, len);
	xfs_ag_resv_free_extent(pag, type, tp, len);
	xfs_perag_put(pag);
	if (error)
		goto error0;

	XFS_STATS_INC(mp, xs_freex);
	XFS_STATS_ADD(mp, xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
			haveleft, haveright);

	return 0;

error0:
	trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
			-1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}

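/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_ag_maxlevels.
 */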
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)
{
	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
			(mp->m_sb.sb_agblocks + 1) / 2);
}

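/*
 * Find the length of the longest extent in an AG.  The 'need' parameter
 * specifies how much space we're going to need for the AGFL and the
 * 'reserved' parameter tells us how many blocks in this AG are reserved for
 * metadata.
 */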
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_extlen_t		need,
	xfs_extlen_t		reserved)
{
	xfs_extlen_t		delta = 0;

	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);

	if (pag->pagf_longest > delta)
		return pag->pagf_longest - delta;

	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}

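/*
 * Compute the minimum length of the AGFL: enough blocks to cover a split
 * of each free space btree, plus the rmap btree where present.
 */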
unsigned int
xfs_alloc_min_freelist(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	unsigned int		min_free;

	min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
			 mp->m_ag_maxlevels);

	min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
			  mp->m_ag_maxlevels);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		min_free += min_t(unsigned int,
				  pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
				  mp->m_rmap_maxlevels);

	return min_free;
}

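/*
 * Check if the operation we are fixing up the freelist for should go ahead or
 * not. If we are freeing blocks, we always allow it, otherwise the allocation
 * is dependent on whether the size and shape of free space available will
 * permit the requested allocation to take place.
 */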
static bool
xfs_alloc_space_available(
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		min_free,
	int			flags)
{
	struct xfs_perag	*pag = args->pag;
	xfs_extlen_t		alloc_len, longest;
	xfs_extlen_t		reservation;
	int			available;

	if (flags & XFS_ALLOC_FLAG_FREEING)
		return true;

	reservation = xfs_ag_resv_needed(pag, args->resv);

	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
	longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
			reservation);
	if (longest < alloc_len)
		return false;

	available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
			  reservation - min_free - args->minleft);
	if (available < (int)max(args->total, alloc_len))
		return false;

	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
		args->maxlen = available;
		ASSERT(args->maxlen > 0);
		ASSERT(args->maxlen >= args->minlen);
	}

	return true;
}

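/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */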
int
xfs_alloc_fix_freelist(
	struct xfs_alloc_arg	*args,
	int			flags)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag = args->pag;
	struct xfs_trans	*tp = args->tp;
	struct xfs_buf		*agbp = NULL;
	struct xfs_buf		*agflbp = NULL;
	struct xfs_alloc_arg	targs;
	xfs_agblock_t		bno;
	xfs_extlen_t		need;
	int			error = 0;

	if (!pag->pagf_init) {
		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
		if (error)
			goto out_no_agbp;
		if (!pag->pagf_init) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			goto out_agbp_relse;
		}
	}

	if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
		goto out_agbp_relse;
	}

	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags |
			XFS_ALLOC_FLAG_CHECK))
		goto out_agbp_relse;

	if (!agbp) {
		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
		if (error)
			goto out_no_agbp;
		if (!agbp) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			goto out_no_agbp;
		}
	}

	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags))
		goto out_agbp_relse;

	memset(&targs, 0, sizeof(targs));
	if (flags & XFS_ALLOC_FLAG_NORMAP)
		xfs_rmap_skip_owner_update(&targs.oinfo);
	else
		xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
		struct xfs_buf	*bp;

		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
		if (error)
			goto out_agbp_relse;
		error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
					   &targs.oinfo, XFS_AG_RESV_AGFL);
		if (error)
			goto out_agbp_relse;
		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
		if (!bp) {
			error = -EFSCORRUPTED;
			goto out_agbp_relse;
		}
		xfs_trans_binval(tp, bp);
	}

	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.alignment = targs.minlen = targs.prod = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
	if (error)
		goto out_agbp_relse;

	while (pag->pagf_flcount < need) {
		targs.agbno = 0;
		targs.maxlen = need - pag->pagf_flcount;
		targs.resv = XFS_AG_RESV_AGFL;

		error = xfs_alloc_ag_vextent(&targs);
		if (error)
			goto out_agflbp_relse;

		if (targs.agbno == NULLAGBLOCK) {
			if (flags & XFS_ALLOC_FLAG_FREEING)
				break;
			goto out_agflbp_relse;
		}

		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			error = xfs_alloc_put_freelist(tp, agbp,
							agflbp, bno, 0);
			if (error)
				goto out_agflbp_relse;
		}
	}
	xfs_trans_brelse(tp, agflbp);
	args->agbp = agbp;
	return 0;

out_agflbp_relse:
	xfs_trans_brelse(tp, agflbp);
out_agbp_relse:
	if (agbp)
		xfs_trans_brelse(tp, agbp);
out_no_agbp:
	args->agbp = NULL;
	return error;
}

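/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 */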
int
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,
	xfs_buf_t	*agbp,
	xfs_agblock_t	*bnop,
	int		btreeblk)
{
	xfs_agf_t	*agf;
	xfs_buf_t	*agflbp;
	xfs_agblock_t	bno;
	__be32		*agfl_bno;
	int		error;
	int		logflags;
	xfs_mount_t	*mp = tp->t_mountp;
	xfs_perag_t	*pag;

	agf = XFS_BUF_TO_AGF(agbp);
	if (!agf->agf_flcount) {
		*bnop = NULLAGBLOCK;
		return 0;
	}

	error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
			&agflbp);
	if (error)
		return error;

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
	be32_add_cpu(&agf->agf_flfirst, 1);
	xfs_trans_brelse(tp, agflbp);
	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
		agf->agf_flfirst = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	be32_add_cpu(&agf->agf_flcount, -1);
	xfs_trans_agflist_delta(tp, -1);
	pag->pagf_flcount--;
	xfs_perag_put(pag);

	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, 1);
		pag->pagf_btreeblks++;
		logflags |= XFS_AGF_BTREEBLKS;
	}

	xfs_alloc_log_agf(tp, agbp, logflags);
	*bnop = bno;

	return 0;
}

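/*
 * Log the given fields from the agf structure.
 */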
void
xfs_alloc_log_agf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	int		fields)
{
	int	first;
	int	last;
	static const short	offsets[] = {
		offsetof(xfs_agf_t, agf_magicnum),
		offsetof(xfs_agf_t, agf_versionnum),
		offsetof(xfs_agf_t, agf_seqno),
		offsetof(xfs_agf_t, agf_length),
		offsetof(xfs_agf_t, agf_roots[0]),
		offsetof(xfs_agf_t, agf_levels[0]),
		offsetof(xfs_agf_t, agf_flfirst),
		offsetof(xfs_agf_t, agf_fllast),
		offsetof(xfs_agf_t, agf_flcount),
		offsetof(xfs_agf_t, agf_freeblks),
		offsetof(xfs_agf_t, agf_longest),
		offsetof(xfs_agf_t, agf_btreeblks),
		offsetof(xfs_agf_t, agf_uuid),
		offsetof(xfs_agf_t, agf_rmap_blocks),
		offsetof(xfs_agf_t, agf_refcount_blocks),
		offsetof(xfs_agf_t, agf_refcount_root),
		offsetof(xfs_agf_t, agf_refcount_level),
		/* needed so that we don't log the whole rest of the structure: */
		offsetof(xfs_agf_t, agf_spare64),
		sizeof(xfs_agf_t)
	};

	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);

	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}

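/*
 * Interface for inode allocation to force the pag data to be initialized.
 */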
int
xfs_alloc_pagf_init(
	xfs_mount_t		*mp,
	xfs_trans_t		*tp,
	xfs_agnumber_t		agno,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}

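/*
 * Put the block on the freelist for the allocation group.
 */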
int
xfs_alloc_put_freelist(
	xfs_trans_t		*tp,
	xfs_buf_t		*agbp,
	xfs_buf_t		*agflbp,
	xfs_agblock_t		bno,
	int			btreeblk)
{
	xfs_agf_t		*agf;
	__be32			*blockp;
	int			error;
	int			logflags;
	xfs_mount_t		*mp;
	xfs_perag_t		*pag;
	__be32			*agfl_bno;
	int			startoff;

	agf = XFS_BUF_TO_AGF(agbp);
	mp = tp->t_mountp;

	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	be32_add_cpu(&agf->agf_fllast, 1);
	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
		agf->agf_fllast = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	be32_add_cpu(&agf->agf_flcount, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;

	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, -1);
		pag->pagf_btreeblks--;
		logflags |= XFS_AGF_BTREEBLKS;
	}
	xfs_perag_put(pag);

	xfs_alloc_log_agf(tp, agbp, logflags);

	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
	*blockp = cpu_to_be32(bno);
	startoff = (char *)blockp - (char *)agflbp->b_addr;

	xfs_alloc_log_agf(tp, agbp, logflags);

	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
	xfs_trans_log_buf(tp, agflbp, startoff,
			  startoff + sizeof(xfs_agblock_t) - 1);
	return 0;
}

static xfs_failaddr_t
xfs_agf_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp,
				be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
			return __this_address;
	}

	if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
	      XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
	      be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
	      be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
	      be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
		return __this_address;

	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
		return __this_address;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
		return __this_address;

	if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
		return __this_address;

	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
		return __this_address;

	return NULL;
}

static void
xfs_agf_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_failaddr_t	fa;

	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agf_verify(bp);
		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	fa = xfs_agf_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
}

const struct xfs_buf_ops xfs_agf_buf_ops = {
	.name = "xfs_agf",
	.verify_read = xfs_agf_read_verify,
	.verify_write = xfs_agf_write_verify,
	.verify_struct = xfs_agf_verify,
};

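/*
 * Read in the allocation group header (free/alloc section).
 */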
int
xfs_read_agf(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	int			flags,
	struct xfs_buf		**bpp)
{
	int			error;

	trace_xfs_read_agf(mp, agno);

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
	if (error)
		return error;
	if (!*bpp)
		return 0;

	ASSERT(!(*bpp)->b_error);
	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
	return 0;
}

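/*
 * Read in the allocation group header (free/alloc section) and initialise the
 * perag structure if necessary.
 */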
2549int
2550xfs_alloc_read_agf(
2551 struct xfs_mount *mp,
2552 struct xfs_trans *tp,
2553 xfs_agnumber_t agno,
2554 int flags,
2555 struct xfs_buf **bpp)
2556{
2557 struct xfs_agf *agf;
2558 struct xfs_perag *pag;
2559 int error;
2560
2561 trace_xfs_alloc_read_agf(mp, agno);
2562
2563 ASSERT(agno != NULLAGNUMBER);
2564 error = xfs_read_agf(mp, tp, agno,
2565 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2566 bpp);
2567 if (error)
2568 return error;
2569 if (!*bpp)
2570 return 0;
2571 ASSERT(!(*bpp)->b_error);
2572
2573 agf = XFS_BUF_TO_AGF(*bpp);
2574 pag = xfs_perag_get(mp, agno);
2575 if (!pag->pagf_init) {
2576 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2577 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2578 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2579 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2580 pag->pagf_levels[XFS_BTNUM_BNOi] =
2581 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2582 pag->pagf_levels[XFS_BTNUM_CNTi] =
2583 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2584 pag->pagf_levels[XFS_BTNUM_RMAPi] =
2585 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
2586 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
2587 spin_lock_init(&pag->pagb_lock);
2588 pag->pagb_count = 0;
2589 pag->pagb_tree = RB_ROOT;
2590 pag->pagf_init = 1;
2591 }
2592#ifdef DEBUG
2593 else if (!XFS_FORCED_SHUTDOWN(mp)) {
2594 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2595 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2596 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2597 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2598 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2599 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2600 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2601 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2602 }
2603#endif
2604 xfs_perag_put(pag);
2605 return 0;
2606}
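
/*
 * Example usage (an illustrative sketch only, not called from this file;
 * "tp", "agno" and "longest" are hypothetical caller-side names): read the
 * AGF for one AG and pull out the longest-free-extent counter.
 *
 *	struct xfs_buf	*agbp;
 *	int		error;
 *
 *	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 *	if (error)
 *		return error;
 *	longest = be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest);
 *	xfs_trans_brelse(tp, agbp);
 */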

/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 */
int
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_agblock_t	agsize;		/* allocation group size */
	int		error;
	int		flags;		/* XFS_ALLOC_FLAG_... locking flags */
	xfs_mount_t	*mp;		/* mount structure pointer */
	xfs_agnumber_t	sagno;		/* starting allocation group number */
	xfs_alloctype_t	type;		/* input allocation type */
	int		bump_rotor = 0;
	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		error = xfs_alloc_fix_freelist(args, 0);
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		if (!args->agbp) {
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anything-available if it
		 * fails.
		 */
		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			error = xfs_alloc_fix_freelist(args, flags);
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * For the first allocation, we can try any AG to get
			 * space.  However, if we already have allocated a
			 * block, we don't want to try AGs whose number is below
			 * sagno. Otherwise, we may end up with out-of-order
			 * locking of AGF, which might cause deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (flags == 0) {
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}

				flags = 0;
				if (type == XFS_ALLOCTYPE_START_BNO) {
					args->agbno = XFS_FSB_TO_AGBNO(mp,
						args->fsbno);
					args->type = XFS_ALLOCTYPE_NEAR_BNO;
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor) {
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif

		/* Zero the extent if we were asked to do so */
		if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(args->ip, args->fsbno, args->len);
			if (error)
				goto error0;
		}

	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}
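
/*
 * Example usage (an illustrative sketch, not a real caller in this file;
 * "tp", "mp", "target_fsb", "maxlen" and "oinfo" are hypothetical names):
 * ask for up to maxlen blocks near target_fsb within its AG.
 *
 *	struct xfs_alloc_arg	args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.tp = tp;
 *	args.mp = mp;
 *	args.fsbno = target_fsb;
 *	args.type = XFS_ALLOCTYPE_NEAR_BNO;
 *	args.minlen = 1;
 *	args.maxlen = maxlen;
 *	args.prod = 1;
 *	args.alignment = 1;
 *	args.oinfo = oinfo;
 *	args.resv = XFS_AG_RESV_NONE;
 *	error = xfs_alloc_vextent(&args);
 *	if (!error && args.fsbno != NULLFSBLOCK)
 *		(success: the extent is args.fsbno for args.len blocks)
 */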

/* Ensure that the freelist is at full capacity. */
int
xfs_free_extent_fix_freelist(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		**agbp)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(struct xfs_alloc_arg));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.agno = agno;

	/*
	 * Validate that the block number is legal - this enables us to detect
	 * and handle a silent filesystem corruption rather than crashing.
	 */
	if (args.agno >= args.mp->m_sb.sb_agcount)
		return -EFSCORRUPTED;

	args.pag = xfs_perag_get(args.mp, args.agno);
	ASSERT(args.pag);

	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
	if (error)
		goto out;

	*agbp = args.agbp;
out:
	xfs_perag_put(args.pag);
	return error;
}

/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */
int
xfs_free_extent(
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_fsblock_t		bno,	/* starting block number of extent */
	xfs_extlen_t		len,	/* length of extent */
	struct xfs_owner_info	*oinfo,	/* extent owner */
	enum xfs_ag_resv_type	type)	/* block reservation type */
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*agbp;
	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, bno);
	xfs_agblock_t		agbno = XFS_FSB_TO_AGBNO(mp, bno);
	int			error;

	ASSERT(len != 0);
	ASSERT(type != XFS_AG_RESV_AGFL);

	if (XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_FREE_EXTENT))
		return -EIO;

	error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
	if (error)
		return error;

	XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);

	/* validate the extent size is legal now we have the agf locked */
	XFS_WANT_CORRUPTED_GOTO(mp,
		agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
		err);

	error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
	if (error)
		goto err;

	xfs_extent_busy_insert(tp, agno, agbno, len, 0);
	return 0;

err:
	xfs_trans_brelse(tp, agbp);
	return error;
}
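
/*
 * Example usage (an illustrative sketch; "tp", "ino", "offset", "fsbno" and
 * "len" are hypothetical caller-side names): free an extent that was mapped
 * into an inode's data fork, with no special per-AG reservation.
 *
 *	struct xfs_owner_info	oinfo;
 *	int			error;
 *
 *	xfs_rmap_ino_owner(&oinfo, ino, XFS_DATA_FORK, offset);
 *	error = xfs_free_extent(tp, fsbno, len, &oinfo, XFS_AG_RESV_NONE);
 */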

struct xfs_alloc_query_range_info {
	xfs_alloc_query_range_fn	fn;
	void				*priv;
};

/* Format btree record and pass to our callback. */
STATIC int
xfs_alloc_query_range_helper(
	struct xfs_btree_cur		*cur,
	union xfs_btree_rec		*rec,
	void				*priv)
{
	struct xfs_alloc_query_range_info	*query = priv;
	struct xfs_alloc_rec_incore		irec;

	irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
	irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
	return query->fn(cur, &irec, query->priv);
}

/* Find all free space within a given range of blocks. */
int
xfs_alloc_query_range(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*low_rec,
	struct xfs_alloc_rec_incore	*high_rec,
	xfs_alloc_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_irec		low_brec;
	union xfs_btree_irec		high_brec;
	struct xfs_alloc_query_range_info	query;

	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
	low_brec.a = *low_rec;
	high_brec.a = *high_rec;
	query.priv = priv;
	query.fn = fn;
	return xfs_btree_query_range(cur, &low_brec, &high_brec,
			xfs_alloc_query_range_helper, &query);
}
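
/*
 * Example usage (an illustrative sketch; the helper and its caller are
 * hypothetical): count the free extents that fall inside an AG block range
 * by walking the by-bno btree.
 *
 *	STATIC int
 *	xfs_count_free_helper(
 *		struct xfs_btree_cur		*cur,
 *		struct xfs_alloc_rec_incore	*rec,
 *		void				*priv)
 *	{
 *		(*(uint64_t *)priv)++;
 *		return 0;
 *	}
 *
 *	struct xfs_alloc_rec_incore	low = { .ar_startblock = start };
 *	struct xfs_alloc_rec_incore	high = { .ar_startblock = end };
 *	uint64_t			nr = 0;
 *
 *	error = xfs_alloc_query_range(cur, &low, &high,
 *			xfs_count_free_helper, &nr);
 */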

/* Find all free space records. */
int
xfs_alloc_query_all(
	struct xfs_btree_cur		*cur,
	xfs_alloc_query_range_fn	fn,
	void				*priv)
{
	struct xfs_alloc_query_range_info	query;

	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
	query.priv = priv;
	query.fn = fn;
	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
}

/* Find the size of the AG, in blocks. */
xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	ASSERT(agno < mp->m_sb.sb_agcount);

	/* The last AG can be shorter than all the others. */
	if (agno < mp->m_sb.sb_agcount - 1)
		return mp->m_sb.sb_agblocks;
	return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
}

/*
 * Verify that an AG block number pointer neither points outside the AG
 * nor points at static metadata.
 */
bool
xfs_verify_agbno(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno)
{
	xfs_agblock_t		eoag;

	eoag = xfs_ag_block_count(mp, agno);
	if (agbno >= eoag)
		return false;
	if (agbno <= XFS_AGFL_BLOCK(mp))
		return false;
	return true;
}

/*
 * Verify that an FS block number pointer neither points outside the
 * filesystem nor points at static AG metadata.
 */
bool
xfs_verify_fsbno(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno)
{
	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, fsbno);

	if (agno >= mp->m_sb.sb_agcount)
		return false;
	return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
}
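
/*
 * Example usage (an illustrative sketch; "fsbno" is a hypothetical local):
 * a structure verifier would reject a bad on-disk block pointer like this
 * before following it.
 *
 *	if (!xfs_verify_fsbno(mp, fsbno))
 *		return __this_address;
 */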

/* Is there a record covering a given extent? */
int
xfs_alloc_has_record(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	union xfs_btree_irec	low;
	union xfs_btree_irec	high;

	memset(&low, 0, sizeof(low));
	low.a.ar_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.a.ar_startblock = bno + len - 1;

	return xfs_btree_has_record(cur, &low, &high, exists);
}
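
/*
 * Example usage (an illustrative sketch; "agbno", "len" and "is_free" are
 * hypothetical): a cross-reference style check that a supposedly allocated
 * extent does not overlap anything in the free space btree.
 *
 *	bool	is_free;
 *
 *	error = xfs_alloc_has_record(cur, agbno, len, &is_free);
 *	if (!error && is_free)
 *		(the extent overlaps free space - corruption)
 */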