/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"

#define	XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
STATIC void xfs_alloc_busy_trim(struct xfs_alloc_arg *,
		xfs_agblock_t, xfs_extlen_t, xfs_agblock_t *, xfs_extlen_t *);

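/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */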
STATIC int
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

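/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */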
STATIC int
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

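/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */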
int
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

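/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */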
STATIC int
xfs_alloc_update(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

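/*
 * Get the data from the pointed-to record.
 */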
int
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	int			*stat)
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}

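/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */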
STATIC void
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,
	xfs_agblock_t	foundbno,
	xfs_extlen_t	foundlen,
	xfs_agblock_t	*resbno,
	xfs_extlen_t	*reslen)
{
	xfs_agblock_t	bno;
	xfs_extlen_t	len;

	/* Trim busy sections out of found extent */
	xfs_alloc_busy_trim(args, foundbno, foundlen, &bno, &len);

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
		xfs_extlen_t	diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}
}

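/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */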
STATIC xfs_extlen_t
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,
	xfs_extlen_t	wantlen,
	xfs_extlen_t	alignment,
	xfs_agblock_t	freebno,
	xfs_extlen_t	freelen,
	xfs_agblock_t	*newbnop)
{
	xfs_agblock_t	freeend;
	xfs_agblock_t	newbno1;
	xfs_agblock_t	newbno2;
	xfs_extlen_t	newlen1 = 0;
	xfs_extlen_t	newlen2 = 0;
	xfs_agblock_t	wantend;

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	if (freebno >= wantbno) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}

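/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */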
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod) {
		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
			return;
	} else {
		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
		    (int)args->minlen)
			return;
	}
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	args->len = rlen;
}

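/*
 * Fix up length if there is too little space left in the a.g.
 * Return 1 if ok, 0 if too little, should give up.
 */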
STATIC int
xfs_alloc_fix_minleft(
	xfs_alloc_arg_t	*args)
{
	xfs_agf_t	*agf;
	int		diff;

	if (args->minleft == 0)
		return 1;
	agf = XFS_BUF_TO_AGF(args->agbp);
	diff = be32_to_cpu(agf->agf_freeblks)
		- args->len - args->minleft;
	if (diff >= 0)
		return 1;
	args->len += diff;
	if (args->len >= args->minlen)
		return 1;
	args->agbno = NULLAGBLOCK;
	return 0;
}

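/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */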
STATIC int
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;
	int		i;
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1 = 0;	/* first new free length */
	xfs_extlen_t	nflen2 = 0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later and be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}

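/*
 * Read in the allocation group free block array.
 */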
STATIC int
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_agnumber_t	agno,
	xfs_buf_t	**bpp)
{
	xfs_buf_t	*bp;
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (error)
		return error;
	ASSERT(!xfs_buf_geterror(bp));
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

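/*
 * Update the per-ag structure and the AGF to reflect a change of len
 * blocks in the free block count (len is positive for a free, negative
 * for an allocation).
 */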
STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

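/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */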
STATIC int
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)
{
	int		error = 0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || !args->isfl);
	ASSERT(args->agbno % args->alignment == 0);

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_alloc_busy_search(args->mp, args->agno,
					      args->agbno, args->len));
	}

	if (!args->isfl) {
		xfs_trans_mod_sb(args->tp, args->wasdel ?
				 XFS_TRANS_SB_RES_FDBLOCKS :
				 XFS_TRANS_SB_FDBLOCKS,
				 -((long)(args->len)));
	}

	XFS_STATS_INC(xs_allocx);
	XFS_STATS_ADD(xs_allocb, args->len);
	return error;
}

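/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */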
STATIC int
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of trimmed extent */
	xfs_extlen_t	tlen;	/* length of trimmed extent */
	xfs_agblock_t	tend;	/* end block of trimmed extent */
	int		i;	/* success/failure of operation */

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len.
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

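/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */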
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the
	 * end of the tree.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * This extent is usable; compare diffs and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment, *sbnoa,
						       *slena, &new);

			/*
			 * Choose the closer extent and invalidate the other
			 * cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller always ignores the cursors on error */
	return error;
}

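/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */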
STATIC int
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;
	int		i;
	int		j;
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
	int		forced = 0;
#if defined(DEBUG) && defined(__KERNEL__)
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = random32() & 1;
#endif

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti = 0;
		xfs_extlen_t	blen = 0;
		xfs_agblock_t	bnew = 0;

#if defined(DEBUG) && defined(__KERNEL__)
		if (!dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena < args->minlen)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, ltbnoa, ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;
		if (!xfs_alloc_fix_minleft(args)) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_nominleft(args);
			return 0;
		}
		blen = args->len;
		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena >= args->minlen)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, gtbno, gtlen,
						  &gtbnoa, &gtlena);
			if (gtlena >= args->minlen)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, ltbnoa, ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, gtbnoa, gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		if (!forced++) {
			trace_xfs_alloc_near_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}

		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		trace_xfs_alloc_near_nominleft(args);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

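/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */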
STATIC int
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */
	int		forced = 0;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none or we have busy extents that we cannot allocate from, then
	 * we have to settle for a smaller extent.  In the case that there are
	 * no large extents, this will return the last entry in the tree
	 * unless the tree is empty.  In the case that there are only busy
	 * large extents, this will return the largest small extent unless
	 * there are no smaller extents available.
	 */
	if (!i || forced > 1) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 * If we are at low space, don't check, or if we fall off
		 * the end of the btree, turn off the busy check and
		 * restart.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying.  If we've been here before, forcing
				 * the log isn't making the extents available,
				 * which means they have probably been freed in
				 * this transaction.  In that case, we have to
				 * give up on them and we'll attempt a minlen
				 * allocation the next time around.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				if (!forced++)
					xfs_log_force(args->mp, XFS_LOG_SYNC);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (!forced++) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	if (!xfs_alloc_fix_minleft(args))
		goto out_nominleft;
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}

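/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */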
STATIC int
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_alloc_busy_reuse(args->mp, args->agno, fbno, 1,
					     args->userdata);

			if (args->userdata) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);
			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}

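/*
 * Free the extent starting at agno/bno for length.
 */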
STATIC int
xfs_free_ag_extent(
	xfs_trans_t	*tp,
	xfs_buf_t	*agbp,
	xfs_agnumber_t	agno,
	xfs_agblock_t	bno,
	xfs_extlen_t	len,
	int		isfl)
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;
	xfs_agblock_t	gtbno;		/* start of right neighbor block */
	xfs_extlen_t	gtlen;		/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;
	xfs_agblock_t	ltbno;		/* start of left neighbor block */
	xfs_extlen_t	ltlen;		/* length of left neighbor block */
	xfs_mount_t	*mp;
	xfs_agblock_t	nbno;		/* new starting block of freespace */
	xfs_extlen_t	nlen;		/* new length of freespace */
	xfs_perag_t	*pag;		/* per allocation group data */

	mp = tp->t_mountp;
	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	cnt_cur = NULL;
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size
	 * tree.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_update_counters(tp, pag, agbp, len);
	xfs_perag_put(pag);
	if (error)
		goto error0;

	if (!isfl)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
	XFS_STATS_INC(xs_freex);
	XFS_STATS_ADD(xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);

	return 0;

error0:
	trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}

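/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_ag_maxlevels.
 */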
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)
{
	int		level;
	uint		maxblocks;
	uint		maxleafents;
	int		minleafrecs;
	int		minnoderecs;

	maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
	minleafrecs = mp->m_alloc_mnr[0];
	minnoderecs = mp->m_alloc_mnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	mp->m_ag_maxlevels = level;
}

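/*
 * Find the length of the longest extent in an AG.
 */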
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	xfs_extlen_t		need, delta = 0;

	need = XFS_MIN_FREELIST_PAG(pag, mp);
	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	if (pag->pagf_longest > delta)
		return pag->pagf_longest - delta;
	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}

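/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */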
STATIC int
xfs_alloc_fix_freelist(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	int		flags)	/* XFS_ALLOC_FLAG_... */
{
	xfs_buf_t	*agbp;	/* agf buffer pointer */
	xfs_agf_t	*agf;	/* a.g. freespace structure pointer */
	xfs_buf_t	*agflbp;/* agfl buffer pointer */
	xfs_agblock_t	bno;	/* freelist block */
	xfs_extlen_t	delta;	/* new blocks needed in freelist */
	int		error;	/* error result code */
	xfs_extlen_t	longest;/* longest extent in allocation group */
	xfs_mount_t	*mp;	/* file system mount point structure */
	xfs_extlen_t	need;	/* total blocks needed in freelist */
	xfs_perag_t	*pag;	/* per-ag information structure */
	xfs_alloc_arg_t	targs;	/* local allocation arguments */
	xfs_trans_t	*tp;	/* transaction pointer */

	mp = args->mp;

	pag = args->pag;
	tp = args->tp;
	if (!pag->pagf_init) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (!pag->pagf_init) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			args->agbp = NULL;
			return 0;
		}
	} else
		agbp = NULL;

	/*
	 * If this is a metadata preferred pag and we are user data
	 * then try somewhere else if we are not being asked to
	 * try harder at this point.
	 */
	if (pag->pagf_metadata && args->userdata &&
	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
		args->agbp = NULL;
		return 0;
	}

	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
		/*
		 * If it looks like there isn't a long enough extent, or
		 * enough total blocks, reject it.
		 */
		need = XFS_MIN_FREELIST_PAG(pag, mp);
		longest = xfs_alloc_longest_free_extent(mp, pag);
		if ((args->minlen + args->alignment + args->minalignslop - 1) >
				longest ||
		    ((int)(pag->pagf_freeblks + pag->pagf_flcount -
			   need - args->total) < (int)args->minleft)) {
			if (agbp)
				xfs_trans_brelse(tp, agbp);
			args->agbp = NULL;
			return 0;
		}
	}

	/*
	 * Get the a.g. freespace buffer.
	 * Can fail if we're not blocking on locks, and it's held.
	 */
	if (agbp == NULL) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (agbp == NULL) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Figure out how many blocks we should have in the freelist.
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	need = XFS_MIN_FREELIST(agf, mp);
	/*
	 * If there isn't enough total or single-extent, reject it.
	 */
	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
		delta = need > be32_to_cpu(agf->agf_flcount) ?
			(need - be32_to_cpu(agf->agf_flcount)) : 0;
		longest = be32_to_cpu(agf->agf_longest);
		longest = (longest > delta) ? (longest - delta) :
			(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
		if ((args->minlen + args->alignment + args->minalignslop - 1) >
				longest ||
		    ((int)(be32_to_cpu(agf->agf_freeblks) +
		     be32_to_cpu(agf->agf_flcount) - need - args->total) <
				(int)args->minleft)) {
			xfs_trans_brelse(tp, agbp);
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Make the freelist shorter if it's too long.
	 */
	while (be32_to_cpu(agf->agf_flcount) > need) {
		xfs_buf_t	*bp;

		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
		if (error)
			return error;
		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
			return error;
		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Initialize the args structure.
	 */
	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.mod = targs.minleft = targs.wasdel = targs.userdata =
		targs.minalignslop = 0;
	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
		return error;
	/*
	 * Make the freelist longer if it's too short.
	 */
	while (be32_to_cpu(agf->agf_flcount) < need) {
		targs.agbno = 0;
		targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
		/*
		 * Allocate as many blocks as possible at once.
		 */
		if ((error = xfs_alloc_ag_vextent(&targs))) {
			xfs_trans_brelse(tp, agflbp);
			return error;
		}
		/*
		 * Stop if we run out.  Won't happen if callers are obeying
		 * the restrictions correctly.  Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK) {
			if (flags & XFS_ALLOC_FLAG_FREEING)
				break;
			xfs_trans_brelse(tp, agflbp);
			args->agbp = NULL;
			return 0;
		}
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			error = xfs_alloc_put_freelist(tp, agbp,
							agflbp, bno, 0);
			if (error)
				return error;
		}
	}
	xfs_trans_brelse(tp, agflbp);
	args->agbp = agbp;
	return 0;
}

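/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 */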
int
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,
	xfs_buf_t	*agbp,
	xfs_agblock_t	*bnop,
	int		btreeblk)
{
	xfs_agf_t	*agf;
	xfs_agfl_t	*agfl;
	xfs_buf_t	*agflbp;
	xfs_agblock_t	bno;
	int		error;
	int		logflags;
	xfs_mount_t	*mp;
	xfs_perag_t	*pag;

	agf = XFS_BUF_TO_AGF(agbp);
	/*
	 * Freelist is empty, give up.
	 */
	if (!agf->agf_flcount) {
		*bnop = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Read the array of free blocks.
	 */
	mp = tp->t_mountp;
	if ((error = xfs_alloc_read_agfl(mp, tp,
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	/*
	 * Get the block number and update the data structures.
	 */
	bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
	be32_add_cpu(&agf->agf_flfirst, 1);
	xfs_trans_brelse(tp, agflbp);
	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
		agf->agf_flfirst = 0;

	/*
	 * Hold the perag reference until all per-ag counters have been
	 * updated; the original code dropped it before touching
	 * pagf_btreeblks below, which is a use after put.
	 */
	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	be32_add_cpu(&agf->agf_flcount, -1);
	xfs_trans_agflist_delta(tp, -1);
	pag->pagf_flcount--;

	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, 1);
		pag->pagf_btreeblks++;
		logflags |= XFS_AGF_BTREEBLKS;
	}
	xfs_perag_put(pag);

	xfs_alloc_log_agf(tp, agbp, logflags);
	*bnop = bno;

	return 0;
}

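/*
 * Log the given fields from the agf structure.
 */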
void
xfs_alloc_log_agf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	int		fields)
{
	int		first;		/* first byte offset */
	int		last;		/* last byte offset */
	static const short	offsets[] = {
		offsetof(xfs_agf_t, agf_magicnum),
		offsetof(xfs_agf_t, agf_versionnum),
		offsetof(xfs_agf_t, agf_seqno),
		offsetof(xfs_agf_t, agf_length),
		offsetof(xfs_agf_t, agf_roots[0]),
		offsetof(xfs_agf_t, agf_levels[0]),
		offsetof(xfs_agf_t, agf_flfirst),
		offsetof(xfs_agf_t, agf_fllast),
		offsetof(xfs_agf_t, agf_flcount),
		offsetof(xfs_agf_t, agf_freeblks),
		offsetof(xfs_agf_t, agf_longest),
		offsetof(xfs_agf_t, agf_btreeblks),
		sizeof(xfs_agf_t)
	};

	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);

	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}

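/*
 * Interface for inode allocation to force the pag data to be initialized.
 */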
int
xfs_alloc_pagf_init(
	xfs_mount_t		*mp,
	xfs_trans_t		*tp,
	xfs_agnumber_t		agno,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}

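/*
 * Put the block on the freelist for the allocation group.
 */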
int
xfs_alloc_put_freelist(
	xfs_trans_t		*tp,
	xfs_buf_t		*agbp,
	xfs_buf_t		*agflbp,
	xfs_agblock_t		bno,
	int			btreeblk)
{
	xfs_agf_t		*agf;
	xfs_agfl_t		*agfl;
	__be32			*blockp;
	int			error;
	int			logflags;
	xfs_mount_t		*mp;
	xfs_perag_t		*pag;

	agf = XFS_BUF_TO_AGF(agbp);
	mp = tp->t_mountp;

	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	be32_add_cpu(&agf->agf_fllast, 1);
	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
		agf->agf_fllast = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	be32_add_cpu(&agf->agf_flcount, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;

	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, -1);
		pag->pagf_btreeblks--;
		logflags |= XFS_AGF_BTREEBLKS;
	}
	xfs_perag_put(pag);

	xfs_alloc_log_agf(tp, agbp, logflags);

	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
	blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
	*blockp = cpu_to_be32(bno);
	xfs_alloc_log_agf(tp, agbp, logflags);
	xfs_trans_log_buf(tp, agflbp,
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
			sizeof(xfs_agblock_t) - 1));
	return 0;
}

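/*
 * Read in the allocation group header (free/alloc section).
 */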
int
xfs_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_BUF_ flags */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf	*agf;		/* ag freelist header */
	int		agf_ok;		/* set if agf is consistent */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), flags, bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;

	ASSERT(!(*bpp)->b_error);
	agf = XFS_BUF_TO_AGF(*bpp);

	/*
	 * Validate the magic number of the agf block.
	 */
	agf_ok =
		agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_seqno) == agno;
	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
						be32_to_cpu(agf->agf_length);
	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
			XFS_RANDOM_ALLOC_READ_AGF))) {
		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
				     XFS_ERRLEVEL_LOW, mp, agf);
		xfs_trans_brelse(tp, *bpp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
	return 0;
}

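/*
 * Read in the allocation group header (free/alloc section) and
 * initialise the per-ag data if this is the first read of the AGF
 * in this mount.
 */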
int
xfs_alloc_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_ALLOC_FLAG_... */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf		*agf;		/* ag freelist header */
	struct xfs_perag	*pag;		/* per allocation group data */
	int			error;

	ASSERT(agno != NULLAGNUMBER);

	error = xfs_read_agf(mp, tp, agno,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
			bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;
	ASSERT(!(*bpp)->b_error);

	agf = XFS_BUF_TO_AGF(*bpp);
	pag = xfs_perag_get(mp, agno);
	if (!pag->pagf_init) {
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		pag->pagf_init = 1;
	}
#ifdef DEBUG
	else if (!XFS_FORCED_SHUTDOWN(mp)) {
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
#endif
	xfs_perag_put(pag);
	return 0;
}

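/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 */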
int
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_agblock_t	agsize;	/* allocation group size */
	int		error;
	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
	xfs_extlen_t	minleft;/* minimum left value, temp copy */
	xfs_mount_t	*mp;	/* mount structure pointer */
	xfs_agnumber_t	sagno;	/* starting allocation group number */
	xfs_alloctype_t	type;	/* input allocation type */
	int		bump_rotor = 0;
	int		no_min = 0;
	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}
	minleft = args->minleft;

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		args->minleft = 0;
		error = xfs_alloc_fix_freelist(args, 0);
		args->minleft = minleft;
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		if (!args->agbp) {
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_ANY_AG:
	case XFS_ALLOCTYPE_START_AG:
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_ANY_AG) {
			/*
			 * Start with the last place we left off.
			 */
			args->agno = sagno = (mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount;
			args->type = XFS_ALLOCTYPE_THIS_AG;
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			if (type == XFS_ALLOCTYPE_START_AG)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			if (no_min) args->minleft = 0;
			error = xfs_alloc_fix_freelist(args, flags);
			args->minleft = minleft;
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * For the first allocation, we can try any AG to get
			 * space.  However, if we already have allocated a
			 * block, we don't want to try AGs whose number is below
			 * sagno.  Otherwise, we may end up with out-of-order
			 * locking of AGF, which might cause deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (no_min == 1) {
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}
				if (flags == 0) {
					no_min = 1;
				} else {
					flags = 0;
					if (type == XFS_ALLOCTYPE_START_BNO) {
						args->agbno = XFS_FSB_TO_AGBNO(mp,
							args->fsbno);
						args->type = XFS_ALLOCTYPE_NEAR_BNO;
					}
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif
	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}

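/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */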
int
xfs_free_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_fsblock_t	bno,	/* starting block number of extent */
	xfs_extlen_t	len)	/* length of extent */
{
	xfs_alloc_arg_t	args;
	int		error;

	ASSERT(len != 0);
	memset(&args, 0, sizeof(xfs_alloc_arg_t));
	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * validate that the block number is legal - this enables us to detect
	 * and handle a silent filesystem corruption rather than crashing.
	 */
	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
	if (args.agno >= args.mp->m_sb.sb_agcount)
		return EFSCORRUPTED;

	args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
	if (args.agbno >= args.mp->m_sb.sb_agblocks)
		return EFSCORRUPTED;

	args.pag = xfs_perag_get(args.mp, args.agno);
	ASSERT(args.pag);

	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
	if (error)
		goto error0;

	/* validate the extent size is legal now we have the agf locked */
	if (args.agbno + len >
	    be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
		error = EFSCORRUPTED;
		goto error0;
	}

	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
	if (!error)
		xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
error0:
	xfs_perag_put(args.pag);
	return error;
}

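/*
 * Insert a new extent into the per-AG tree of busy extents, and add it to
 * the list of busy extents attached to the transaction.  If the tracking
 * structure cannot be allocated, fall back to making the transaction
 * synchronous so the extent cannot be reused before the free hits the log.
 */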
void
xfs_alloc_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_busy_extent	*new;
	struct xfs_busy_extent	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
	if (!new) {
		/*
		 * No Memory!  Since it is now not possible to track the free
		 * block, make this a synchronous transaction to insure that
		 * the block is not reused before this transaction commits.
		 */
		trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
		xfs_trans_set_sync(tp);
		return;
	}

	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

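/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  Returns 1 for an exact match, -1 for an overlapping but not
 * exact match, and 0 if there is no match.
 */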
int
xfs_alloc_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_busy_extent	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}

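/*
 * The found free extent [fbno, fbno + flen] overlaps part or all of the
 * given busy extent.  If the overlap covers the beginning, the end, or
 * all of the busy extent, the overlapping portion can be made unbusy and
 * used for the allocation.  We can't split a busy extent because we can't
 * modify a transaction/CIL context busy list, but we can update an
 * extent's block number or length.
 *
 * Returns true if the extent can safely be reused, or false if the log
 * needs to be forced before retrying.
 */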
STATIC bool
xfs_alloc_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_busy_extent	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * The free extent sits in the middle of the busy extent.
		 * Trimming the busy extent would require splitting it into
		 * two busy pieces, which we cannot do, so force the log.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * The busy extent is completely covered by the extent we are
		 * allocating, so remove it from the tree entirely.  Mark it
		 * unused with a zero length so xfs_alloc_busy_clear() doesn't
		 * try to erase it again.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * The front of the busy extent overlaps the end of the
		 * allocated extent: trim the start of the busy extent.
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * The end of the busy extent overlaps the start of the
		 * allocated extent: trim the end of the busy extent.
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}

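/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */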
void
xfs_alloc_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_busy_extent *busyp =
			rb_entry(rbp, struct xfs_busy_extent, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

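/*
 * For a given extent [bno, len], search the busy extent tree to find a
 * subset of the extent that is not busy.  If *rlen is returned smaller
 * than args->minlen no suitable extent could be found, and the caller
 * needs to check for busy extents clearing or retry.
 */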
STATIC void
xfs_alloc_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	xfs_agblock_t		*rbno,
	xfs_extlen_t		*rlen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;

	ASSERT(len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = bno;
	flen = len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_busy_extent *busyp =
			rb_entry(rbp, struct xfs_busy_extent, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!args->userdata &&
		    !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
			if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
							  busyp, fbno, flen,
							  false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/*
			 * The busy extent overlaps the start of the free
			 * extent.  If it also covers the end there is no
			 * usable space left, so fail; otherwise trim the
			 * free extent from the front.
			 */
			if (fend <= bend)
				goto fail;
			fbno = bend;
		} else if (bend >= fend) {
			/*
			 * The busy extent overlaps the end of the free
			 * extent: trim the free extent from the back.
			 */
			fend = bbno;
		} else {
			/*
			 * The busy extent sits in the middle of the free
			 * extent.  Pick the side that leaves the better
			 * allocation candidate.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfect fit */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
	spin_unlock(&args->pag->pagb_lock);

	if (fbno != bno || flen != len) {
		trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
					  fbno, flen);
	}
	*rbno = fbno;
	*rlen = flen;
	return;
fail:
	/*
	 * Return a zero extent length as failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	spin_unlock(&args->pag->pagb_lock);
	trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
	*rbno = fbno;
	*rlen = 0;
}

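/*
 * Remove one busy extent from the tree and free its tracking structure.
 */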
static void
xfs_alloc_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_busy_extent	*busyp)
{
	if (busyp->length) {
		trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
					   busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

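/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */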
void
xfs_alloc_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_busy_extent	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag) {
				spin_unlock(&pag->pagb_lock);
				xfs_perag_put(pag);
			}
			pag = xfs_perag_get(mp, busyp->agno);
			spin_lock(&pag->pagb_lock);
			agno = busyp->agno;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
			busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
		else
			xfs_alloc_busy_clear_one(mp, pag, busyp);
	}

	if (pag) {
		spin_unlock(&pag->pagb_lock);
		xfs_perag_put(pag);
	}
}

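/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */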
int
xfs_busy_extent_ag_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	return container_of(a, struct xfs_busy_extent, list)->agno -
		container_of(b, struct xfs_busy_extent, list)->agno;
}