// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_sb.h"
14#include "xfs_mount.h"
15#include "xfs_inode.h"
16#include "xfs_btree.h"
17#include "xfs_ialloc.h"
18#include "xfs_ialloc_btree.h"
19#include "xfs_alloc.h"
20#include "xfs_errortag.h"
21#include "xfs_error.h"
22#include "xfs_bmap.h"
23#include "xfs_trans.h"
24#include "xfs_buf_item.h"
25#include "xfs_icreate_item.h"
26#include "xfs_icache.h"
27#include "xfs_trace.h"
28#include "xfs_log.h"
29#include "xfs_rmap.h"

/*
 * Lookup a record by ino in the btree given by cur.
 */
int
xfs_inobt_lookup(
        struct xfs_btree_cur    *cur,
        xfs_agino_t             ino,
        xfs_lookup_t            dir,
        int                     *stat)
{
        cur->bc_rec.i.ir_startino = ino;
        cur->bc_rec.i.ir_holemask = 0;
        cur->bc_rec.i.ir_count = 0;
        cur->bc_rec.i.ir_freecount = 0;
        cur->bc_rec.i.ir_free = 0;
        return xfs_btree_lookup(cur, dir, stat);
}

/*
 * Update the record referred to by cur to the value given.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int                              /* error */
xfs_inobt_update(
        struct xfs_btree_cur    *cur,   /* btree cursor */
        xfs_inobt_rec_incore_t  *irec)  /* btree record */
{
        union xfs_btree_rec     rec;

        rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
        if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
                rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
                rec.inobt.ir_u.sp.ir_count = irec->ir_count;
                rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
        } else {
                /* ir_holemask/ir_count not supported on-disk */
                rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
        }
        rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
        return xfs_btree_update(cur, &rec);
}

/* Convert on-disk btree record to incore inobt record. */
void
xfs_inobt_btrec_to_irec(
        struct xfs_mount                *mp,
        union xfs_btree_rec             *rec,
        struct xfs_inobt_rec_incore     *irec)
{
        irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
        if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
                irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
                irec->ir_count = rec->inobt.ir_u.sp.ir_count;
                irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
        } else {
                /*
                 * ir_holemask/ir_count not supported on-disk; fill in
                 * hardcoded values for a full inode chunk.
                 */
                irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
                irec->ir_count = XFS_INODES_PER_CHUNK;
                irec->ir_freecount =
                                be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
        }
        irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
}
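
/*
 * Added illustration (not from the original source): what the incore fields
 * look like for the two cases above, assuming the standard 64-inode chunk and
 * XFS_INODES_PER_HOLEMASK_BIT == 4:
 *
 *   full chunk:    ir_holemask = XFS_INOBT_HOLEMASK_FULL (0, no holes),
 *                  ir_count = 64
 *   sparse chunk:  ir_holemask = 0xff00 (the upper 8 holemask bits are holes,
 *                  i.e. inodes 32-63 do not exist), ir_count = 32
 *
 * ir_free may still have bits set for inodes inside holes; those bits are
 * ignored by masking with xfs_inobt_irec_to_allocmask(), as done below.
 */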

/*
 * Get the data from the pointed-to record.
 */
int
xfs_inobt_get_rec(
        struct xfs_btree_cur            *cur,
        struct xfs_inobt_rec_incore     *irec,
        int                             *stat)
{
        struct xfs_mount                *mp = cur->bc_mp;
        xfs_agnumber_t                  agno = cur->bc_private.a.agno;
        union xfs_btree_rec             *rec;
        int                             error;
        uint64_t                        realfree;

        error = xfs_btree_get_rec(cur, &rec, stat);
        if (error || *stat == 0)
                return error;

        xfs_inobt_btrec_to_irec(mp, rec, irec);

        if (!xfs_verify_agino(mp, agno, irec->ir_startino))
                goto out_bad_rec;
        if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
            irec->ir_count > XFS_INODES_PER_CHUNK)
                goto out_bad_rec;
        if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
                goto out_bad_rec;

        /*
         * The free count must agree with the free mask once any bits that
         * fall inside sparse holes have been masked off.
         */
        if (!xfs_inobt_issparse(irec->ir_holemask))
                realfree = irec->ir_free;
        else
                realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
        if (hweight64(realfree) != irec->ir_freecount)
                goto out_bad_rec;

        return 0;

out_bad_rec:
        xfs_warn(mp,
                "%s Inode BTree record corruption in AG %d detected!",
                cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
        xfs_warn(mp,
"start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
                irec->ir_startino, irec->ir_count, irec->ir_freecount,
                irec->ir_free, irec->ir_holemask);
        return -EFSCORRUPTED;
}
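
/*
 * Worked example of the free-count check above (numbers invented for this
 * comment): for a sparse record with
 *
 *   ir_holemask  = 0xff00  ->  xfs_inobt_irec_to_allocmask() == 0x00000000ffffffff
 *   ir_free      = 0xfffffffffffffffc
 *   ir_freecount = 30
 *
 * the hole bits in ir_free are masked off, leaving realfree == 0xfffffffc,
 * and hweight64(0xfffffffc) == 30 matches ir_freecount, so the record is
 * accepted.  Any mismatch is reported as -EFSCORRUPTED.
 */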

/*
 * Insert a single inobt record. Cursor must already point to desired location.
 */
int
xfs_inobt_insert_rec(
        struct xfs_btree_cur    *cur,
        uint16_t                holemask,
        uint8_t                 count,
        int32_t                 freecount,
        xfs_inofree_t           free,
        int                     *stat)
{
        cur->bc_rec.i.ir_holemask = holemask;
        cur->bc_rec.i.ir_count = count;
        cur->bc_rec.i.ir_freecount = freecount;
        cur->bc_rec.i.ir_free = free;
        return xfs_btree_insert(cur, stat);
}

/*
 * Insert records describing a newly allocated inode chunk into the inobt.
 */
STATIC int
xfs_inobt_insert(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp,
        xfs_agino_t             newino,
        xfs_agino_t             newlen,
        xfs_btnum_t             btnum)
{
        struct xfs_btree_cur    *cur;
        struct xfs_agi          *agi = XFS_BUF_TO_AGI(agbp);
        xfs_agnumber_t          agno = be32_to_cpu(agi->agi_seqno);
        xfs_agino_t             thisino;
        int                     i;
        int                     error;

        cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);

        for (thisino = newino;
             thisino < newino + newlen;
             thisino += XFS_INODES_PER_CHUNK) {
                error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
                if (error) {
                        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                        return error;
                }
                ASSERT(i == 0);

                error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
                                             XFS_INODES_PER_CHUNK,
                                             XFS_INODES_PER_CHUNK,
                                             XFS_INOBT_ALL_FREE, &i);
                if (error) {
                        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                        return error;
                }
                ASSERT(i == 1);
        }

        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);

        return 0;
}
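
/*
 * Added note: newlen above is expressed in inodes, so an allocation of, say,
 * 256 inodes with the standard 64-inode chunk size results in four loop
 * iterations, inserting records at newino, newino + 64, newino + 128 and
 * newino + 192, each fully allocated and fully free.
 */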

/*
 * Verify that the number of free inodes in the AGI is correct.
 */
#ifdef DEBUG
STATIC int
xfs_check_agi_freecount(
        struct xfs_btree_cur    *cur,
        struct xfs_agi          *agi)
{
        if (cur->bc_nlevels == 1) {
                xfs_inobt_rec_incore_t rec;
                int             freecount = 0;
                int             error;
                int             i;

                error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
                if (error)
                        return error;

                do {
                        error = xfs_inobt_get_rec(cur, &rec, &i);
                        if (error)
                                return error;

                        if (i) {
                                freecount += rec.ir_freecount;
                                error = xfs_btree_increment(cur, 0, &i);
                                if (error)
                                        return error;
                        }
                } while (i == 1);

                if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
                        ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
        }
        return 0;
}
#else
#define xfs_check_agi_freecount(cur, agi)       0
#endif

/*
 * Initialise a new set of inodes.  When called without a transaction context
 * (e.g. from log recovery) we initiate a delayed write of the inode buffers
 * rather than logging them directly.
 */
int
xfs_ialloc_inode_init(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct list_head        *buffer_list,
        int                     icount,
        xfs_agnumber_t          agno,
        xfs_agblock_t           agbno,
        xfs_agblock_t           length,
        unsigned int            gen)
{
        struct xfs_buf          *fbuf;
        struct xfs_dinode       *free;
        int                     nbufs;
        int                     version;
        int                     i, j;
        xfs_daddr_t             d;
        xfs_ino_t               ino = 0;

        /*
         * Loop over the new block(s), filling in the inodes.  For small block
         * sizes, manipulate the inodes in buffers which are multiples of the
         * block size.
         */
        nbufs = length / M_IGEO(mp)->blocks_per_cluster;

        /*
         * Figure out what version number to use in the inodes we create.  If
         * the superblock version has caught up to the one that supports the
         * new inode format, then use the new inode version.  Otherwise use
         * the old version so that old kernels will continue to be able to
         * use the file system.
         *
         * For v3 inodes, we also need to write the inode number into the
         * inode, so calculate the first inode number of the chunk here as
         * XFS_AGB_TO_AGINO() only works within a filesystem block, not
         * across an inode cluster, and so cannot be used in the cluster
         * buffer loop below.
         *
         * Further, because v3 inodes are CRC protected, changes such as
         * unlinked list pointer updates are logged as whole-inode logical
         * changes, so the on-disk copies must be fully initialised before
         * they ever reach disk.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                version = 3;
                ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));

                /*
                 * Log the initialisation that is about to take place as a
                 * logical operation.  This means the transaction does not
                 * need to log the physical changes to the inode buffers as
                 * log recovery will know what initialisation is actually
                 * needed.  Hence we only need to log the buffers as "ordered"
                 * buffers so they track in the AIL as if they were physically
                 * logged.
                 */
                if (tp)
                        xfs_icreate_log(tp, agno, agbno, icount,
                                        mp->m_sb.sb_inodesize, length, gen);
        } else
                version = 2;

        for (j = 0; j < nbufs; j++) {
                /*
                 * Get the block.
                 */
                d = XFS_AGB_TO_DADDR(mp, agno, agbno +
                                (j * M_IGEO(mp)->blocks_per_cluster));
                fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
                                         mp->m_bsize *
                                         M_IGEO(mp)->blocks_per_cluster,
                                         XBF_UNMAPPED);
                if (!fbuf)
                        return -ENOMEM;

                /* Initialize all inodes in this buffer. */
                fbuf->b_ops = &xfs_inode_buf_ops;
                xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
                for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
                        int     ioffset = i << mp->m_sb.sb_inodelog;
                        uint    isize = xfs_dinode_size(version);

                        free = xfs_make_iptr(mp, fbuf, i);
                        free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
                        free->di_version = version;
                        free->di_gen = cpu_to_be32(gen);
                        free->di_next_unlinked = cpu_to_be32(NULLAGINO);

                        if (version == 3) {
                                free->di_ino = cpu_to_be64(ino);
                                ino++;
                                uuid_copy(&free->di_uuid,
                                          &mp->m_sb.sb_meta_uuid);
                                xfs_dinode_calc_crc(mp, free);
                        } else if (tp) {
                                /* just log the inode core */
                                xfs_trans_log_buf(tp, fbuf, ioffset,
                                                  ioffset + isize - 1);
                        }
                }

                if (tp) {
                        /*
                         * Mark the buffer as an inode allocation buffer so
                         * that log recovery knows these buffers contain
                         * freshly initialised inodes and can order replay of
                         * subsequent inode changes against them correctly.
                         */
                        xfs_trans_inode_alloc_buf(tp, fbuf);
                        if (version == 3) {
                                /*
                                 * The physical initialisation of v3 inodes is
                                 * covered by the icreate log item above, so
                                 * the buffer only needs to be ordered in the
                                 * transaction rather than physically logged.
                                 */
                                xfs_trans_ordered_buf(tp, fbuf);
                        }
                } else {
                        fbuf->b_flags |= XBF_DONE;
                        xfs_buf_delwri_queue(fbuf, buffer_list);
                        xfs_buf_relse(fbuf);
                }
        }
        return 0;
}

/*
 * Align the startino and allocmask of a recently allocated sparse chunk so
 * that they are fit for insertion (or merge) into the on-disk inode btrees.
 *
 * When sparse inodes are enabled, the inode alignment is increased from
 * cluster size to inode chunk size.  This means the minimum spacing between
 * two non-adjacent inobt records is large enough for a full chunk, so a
 * sparse allocation can always be described by a single record whose
 * startino is aligned to a full chunk.  If the allocated extent does not
 * start at a chunk boundary, pull startino back to the nearest alignment
 * boundary and shift the allocmask so it still refers to the same physical
 * inodes.
 */
STATIC void
xfs_align_sparse_ino(
        struct xfs_mount                *mp,
        xfs_agino_t                     *startino,
        uint16_t                        *allocmask)
{
        xfs_agblock_t                   agbno;
        xfs_agblock_t                   mod;
        int                             offset;

        agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
        mod = agbno % mp->m_sb.sb_inoalignmt;
        if (!mod)
                return;

        /* calculate the inode offset and align startino */
        offset = XFS_AGB_TO_AGINO(mp, mod);
        *startino -= offset;

        /*
         * Since startino has been aligned down, left shift allocmask such
         * that it continues to represent the same physical inodes relative
         * to the new startino.
         */
        *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
}
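
/*
 * Worked example (illustrative geometry, not from the original source):
 * assume 4096-byte blocks, 512-byte inodes (8 inodes per block) and
 * sb_inoalignmt == 8 blocks, i.e. one 64-inode chunk per alignment unit.
 * A sparse allocation starting at agbno 12 has mod == 4, so offset ==
 * XFS_AGB_TO_AGINO(mp, 4) == 32 inodes; startino is pulled back by 32 and
 * allocmask is shifted left by 32 / XFS_INODES_PER_HOLEMASK_BIT == 8
 * holemask bits, keeping the mask aligned with the same physical inodes.
 */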

/*
 * Determine whether the source inode record can merge into the target.  Both
 * records must be sparse, the inode ranges must match and there must be no
 * allocation overlap between the records.
 */
STATIC bool
__xfs_inobt_can_merge(
        struct xfs_inobt_rec_incore     *trec,
        struct xfs_inobt_rec_incore     *srec)
{
        uint64_t                        talloc;
        uint64_t                        salloc;

        /* records must cover the same inode range */
        if (trec->ir_startino != srec->ir_startino)
                return false;

        /* both records must be sparse */
        if (!xfs_inobt_issparse(trec->ir_holemask) ||
            !xfs_inobt_issparse(srec->ir_holemask))
                return false;

        /* both records must track some inodes */
        if (!trec->ir_count || !srec->ir_count)
                return false;

        /* can't exceed capacity of a full record */
        if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
                return false;

        /* verify there is no allocation overlap between the records */
        talloc = xfs_inobt_irec_to_allocmask(trec);
        salloc = xfs_inobt_irec_to_allocmask(srec);
        if (talloc & salloc)
                return false;

        return true;
}

/*
 * Merge the source inode record into the target.  The caller must call
 * __xfs_inobt_can_merge() to ensure the merge is valid.
 */
STATIC void
__xfs_inobt_rec_merge(
        struct xfs_inobt_rec_incore     *trec,
        struct xfs_inobt_rec_incore     *srec)
{
        ASSERT(trec->ir_startino == srec->ir_startino);

        /* combine the counts */
        trec->ir_count += srec->ir_count;
        trec->ir_freecount += srec->ir_freecount;

        /*
         * Merge the holemask and free mask.  For both fields, 0 bits refer to
         * allocated inodes.  We combine the allocated ranges with bitwise AND.
         */
        trec->ir_holemask &= srec->ir_holemask;
        trec->ir_free &= srec->ir_free;
}
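
/*
 * Merge example (values invented for this comment): two sparse records for
 * the same 64-inode chunk, one covering the low half (all free) and one
 * covering the high half with two inodes in use:
 *
 *   trec: ir_holemask 0xff00, ir_count 32, ir_freecount 32, ir_free ~0ULL
 *   srec: ir_holemask 0x00ff, ir_count 32, ir_freecount 30,
 *         ir_free 0xfffffffcffffffff
 *
 * merge to ir_holemask 0x0000 (a full chunk), ir_count 64, ir_freecount 62
 * and ir_free 0xfffffffcffffffff, because the allocated (0) bits of both
 * masks are combined with bitwise AND.
 */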

/*
 * Insert a new sparse inode chunk into the associated inode btree.  The inode
 * record for the sparse chunk is pre-aligned to a startino that should match
 * any pre-existing sparse inode record in the tree.  This allows sparse
 * chunks to fill over time.
 *
 * This function supports two modes of handling preexisting records depending
 * on the merge flag.  If merge is true, the provided record is merged with
 * the existing record and updated in place.  The merged record is returned in
 * nrec.  If merge is false, an existing record is replaced with the provided
 * record.  If a record does not exist, the provided record is always
 * inserted.
 *
 * It is considered corruption if a merge is requested and not possible.
 * Given the sparse inode alignment constraints, this should never happen.
 */
518STATIC int
519xfs_inobt_insert_sprec(
520 struct xfs_mount *mp,
521 struct xfs_trans *tp,
522 struct xfs_buf *agbp,
523 int btnum,
524 struct xfs_inobt_rec_incore *nrec,
525 bool merge)
526{
527 struct xfs_btree_cur *cur;
528 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
529 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
530 int error;
531 int i;
532 struct xfs_inobt_rec_incore rec;
533
534 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
535
536
537 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
538 if (error)
539 goto error;
540
541 if (i == 0) {
542 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
543 nrec->ir_count, nrec->ir_freecount,
544 nrec->ir_free, &i);
545 if (error)
546 goto error;
547 if (XFS_IS_CORRUPT(mp, i != 1)) {
548 error = -EFSCORRUPTED;
549 goto error;
550 }
551
552 goto out;
553 }
554
555
556
557
558
559 if (merge) {
560 error = xfs_inobt_get_rec(cur, &rec, &i);
561 if (error)
562 goto error;
563 if (XFS_IS_CORRUPT(mp, i != 1)) {
564 error = -EFSCORRUPTED;
565 goto error;
566 }
567 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
568 error = -EFSCORRUPTED;
569 goto error;
570 }
571
572
573
574
575
576 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
577 error = -EFSCORRUPTED;
578 goto error;
579 }
580
581 trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
582 rec.ir_holemask, nrec->ir_startino,
583 nrec->ir_holemask);
584
585
586 __xfs_inobt_rec_merge(nrec, &rec);
587
588 trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
589 nrec->ir_holemask);
590
591 error = xfs_inobt_rec_check_count(mp, nrec);
592 if (error)
593 goto error;
594 }
595
596 error = xfs_inobt_update(cur, nrec);
597 if (error)
598 goto error;
599
600out:
601 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
602 return 0;
603error:
604 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
605 return error;
606}

/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
612STATIC int
613xfs_ialloc_ag_alloc(
614 struct xfs_trans *tp,
615 struct xfs_buf *agbp,
616 int *alloc)
617{
618 struct xfs_agi *agi;
619 struct xfs_alloc_arg args;
620 xfs_agnumber_t agno;
621 int error;
622 xfs_agino_t newino;
623 xfs_agino_t newlen;
624 int isaligned = 0;
625
626
627 uint16_t allocmask = (uint16_t) -1;
628 struct xfs_inobt_rec_incore rec;
629 struct xfs_perag *pag;
630 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
631 int do_sparse = 0;
632
633 memset(&args, 0, sizeof(args));
634 args.tp = tp;
635 args.mp = tp->t_mountp;
636 args.fsbno = NULLFSBLOCK;
637 args.oinfo = XFS_RMAP_OINFO_INODES;
638
639#ifdef DEBUG
640
641 if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
642 igeo->ialloc_min_blks < igeo->ialloc_blks)
643 do_sparse = prandom_u32() & 1;
644#endif
645
646
647
648
649
650 newlen = igeo->ialloc_inos;
651 if (igeo->maxicount &&
652 percpu_counter_read_positive(&args.mp->m_icount) + newlen >
653 igeo->maxicount)
654 return -ENOSPC;
655 args.minlen = args.maxlen = igeo->ialloc_blks;
656
657
658
659
660
661 agi = XFS_BUF_TO_AGI(agbp);
662 newino = be32_to_cpu(agi->agi_newino);
663 agno = be32_to_cpu(agi->agi_seqno);
664 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
665 igeo->ialloc_blks;
666 if (do_sparse)
667 goto sparse_alloc;
668 if (likely(newino != NULLAGINO &&
669 (args.agbno < be32_to_cpu(agi->agi_length)))) {
670 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
671 args.type = XFS_ALLOCTYPE_THIS_BNO;
672 args.prod = 1;
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687 args.alignment = 1;
688 args.minalignslop = igeo->cluster_align - 1;
689
690
691 args.minleft = igeo->inobt_maxlevels - 1;
692 if ((error = xfs_alloc_vextent(&args)))
693 return error;
694
695
696
697
698
699
700
701
702
703
704
705 args.minalignslop = 0;
706 }
707
708 if (unlikely(args.fsbno == NULLFSBLOCK)) {
709
710
711
712
713
714
715
716
717 isaligned = 0;
718 if (igeo->ialloc_align) {
719 ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
720 args.alignment = args.mp->m_dalign;
721 isaligned = 1;
722 } else
723 args.alignment = igeo->cluster_align;
724
725
726
727
728
729 args.agbno = be32_to_cpu(agi->agi_root);
730 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
731
732
733
734 args.type = XFS_ALLOCTYPE_NEAR_BNO;
735 args.prod = 1;
736
737
738
739 args.minleft = igeo->inobt_maxlevels - 1;
740 if ((error = xfs_alloc_vextent(&args)))
741 return error;
742 }
743
744
745
746
747
748 if (isaligned && args.fsbno == NULLFSBLOCK) {
749 args.type = XFS_ALLOCTYPE_NEAR_BNO;
750 args.agbno = be32_to_cpu(agi->agi_root);
751 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
752 args.alignment = igeo->cluster_align;
753 if ((error = xfs_alloc_vextent(&args)))
754 return error;
755 }
756
757
758
759
760
761 if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
762 igeo->ialloc_min_blks < igeo->ialloc_blks &&
763 args.fsbno == NULLFSBLOCK) {
764sparse_alloc:
765 args.type = XFS_ALLOCTYPE_NEAR_BNO;
766 args.agbno = be32_to_cpu(agi->agi_root);
767 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
768 args.alignment = args.mp->m_sb.sb_spino_align;
769 args.prod = 1;
770
771 args.minlen = igeo->ialloc_min_blks;
772 args.maxlen = args.minlen;
773
774
775
776
777
778
779
780
781
782
783
784 args.min_agbno = args.mp->m_sb.sb_inoalignmt;
785 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
786 args.mp->m_sb.sb_inoalignmt) -
787 igeo->ialloc_blks;
788
789 error = xfs_alloc_vextent(&args);
790 if (error)
791 return error;
792
793 newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
794 ASSERT(newlen <= XFS_INODES_PER_CHUNK);
795 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
796 }
797
798 if (args.fsbno == NULLFSBLOCK) {
799 *alloc = 0;
800 return 0;
801 }
802 ASSERT(args.len == args.minlen);
803
804
805
806
807
808
809
810
811
812
813 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
814 args.agbno, args.len, prandom_u32());
815
816 if (error)
817 return error;
818
819
820
821 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
822
823 if (xfs_inobt_issparse(~allocmask)) {
824
825
826
827 xfs_align_sparse_ino(args.mp, &newino, &allocmask);
828
829 rec.ir_startino = newino;
830 rec.ir_holemask = ~allocmask;
831 rec.ir_count = newlen;
832 rec.ir_freecount = newlen;
833 rec.ir_free = XFS_INOBT_ALL_FREE;
834
835
836
837
838
839
840 error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
841 &rec, true);
842 if (error == -EFSCORRUPTED) {
843 xfs_alert(args.mp,
844 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
845 XFS_AGINO_TO_INO(args.mp, agno,
846 rec.ir_startino),
847 rec.ir_holemask, rec.ir_count);
848 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
849 }
850 if (error)
851 return error;
852
853
854
855
856
857
858
859
860
861
862
863
864 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
865 error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
866 XFS_BTNUM_FINO, &rec,
867 false);
868 if (error)
869 return error;
870 }
871 } else {
872
873 error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
874 XFS_BTNUM_INO);
875 if (error)
876 return error;
877
878 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
879 error = xfs_inobt_insert(args.mp, tp, agbp, newino,
880 newlen, XFS_BTNUM_FINO);
881 if (error)
882 return error;
883 }
884 }
885
886
887
888
889 be32_add_cpu(&agi->agi_count, newlen);
890 be32_add_cpu(&agi->agi_freecount, newlen);
891 pag = xfs_perag_get(args.mp, agno);
892 pag->pagi_freecount += newlen;
893 pag->pagi_count += newlen;
894 xfs_perag_put(pag);
895 agi->agi_newino = cpu_to_be32(newino);
896
897
898
899
900 xfs_ialloc_log_agi(tp, agbp,
901 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
902
903
904
905 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
906 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
907 *alloc = 1;
908 return 0;
909}
910
911STATIC xfs_agnumber_t
912xfs_ialloc_next_ag(
913 xfs_mount_t *mp)
914{
915 xfs_agnumber_t agno;
916
917 spin_lock(&mp->m_agirotor_lock);
918 agno = mp->m_agirotor;
919 if (++mp->m_agirotor >= mp->m_maxagi)
920 mp->m_agirotor = 0;
921 spin_unlock(&mp->m_agirotor_lock);
922
923 return agno;
924}

/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and the mode.  Return the number of the selected allocation group, or
 * NULLAGNUMBER if none is suitable.
 */
930STATIC xfs_agnumber_t
931xfs_ialloc_ag_select(
932 xfs_trans_t *tp,
933 xfs_ino_t parent,
934 umode_t mode)
935{
936 xfs_agnumber_t agcount;
937 xfs_agnumber_t agno;
938 int flags;
939 xfs_extlen_t ineed;
940 xfs_extlen_t longest = 0;
941 xfs_mount_t *mp;
942 int needspace;
943 xfs_perag_t *pag;
944 xfs_agnumber_t pagno;
945 int error;
946
947
948
949
950
951 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
952 mp = tp->t_mountp;
953 agcount = mp->m_maxagi;
954 if (S_ISDIR(mode))
955 pagno = xfs_ialloc_next_ag(mp);
956 else {
957 pagno = XFS_INO_TO_AGNO(mp, parent);
958 if (pagno >= agcount)
959 pagno = 0;
960 }
961
962 ASSERT(pagno < agcount);
963
964
965
966
967
968
969
970
971 agno = pagno;
972 flags = XFS_ALLOC_FLAG_TRYLOCK;
973 for (;;) {
974 pag = xfs_perag_get(mp, agno);
975 if (!pag->pagi_inodeok) {
976 xfs_ialloc_next_ag(mp);
977 goto nextag;
978 }
979
980 if (!pag->pagi_init) {
981 error = xfs_ialloc_pagi_init(mp, tp, agno);
982 if (error)
983 goto nextag;
984 }
985
986 if (pag->pagi_freecount) {
987 xfs_perag_put(pag);
988 return agno;
989 }
990
991 if (!pag->pagf_init) {
992 error = xfs_alloc_pagf_init(mp, tp, agno, flags);
993 if (error)
994 goto nextag;
995 }
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013 ineed = M_IGEO(mp)->ialloc_min_blks;
1014 if (flags && ineed > 1)
1015 ineed += M_IGEO(mp)->cluster_align;
1016 longest = pag->pagf_longest;
1017 if (!longest)
1018 longest = pag->pagf_flcount > 0;
1019
1020 if (pag->pagf_freeblks >= needspace + ineed &&
1021 longest >= ineed) {
1022 xfs_perag_put(pag);
1023 return agno;
1024 }
1025nextag:
1026 xfs_perag_put(pag);
1027
1028
1029
1030
1031 if (XFS_FORCED_SHUTDOWN(mp))
1032 return NULLAGNUMBER;
1033 agno++;
1034 if (agno >= agcount)
1035 agno = 0;
1036 if (agno == pagno) {
1037 if (flags == 0)
1038 return NULLAGNUMBER;
1039 flags = 0;
1040 }
1041 }
1042}

/*
 * Try to retrieve the next record to the left/right from the current one.
 */
1047STATIC int
1048xfs_ialloc_next_rec(
1049 struct xfs_btree_cur *cur,
1050 xfs_inobt_rec_incore_t *rec,
1051 int *done,
1052 int left)
1053{
1054 int error;
1055 int i;
1056
1057 if (left)
1058 error = xfs_btree_decrement(cur, 0, &i);
1059 else
1060 error = xfs_btree_increment(cur, 0, &i);
1061
1062 if (error)
1063 return error;
1064 *done = !i;
1065 if (i) {
1066 error = xfs_inobt_get_rec(cur, rec, &i);
1067 if (error)
1068 return error;
1069 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1070 return -EFSCORRUPTED;
1071 }
1072
1073 return 0;
1074}
1075
1076STATIC int
1077xfs_ialloc_get_rec(
1078 struct xfs_btree_cur *cur,
1079 xfs_agino_t agino,
1080 xfs_inobt_rec_incore_t *rec,
1081 int *done)
1082{
1083 int error;
1084 int i;
1085
1086 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
1087 if (error)
1088 return error;
1089 *done = !i;
1090 if (i) {
1091 error = xfs_inobt_get_rec(cur, rec, &i);
1092 if (error)
1093 return error;
1094 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1095 return -EFSCORRUPTED;
1096 }
1097
1098 return 0;
1099}

/*
 * Return the offset of the first free inode in the record.  If the inode
 * chunk is sparsely allocated, we convert the record holemask to inode
 * granularity and mask off the unallocated regions from the inode free mask.
 */
STATIC int
xfs_inobt_first_free_inode(
        struct xfs_inobt_rec_incore     *rec)
{
        xfs_inofree_t                   realfree;

        /* if there are no holes, return the first available offset */
        if (!xfs_inobt_issparse(rec->ir_holemask))
                return xfs_lowbit64(rec->ir_free);

        realfree = xfs_inobt_irec_to_allocmask(rec);
        realfree &= rec->ir_free;

        return xfs_lowbit64(realfree);
}
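
/*
 * Sparse-case example (illustrative): with ir_holemask == 0x00ff (the low 8
 * holemask bits are holes, i.e. inodes 0-31 do not exist),
 * xfs_inobt_irec_to_allocmask() yields 0xffffffff00000000; masking
 * ir_free == ~0ULL with it leaves 0xffffffff00000000, so xfs_lowbit64()
 * returns 32 -- the first inode that both exists and is free -- instead of
 * offset 0, which lies inside a hole.
 */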

/*
 * Allocate an inode using the inobt-only algorithm.
 */
1125STATIC int
1126xfs_dialloc_ag_inobt(
1127 struct xfs_trans *tp,
1128 struct xfs_buf *agbp,
1129 xfs_ino_t parent,
1130 xfs_ino_t *inop)
1131{
1132 struct xfs_mount *mp = tp->t_mountp;
1133 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
1134 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
1135 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1136 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1137 struct xfs_perag *pag;
1138 struct xfs_btree_cur *cur, *tcur;
1139 struct xfs_inobt_rec_incore rec, trec;
1140 xfs_ino_t ino;
1141 int error;
1142 int offset;
1143 int i, j;
1144 int searchdistance = 10;
1145
1146 pag = xfs_perag_get(mp, agno);
1147
1148 ASSERT(pag->pagi_init);
1149 ASSERT(pag->pagi_inodeok);
1150 ASSERT(pag->pagi_freecount > 0);
1151
1152 restart_pagno:
1153 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1154
1155
1156
1157
1158 if (!pagino)
1159 pagino = be32_to_cpu(agi->agi_newino);
1160
1161 error = xfs_check_agi_freecount(cur, agi);
1162 if (error)
1163 goto error0;
1164
1165
1166
1167
1168 if (pagno == agno) {
1169 int doneleft;
1170 int doneright;
1171
1172 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
1173 if (error)
1174 goto error0;
1175 if (XFS_IS_CORRUPT(mp, i != 1)) {
1176 error = -EFSCORRUPTED;
1177 goto error0;
1178 }
1179
1180 error = xfs_inobt_get_rec(cur, &rec, &j);
1181 if (error)
1182 goto error0;
1183 if (XFS_IS_CORRUPT(mp, j != 1)) {
1184 error = -EFSCORRUPTED;
1185 goto error0;
1186 }
1187
1188 if (rec.ir_freecount > 0) {
1189
1190
1191
1192
1193 goto alloc_inode;
1194 }
1195
1196
1197
1198
1199
1200
1201
1202 error = xfs_btree_dup_cursor(cur, &tcur);
1203 if (error)
1204 goto error0;
1205
1206
1207
1208
1209 if (pagino != NULLAGINO &&
1210 pag->pagl_pagino == pagino &&
1211 pag->pagl_leftrec != NULLAGINO &&
1212 pag->pagl_rightrec != NULLAGINO) {
1213 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
1214 &trec, &doneleft);
1215 if (error)
1216 goto error1;
1217
1218 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
1219 &rec, &doneright);
1220 if (error)
1221 goto error1;
1222 } else {
1223
1224 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
1225 if (error)
1226 goto error1;
1227
1228
1229 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
1230 if (error)
1231 goto error1;
1232 }
1233
1234
1235
1236
1237 while (--searchdistance > 0 && (!doneleft || !doneright)) {
1238 int useleft;
1239
1240
1241 if (!doneleft && !doneright) {
1242 useleft = pagino -
1243 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
1244 rec.ir_startino - pagino;
1245 } else {
1246 useleft = !doneleft;
1247 }
1248
1249
1250 if (useleft && trec.ir_freecount) {
1251 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1252 cur = tcur;
1253
1254 pag->pagl_leftrec = trec.ir_startino;
1255 pag->pagl_rightrec = rec.ir_startino;
1256 pag->pagl_pagino = pagino;
1257 rec = trec;
1258 goto alloc_inode;
1259 }
1260
1261
1262 if (!useleft && rec.ir_freecount) {
1263 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1264
1265 pag->pagl_leftrec = trec.ir_startino;
1266 pag->pagl_rightrec = rec.ir_startino;
1267 pag->pagl_pagino = pagino;
1268 goto alloc_inode;
1269 }
1270
1271
1272 if (useleft) {
1273 error = xfs_ialloc_next_rec(tcur, &trec,
1274 &doneleft, 1);
1275 } else {
1276 error = xfs_ialloc_next_rec(cur, &rec,
1277 &doneright, 0);
1278 }
1279 if (error)
1280 goto error1;
1281 }
1282
1283 if (searchdistance <= 0) {
1284
1285
1286
1287
1288 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1289 pag->pagl_leftrec = trec.ir_startino;
1290 pag->pagl_rightrec = rec.ir_startino;
1291 pag->pagl_pagino = pagino;
1292
1293 } else {
1294
1295
1296
1297
1298
1299
1300
1301 pag->pagl_pagino = NULLAGINO;
1302 pag->pagl_leftrec = NULLAGINO;
1303 pag->pagl_rightrec = NULLAGINO;
1304 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1305 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1306 goto restart_pagno;
1307 }
1308 }
1309
1310
1311
1312
1313
1314 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1315 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1316 XFS_LOOKUP_EQ, &i);
1317 if (error)
1318 goto error0;
1319
1320 if (i == 1) {
1321 error = xfs_inobt_get_rec(cur, &rec, &j);
1322 if (error)
1323 goto error0;
1324
1325 if (j == 1 && rec.ir_freecount > 0) {
1326
1327
1328
1329
1330 goto alloc_inode;
1331 }
1332 }
1333 }
1334
1335
1336
1337
1338 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1339 if (error)
1340 goto error0;
1341 if (XFS_IS_CORRUPT(mp, i != 1)) {
1342 error = -EFSCORRUPTED;
1343 goto error0;
1344 }
1345
1346 for (;;) {
1347 error = xfs_inobt_get_rec(cur, &rec, &i);
1348 if (error)
1349 goto error0;
1350 if (XFS_IS_CORRUPT(mp, i != 1)) {
1351 error = -EFSCORRUPTED;
1352 goto error0;
1353 }
1354 if (rec.ir_freecount > 0)
1355 break;
1356 error = xfs_btree_increment(cur, 0, &i);
1357 if (error)
1358 goto error0;
1359 if (XFS_IS_CORRUPT(mp, i != 1)) {
1360 error = -EFSCORRUPTED;
1361 goto error0;
1362 }
1363 }
1364
1365alloc_inode:
1366 offset = xfs_inobt_first_free_inode(&rec);
1367 ASSERT(offset >= 0);
1368 ASSERT(offset < XFS_INODES_PER_CHUNK);
1369 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1370 XFS_INODES_PER_CHUNK) == 0);
1371 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1372 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1373 rec.ir_freecount--;
1374 error = xfs_inobt_update(cur, &rec);
1375 if (error)
1376 goto error0;
1377 be32_add_cpu(&agi->agi_freecount, -1);
1378 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1379 pag->pagi_freecount--;
1380
1381 error = xfs_check_agi_freecount(cur, agi);
1382 if (error)
1383 goto error0;
1384
1385 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1386 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1387 xfs_perag_put(pag);
1388 *inop = ino;
1389 return 0;
1390error1:
1391 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1392error0:
1393 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1394 xfs_perag_put(pag);
1395 return error;
1396}

/*
 * Use the free inode btree to allocate an inode based on distance from the
 * parent.  Note that the provided cursor may be deleted and replaced.
 */
1402STATIC int
1403xfs_dialloc_ag_finobt_near(
1404 xfs_agino_t pagino,
1405 struct xfs_btree_cur **ocur,
1406 struct xfs_inobt_rec_incore *rec)
1407{
1408 struct xfs_btree_cur *lcur = *ocur;
1409 struct xfs_btree_cur *rcur;
1410 struct xfs_inobt_rec_incore rrec;
1411 int error;
1412 int i, j;
1413
1414 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
1415 if (error)
1416 return error;
1417
1418 if (i == 1) {
1419 error = xfs_inobt_get_rec(lcur, rec, &i);
1420 if (error)
1421 return error;
1422 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
1423 return -EFSCORRUPTED;
1424
1425
1426
1427
1428
1429
1430 if (pagino >= rec->ir_startino &&
1431 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
1432 return 0;
1433 }
1434
1435 error = xfs_btree_dup_cursor(lcur, &rcur);
1436 if (error)
1437 return error;
1438
1439 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
1440 if (error)
1441 goto error_rcur;
1442 if (j == 1) {
1443 error = xfs_inobt_get_rec(rcur, &rrec, &j);
1444 if (error)
1445 goto error_rcur;
1446 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
1447 error = -EFSCORRUPTED;
1448 goto error_rcur;
1449 }
1450 }
1451
1452 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
1453 error = -EFSCORRUPTED;
1454 goto error_rcur;
1455 }
1456 if (i == 1 && j == 1) {
1457
1458
1459
1460
1461 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
1462 (rrec.ir_startino - pagino)) {
1463 *rec = rrec;
1464 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1465 *ocur = rcur;
1466 } else {
1467 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1468 }
1469 } else if (j == 1) {
1470
1471 *rec = rrec;
1472 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1473 *ocur = rcur;
1474 } else if (i == 1) {
1475
1476 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1477 }
1478
1479 return 0;
1480
1481error_rcur:
1482 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
1483 return error;
1484}
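
/*
 * Distance comparison example for the i == 1 && j == 1 case above (numbers
 * invented for this comment): with pagino == 200, a left record starting at
 * 128 and a right record starting at 256, the left-hand metric is
 * 200 - 128 + 63 == 135 and the right-hand metric is 256 - 200 == 56, so the
 * right record is taken, the left cursor is deleted and *ocur is switched to
 * the right cursor.
 */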

/*
 * Use the free inode btree to find a free inode based on a newino hint.  If
 * the hint is NULL, find the first free inode in the AG.
 */
1490STATIC int
1491xfs_dialloc_ag_finobt_newino(
1492 struct xfs_agi *agi,
1493 struct xfs_btree_cur *cur,
1494 struct xfs_inobt_rec_incore *rec)
1495{
1496 int error;
1497 int i;
1498
1499 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1500 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1501 XFS_LOOKUP_EQ, &i);
1502 if (error)
1503 return error;
1504 if (i == 1) {
1505 error = xfs_inobt_get_rec(cur, rec, &i);
1506 if (error)
1507 return error;
1508 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1509 return -EFSCORRUPTED;
1510 return 0;
1511 }
1512 }
1513
1514
1515
1516
1517 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1518 if (error)
1519 return error;
1520 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1521 return -EFSCORRUPTED;
1522
1523 error = xfs_inobt_get_rec(cur, rec, &i);
1524 if (error)
1525 return error;
1526 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1527 return -EFSCORRUPTED;
1528
1529 return 0;
1530}

/*
 * Update the inobt based on a modification made to the finobt.  Also ensure
 * that the records from both trees are equivalent post-modification.
 */
1536STATIC int
1537xfs_dialloc_ag_update_inobt(
1538 struct xfs_btree_cur *cur,
1539 struct xfs_inobt_rec_incore *frec,
1540 int offset)
1541{
1542 struct xfs_inobt_rec_incore rec;
1543 int error;
1544 int i;
1545
1546 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1547 if (error)
1548 return error;
1549 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1550 return -EFSCORRUPTED;
1551
1552 error = xfs_inobt_get_rec(cur, &rec, &i);
1553 if (error)
1554 return error;
1555 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1556 return -EFSCORRUPTED;
1557 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1558 XFS_INODES_PER_CHUNK) == 0);
1559
1560 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1561 rec.ir_freecount--;
1562
1563 if (XFS_IS_CORRUPT(cur->bc_mp,
1564 rec.ir_free != frec->ir_free ||
1565 rec.ir_freecount != frec->ir_freecount))
1566 return -EFSCORRUPTED;
1567
1568 return xfs_inobt_update(cur, &rec);
1569}

/*
 * Allocate an inode using the free inode btree, if available.  Otherwise,
 * fall back to the inobt search algorithm.
 *
 * The caller selected an AG for us, and made sure that free inodes are
 * available.
 */
1578STATIC int
1579xfs_dialloc_ag(
1580 struct xfs_trans *tp,
1581 struct xfs_buf *agbp,
1582 xfs_ino_t parent,
1583 xfs_ino_t *inop)
1584{
1585 struct xfs_mount *mp = tp->t_mountp;
1586 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
1587 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
1588 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1589 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1590 struct xfs_perag *pag;
1591 struct xfs_btree_cur *cur;
1592 struct xfs_btree_cur *icur;
1593 struct xfs_inobt_rec_incore rec;
1594 xfs_ino_t ino;
1595 int error;
1596 int offset;
1597 int i;
1598
1599 if (!xfs_sb_version_hasfinobt(&mp->m_sb))
1600 return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);
1601
1602 pag = xfs_perag_get(mp, agno);
1603
1604
1605
1606
1607
1608 if (!pagino)
1609 pagino = be32_to_cpu(agi->agi_newino);
1610
1611 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
1612
1613 error = xfs_check_agi_freecount(cur, agi);
1614 if (error)
1615 goto error_cur;
1616
1617
1618
1619
1620
1621
1622 if (agno == pagno)
1623 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
1624 else
1625 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
1626 if (error)
1627 goto error_cur;
1628
1629 offset = xfs_inobt_first_free_inode(&rec);
1630 ASSERT(offset >= 0);
1631 ASSERT(offset < XFS_INODES_PER_CHUNK);
1632 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1633 XFS_INODES_PER_CHUNK) == 0);
1634 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
1635
1636
1637
1638
1639 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1640 rec.ir_freecount--;
1641 if (rec.ir_freecount)
1642 error = xfs_inobt_update(cur, &rec);
1643 else
1644 error = xfs_btree_delete(cur, &i);
1645 if (error)
1646 goto error_cur;
1647
1648
1649
1650
1651
1652
1653
1654 icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1655
1656 error = xfs_check_agi_freecount(icur, agi);
1657 if (error)
1658 goto error_icur;
1659
1660 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
1661 if (error)
1662 goto error_icur;
1663
1664
1665
1666
1667
1668 be32_add_cpu(&agi->agi_freecount, -1);
1669 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1670 pag->pagi_freecount--;
1671
1672 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1673
1674 error = xfs_check_agi_freecount(icur, agi);
1675 if (error)
1676 goto error_icur;
1677 error = xfs_check_agi_freecount(cur, agi);
1678 if (error)
1679 goto error_icur;
1680
1681 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
1682 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1683 xfs_perag_put(pag);
1684 *inop = ino;
1685 return 0;
1686
1687error_icur:
1688 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
1689error_cur:
1690 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1691 xfs_perag_put(pag);
1692 return error;
1693}

/*
 * Allocate an inode on disk.
 *
 * Mode is used to tell whether the new inode will need space, and whether it
 * is a directory.
 *
 * This function is designed to be called twice if it has to do an allocation
 * to make more free inodes.  On the first call, *IO_agbp should be set to
 * NULL.  If an inode is available without having to perform an allocation,
 * an inode number is returned and *IO_agbp is left NULL.  If an allocation
 * needs to be performed, *IO_agbp is set to the AGI buffer of the selected
 * allocation group and *inop is set to NULLFSINO; the caller should then
 * commit the current transaction and call again, passing the AGI buffer back
 * in, so the inode can be allocated from the freshly initialised chunk.
 *
 * Once we successfully pick an inode its number is returned and the on-disk
 * data structures are updated.  The inode itself is not read in, since doing
 * so would break ordering constraints with xfs_reclaim.
 */
1716int
1717xfs_dialloc(
1718 struct xfs_trans *tp,
1719 xfs_ino_t parent,
1720 umode_t mode,
1721 struct xfs_buf **IO_agbp,
1722 xfs_ino_t *inop)
1723{
1724 struct xfs_mount *mp = tp->t_mountp;
1725 struct xfs_buf *agbp;
1726 xfs_agnumber_t agno;
1727 int error;
1728 int ialloced;
1729 int noroom = 0;
1730 xfs_agnumber_t start_agno;
1731 struct xfs_perag *pag;
1732 struct xfs_ino_geometry *igeo = M_IGEO(mp);
1733 int okalloc = 1;
1734
1735 if (*IO_agbp) {
1736
1737
1738
1739
1740
1741 agbp = *IO_agbp;
1742 goto out_alloc;
1743 }
1744
1745
1746
1747
1748
1749 start_agno = xfs_ialloc_ag_select(tp, parent, mode);
1750 if (start_agno == NULLAGNUMBER) {
1751 *inop = NULLFSINO;
1752 return 0;
1753 }
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763 if (igeo->maxicount &&
1764 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
1765 > igeo->maxicount) {
1766 noroom = 1;
1767 okalloc = 0;
1768 }
1769
1770
1771
1772
1773
1774
1775 agno = start_agno;
1776 for (;;) {
1777 pag = xfs_perag_get(mp, agno);
1778 if (!pag->pagi_inodeok) {
1779 xfs_ialloc_next_ag(mp);
1780 goto nextag;
1781 }
1782
1783 if (!pag->pagi_init) {
1784 error = xfs_ialloc_pagi_init(mp, tp, agno);
1785 if (error)
1786 goto out_error;
1787 }
1788
1789
1790
1791
1792 if (!pag->pagi_freecount && !okalloc)
1793 goto nextag;
1794
1795
1796
1797
1798
1799 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1800 if (error)
1801 goto out_error;
1802
1803 if (pag->pagi_freecount) {
1804 xfs_perag_put(pag);
1805 goto out_alloc;
1806 }
1807
1808 if (!okalloc)
1809 goto nextag_relse_buffer;
1810
1811
1812 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
1813 if (error) {
1814 xfs_trans_brelse(tp, agbp);
1815
1816 if (error != -ENOSPC)
1817 goto out_error;
1818
1819 xfs_perag_put(pag);
1820 *inop = NULLFSINO;
1821 return 0;
1822 }
1823
1824 if (ialloced) {
1825
1826
1827
1828
1829
1830
1831 ASSERT(pag->pagi_freecount > 0);
1832 xfs_perag_put(pag);
1833
1834 *IO_agbp = agbp;
1835 *inop = NULLFSINO;
1836 return 0;
1837 }
1838
1839nextag_relse_buffer:
1840 xfs_trans_brelse(tp, agbp);
1841nextag:
1842 xfs_perag_put(pag);
1843 if (++agno == mp->m_sb.sb_agcount)
1844 agno = 0;
1845 if (agno == start_agno) {
1846 *inop = NULLFSINO;
1847 return noroom ? -ENOSPC : 0;
1848 }
1849 }
1850
1851out_alloc:
1852 *IO_agbp = NULL;
1853 return xfs_dialloc_ag(tp, agbp, parent, inop);
1854out_error:
1855 xfs_perag_put(pag);
1856 return error;
1857}

/*
 * Free the blocks of an inode chunk.  We must consider that the inode chunk
 * might be sparse and only free the regions that are allocated as part of
 * the chunk.
 */
1864STATIC void
1865xfs_difree_inode_chunk(
1866 struct xfs_trans *tp,
1867 xfs_agnumber_t agno,
1868 struct xfs_inobt_rec_incore *rec)
1869{
1870 struct xfs_mount *mp = tp->t_mountp;
1871 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
1872 rec->ir_startino);
1873 int startidx, endidx;
1874 int nextbit;
1875 xfs_agblock_t agbno;
1876 int contigblk;
1877 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1878
1879 if (!xfs_inobt_issparse(rec->ir_holemask)) {
1880
1881 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
1882 M_IGEO(mp)->ialloc_blks,
1883 &XFS_RMAP_OINFO_INODES);
1884 return;
1885 }
1886
1887
1888 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1889 holemask[0] = rec->ir_holemask;
1890
1891
1892
1893
1894
1895
1896
1897 startidx = endidx = find_first_zero_bit(holemask,
1898 XFS_INOBT_HOLEMASK_BITS);
1899 nextbit = startidx + 1;
1900 while (startidx < XFS_INOBT_HOLEMASK_BITS) {
1901 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
1902 nextbit);
1903
1904
1905
1906
1907 if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
1908 nextbit == endidx + 1) {
1909 endidx = nextbit;
1910 goto next;
1911 }
1912
1913
1914
1915
1916
1917
1918 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
1919 mp->m_sb.sb_inopblock;
1920 contigblk = ((endidx - startidx + 1) *
1921 XFS_INODES_PER_HOLEMASK_BIT) /
1922 mp->m_sb.sb_inopblock;
1923
1924 ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
1925 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
1926 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
1927 contigblk, &XFS_RMAP_OINFO_INODES);
1928
1929
1930 startidx = endidx = nextbit;
1931
1932next:
1933 nextbit++;
1934 }
1935}
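
/*
 * Holemask walk example (illustrative geometry: 8 inodes per block, so each
 * holemask bit covers 4 inodes and 2 bits cover one block): for
 * ir_holemask == 0xff0f only bits 4-7 are clear, so startidx == 4 and endidx
 * grows to 7.  That converts to agbno == sagbno + (4 * 4) / 8 == sagbno + 2
 * and contigblk == ((7 - 4 + 1) * 4) / 8 == 2, i.e. a single two-block
 * extent is freed for the only physically allocated region of the sparse
 * chunk.
 */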
1936
1937STATIC int
1938xfs_difree_inobt(
1939 struct xfs_mount *mp,
1940 struct xfs_trans *tp,
1941 struct xfs_buf *agbp,
1942 xfs_agino_t agino,
1943 struct xfs_icluster *xic,
1944 struct xfs_inobt_rec_incore *orec)
1945{
1946 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
1947 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
1948 struct xfs_perag *pag;
1949 struct xfs_btree_cur *cur;
1950 struct xfs_inobt_rec_incore rec;
1951 int ilen;
1952 int error;
1953 int i;
1954 int off;
1955
1956 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
1957 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
1958
1959
1960
1961
1962 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
1963
1964 error = xfs_check_agi_freecount(cur, agi);
1965 if (error)
1966 goto error0;
1967
1968
1969
1970
1971 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
1972 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
1973 __func__, error);
1974 goto error0;
1975 }
1976 if (XFS_IS_CORRUPT(mp, i != 1)) {
1977 error = -EFSCORRUPTED;
1978 goto error0;
1979 }
1980 error = xfs_inobt_get_rec(cur, &rec, &i);
1981 if (error) {
1982 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1983 __func__, error);
1984 goto error0;
1985 }
1986 if (XFS_IS_CORRUPT(mp, i != 1)) {
1987 error = -EFSCORRUPTED;
1988 goto error0;
1989 }
1990
1991
1992
1993 off = agino - rec.ir_startino;
1994 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1995 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1996
1997
1998
1999 rec.ir_free |= XFS_INOBT_MASK(off);
2000 rec.ir_freecount++;
2001
2002
2003
2004
2005
2006
2007 if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
2008 rec.ir_free == XFS_INOBT_ALL_FREE &&
2009 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
2010 xic->deleted = true;
2011 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
2012 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
2013
2014
2015
2016
2017
2018
2019 ilen = rec.ir_freecount;
2020 be32_add_cpu(&agi->agi_count, -ilen);
2021 be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
2022 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
2023 pag = xfs_perag_get(mp, agno);
2024 pag->pagi_freecount -= ilen - 1;
2025 pag->pagi_count -= ilen;
2026 xfs_perag_put(pag);
2027 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
2028 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
2029
2030 if ((error = xfs_btree_delete(cur, &i))) {
2031 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
2032 __func__, error);
2033 goto error0;
2034 }
2035
2036 xfs_difree_inode_chunk(tp, agno, &rec);
2037 } else {
2038 xic->deleted = false;
2039
2040 error = xfs_inobt_update(cur, &rec);
2041 if (error) {
2042 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
2043 __func__, error);
2044 goto error0;
2045 }
2046
2047
2048
2049
2050 be32_add_cpu(&agi->agi_freecount, 1);
2051 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
2052 pag = xfs_perag_get(mp, agno);
2053 pag->pagi_freecount++;
2054 xfs_perag_put(pag);
2055 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
2056 }
2057
2058 error = xfs_check_agi_freecount(cur, agi);
2059 if (error)
2060 goto error0;
2061
2062 *orec = rec;
2063 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2064 return 0;
2065
2066error0:
2067 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2068 return error;
2069}

/*
 * Free an inode in the free inode btree.
 */
2074STATIC int
2075xfs_difree_finobt(
2076 struct xfs_mount *mp,
2077 struct xfs_trans *tp,
2078 struct xfs_buf *agbp,
2079 xfs_agino_t agino,
2080 struct xfs_inobt_rec_incore *ibtrec)
2081{
2082 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
2083 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
2084 struct xfs_btree_cur *cur;
2085 struct xfs_inobt_rec_incore rec;
2086 int offset = agino - ibtrec->ir_startino;
2087 int error;
2088 int i;
2089
2090 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
2091
2092 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2093 if (error)
2094 goto error;
2095 if (i == 0) {
2096
2097
2098
2099
2100
2101 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
2102 error = -EFSCORRUPTED;
2103 goto error;
2104 }
2105
2106 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2107 ibtrec->ir_count,
2108 ibtrec->ir_freecount,
2109 ibtrec->ir_free, &i);
2110 if (error)
2111 goto error;
2112 ASSERT(i == 1);
2113
2114 goto out;
2115 }
2116
2117
2118
2119
2120
2121
2122
2123
2124 error = xfs_inobt_get_rec(cur, &rec, &i);
2125 if (error)
2126 goto error;
2127 if (XFS_IS_CORRUPT(mp, i != 1)) {
2128 error = -EFSCORRUPTED;
2129 goto error;
2130 }
2131
2132 rec.ir_free |= XFS_INOBT_MASK(offset);
2133 rec.ir_freecount++;
2134
2135 if (XFS_IS_CORRUPT(mp,
2136 rec.ir_free != ibtrec->ir_free ||
2137 rec.ir_freecount != ibtrec->ir_freecount)) {
2138 error = -EFSCORRUPTED;
2139 goto error;
2140 }
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154 if (rec.ir_free == XFS_INOBT_ALL_FREE &&
2155 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
2156 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
2157 error = xfs_btree_delete(cur, &i);
2158 if (error)
2159 goto error;
2160 ASSERT(i == 1);
2161 } else {
2162 error = xfs_inobt_update(cur, &rec);
2163 if (error)
2164 goto error;
2165 }
2166
2167out:
2168 error = xfs_check_agi_freecount(cur, agi);
2169 if (error)
2170 goto error;
2171
2172 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2173 return 0;
2174
2175error:
2176 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2177 return error;
2178}

/*
 * Free disk inode.  Carefully avoids touching the incore inode, all
 * manipulations incore are the caller's responsibility.
 * The on-disk inode is not changed by this operation, only the
 * btree (free inode mask) is changed.
 */
2186int
2187xfs_difree(
2188 struct xfs_trans *tp,
2189 xfs_ino_t inode,
2190 struct xfs_icluster *xic)
2191{
2192
2193 xfs_agblock_t agbno;
2194 struct xfs_buf *agbp;
2195 xfs_agino_t agino;
2196 xfs_agnumber_t agno;
2197 int error;
2198 struct xfs_mount *mp;
2199 struct xfs_inobt_rec_incore rec;
2200
2201 mp = tp->t_mountp;
2202
2203
2204
2205
2206 agno = XFS_INO_TO_AGNO(mp, inode);
2207 if (agno >= mp->m_sb.sb_agcount) {
2208 xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
2209 __func__, agno, mp->m_sb.sb_agcount);
2210 ASSERT(0);
2211 return -EINVAL;
2212 }
2213 agino = XFS_INO_TO_AGINO(mp, inode);
2214 if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
2215 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2216 __func__, (unsigned long long)inode,
2217 (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
2218 ASSERT(0);
2219 return -EINVAL;
2220 }
2221 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2222 if (agbno >= mp->m_sb.sb_agblocks) {
2223 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2224 __func__, agbno, mp->m_sb.sb_agblocks);
2225 ASSERT(0);
2226 return -EINVAL;
2227 }
2228
2229
2230
2231 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2232 if (error) {
2233 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2234 __func__, error);
2235 return error;
2236 }
2237
2238
2239
2240
2241 error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
2242 if (error)
2243 goto error0;
2244
2245
2246
2247
2248 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2249 error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
2250 if (error)
2251 goto error0;
2252 }
2253
2254 return 0;
2255
2256error0:
2257 return error;
2258}
2259
2260STATIC int
2261xfs_imap_lookup(
2262 struct xfs_mount *mp,
2263 struct xfs_trans *tp,
2264 xfs_agnumber_t agno,
2265 xfs_agino_t agino,
2266 xfs_agblock_t agbno,
2267 xfs_agblock_t *chunk_agbno,
2268 xfs_agblock_t *offset_agbno,
2269 int flags)
2270{
2271 struct xfs_inobt_rec_incore rec;
2272 struct xfs_btree_cur *cur;
2273 struct xfs_buf *agbp;
2274 int error;
2275 int i;
2276
2277 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
2278 if (error) {
2279 xfs_alert(mp,
2280 "%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2281 __func__, error, agno);
2282 return error;
2283 }
2284
2285
2286
2287
2288
2289
2290
2291 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
2292 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2293 if (!error) {
2294 if (i)
2295 error = xfs_inobt_get_rec(cur, &rec, &i);
2296 if (!error && i == 0)
2297 error = -EINVAL;
2298 }
2299
2300 xfs_trans_brelse(tp, agbp);
2301 xfs_btree_del_cursor(cur, error);
2302 if (error)
2303 return error;
2304
2305
2306 if (rec.ir_startino > agino ||
2307 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
2308 return -EINVAL;
2309
2310
2311 if ((flags & XFS_IGET_UNTRUSTED) &&
2312 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2313 return -EINVAL;
2314
2315 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2316 *offset_agbno = agbno - *chunk_agbno;
2317 return 0;
2318}

/*
 * Return the location of the inode in imap, for mapping it into a buffer.
 */
2323int
2324xfs_imap(
2325 xfs_mount_t *mp,
2326 xfs_trans_t *tp,
2327 xfs_ino_t ino,
2328 struct xfs_imap *imap,
2329 uint flags)
2330{
2331 xfs_agblock_t agbno;
2332 xfs_agino_t agino;
2333 xfs_agnumber_t agno;
2334 xfs_agblock_t chunk_agbno;
2335 xfs_agblock_t cluster_agbno;
2336 int error;
2337 int offset;
2338 xfs_agblock_t offset_agbno;
2339
2340 ASSERT(ino != NULLFSINO);
2341
2342
2343
2344
2345 agno = XFS_INO_TO_AGNO(mp, ino);
2346 agino = XFS_INO_TO_AGINO(mp, ino);
2347 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2348 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
2349 ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2350#ifdef DEBUG
2351
2352
2353
2354
2355 if (flags & XFS_IGET_UNTRUSTED)
2356 return -EINVAL;
2357 if (agno >= mp->m_sb.sb_agcount) {
2358 xfs_alert(mp,
2359 "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
2360 __func__, agno, mp->m_sb.sb_agcount);
2361 }
2362 if (agbno >= mp->m_sb.sb_agblocks) {
2363 xfs_alert(mp,
2364 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2365 __func__, (unsigned long long)agbno,
2366 (unsigned long)mp->m_sb.sb_agblocks);
2367 }
2368 if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
2369 xfs_alert(mp,
2370 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2371 __func__, ino,
2372 XFS_AGINO_TO_INO(mp, agno, agino));
2373 }
2374 xfs_stack_trace();
2375#endif
2376 return -EINVAL;
2377 }
2378
2379
2380
2381
2382
2383
2384
2385
2386 if (flags & XFS_IGET_UNTRUSTED) {
2387 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2388 &chunk_agbno, &offset_agbno, flags);
2389 if (error)
2390 return error;
2391 goto out_map;
2392 }
2393
2394
2395
2396
2397
2398 if (M_IGEO(mp)->blocks_per_cluster == 1) {
2399 offset = XFS_INO_TO_OFFSET(mp, ino);
2400 ASSERT(offset < mp->m_sb.sb_inopblock);
2401
2402 imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
2403 imap->im_len = XFS_FSB_TO_BB(mp, 1);
2404 imap->im_boffset = (unsigned short)(offset <<
2405 mp->m_sb.sb_inodelog);
2406 return 0;
2407 }
2408
2409
2410
2411
2412
2413
2414 if (M_IGEO(mp)->inoalign_mask) {
2415 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
2416 chunk_agbno = agbno - offset_agbno;
2417 } else {
2418 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
2419 &chunk_agbno, &offset_agbno, flags);
2420 if (error)
2421 return error;
2422 }
2423
2424out_map:
2425 ASSERT(agbno >= chunk_agbno);
2426 cluster_agbno = chunk_agbno +
2427 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
2428 M_IGEO(mp)->blocks_per_cluster);
2429 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2430 XFS_INO_TO_OFFSET(mp, ino);
2431
2432 imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
2433 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
2434 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
2435
2436
2437
2438
2439
2440
2441
2442 if ((imap->im_blkno + imap->im_len) >
2443 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2444 xfs_alert(mp,
2445 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2446 __func__, (unsigned long long) imap->im_blkno,
2447 (unsigned long long) imap->im_len,
2448 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2449 return -EINVAL;
2450 }
2451 return 0;
2452}
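
/*
 * Mapping example (made-up geometry): with 2 blocks per inode cluster and
 * 8 inodes per block, an inode at agbno 11 in a chunk starting at agbno 8
 * has offset_agbno == 3, so cluster_agbno == 8 + (3 / 2) * 2 == 10 and the
 * inode lives ((11 - 10) * 8 + XFS_INO_TO_OFFSET(mp, ino)) inodes into a
 * two-block cluster buffer starting at daddr XFS_AGB_TO_DADDR(mp, agno, 10).
 */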

/*
 * Log specified fields for the ag hdr (inode section).  The fields are
 * logged as (up to) two byte ranges: one covering the original AGI fields up
 * through agi_unlinked, and a second covering the free inode btree root and
 * level fields that were added later.
 */
2466void
2467xfs_ialloc_log_agi(
2468 xfs_trans_t *tp,
2469 xfs_buf_t *bp,
2470 int fields)
2471{
2472 int first;
2473 int last;
2474 static const short offsets[] = {
2475
2476 offsetof(xfs_agi_t, agi_magicnum),
2477 offsetof(xfs_agi_t, agi_versionnum),
2478 offsetof(xfs_agi_t, agi_seqno),
2479 offsetof(xfs_agi_t, agi_length),
2480 offsetof(xfs_agi_t, agi_count),
2481 offsetof(xfs_agi_t, agi_root),
2482 offsetof(xfs_agi_t, agi_level),
2483 offsetof(xfs_agi_t, agi_freecount),
2484 offsetof(xfs_agi_t, agi_newino),
2485 offsetof(xfs_agi_t, agi_dirino),
2486 offsetof(xfs_agi_t, agi_unlinked),
2487 offsetof(xfs_agi_t, agi_free_root),
2488 offsetof(xfs_agi_t, agi_free_level),
2489 sizeof(xfs_agi_t)
2490 };
2491#ifdef DEBUG
2492 xfs_agi_t *agi;
2493
2494 agi = XFS_BUF_TO_AGI(bp);
2495 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
2496#endif
2497
2498
2499
2500
2501
2502
2503 if (fields & XFS_AGI_ALL_BITS_R1) {
2504 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
2505 &first, &last);
2506 xfs_trans_log_buf(tp, bp, first, last);
2507 }
2508
2509
2510
2511
2512
2513 fields &= ~XFS_AGI_ALL_BITS_R1;
2514 if (fields) {
2515 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
2516 &first, &last);
2517 xfs_trans_log_buf(tp, bp, first, last);
2518 }
2519}
2520
2521static xfs_failaddr_t
2522xfs_agi_verify(
2523 struct xfs_buf *bp)
2524{
2525 struct xfs_mount *mp = bp->b_mount;
2526 struct xfs_agi *agi = XFS_BUF_TO_AGI(bp);
2527 int i;
2528
2529 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2530 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
2531 return __this_address;
2532 if (!xfs_log_check_lsn(mp,
2533 be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
2534 return __this_address;
2535 }
2536
2537
2538
2539
2540 if (!xfs_verify_magic(bp, agi->agi_magicnum))
2541 return __this_address;
2542 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
2543 return __this_address;
2544
2545 if (be32_to_cpu(agi->agi_level) < 1 ||
2546 be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
2547 return __this_address;
2548
2549 if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
2550 (be32_to_cpu(agi->agi_free_level) < 1 ||
2551 be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
2552 return __this_address;
2553
2554
2555
2556
2557
2558
2559
2560 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
2561 return __this_address;
2562
2563 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
2564 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
2565 continue;
2566 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
2567 return __this_address;
2568 }
2569
2570 return NULL;
2571}
2572
2573static void
2574xfs_agi_read_verify(
2575 struct xfs_buf *bp)
2576{
2577 struct xfs_mount *mp = bp->b_mount;
2578 xfs_failaddr_t fa;
2579
2580 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2581 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
2582 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2583 else {
2584 fa = xfs_agi_verify(bp);
2585 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
2586 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2587 }
2588}
2589
2590static void
2591xfs_agi_write_verify(
2592 struct xfs_buf *bp)
2593{
2594 struct xfs_mount *mp = bp->b_mount;
2595 struct xfs_buf_log_item *bip = bp->b_log_item;
2596 xfs_failaddr_t fa;
2597
2598 fa = xfs_agi_verify(bp);
2599 if (fa) {
2600 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2601 return;
2602 }
2603
2604 if (!xfs_sb_version_hascrc(&mp->m_sb))
2605 return;
2606
2607 if (bip)
2608 XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2609 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
2610}
2611
2612const struct xfs_buf_ops xfs_agi_buf_ops = {
2613 .name = "xfs_agi",
2614 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
2615 .verify_read = xfs_agi_read_verify,
2616 .verify_write = xfs_agi_write_verify,
2617 .verify_struct = xfs_agi_verify,
2618};
2619

/*
 * Read in the allocation group header (inode allocation section).
 */
2623int
2624xfs_read_agi(
2625 struct xfs_mount *mp,
2626 struct xfs_trans *tp,
2627 xfs_agnumber_t agno,
2628 struct xfs_buf **bpp)
2629{
2630 int error;
2631
2632 trace_xfs_read_agi(mp, agno);
2633
2634 ASSERT(agno != NULLAGNUMBER);
2635 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
2636 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
2637 XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
2638 if (error)
2639 return error;
2640 if (tp)
2641 xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);
2642
2643 xfs_buf_set_ref(*bpp, XFS_AGI_REF);
2644 return 0;
2645}
2646
2647int
2648xfs_ialloc_read_agi(
2649 struct xfs_mount *mp,
2650 struct xfs_trans *tp,
2651 xfs_agnumber_t agno,
2652 struct xfs_buf **bpp)
2653{
2654 struct xfs_agi *agi;
2655 struct xfs_perag *pag;
2656 int error;
2657
2658 trace_xfs_ialloc_read_agi(mp, agno);
2659
2660 error = xfs_read_agi(mp, tp, agno, bpp);
2661 if (error)
2662 return error;
2663
2664 agi = XFS_BUF_TO_AGI(*bpp);
2665 pag = xfs_perag_get(mp, agno);
2666 if (!pag->pagi_init) {
2667 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
2668 pag->pagi_count = be32_to_cpu(agi->agi_count);
2669 pag->pagi_init = 1;
2670 }
2671
2672
2673
2674
2675
2676 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
2677 XFS_FORCED_SHUTDOWN(mp));
2678 xfs_perag_put(pag);
2679 return 0;
2680}
2681

/*
 * Read in the allocation group header to initialise the per-ag data
 * in the mount structure.
 */
2685int
2686xfs_ialloc_pagi_init(
2687 xfs_mount_t *mp,
2688 xfs_trans_t *tp,
2689 xfs_agnumber_t agno)
2690{
2691 xfs_buf_t *bp = NULL;
2692 int error;
2693
2694 error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
2695 if (error)
2696 return error;
2697 if (bp)
2698 xfs_trans_brelse(tp, bp);
2699 return 0;
2700}

/* Is there an inode record covering a given range of inode numbers? */
2703int
2704xfs_ialloc_has_inode_record(
2705 struct xfs_btree_cur *cur,
2706 xfs_agino_t low,
2707 xfs_agino_t high,
2708 bool *exists)
2709{
2710 struct xfs_inobt_rec_incore irec;
2711 xfs_agino_t agino;
2712 uint16_t holemask;
2713 int has_record;
2714 int i;
2715 int error;
2716
2717 *exists = false;
2718 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
2719 while (error == 0 && has_record) {
2720 error = xfs_inobt_get_rec(cur, &irec, &has_record);
2721 if (error || irec.ir_startino > high)
2722 break;
2723
2724 agino = irec.ir_startino;
2725 holemask = irec.ir_holemask;
2726 for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
2727 i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
2728 if (holemask & 1)
2729 continue;
2730 if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
2731 agino <= high) {
2732 *exists = true;
2733 return 0;
2734 }
2735 }
2736
2737 error = xfs_btree_increment(cur, 0, &has_record);
2738 }
2739 return error;
2740}

/* Is there an inode record covering a given extent? */
2743int
2744xfs_ialloc_has_inodes_at_extent(
2745 struct xfs_btree_cur *cur,
2746 xfs_agblock_t bno,
2747 xfs_extlen_t len,
2748 bool *exists)
2749{
2750 xfs_agino_t low;
2751 xfs_agino_t high;
2752
2753 low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
2754 high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
2755
2756 return xfs_ialloc_has_inode_record(cur, low, high, exists);
2757}
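
/*
 * Example (invented numbers): with 8 inodes per block, a query for agbno 100,
 * len 4 becomes the inode range [800, 831], and any inobt record whose
 * non-hole regions overlap that range causes *exists to be set by
 * xfs_ialloc_has_inode_record().
 */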
2758
2759struct xfs_ialloc_count_inodes {
2760 xfs_agino_t count;
2761 xfs_agino_t freecount;
2762};

/* Tally the count and freecount of an inode btree record. */
2765STATIC int
2766xfs_ialloc_count_inodes_rec(
2767 struct xfs_btree_cur *cur,
2768 union xfs_btree_rec *rec,
2769 void *priv)
2770{
2771 struct xfs_inobt_rec_incore irec;
2772 struct xfs_ialloc_count_inodes *ci = priv;
2773
2774 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
2775 ci->count += irec.ir_count;
2776 ci->freecount += irec.ir_freecount;
2777
2778 return 0;
2779}

/* Count allocated and free inodes under the given btree cursor. */
2782int
2783xfs_ialloc_count_inodes(
2784 struct xfs_btree_cur *cur,
2785 xfs_agino_t *count,
2786 xfs_agino_t *freecount)
2787{
2788 struct xfs_ialloc_count_inodes ci = {0};
2789 int error;
2790
2791 ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
2792 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
2793 if (error)
2794 return error;
2795
2796 *count = ci.count;
2797 *freecount = ci.freecount;
2798 return 0;
2799}

/*
 * Initialize inode-related geometry information.
 *
 * Compute the inode btree min and max levels and set maxicount.
 *
 * Set the inode cluster size.  This may still be overridden by the file
 * system block size if it is larger than the chosen cluster size.
 *
 * For v5 filesystems, scale the cluster size with the inode capacity to
 * maximise inode chunk alignment; this is used by the inode cluster buffer
 * I/O paths and by sparse inode allocation.
 */
2815void
2816xfs_ialloc_setup_geometry(
2817 struct xfs_mount *mp)
2818{
2819 struct xfs_sb *sbp = &mp->m_sb;
2820 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2821 uint64_t icount;
2822 uint inodes;
2823
2824
2825 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
2826 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
2827 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
2828 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
2829 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
2830
2831 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
2832 sbp->sb_inopblock);
2833 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
2834
2835 if (sbp->sb_spino_align)
2836 igeo->ialloc_min_blks = sbp->sb_spino_align;
2837 else
2838 igeo->ialloc_min_blks = igeo->ialloc_blks;
2839
2840
2841 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
2842 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
2843 inodes);
2844
2845
2846
2847
2848
2849
2850
2851 if (sbp->sb_imax_pct && igeo->ialloc_blks) {
2852
2853
2854
2855
2856 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
2857 do_div(icount, 100);
2858 do_div(icount, igeo->ialloc_blks);
2859 igeo->maxicount = XFS_FSB_TO_INO(mp,
2860 icount * igeo->ialloc_blks);
2861 } else {
2862 igeo->maxicount = 0;
2863 }
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
2876 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2877 int new_size = igeo->inode_cluster_size_raw;
2878
2879 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
2880 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
2881 igeo->inode_cluster_size_raw = new_size;
2882 }
2883
2884
2885 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
2886 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
2887 igeo->inode_cluster_size_raw);
2888 else
2889 igeo->blocks_per_cluster = 1;
2890 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
2891 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
2892
2893
2894 if (xfs_sb_version_hasalign(&mp->m_sb) &&
2895 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
2896 igeo->cluster_align = mp->m_sb.sb_inoalignmt;
2897 else
2898 igeo->cluster_align = 1;
2899 igeo->inoalign_mask = igeo->cluster_align - 1;
2900 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
2901
2902
2903
2904
2905
2906 if (mp->m_dalign && igeo->inoalign_mask &&
2907 !(mp->m_dalign & igeo->inoalign_mask))
2908 igeo->ialloc_align = mp->m_dalign;
2909 else
2910 igeo->ialloc_align = 0;
2911}
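
/*
 * For orientation (illustrative, not guaranteed values): with 4096-byte
 * blocks and 512-byte inodes, ialloc_inos == 64 and ialloc_blks == 8.  On a
 * v5 (CRC-enabled) filesystem the base XFS_INODE_BIG_CLUSTER_SIZE cluster is
 * scaled by sb_inodesize / XFS_DINODE_MIN_SIZE (2x here) provided
 * sb_inoalignmt is large enough, and cluster_align is then taken from
 * sb_inoalignmt when the inode alignment feature bit is set.
 */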

/* Compute the location of the root directory inode that is laid out by mkfs. */
2914xfs_ino_t
2915xfs_ialloc_calc_rootino(
2916 struct xfs_mount *mp,
2917 int sunit)
2918{
2919 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2920 xfs_agblock_t first_bno;
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
2931
2932
2933 first_bno += 2;
2934
2935
2936 first_bno += 1;
2937
2938
2939 first_bno += xfs_alloc_min_freelist(mp, NULL);
2940
2941
2942 if (xfs_sb_version_hasfinobt(&mp->m_sb))
2943 first_bno++;
2944
2945
2946 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2947 first_bno++;
2948
2949
2950 if (xfs_sb_version_hasreflink(&mp->m_sb))
2951 first_bno++;
2952
2953
2954
2955
2956
2957
2958
2959
2960 if (mp->m_sb.sb_logstart &&
2961 XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
2962 first_bno += mp->m_sb.sb_logblocks;
2963
2964
2965
2966
2967
2968 if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
2969 first_bno = roundup(first_bno, sunit);
2970 else if (xfs_sb_version_hasalign(&mp->m_sb) &&
2971 mp->m_sb.sb_inoalignmt > 1)
2972 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);
2973
2974 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
2975}
2976