#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
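
/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and an inode, retry the
 * scrub with XCHK_TRY_HARDER set so that logged inode cores are forced
 * out to disk first.
 */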
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, sc->flags & XCHK_TRY_HARDER);
}

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};
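
/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */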
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
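
/* Cross-reference with the other btrees. */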
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}
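
/* Is this chunk worth checking? */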
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}
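
/* Count the number of free inodes. */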
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
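
/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode within the record; and the on-disk inode.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */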
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
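
/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's recorded in the inobt.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */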
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			mp->m_inodes_per_cluster);

	/* Map this inode cluster. */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				mp->m_blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
			0, 0);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check the free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}
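
/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */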
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters, this will call _check_cluster for each cluster.
	 *
	 * For the case where multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += bs->sc->mp->m_inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}
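
/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * can contain multiple inodes, we check that the inobt record is aligned to
 * the correct inode, not just the correct block on disk.  This results in a
 * finer grained corruption check.
 */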
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked by the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int		imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				mp->m_cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * chunk?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inode alignment size. */
	if (irec->ir_startino & (mp->m_cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (mp->m_inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (mp->m_inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + mp->m_inodes_per_cluster;
}
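
/* Scrub an inobt/finobt record. */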
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes. */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}
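
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 */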
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
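
/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */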
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
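
/* Scrub one of the inode btrees for some AG. */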
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}
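
/* See if an inode btree has (or doesn't have) an inode chunk record. */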
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}
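
/* xref check that the extent is not covered by inodes */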
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}
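
/* xref check that the extent is covered by inodes */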
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}