1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "xfs.h"
21#include "xfs_fs.h"
22#include "xfs_shared.h"
23#include "xfs_format.h"
24#include "xfs_trans_resv.h"
25#include "xfs_mount.h"
26#include "xfs_defer.h"
27#include "xfs_btree.h"
28#include "xfs_bit.h"
29#include "xfs_log_format.h"
30#include "xfs_trans.h"
31#include "xfs_sb.h"
32#include "xfs_inode.h"
33#include "xfs_alloc.h"
34#include "scrub/scrub.h"
35#include "scrub/common.h"
36#include "scrub/btree.h"
37#include "scrub/trace.h"
38
39
40
41
42
43
44
/*
 * Check for btree operation errors.  Returns true if the caller may
 * proceed (*error == 0 on entry); otherwise records the failure and
 * returns false.  @errflag selects which sm_flags bit marks a
 * corruption (OFLAG_CORRUPT for a direct check, OFLAG_XFAIL for a
 * cross-reference), and @ret_ip is the caller's address for tracing.
 */
static bool
__xfs_scrub_btree_process_error(
	struct xfs_scrub_context	*sc,
	struct xfs_btree_cur		*cur,
	int				level,
	int				*error,
	__u32				errflag,
	void				*ret_ip)
{
	if (*error == 0)
		return true;

	switch (*error) {
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			trace_xfs_scrub_ifork_btree_op_error(sc, cur, level,
					*error, ret_ip);
		else
			trace_xfs_scrub_btree_op_error(sc, cur, level,
					*error, ret_ip);
		break;
	}
	return false;
}
79
80bool
81xfs_scrub_btree_process_error(
82 struct xfs_scrub_context *sc,
83 struct xfs_btree_cur *cur,
84 int level,
85 int *error)
86{
87 return __xfs_scrub_btree_process_error(sc, cur, level, error,
88 XFS_SCRUB_OFLAG_CORRUPT, __return_address);
89}
90
91bool
92xfs_scrub_btree_xref_process_error(
93 struct xfs_scrub_context *sc,
94 struct xfs_btree_cur *cur,
95 int level,
96 int *error)
97{
98 return __xfs_scrub_btree_process_error(sc, cur, level, error,
99 XFS_SCRUB_OFLAG_XFAIL, __return_address);
100}
101
102
103static void
104__xfs_scrub_btree_set_corrupt(
105 struct xfs_scrub_context *sc,
106 struct xfs_btree_cur *cur,
107 int level,
108 __u32 errflag,
109 void *ret_ip)
110{
111 sc->sm->sm_flags |= errflag;
112
113 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
114 trace_xfs_scrub_ifork_btree_error(sc, cur, level,
115 ret_ip);
116 else
117 trace_xfs_scrub_btree_error(sc, cur, level,
118 ret_ip);
119}
120
121void
122xfs_scrub_btree_set_corrupt(
123 struct xfs_scrub_context *sc,
124 struct xfs_btree_cur *cur,
125 int level)
126{
127 __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
128 __return_address);
129}
130
131void
132xfs_scrub_btree_xref_set_corrupt(
133 struct xfs_scrub_context *sc,
134 struct xfs_btree_cur *cur,
135 int level)
136{
137 __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
138 __return_address);
139}
140
141
142
143
144
/*
 * Make sure this record is in order and doesn't stray outside of the parent
 * keyspace.
 */
STATIC void
xfs_scrub_btree_rec(
	struct xfs_scrub_btree	*bs)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_rec	*rec;
	union xfs_btree_key	key;
	union xfs_btree_key	hkey;
	union xfs_btree_key	*keyp;
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*keyblock;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, 0, &bp);
	rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

	trace_xfs_scrub_btree_rec(bs->sc, cur, 0);

	/* If this isn't the first record, are they in order? */
	if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
		xfs_scrub_btree_set_corrupt(bs->sc, cur, 0);
	bs->firstrec = false;
	memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);

	/* A single-level btree has no parent keys to check against. */
	if (cur->bc_nlevels == 1)
		return;

	/* Is this at least as large as the parent low key? */
	cur->bc_ops->init_key_from_rec(&key, rec);
	keyblock = xfs_btree_get_block(cur, 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
		xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	cur->bc_ops->init_high_key_from_rec(&hkey, rec);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
		xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
}
188
189
190
191
192
/*
 * Make sure this key is in order and doesn't stray outside of the parent
 * keyspace.
 */
STATIC void
xfs_scrub_btree_key(
	struct xfs_scrub_btree	*bs,
	int			level)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_key	*key;
	union xfs_btree_key	*keyp;
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*keyblock;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, level, &bp);
	key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);

	trace_xfs_scrub_btree_key(bs->sc, cur, level);

	/* If this isn't the first key, are they in order? */
	if (!bs->firstkey[level] &&
	    !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
		xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
	bs->firstkey[level] = false;
	memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);

	/* The root level has no parent keys to compare against. */
	if (level + 1 >= cur->bc_nlevels)
		return;

	/* Is this at least as large as the parent low key? */
	keyblock = xfs_btree_get_block(cur, level + 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
		xfs_scrub_btree_set_corrupt(bs->sc, cur, level);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
		xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
}
235
236
237
238
239
240static bool
241xfs_scrub_btree_ptr_ok(
242 struct xfs_scrub_btree *bs,
243 int level,
244 union xfs_btree_ptr *ptr)
245{
246 bool res;
247
248
249 if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
250 level == bs->cur->bc_nlevels)
251 return true;
252
253
254 if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
255 res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
256 else
257 res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
258 if (!res)
259 xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
260
261 return res;
262}
263
264
/*
 * Check that a btree block's sibling pointer in @direction (-1 for left,
 * +1 for right) agrees with what we find by moving a duplicated cursor
 * one step at the parent level.
 */
STATIC int
xfs_scrub_btree_block_check_sibling(
	struct xfs_scrub_btree		*bs,
	int				level,
	int				direction,
	union xfs_btree_ptr		*sibling)
{
	struct xfs_btree_cur		*cur = bs->cur;
	struct xfs_btree_block		*pblock;
	struct xfs_buf			*pbp;
	struct xfs_btree_cur		*ncur = NULL;
	union xfs_btree_ptr		*pp;
	int				success;
	int				error;

	/* Duplicate the cursor so moving it doesn't disturb the walk. */
	error = xfs_btree_dup_cursor(cur, &ncur);
	if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error) ||
	    !ncur)
		return error;

	/*
	 * If the pointer is null, we shouldn't be able to move the upper
	 * level pointer anywhere.
	 */
	if (xfs_btree_ptr_is_null(cur, sibling)) {
		if (direction > 0)
			error = xfs_btree_increment(ncur, level + 1, &success);
		else
			error = xfs_btree_decrement(ncur, level + 1, &success);
		if (error == 0 && success)
			xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
		error = 0;
		goto out;
	}

	/* Increment upper level pointer. */
	if (direction > 0)
		error = xfs_btree_increment(ncur, level + 1, &success);
	else
		error = xfs_btree_decrement(ncur, level + 1, &success);
	if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error))
		goto out;
	if (!success) {
		xfs_scrub_btree_set_corrupt(bs->sc, cur, level + 1);
		goto out;
	}

	/* Compare upper level pointer to sibling pointer. */
	pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
	pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
	if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp))
		goto out;
	if (pbp)
		xfs_scrub_buffer_recheck(bs->sc, pbp);

	if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
		xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
out:
	xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
	return error;
}
326
327
328STATIC int
329xfs_scrub_btree_block_check_siblings(
330 struct xfs_scrub_btree *bs,
331 struct xfs_btree_block *block)
332{
333 struct xfs_btree_cur *cur = bs->cur;
334 union xfs_btree_ptr leftsib;
335 union xfs_btree_ptr rightsib;
336 int level;
337 int error = 0;
338
339 xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
340 xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
341 level = xfs_btree_get_level(block);
342
343
344 if (level == cur->bc_nlevels - 1) {
345 if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
346 !xfs_btree_ptr_is_null(cur, &rightsib))
347 xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
348 goto out;
349 }
350
351
352
353
354
355
356 error = xfs_scrub_btree_block_check_sibling(bs, level, -1, &leftsib);
357 if (error)
358 return error;
359 error = xfs_scrub_btree_block_check_sibling(bs, level, 1, &rightsib);
360 if (error)
361 return error;
362out:
363 return error;
364}
365
/*
 * A btree block whose ownership check had to be deferred until after the
 * tree walk (see xfs_scrub_btree_check_owner); stashed on bs->to_check.
 */
struct check_owner {
	struct list_head	list;	/* link in bs->to_check */
	xfs_daddr_t		daddr;	/* disk address of the btree block */
	int			level;	/* tree level the block was found at */
};
371
372
373
374
375
/*
 * Check that the given btree block is really owned by the data structure
 * we're scrubbing, by cross-referencing it against the AG space btrees.
 */
STATIC int
xfs_scrub_btree_check_block_owner(
	struct xfs_scrub_btree		*bs,
	int				level,
	xfs_daddr_t			daddr)
{
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_btnum_t			btnum;
	bool				init_sa;
	int				error = 0;

	if (!bs->cur)
		return 0;

	btnum = bs->cur->bc_btnum;
	agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
	agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);

	/*
	 * Long-pointer btrees can span AGs, so the AG headers for this
	 * block's AG may not be set up yet; initialize them here and free
	 * them below.
	 */
	init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
	if (init_sa) {
		error = xfs_scrub_ag_init(bs->sc, agno, &bs->sc->sa);
		if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur,
				level, &error))
			return error;
	}

	xfs_scrub_xref_is_used_space(bs->sc, agbno, 1);

	/*
	 * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
	 * have to nullify it (to shut down further block owner checks) if
	 * self-xref encounters problems.
	 */
	if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
		bs->cur = NULL;

	/* Same aliasing hazard applies when scrubbing the rmapbt itself. */
	xfs_scrub_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
	if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
		bs->cur = NULL;

	if (init_sa)
		xfs_scrub_ag_free(bs->sc, &bs->sc->sa);

	return error;
}
421
422
423STATIC int
424xfs_scrub_btree_check_owner(
425 struct xfs_scrub_btree *bs,
426 int level,
427 struct xfs_buf *bp)
428{
429 struct xfs_btree_cur *cur = bs->cur;
430 struct check_owner *co;
431
432 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
433 return 0;
434
435
436
437
438
439
440
441
442
443 if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
444 co = kmem_alloc(sizeof(struct check_owner),
445 KM_MAYFAIL | KM_NOFS);
446 if (!co)
447 return -ENOMEM;
448 co->level = level;
449 co->daddr = XFS_BUF_ADDR(bp);
450 list_add_tail(&co->list, &bs->to_check);
451 return 0;
452 }
453
454 return xfs_scrub_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
455}
456
457
458
459
460
/*
 * Grab and scrub a btree block given a btree pointer.  Returns block
 * and buffer pointers (if applicable) if they're ok to use.
 */
STATIC int
xfs_scrub_btree_get_block(
	struct xfs_scrub_btree		*bs,
	int				level,
	union xfs_btree_ptr		*pp,
	struct xfs_btree_block		**pblock,
	struct xfs_buf			**pbp)
{
	void				*failed_at;
	int				error;

	*pblock = NULL;
	*pbp = NULL;

	error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
	if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, level, &error) ||
	    !*pblock)
		return error;

	/* Run the block verifier matching the pointer width. */
	xfs_btree_get_block(bs->cur, level, pbp);
	if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
		failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
				level, *pbp);
	else
		failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
				level, *pbp);
	if (failed_at) {
		/* Corrupt block; note it and stop descending here. */
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
		return 0;
	}
	if (*pbp)
		xfs_scrub_buffer_recheck(bs->sc, *pbp);

	/*
	 * Check the block's owner; this function absorbs error codes
	 * for us.
	 */
	error = xfs_scrub_btree_check_owner(bs, level, *pbp);
	if (error)
		return error;

	/*
	 * Check the block's siblings; this function absorbs error codes
	 * for us.
	 */
	return xfs_scrub_btree_block_check_siblings(bs, *pblock);
}
508
509
510
511
512
/*
 * Check that the low and high keys of this block match the keys stored
 * in the parent block.
 */
STATIC void
xfs_scrub_btree_block_keys(
	struct xfs_scrub_btree		*bs,
	int				level,
	struct xfs_btree_block		*block)
{
	union xfs_btree_key		block_keys;
	struct xfs_btree_cur		*cur = bs->cur;
	union xfs_btree_key		*high_bk;
	union xfs_btree_key		*parent_keys;
	union xfs_btree_key		*high_pk;
	struct xfs_btree_block		*parent_block;
	struct xfs_buf			*bp;

	/* The root block has no parent to compare against. */
	if (level >= cur->bc_nlevels - 1)
		return;

	/* Calculate the keys for this block. */
	xfs_btree_get_keys(cur, block, &block_keys);

	/* Obtain the parent's copy of the keys for this block. */
	parent_block = xfs_btree_get_block(cur, level + 1, &bp);
	parent_keys = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
		xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Get high keys */
	high_bk = xfs_btree_high_key_from_key(cur, &block_keys);
	high_pk = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
		xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
}
552
553
554
555
556
557
/*
 * Visit all nodes and leaves of a btree.  Check that all of the records
 * and keys are in order, that the pointers and sibling links are valid,
 * and call @scrub_fn on every record.  Block ownership is cross-checked
 * against @oinfo; @private is passed through to @scrub_fn via bs.
 */
int
xfs_scrub_btree(
	struct xfs_scrub_context	*sc,
	struct xfs_btree_cur		*cur,
	xfs_scrub_btree_rec_fn		scrub_fn,
	struct xfs_owner_info		*oinfo,
	void				*private)
{
	struct xfs_scrub_btree		bs = { NULL };
	union xfs_btree_ptr		ptr;
	union xfs_btree_ptr		*pp;
	union xfs_btree_rec		*recp;
	struct xfs_btree_block		*block;
	int				level;
	struct xfs_buf			*bp;
	struct check_owner		*co;
	struct check_owner		*n;
	int				i;
	int				error = 0;

	/* Initialize scrub state */
	bs.cur = cur;
	bs.scrub_rec = scrub_fn;
	bs.oinfo = oinfo;
	bs.firstrec = true;
	bs.private = private;
	bs.sc = sc;
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
		bs.firstkey[i] = true;
	INIT_LIST_HEAD(&bs.to_check);

	/* Don't try to check a tree with a height we can't handle. */
	if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
		xfs_scrub_btree_set_corrupt(sc, cur, 0);
		goto out;
	}

	/*
	 * Load the root of the btree.  The helper function absorbs
	 * error codes for us.
	 */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	if (!xfs_scrub_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
		goto out;
	error = xfs_scrub_btree_get_block(&bs, level, &ptr, &block, &bp);
	if (error || !block)
		goto out;

	cur->bc_ptrs[level] = 1;

	/* Iterative depth-first walk driven by cur->bc_ptrs[]. */
	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		if (level == 0) {
			/* End of leaf, pop back towards the root. */
			if (cur->bc_ptrs[level] >
			    be16_to_cpu(block->bb_numrecs)) {
				xfs_scrub_btree_block_keys(&bs, level, block);
				if (level < cur->bc_nlevels - 1)
					cur->bc_ptrs[level + 1]++;
				level++;
				continue;
			}

			/* Records in order for scrub? */
			xfs_scrub_btree_rec(&bs);

			/* Call out to the record checker. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
			error = bs.scrub_rec(&bs, recp);
			if (error)
				break;
			if (xfs_scrub_should_terminate(sc, &error) ||
			    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
				break;

			cur->bc_ptrs[level]++;
			continue;
		}

		/* End of node, pop back towards the root. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
			xfs_scrub_btree_block_keys(&bs, level, block);
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		/* Keys in order for scrub? */
		xfs_scrub_btree_key(&bs, level);

		/* Drill another level deeper. */
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
		if (!xfs_scrub_btree_ptr_ok(&bs, level, pp)) {
			cur->bc_ptrs[level]++;
			continue;
		}
		level--;
		error = xfs_scrub_btree_get_block(&bs, level, pp, &block, &bp);
		if (error || !block)
			goto out;

		cur->bc_ptrs[level] = 1;
	}

out:
	/* Process deferred owner checks on btree blocks. */
	list_for_each_entry_safe(co, n, &bs.to_check, list) {
		if (!error && bs.cur)
			error = xfs_scrub_btree_check_block_owner(&bs,
					co->level, co->daddr);
		list_del(&co->list);
		kmem_free(co);
	}

	return error;
}
677