1
2
3
4
5
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_btree.h"
13#include "xfs_log_format.h"
14#include "xfs_trans.h"
15#include "xfs_sb.h"
16#include "xfs_inode.h"
17#include "xfs_alloc.h"
18#include "xfs_alloc_btree.h"
19#include "xfs_ialloc.h"
20#include "xfs_ialloc_btree.h"
21#include "xfs_rmap.h"
22#include "xfs_rmap_btree.h"
23#include "xfs_refcount_btree.h"
24#include "xfs_extent_busy.h"
25#include "xfs_ag.h"
26#include "xfs_ag_resv.h"
27#include "xfs_quota.h"
28#include "scrub/scrub.h"
29#include "scrub/common.h"
30#include "scrub/trace.h"
31#include "scrub/repair.h"
32#include "scrub/bitmap.h"
33
34
35
36
37
38
39int
40xrep_attempt(
41 struct xfs_scrub *sc)
42{
43 int error = 0;
44
45 trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);
46
47 xchk_ag_btcur_free(&sc->sa);
48
49
50 ASSERT(sc->ops->repair);
51 error = sc->ops->repair(sc);
52 trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
53 switch (error) {
54 case 0:
55
56
57
58
59 sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
60 sc->flags |= XREP_ALREADY_FIXED;
61 return -EAGAIN;
62 case -EDEADLOCK:
63 case -EAGAIN:
64
65 if (!(sc->flags & XCHK_TRY_HARDER)) {
66 sc->flags |= XCHK_TRY_HARDER;
67 return -EAGAIN;
68 }
69
70
71
72
73
74 return -EFSCORRUPTED;
75 default:
76 return error;
77 }
78}
79
80
81
82
83
84
85
86
87
88
/*
 * Online repair could not fix the corruption; emit a rate-limited alert
 * telling the administrator to unmount and run offline xfs_repair.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair. Unmount and run xfs_repair.");
}
96
97
98
99
100
/*
 * Repair probe: performs no repair work; only reports the error from
 * xchk_should_terminate() if the scrub has been asked to stop.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	return xchk_should_terminate(sc, &error) ? error : 0;
}
112
113
114
115
116
117int
118xrep_roll_ag_trans(
119 struct xfs_scrub *sc)
120{
121 int error;
122
123
124 if (sc->sa.agi_bp)
125 xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
126 if (sc->sa.agf_bp)
127 xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
128 if (sc->sa.agfl_bp)
129 xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);
130
131
132
133
134
135
136
137
138 error = xfs_trans_roll(&sc->tp);
139 if (error)
140 return error;
141
142
143 if (sc->sa.agi_bp)
144 xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
145 if (sc->sa.agf_bp)
146 xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
147 if (sc->sa.agfl_bp)
148 xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);
149
150 return 0;
151}
152
153
154
155
156
157
158bool
159xrep_ag_has_space(
160 struct xfs_perag *pag,
161 xfs_extlen_t nr_blocks,
162 enum xfs_ag_resv_type type)
163{
164 return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
165 !xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
166 pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
167}
168
169
170
171
172
173
/*
 * Figure out how many blocks to reserve for an AG repair.  We estimate
 * the worst-case size of each per-AG btree we might have to rebuild and
 * return the largest of those estimates.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_scrub_metadata *sm = sc->sm;
	struct xfs_perag	*pag;
	struct xfs_buf		*bp;
	xfs_agino_t		icount = NULLAGINO;
	xfs_extlen_t		aglen = NULLAGBLOCK;
	xfs_extlen_t		usedlen;
	xfs_extlen_t		freelen;
	xfs_extlen_t		bnobt_sz;
	xfs_extlen_t		inobt_sz;
	xfs_extlen_t		rmapbt_sz;
	xfs_extlen_t		refcbt_sz;
	int			error;

	/* No reservation needed unless a repair was requested. */
	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (pag->pagi_init) {
		/* Use the in-memory inode count if it's been initialized. */
		icount = pag->pagi_count;
	} else {
		/* Otherwise try to read the counters in via the AGI. */
		error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
	if (error) {
		/*
		 * Couldn't read the AGF; assume worst case for both the
		 * free and the used space estimates.
		 */
		aglen = xfs_ag_block_count(mp, sm->sm_agno);
		freelen = aglen;
		usedlen = aglen;
	} else {
		struct xfs_agf	*agf = bp->b_addr;

		aglen = be32_to_cpu(agf->agf_length);
		freelen = be32_to_cpu(agf->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}
	xfs_perag_put(pag);

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(mp, sm->sm_agno, icount)) {
		xfs_agino_t	first, last;

		/* Assume every inode number in the AG range is in use. */
		xfs_agino_range(mp, sm->sm_agno, &first, &last);
		icount = last - first + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
	    freelen >= aglen) {
		aglen = xfs_ag_block_count(mp, sm->sm_agno);
		freelen = aglen;
		usedlen = aglen;
	}

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Free space btrees: enough records to map all the free space,
	 * doubled because there are two trees (by-block and by-count).
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	/*
	 * Inode btree: with sparse inodes, worst case is one record per
	 * holemask bit's worth of inodes; otherwise one per inode chunk.
	 */
	if (xfs_has_sparseinodes(mp))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	/* Double the inobt estimate if there's also a free inode btree. */
	if (xfs_has_finobt(mp))
		inobt_sz *= 2;
	/* Refcount btree: sized by the used space; absent without reflink. */
	if (xfs_has_reflink(mp))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_has_rmapbt(mp)) {
		/*
		 * Reverse-mapping btree: with reflink, a block can carry
		 * more than one rmap record, so size the estimate at twice
		 * the AG length; otherwise one record per used block is
		 * the upper bound.  NOTE(review): the 2x factor looks like
		 * a heuristic cap rather than a strict worst case --
		 * confirm against the online-repair design notes.
		 */
		if (xfs_has_reflink(mp))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	/* Reserve enough for whichever rebuild would be the biggest. */
	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
286
287
/*
 * Allocate a single block in the AG being repaired, charging it to the
 * given per-AG space reservation @resv and tagging it with the rmap
 * owner @oinfo.  On success the new block's location is stored in
 * *fsbno.
 */
int
xrep_alloc_ag_block(
	struct xfs_scrub	*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_fsblock_t		*fsbno,
	enum xfs_ag_resv_type	resv)
{
	struct xfs_alloc_arg	args = {0};
	xfs_agblock_t		bno;
	int			error;

	switch (resv) {
	case XFS_AG_RESV_AGFL:
	case XFS_AG_RESV_RMAPBT:
		/*
		 * AGFL-backed reservations take the block straight off
		 * the AG free list instead of the regular allocator.
		 */
		error = xfs_alloc_get_freelist(sc->tp, sc->sa.agf_bp, &bno, 1);
		if (error)
			return error;
		if (bno == NULLAGBLOCK)
			return -ENOSPC;
		/* Clear any busy-extent state before reusing the block. */
		xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno,
				1, false);
		*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, bno);
		/* rmapbt allocations also adjust the rmapbt reservation. */
		if (resv == XFS_AG_RESV_RMAPBT)
			xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.pag->pag_agno);
		return 0;
	default:
		break;
	}

	/* Everything else goes through the normal block allocator. */
	args.tp = sc->tp;
	args.mp = sc->mp;
	args.oinfo = *oinfo;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, sc->sa.pag->pag_agno, 0);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_THIS_AG;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	if (args.fsbno == NULLFSBLOCK)
		return -ENOSPC;
	ASSERT(args.len == 1);
	*fsbno = args.fsbno;

	return 0;
}
337
338
/*
 * Initialize a brand-new btree block at @fsb for the btree type @btnum,
 * attach the verifier @ops, and hand the logged buffer back in *bpp.
 */
int
xrep_init_btblock(
	struct xfs_scrub	*sc,
	xfs_fsblock_t		fsb,
	struct xfs_buf		**bpp,
	xfs_btnum_t		btnum,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_trans	*tp = sc->tp;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	int			error;

	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), btnum);

	/* The new block must be in the AG we're repairing. */
	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.pag->pag_agno);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0,
			&bp);
	if (error)
		return error;

	/* Zero the block and write an empty btree block header into it. */
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.pag->pag_agno);
	/* Mark the buffer as btree metadata and log the whole block. */
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
	bp->b_ops = ops;
	*bpp = bp;

	return 0;
}
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
/*
 * Invalidate the buffers for the blocks recorded in @bitmap so that
 * stale contents for blocks we're disposing of don't get written back.
 *
 * Only buffers that are already in memory are invalidated (we look them
 * up with xfs_buf_incore + XBF_TRYLOCK); there's no point reading a
 * block in from disk just to invalidate it.  Note that only
 * single-fsblock buffers are looked up here, so larger multi-block
 * buffers would not be caught -- NOTE(review): confirm callers only
 * reap single-fsb metadata blocks.
 */
int
xrep_invalidate_blocks(
	struct xfs_scrub	*sc,
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
		/* Skip anything that isn't a valid fs block number. */
		if (!xfs_verify_fsbno(sc->mp, fsbno))
			continue;
		bp = xfs_buf_incore(sc->mp->m_ddev_targp,
				XFS_FSB_TO_DADDR(sc->mp, fsbno),
				XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
		if (bp) {
			/* Join to the transaction and invalidate it. */
			xfs_trans_bjoin(sc->tp, bp);
			xfs_trans_binval(sc->tp, bp);
		}
	}

	return 0;
}
473
474
475int
476xrep_fix_freelist(
477 struct xfs_scrub *sc,
478 bool can_shrink)
479{
480 struct xfs_alloc_arg args = {0};
481
482 args.mp = sc->mp;
483 args.tp = sc->tp;
484 args.agno = sc->sa.pag->pag_agno;
485 args.alignment = 1;
486 args.pag = sc->sa.pag;
487
488 return xfs_alloc_fix_freelist(&args,
489 can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
490}
491
492
493
494
/* Put a block back on the AGFL. */
STATIC int
xrep_put_freelist(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	int			error;

	/* Make sure there's space on the freelist for the block. */
	error = xrep_fix_freelist(sc, true);
	if (error)
		return error;

	/*
	 * The block becomes AG-owned metadata once it's on the free list,
	 * so record an OWN_AG rmap for it before adding it.
	 */
	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
			&XFS_RMAP_OINFO_AG);
	if (error)
		return error;

	/* Put the block on the freelist and mark it busy (no discard). */
	error = xfs_alloc_put_freelist(sc->tp, sc->sa.agf_bp, sc->sa.agfl_bp,
			agbno, 0);
	if (error)
		return error;
	xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	return 0;
}
527
528
/* Dispose of a single block during a repair. */
STATIC int
xrep_reap_block(
	struct xfs_scrub	*sc,
	xfs_fsblock_t		fsbno,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type	resv)
{
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agf_bp = NULL;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	bool			has_other_rmap;
	int			error;

	agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);

	/*
	 * If we're repairing per-inode metadata we have to read the AGF
	 * ourselves; otherwise the scrub context already holds the AGF
	 * buffer for this AG.
	 */
	if (sc->ip) {
		error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp);
		if (error)
			return error;
	} else {
		agf_bp = sc->sa.agf_bp;
	}
	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag);

	/* Does any other owner have an rmap record for this block? */
	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out_free;

	/*
	 * Decide how to dispose of the block:
	 *
	 * - If another owner still maps the block, only remove our own
	 *   rmap record; the block stays allocated to the other owner.
	 * - If the block belongs to the AGFL reservation, put it back on
	 *   the free list.
	 * - Otherwise free the extent outright under @resv.
	 */
	if (has_other_rmap)
		error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno,
					1, oinfo);
	else if (resv == XFS_AG_RESV_AGFL)
		error = xrep_put_freelist(sc, agbno);
	else
		error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
	/* Release the AGF buffer only if we read it ourselves above. */
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	if (error)
		return error;

	/* Roll the transaction to commit this block's disposal. */
	if (sc->ip)
		return xfs_trans_roll_inode(&sc->tp, sc->ip);
	return xrep_roll_ag_trans(sc);

out_free:
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	return error;
}
600
601
602int
603xrep_reap_extents(
604 struct xfs_scrub *sc,
605 struct xbitmap *bitmap,
606 const struct xfs_owner_info *oinfo,
607 enum xfs_ag_resv_type type)
608{
609 struct xbitmap_range *bmr;
610 struct xbitmap_range *n;
611 xfs_fsblock_t fsbno;
612 int error = 0;
613
614 ASSERT(xfs_has_rmapbt(sc->mp));
615
616 for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
617 ASSERT(sc->ip != NULL ||
618 XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);
619 trace_xrep_dispose_btree_extent(sc->mp,
620 XFS_FSB_TO_AGNO(sc->mp, fsbno),
621 XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
622
623 error = xrep_reap_block(sc, fsbno, oinfo, type);
624 if (error)
625 break;
626 }
627
628 return error;
629}
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
/*
 * Context for walking the rmap btree while hunting for the root blocks
 * of the per-AG btrees listed in btree_info.
 */
struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;	/* locked AGFL buffer; may be NULL */
	struct xfs_agf			*agf;		/* AGF header contents */
	struct xrep_find_ag_btree	*btree_info;	/* btrees we're searching for */
};
664
665
666STATIC int
667xrep_findroot_agfl_walk(
668 struct xfs_mount *mp,
669 xfs_agblock_t bno,
670 void *priv)
671{
672 xfs_agblock_t *agbno = priv;
673
674 return (*agbno == bno) ? -ECANCELED : 0;
675}
676
677
/*
 * Decide whether the block at @agbno could be the root of the btree
 * described by @fab.  Sets *done_with_block when the block's magic
 * matches @fab's type, meaning no other entry in the search list needs
 * to examine this block.
 */
STATIC int
xrep_findroot_block(
	struct xrep_findroot	*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t		owner,
	xfs_agblock_t		agbno,
	bool			*done_with_block)
{
	struct xfs_mount	*mp = ri->sc->mp;
	struct xfs_buf		*bp;
	struct xfs_btree_block	*btblock;
	xfs_daddr_t		daddr;
	int			block_level;
	int			error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);

	/*
	 * Free-list blocks carry the OWN_AG rmap owner just like real
	 * AG btree blocks but are not part of any btree, so skip any
	 * OWN_AG block that turns out to be sitting on the AGFL.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the block without attaching a verifier (NULL ops), since
	 * we don't yet know what kind of block this is; we do our own
	 * magic/uuid checks below and only run @fab's read verifier once
	 * those look plausible.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* The on-disk magic must match the one @fab's buf_ops expects. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has verifier ops attached it was read and
	 * validated before; a mismatch with @fab's ops means the block
	 * belongs to some other structure.  If no ops are attached yet,
	 * validate by hand: the buffer must not be dirty, the block must
	 * carry this filesystem's metadata uuid, and it must pass @fab's
	 * read verifier.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;

		/*
		 * Attach @fab's ops and run the read verifier; on failure,
		 * detach again and clear the error so the buffer doesn't
		 * keep the wrong verifier or a stale error state.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * On success the ops stay attached, so a future lookup of
		 * this buffer takes the (bp->b_ops != NULL) path above.
		 */
	}

	/*
	 * The magic matched this btree type, so none of the other btrees
	 * in the search list can claim this block.
	 */
	*done_with_block = true;

	/*
	 * Track the highest btree level we've seen for this tree: the
	 * root must be the unique block at the greatest level.  If this
	 * block ties the current best level, the root is ambiguous.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		/* Second block at the best level seen so far. */
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		/* Lower than our best candidate; ignore it. */
		goto out;
	}

	/* New highest level; this block becomes the root candidate. */
	fab->height = block_level + 1;

	/*
	 * A root block has no siblings, so only accept this block as the
	 * root if both sibling pointers are NULL.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}
828
829
830
831
832
/*
 * Rmap record walker: for every block in this record that belongs to a
 * per-AG (non-inode) owner, check whether it could be the root of one
 * of the btrees we're looking for.
 */
STATIC int
xrep_findroot_rmap(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec	*rec,
	void			*priv)
{
	struct xrep_findroot	*ri = priv;
	struct xrep_find_ag_btree	*fab;
	xfs_agblock_t		b;
	bool			done;
	int			error = 0;

	/* Ignore anything that isn't AG metadata. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
		return 0;

	/*
	 * Test each block of this record against each btree whose rmap
	 * owner matches; a block that matched one tree (done == true)
	 * doesn't need to be checked against the remaining trees.
	 */
	for (b = 0; b < rec->rm_blockcount; b++) {
		done = false;
		for (fab = ri->btree_info; fab->buf_ops; fab++) {
			if (rec->rm_owner != fab->rmap_owner)
				continue;
			error = xrep_findroot_block(ri, fab,
					rec->rm_owner, rec->rm_startblock + b,
					&done);
			if (error)
				return error;
			if (done)
				break;
		}
	}

	return 0;
}
867
868
/*
 * Find the root blocks of the btrees described in @btree_info by
 * walking every rmap record in the AG and probing candidate blocks.
 * The caller must hold @agf_bp locked; @agfl_bp (also locked) is only
 * required if one of the btrees is owned by XFS_RMAP_OWN_AG.
 */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf		*agfl_bp)
{
	struct xfs_mount	*mp = sc->mp;
	struct xrep_findroot	ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	/* Set up the walk context and reset each tree's search state. */
	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = agf_bp->b_addr;
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		/* AG-owned trees need the AGFL to filter out freelist blocks. */
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	/* Visit every rmap record in the AG. */
	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}
902
903
904void
905xrep_force_quotacheck(
906 struct xfs_scrub *sc,
907 xfs_dqtype_t type)
908{
909 uint flag;
910
911 flag = xfs_quota_chkd_flag(type);
912 if (!(flag & sc->mp->m_qflags))
913 return;
914
915 sc->mp->m_qflags &= ~flag;
916 spin_lock(&sc->mp->m_sb_lock);
917 sc->mp->m_sb.sb_qflags &= ~flag;
918 spin_unlock(&sc->mp->m_sb_lock);
919 xfs_log_sb(sc->tp);
920}
921
922
923
924
925
926
927
928
929
930
931
/*
 * Attach dquots to the inode being repaired.  A corrupt or missing
 * dquot does not abort the repair: we warn, clear the CHKD flags for
 * the affected quota types (via xrep_force_quotacheck) so the quota
 * counts get rebuilt, and continue with error cleared.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		/* Bad or missing dquot: warn about it once, rate-limited. */
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		/* Force a quotacheck for each quota type that didn't attach. */
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
		fallthrough;
	case -ESRCH:
		/* No dquots at all is fine; treat it as success. */
		error = 0;
		break;
	default:
		break;
	}

	return error;
}
962