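/*
 * Buffer log item (xfs_buf_log_item) handling: sizing, formatting,
 * pinning/unpinning and I/O completion callbacks for logged buffers.
 */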
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_log.h"

kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}

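/*
 * Return the number of log iovecs and the amount of log space needed to log
 * the given segment of the buf log item: one iovec for the buf log format
 * structure and one for each stretch of non-contiguous dirty chunks.
 * Contiguous chunks are logged in a single iovec.
 */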
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * Initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and returns
		 * the next set bit from there.  It returns -1 if there are no
		 * more bits set or the start bit is beyond the end of the
		 * bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop; else if we find a new
		 * set of bits bump the number of vecs; else keep scanning the
		 * current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}

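/*
 * Return the number of log iovecs and the amount of log space needed to log
 * the given buf log item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just like they would if multiple
 * buffers were used instead of a single discontiguous buffer.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */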
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.  It is not
		 * being included in the transaction commit, so no vectors are
		 * used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark the size as
		 * zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and returns
		 * the next set bit from there.  It returns -1 if there are no
		 * more bits set or the start bit is beyond the end of the
		 * bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else fill in an iovec for the series we were
		 * looking at and start looking for the next series of bits,
		 * or simply extend the current run of contiguous bits.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}

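/*
 * This is called to fill in the vector of log iovecs for the given log buf
 * item.  It fills the first entry with a buf log format structure, and the
 * rest point to contiguous chunks within the buffer.
 */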
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer this state if
	 * the inode buffer allocation has not yet been committed to the log,
	 * as setting the XFS_BLF_INODE_BUF flag will prevent replay of this
	 * buffer in case of failure.  On CRC enabled filesystems the flag is
	 * always transferred, as inode allocation is logged separately via
	 * the icreate item.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
							XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.  It is not
		 * being included in the transaction commit, so don't format
		 * it.
		 */
		trace_xfs_buf_item_format_ordered(bip);
		return;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		/* the segment offset is tracked in bytes, not basic blocks */
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

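/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */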
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

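/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.  If
 * the XFS_BLI_STALE flag is set and we are the last reference, then free up
 * the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the forced
 * shutdown path, and have to remove the item from the transaction as well.
 */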
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL.  xfs_trans_ail_delete() will
		 * take care of that situation.  xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the
		 * active LRU reference and the buf log item.  What we are
		 * about to do here - simulate a failed IO completion - requires
		 * 3 references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call.
		 * The buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by the callback list processing, and
		 * then the final I/O completion processing drops its own
		 * reference.  Hence we need to take an additional reference
		 * here so that IO completion processing doesn't free the
		 * buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp, 0);
	}
}

/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages
 * every 30 seconds so as to not spam logs too much on repeated errors.
 */
static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer. Check for
		 * the race condition here so xfsaild recognizes the buffer is
		 * pinned and queues a log force to unpin it.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
		xfs_warn(bp->b_target->bt_mount,
"Detected failing async write on buffer block 0x%llx. Retrying async write.",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

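/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */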
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			clean;
	bool			aborted;
	int			flags;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;

	/*
	 * Before possibly freeing the buf item, copy the per-transaction state
	 * so we can reference it safely later after clearing it from the
	 * buffer log item.
	 */
	flags = bip->bli_flags;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it.  If we are aborting the transaction, this
	 * may be the only reference to the buf item, so we free it anyway
	 * regardless of whether it is dirty or not.  A dirty abort implies a
	 * shutdown, anyway.
	 *
	 * Ordered buffers are dirty but may have no recorded changes, so
	 * ensure we only release clean items here.
	 */
	clean = (flags & XFS_BLI_DIRTY) ? false : true;
	if (clean) {
		int i;
		for (i = 0; i < bip->bli_format_count; i++) {
			if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				     bip->bli_formats[i].blf_map_size)) {
				clean = false;
				break;
			}
		}
	}

	/*
	 * Clean buffers, by definition, cannot be in the AIL.  However,
	 * aborted buffers may be in the AIL regardless of dirty state.  An
	 * aborted transaction that invalidates a buffer already in the AIL
	 * may have marked it stale and cleared the dirty state, for example.
	 *
	 * Therefore if we are aborting a buffer and we've just taken the last
	 * reference away, we have to check if it is in the AIL before freeing
	 * it.  We need to free it in this case, because an aborted transaction
	 * has already shut the filesystem down and this is the last chance we
	 * will have to do so.
	 */
	if (atomic_dec_and_test(&bip->bli_refcount)) {
		if (clean)
			xfs_buf_item_relse(bp);
		else if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
			if (lip->li_flags & XFS_LI_IN_AIL) {
				spin_lock(&lip->li_ailp->xa_lock);
				xfs_trans_ail_delete(lip->li_ailp, lip,
						     SHUTDOWN_LOG_IO_ERROR);
			}
			xfs_buf_item_relse(bp);
		}
	}

	if (!(flags & XFS_BLI_HOLD))
		xfs_buf_relse(bp);
}

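/*
 * This is called to find out where the oldest active copy of the buf log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn.  We always re-log all the dirty data in a buffer, so
 * usually the latest copy in the on disk log is the only one that matters.
 * For those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated inodes.
 * These buffers are only relogged with the XFS_BLI_INODE_BUF flag set, so the
 * original inode images may not have been flushed to disk yet.  In that case
 * we return the original lsn of the transaction that allocated the inodes so
 * that those images stay in the active portion of the log.
 */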
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing	= xfs_buf_item_committing
};

STATIC int
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return 0;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       KM_SLEEP);
	if (!bip->bli_formats)
		return ENOMEM;
	return 0;
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

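/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */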
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);

	/*
	 * Chunk the buffer into 128 byte regions and build a bitmap to track
	 * those regions.  Discontiguous buffers require a format structure
	 * per region as each region is logged separately from the remaining
	 * regions in the buffer.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}

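/*
 * Mark bytes first through last inclusive (segment relative offsets) as dirty
 * in the given data map.
 */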
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  end_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}
}

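/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */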
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * Walk each buffer segment and mark them dirty appropriately.
	 * start and end are byte offsets within the whole buffer.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}

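/*
 * Return non-zero if the buf log item is marked dirty, i.e. the buffer has
 * been logged in a transaction.
 */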
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}

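/*
 * This is called when the buf log item is no longer needed.  It should free
 * the buf log item associated with the given buffer and clear the buffer's
 * pointer to the buf log item.  If there are no more items in the list,
 * clear the b_iodone field of the buffer (see xfs_buf_attach_iodone() below).
 */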
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

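/*
 * Add the given log item with its callback to the list of callbacks to be
 * called when the buffer's I/O completes.  If it is not set already, set the
 * buffer's b_iodone() routine to be xfs_buf_iodone_callbacks() and link the
 * log item into the list of items rooted at b_fspriv.  Items are always
 * added as the second entry in the list if there is a first, because the buf
 * item code assumes that the buf log item is first.
 */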
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}

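/*
 * Walk the list of log items attached to the buffer and run each item's
 * callback.  Each item is removed from the list before its callback is
 * called, so the callback is free to dispose of the item.
 */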
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

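/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone().  It should remove each log item from
 * the buffer's list and call the callback of each in turn.  When done, the
 * buffer's fsprivate field is set to NULL and the buffer is unlocked with a
 * call to the I/O completion routine.
 */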
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!bp->b_error))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  If this is the first failure of this type, clear the error
	 * state and write the buffer out again.  This means we always retry an
	 * async write failure at least once, but we also need to set the
	 * buffer up to behave correctly now for repeated failures.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		ASSERT(bp->b_iodone != NULL);

		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

		xfs_buf_ioerror(bp, 0);	/* errno of 0 unsets the flag */

		if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
			bp->b_flags |= XBF_WRITE | XBF_ASYNC |
				       XBF_DONE | XBF_WRITE_FAIL;
			xfs_buf_iorequest(bp);
		} else {
			xfs_buf_relse(bp);
		}

		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make sure to
	 * return the error to the caller of xfs_bwrite().
	 */
	xfs_buf_stale(bp);
	XFS_BUF_DONE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

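/*
 * This is the iodone() function for buffers which have been logged.  It is
 * called when they are eventually flushed out.  It should remove the buf item
 * from the AIL, and free the buf item.  It is called by
 * xfs_buf_iodone_callbacks() above which will take care of cleaning up the
 * buffer itself.
 */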
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already.  That's because we simulate the log-committed callbacks to
	 * unpin these buffers, or we may never have put this item on the AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these cases.
	 *
	 * Either way, the AIL lock is dropped by xfs_trans_ail_delete().
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}