#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

static inline int
xfs_buf_log_format_size(
        struct xfs_buf_log_format *blfp)
{
        return offsetof(struct xfs_buf_log_format, blf_data_map) +
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}

/*
 * Count the log iovecs and bytes needed to log a single buffer segment
 * described by a buf log format structure.
 *
 * One iovec is needed for the format structure itself, plus one for each
 * contiguous run of dirty chunks.  A segment with no dirty chunks logs
 * nothing.
 */
STATIC void
xfs_buf_item_size_segment(
        struct xfs_buf_log_item	*bip,
        struct xfs_buf_log_format *blfp,
        int			*nvecs,
        int			*nbytes)
{
        struct xfs_buf		*bp = bip->bli_buf;
        int			next_bit;
        int			last_bit;

        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (last_bit == -1)
                return;

        /*
         * Initial count: one vector for the format structure plus one for the
         * first dirty chunk found above.
         */
        *nvecs += 2;
        *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

        while (last_bit != -1) {
                /*
                 * This takes the bit number to start looking from and returns
                 * the next set bit from there.  It returns -1 if there are no
                 * more bits set or the start bit is beyond the end of the
                 * bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        last_bit + 1);
                /*
                 * If we run out of bits, we're done.  If the new bit is not
                 * adjacent to the last one, or the two chunks are not
                 * physically contiguous in the buffer, start a new vector;
                 * otherwise just extend the current run.
                 */
                if (next_bit == -1) {
                        break;
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
                            XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else {
                        last_bit++;
                }
                *nbytes += XFS_BLF_CHUNK;
        }
}

/*
 * Return the number of log iovecs and bytes needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged, so each segment is sized independently by
 * xfs_buf_item_size_segment().
 *
 * If the XFS_BLI_STALE flag has been set, only the buf log format structures
 * (carrying the cancel flag) are logged.
 */
STATIC void
xfs_buf_item_size(
        struct xfs_log_item	*lip,
        int			*nvecs,
        int			*nbytes)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        int			i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log is the buf log
                 * format structure with the cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                *nvecs += bip->bli_format_count;
                for (i = 0; i < bip->bli_format_count; i++) {
                        *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
                }
                return;
        }

        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

        if (bip->bli_flags & XFS_BLI_ORDERED) {
                /*
                 * The buffer has only been ordered, not physically logged, so
                 * signal that to the log with XFS_LOG_VEC_ORDERED rather than
                 * counting any data vectors.
                 */
                trace_xfs_buf_item_size_ordered(bip);
                *nvecs = XFS_LOG_VEC_ORDERED;
                return;
        }

        /*
         * The vector count is based on the number of buffer vectors we have
         * dirty bits in.  This will only be greater than one when we have a
         * compound buffer with more than one segment dirty.
         */
        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
                                          nvecs, nbytes);
        }
        trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
        struct xfs_log_vec	*lv,
        struct xfs_log_iovec	**vecp,
        struct xfs_buf		*bp,
        uint			offset,
        int			first_bit,
        uint			nbits)
{
        offset += first_bit * XFS_BLF_CHUNK;
        xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
                        xfs_buf_offset(bp, offset),
                        nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
        struct xfs_buf		*bp,
        uint			offset,
        int			next_bit,
        int			last_bit)
{
        return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
                (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
                 XFS_BLF_CHUNK);
}

static void
xfs_buf_item_format_segment(
        struct xfs_buf_log_item	*bip,
        struct xfs_log_vec	*lv,
        struct xfs_log_iovec	**vecp,
        uint			offset,
        struct xfs_buf_log_format *blfp)
{
        struct xfs_buf		*bp = bip->bli_buf;
        uint			base_size;
        int			first_bit;
        int			last_bit;
        int			next_bit;
        uint			nbits;

        /* copy the flags across from the base format item */
        blfp->blf_flags = bip->__bli_format.blf_flags;

        /*
         * Base size is the actual size of the ondisk structure - it reflects
         * the actual size of the dirty bitmap rather than the size of the in
         * memory structure.
         */
        base_size = xfs_buf_log_format_size(blfp);

        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
                /*
                 * If the map is not dirty in the transaction, mark the size
                 * as zero and do not advance the vector pointer.
                 */
                return;
        }

        blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
        blfp->blf_size = 1;

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log is the buf log
                 * format structure with the cancel flag in it as we are never
                 * going to replay the changes tracked in the log item.
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
                return;
        }

        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
                /*
                 * This takes the bit number to start looking from and returns
                 * the next set bit from there.  It returns -1 if there are no
                 * more bits set or the start bit is beyond the end of the
                 * bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        (uint)last_bit + 1);
                /*
                 * If we run out of bits, fill in the last iovec and get out of
                 * the loop.  Else if we start a new set of bits, then fill in
                 * the iovec for the series we were looking at and start
                 * counting the bits in the new one.  Else we are still in the
                 * same set of bits, so just keep counting and scanning.
                 */
                if (next_bit == -1) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        break;
                } else if (next_bit != last_bit + 1 ||
                           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
                } else {
                        last_bit++;
                        nbits++;
                }
        }
}

/*
 * This is called to fill in the vector of log iovecs for the given log buf
 * item.  It fills the first entry with a buf log format structure, and the
 * rest point to contiguous chunks within the buffer.
 */
STATIC void
xfs_buf_item_format(
        struct xfs_log_item	*lip,
        struct xfs_log_vec	*lv)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        struct xfs_buf		*bp = bip->bli_buf;
        struct xfs_log_iovec	*vecp = NULL;
        uint			offset = 0;
        int			i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        /*
         * If it is an inode buffer, transfer the in-memory state to the
         * format flags and clear the in-memory state.
         *
         * For buffer based inode allocation, we do not transfer this state if
         * the inode buffer allocation has not yet been committed to the log,
         * as setting the XFS_BLF_INODE_BUF flag will prevent correct replay
         * of the inode allocation.
         *
         * On CRC enabled filesystems the buffers aren't written to the
         * journal during allocation, so the buffer is always tagged as an
         * inode buffer to get correct unlinked list replay during recovery.
         */
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
                    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
                        bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }

        if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
                                                        XFS_BLI_ORDERED) {
                /*
                 * The buffer has been logged just to order it.  It is not
                 * being included in the transaction commit, so don't format
                 * it.
                 */
                trace_xfs_buf_item_format_ordered(bip);
                return;
        }

        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_format_segment(bip, lv, &vecp, offset,
                                            &bip->bli_formats[i]);
                offset += bp->b_maps[i].bm_len;
        }

        /*
         * Check to make sure everything is consistent.
         */
        trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory.  This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
        struct xfs_log_item	*lip)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_pin(bip);

        atomic_inc(&bip->bli_refcount);
        atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.  If
 * the XFS_BLI_STALE flag is set and we are the last reference, then free up
 * the buf log item and unlock the buffer.
 *
 * If the remove flag is set, we are called from uncommit in the forced
 * shutdown path.  If that is true and the reference count on the log item is
 * going to drop to zero, we need to free the item's descriptor in the
 * transaction.
 */
STATIC void
xfs_buf_item_unpin(
        struct xfs_log_item	*lip,
        int			remove)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        xfs_buf_t		*bp = bip->bli_buf;
        struct xfs_ail		*ailp = lip->li_ailp;
        int			stale = bip->bli_flags & XFS_BLI_STALE;
        int			freed;

        ASSERT(bp->b_fspriv == bip);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_buf_item_unpin(bip);

        freed = atomic_dec_and_test(&bip->bli_refcount);

        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);

        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

                trace_xfs_buf_item_unpin_stale(bip);

                if (remove) {
                        /*
                         * If we are in a transaction context, we have to
                         * remove the log item from the transaction as we are
                         * about to release our reference to the buffer.  If
                         * we don't, the unlock that occurs later in
                         * xfs_trans_uncommit() will try to reference the
                         * buffer which we no longer have a hold on.
                         */
                        if (lip->li_desc)
                                xfs_trans_del_item(lip);

                        /*
                         * Since the transaction no longer refers to the
                         * buffer, the buffer should no longer refer to the
                         * transaction.
                         */
                        bp->b_transp = NULL;
                }

                /*
                 * If we get called here because of an IO error, we may or may
                 * not have the item on the AIL.  xfs_trans_ail_delete() will
                 * take care of that situation.  xfs_trans_ail_delete() drops
                 * the AIL lock.
                 */
                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
                        xfs_buf_do_callbacks(bp);
                        bp->b_fspriv = NULL;
                        bp->b_iodone = NULL;
                } else {
                        spin_lock(&ailp->xa_lock);
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
                        xfs_buf_item_relse(bp);
                        ASSERT(bp->b_fspriv == NULL);
                }
                xfs_buf_relse(bp);
        } else if (freed && remove) {
                /*
                 * There are currently two references to the buffer - the
                 * active LRU reference and the buf log item.  What we are
                 * about to do here - simulate a failed IO completion -
                 * requires 3 references.
                 *
                 * The LRU reference is removed by the xfs_buf_stale() call.
                 * The buf item reference is removed by the xfs_buf_iodone()
                 * callback that is run by xfs_buf_do_callbacks() during ioend
                 * processing (via the bp->b_iodone callback), and then
                 * finally the ioend processing will drop the IO reference if
                 * the buffer was marked XBF_ASYNC.
                 *
                 * Hence we need to take an additional reference here so that
                 * IO completion processing doesn't free the buffer
                 * prematurely.
                 */
                xfs_buf_lock(bp);
                xfs_buf_hold(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioerror(bp, EIO);
                XFS_BUF_UNDONE(bp);
                xfs_buf_stale(bp);
                xfs_buf_ioend(bp, 0);
        }
}

/*
 * Buffer IO error rate limiting.  Limit it to no more than 10 messages per 30
 * seconds so as to not spam logs too much on repeated errors.
 */
DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);

STATIC uint
xfs_buf_item_push(
        struct xfs_log_item	*lip,
        struct list_head	*buffer_list)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        struct xfs_buf		*bp = bip->bli_buf;
        uint			rval = XFS_ITEM_SUCCESS;

        if (xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
        if (!xfs_buf_trylock(bp)) {
                /*
                 * If we have just raced with a buffer being pinned and it has
                 * been marked stale, we could end up stalling until someone
                 * else issues a log force to unpin the stale buffer.  Check
                 * for the race condition here so xfsaild recognizes the
                 * buffer is pinned and queues a log force to unpin it.
                 */
                if (xfs_buf_ispinned(bp))
                        return XFS_ITEM_PINNED;
                return XFS_ITEM_LOCKED;
        }

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_push(bip);

        /* has a previous flush failed due to IO errors? */
        if ((bp->b_flags & XBF_WRITE_FAIL) &&
            ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
                xfs_warn(bp->b_target->bt_mount,
"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
                         (long long)bp->b_bn);
        }

        if (!xfs_buf_delwri_queue(bp, buffer_list))
                rval = XFS_ITEM_FLUSHING;
        xfs_buf_unlock(bp);
        return rval;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item.  If
 * the item was logged, then another reference was taken when it was pinned,
 * so we can safely drop the transaction reference now.  This also allows us
 * to avoid potential races with the unpin code freeing the bli by not
 * referencing the bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold().  Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
        struct xfs_log_item	*lip)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        struct xfs_buf		*bp = bip->bli_buf;
        bool			clean;
        bool			aborted;
        int			flags;

        /* Clear the buffer's association with this transaction. */
        bp->b_transp = NULL;

        /*
         * If this is a transaction abort, don't return early.  Instead, allow
         * the brelse to happen.  Normally it would be done for stale
         * (cancelled) buffers at unpin time, but we'll never go through the
         * pin/unpin cycle if we abort inside commit.
         */
        aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;
        /*
         * Before possibly freeing the buf item, copy the per-transaction
         * state so we can reference it safely later after clearing it from
         * the buf log item.
         */
        flags = bip->bli_flags;
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

        /*
         * If the buf item is marked stale, then don't do anything.  We'll
         * unlock the buffer and free the buf item when the buffer is unpinned
         * for the last time.
         */
        if (flags & XFS_BLI_STALE) {
                trace_xfs_buf_item_unlock_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                if (!aborted) {
                        atomic_dec(&bip->bli_refcount);
                        return;
                }
        }

        trace_xfs_buf_item_unlock(bip);

        /*
         * If the buf item isn't tracking any data, free it, otherwise drop
         * the reference we hold to it.  If we are aborting the transaction,
         * this may be the only reference to the buf item, so we free it
         * anyway regardless of whether it is dirty or not.  A dirty abort
         * implies a shutdown, anyway.
         *
         * Ordered buffers are dirty but may have no recorded changes, so
         * ensure we only release clean items here.
         */
        clean = (flags & XFS_BLI_DIRTY) ? false : true;
        if (clean) {
                int i;
                for (i = 0; i < bip->bli_format_count; i++) {
                        if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
                                              bip->bli_formats[i].blf_map_size)) {
                                clean = false;
                                break;
                        }
                }
        }

        /*
         * Clean buffers, by definition, cannot be in the AIL.  However,
         * aborted buffers are allowed to be in the AIL regardless of dirty
         * state.  An aborted transaction that invalidates a buffer already in
         * the AIL may have marked it stale and cleared the dirty state, for
         * example.
         *
         * Therefore if we are aborting a buffer and we've just taken the last
         * reference away, we have to check if it is in the AIL before freeing
         * it.  We need to free it in this case, because an aborted
         * transaction has already shut the filesystem down and this is the
         * last chance we will have to do so.
         */
        if (atomic_dec_and_test(&bip->bli_refcount)) {
                if (clean)
                        xfs_buf_item_relse(bp);
                else if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
                        if (lip->li_flags & XFS_LI_IN_AIL) {
                                spin_lock(&lip->li_ailp->xa_lock);
                                xfs_trans_ail_delete(lip->li_ailp, lip,
                                                     SHUTDOWN_LOG_IO_ERROR);
                        }
                        xfs_buf_item_relse(bp);
                }
        }

        if (!(flags & XFS_BLI_HOLD))
                xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the buf log item
 * in the on-disk log resides now that the last log write of it completed at
 * the given lsn.
 *
 * We always re-log all the dirty data in a buffer, so usually the latest copy
 * in the on-disk log is the only one that matters.  For those cases we simply
 * return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated inodes.
 * These buffers are only relogged with the XFS_BLI_INODE_BUF flag set,
 * indicating that only the di_next_unlinked fields from the inodes in the
 * buffers will be replayed during recovery.  If the original newly allocated
 * inode images have not yet been flushed when the buffer is so relogged, then
 * we need to keep the old images in the 'active' portion of the log.  We do
 * this by returning the original lsn of that transaction here rather than the
 * current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
        struct xfs_log_item	*lip,
        xfs_lsn_t		lsn)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

        trace_xfs_buf_item_committed(bip);

        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
                return lip->li_lsn;
        return lsn;
}

STATIC void
xfs_buf_item_committing(
        struct xfs_log_item	*lip,
        xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
        .iop_size	= xfs_buf_item_size,
        .iop_format	= xfs_buf_item_format,
        .iop_pin	= xfs_buf_item_pin,
        .iop_unpin	= xfs_buf_item_unpin,
        .iop_unlock	= xfs_buf_item_unlock,
        .iop_committed	= xfs_buf_item_committed,
        .iop_push	= xfs_buf_item_push,
        .iop_committing	= xfs_buf_item_committing
};

STATIC int
xfs_buf_item_get_format(
        struct xfs_buf_log_item	*bip,
        int			count)
{
        ASSERT(bip->bli_formats == NULL);
        bip->bli_format_count = count;

        if (count == 1) {
                bip->bli_formats = &bip->__bli_format;
                return 0;
        }

        bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
                                       KM_SLEEP);
        if (!bip->bli_formats)
                return ENOMEM;
        return 0;
}

STATIC void
xfs_buf_item_free_format(
        struct xfs_buf_log_item	*bip)
{
        if (bip->bli_formats != &bip->__bli_format) {
                kmem_free(bip->bli_formats);
                bip->bli_formats = NULL;
        }
}

/*
 * Allocate a new buf log item to go with the given buffer.  Set the buffer's
 * b_fspriv field to point to the new buf log item.  If there are other items
 * attached to the buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
        xfs_buf_t	*bp,
        xfs_mount_t	*mp)
{
        xfs_log_item_t		*lip = bp->b_fspriv;
        xfs_buf_log_item_t	*bip;
        int			chunks;
        int			map_size;
        int			error;
        int			i;

        /*
         * Check to see if there is already a buf log item for this buffer.
         * If there is, it is guaranteed to be the first.  If we do already
         * have one, there is nothing to do here so return.
         */
        ASSERT(bp->b_target->bt_mount == mp);
        if (lip != NULL && lip->li_type == XFS_LI_BUF)
                return;

        bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
        bip->bli_buf = bp;
        xfs_buf_hold(bp);

        /*
         * chunks is the number of XFS_BLF_CHUNK size pieces the buffer can be
         * divided into.  Make sure not to truncate any pieces.  map_size is
         * the size of the bitmap needed to describe the chunks of the buffer.
         *
         * Discontiguous buffer support follows the layout of the underlying
         * buffer: one format structure per buffer map.
         */
        error = xfs_buf_item_get_format(bip, bp->b_map_count);
        ASSERT(error == 0);

        for (i = 0; i < bip->bli_format_count; i++) {
                chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
                                      XFS_BLF_CHUNK);
                map_size = DIV_ROUND_UP(chunks, NBWORD);

                bip->bli_formats[i].blf_type = XFS_LI_BUF;
                bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
                bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
                bip->bli_formats[i].blf_map_size = map_size;
        }

#ifdef XFS_TRANS_DEBUG
        /*
         * Allocate the arrays for tracking what needs to be logged and what
         * our callers request to be logged.  bli_orig holds a copy of the
         * original, unlogged contents of the buffer.  bli_logged keeps a
         * bitmap of the logged portions of the buffer.
         */
        bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
        memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
        bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif

        /*
         * Put the buf item into the list of items attached to the buffer at
         * the front.
         */
        if (bp->b_fspriv)
                bip->bli_item.li_bio_list = bp->b_fspriv;
        bp->b_fspriv = bip;
}


/*
 * Mark bytes first through last inclusive as dirty in the buf item's bitmap.
 */
static void
xfs_buf_item_log_segment(
        struct xfs_buf_log_item	*bip,
        uint			first,
        uint			last,
        uint			*map)
{
        uint		first_bit;
        uint		last_bit;
        uint		bits_to_set;
        uint		bits_set;
        uint		word_num;
        uint		*wordp;
        uint		bit;
        uint		end_bit;
        uint		mask;

        /*
         * Convert byte offsets to bit numbers.
         */
        first_bit = first >> XFS_BLF_SHIFT;
        last_bit = last >> XFS_BLF_SHIFT;

        /*
         * Calculate the total number of bits to be set.
         */
        bits_to_set = last_bit - first_bit + 1;

        /*
         * Get a pointer to the first word in the bitmap to set a bit in.
         */
        word_num = first_bit >> BIT_TO_WORD_SHIFT;
        wordp = &map[word_num];

        /*
         * Calculate the starting bit in the first word.
         */
        bit = first_bit & (uint)(NBWORD - 1);

        /*
         * First set any bits in the first word of our range.  If it starts at
         * bit 0 of the word, it will be set below rather than here.  That is
         * what the variable bit tells us.  The variable bits_set tracks the
         * number of bits that have been set so far.  end_bit is the number of
         * the last bit to be set in this word plus one.
         */
        if (bit) {
                end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
                mask = ((1 << (end_bit - bit)) - 1) << bit;
                *wordp |= mask;
                wordp++;
                bits_set = end_bit - bit;
        } else {
                bits_set = 0;
        }

        /*
         * Now set bits a whole word at a time that are between first_bit and
         * last_bit.
         */
        while ((bits_to_set - bits_set) >= NBWORD) {
                *wordp |= 0xffffffff;
                bits_set += NBWORD;
                wordp++;
        }

        /*
         * Finally, set any bits left to be set in one last partial word.
         */
        end_bit = bits_to_set - bits_set;
        if (end_bit) {
                mask = (1 << end_bit) - 1;
                *wordp |= mask;
        }
}

/*
 * Mark bytes first through last inclusive as dirty in the buf item's bitmap,
 * walking each buffer segment of a (possibly discontiguous) buffer.
 */
void
xfs_buf_item_log(
        xfs_buf_log_item_t	*bip,
        uint			first,
        uint			last)
{
        int			i;
        uint			start;
        uint			end;
        struct xfs_buf		*bp = bip->bli_buf;

        /*
         * walk each buffer segment and mark them dirty appropriately.
         */
        start = 0;
        for (i = 0; i < bip->bli_format_count; i++) {
                if (start > last)
                        break;
                end = start + BBTOB(bp->b_maps[i].bm_len);
                if (first > end) {
                        start += BBTOB(bp->b_maps[i].bm_len);
                        continue;
                }
                if (first < start)
                        first = start;
                if (end > last)
                        end = last;

                xfs_buf_item_log_segment(bip, first, end,
                                         &bip->bli_formats[i].blf_data_map[0]);

                start += bp->b_maps[i].bm_len;
        }
}

/*
 * Return whether the buffer has been logged or ordered in a transaction (at
 * any point, not just the current transaction).
 */
uint
xfs_buf_item_dirty(
        xfs_buf_log_item_t	*bip)
{
        return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
        xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
        kmem_free(bip->bli_orig);
        kmem_free(bip->bli_logged);
#endif

        xfs_buf_item_free_format(bip);
        kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should free
 * the buf log item associated with the given buffer and clear the buffer's
 * pointer to the buf log item.  If there are no more items in the list, clear
 * the b_iodone field of the buffer (see xfs_buf_do_callbacks() below).
 */
void
xfs_buf_item_relse(
        xfs_buf_t	*bp)
{
        xfs_buf_log_item_t	*bip = bp->b_fspriv;

        trace_xfs_buf_item_relse(bp, _RET_IP_);
        ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

        bp->b_fspriv = bip->bli_item.li_bio_list;
        if (bp->b_fspriv == NULL)
                bp->b_iodone = NULL;

        xfs_buf_rele(bp);
        xfs_buf_item_free(bip);
}

/*
 * Add the given log item with its callback to the list of callbacks to be
 * called when the buffer's I/O completes.  If it is not set already, set the
 * buffer's b_iodone() routine to be xfs_buf_iodone_callbacks() and link the
 * log item into the list of items rooted at b_fspriv.  Items are always added
 * as the second entry in the list if there is a first, because the buf item
 * code assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
        xfs_buf_t	*bp,
        void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
        xfs_log_item_t	*lip)
{
        xfs_log_item_t	*head_lip;

        ASSERT(xfs_buf_islocked(bp));

        lip->li_cb = cb;
        head_lip = bp->b_fspriv;
        if (head_lip) {
                lip->li_bio_list = head_lip->li_bio_list;
                head_lip->li_bio_list = lip;
        } else {
                bp->b_fspriv = lip;
        }

        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);
        bp->b_iodone = xfs_buf_iodone_callbacks;
}

/*
 * Walk the list of log items attached to the buffer and invoke each item's
 * callback, in the order in which the items were attached.  The callbacks are
 * free to remove and free their own items, so each item is detached from the
 * list before its callback is invoked.
 */
STATIC void
xfs_buf_do_callbacks(
        struct xfs_buf		*bp)
{
        struct xfs_log_item	*lip;

        while ((lip = bp->b_fspriv) != NULL) {
                bp->b_fspriv = lip->li_bio_list;
                ASSERT(lip->li_cb != NULL);
                /*
                 * Clear the next pointer so we don't have any confusion if
                 * the item is added to another buf.  Don't touch the log item
                 * after calling its callback, because it could have freed
                 * itself.
                 */
                lip->li_bio_list = NULL;
                lip->li_cb(bp, lip);
        }
}

/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone().  It should remove each log item from
 * the buffer's list and call the callback of each in turn.  When done, the
 * buffer's fsprivate field is set to NULL and the buffer is unlocked with a
 * call to iodone().
 */
void
xfs_buf_iodone_callbacks(
        struct xfs_buf		*bp)
{
        struct xfs_log_item	*lip = bp->b_fspriv;
        struct xfs_mount	*mp = lip->li_mountp;
        static ulong		lasttime;
        static xfs_buftarg_t	*lasttarg;

        if (likely(!xfs_buf_geterror(bp)))
                goto do_callbacks;

        /*
         * If we've already decided to shutdown the filesystem because of
         * I/O errors, there's no point in giving this a retry.
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_stale(bp);
                XFS_BUF_DONE(bp);
                trace_xfs_buf_item_iodone(bp, _RET_IP_);
                goto do_callbacks;
        }

        if (bp->b_target != lasttarg ||
            time_after(jiffies, (lasttime + 5*HZ))) {
                lasttime = jiffies;
                xfs_buf_ioerror_alert(bp, __func__);
        }
        lasttarg = bp->b_target;

        /*
         * If the write was asynchronous then no one will be looking for the
         * error.  Clear the error state and write the buffer out again.
         *
         * XXX: This helps against transient write errors, but we need to find
         * a way to shut the filesystem down if the writes keep failing.
         *
         * In practice we'll shut the filesystem down soon as non-transient
         * errors tend to affect the whole device and a failing log write will
         * make us give up.  But we really ought to do better here.
         */
        if (XFS_BUF_ISASYNC(bp)) {
                ASSERT(bp->b_iodone != NULL);

                trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

                xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

                if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
                        bp->b_flags |= XBF_WRITE | XBF_ASYNC |
                                       XBF_DONE | XBF_WRITE_FAIL;
                        xfs_buf_iorequest(bp);
                } else {
                        xfs_buf_relse(bp);
                }

                return;
        }

        /*
         * If the write of the buffer was synchronous, we want to make sure to
         * return the error to the caller of xfs_bwrite().
         */
        xfs_buf_stale(bp);
        XFS_BUF_DONE(bp);

        trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
        xfs_buf_do_callbacks(bp);
        bp->b_fspriv = NULL;
        bp->b_iodone = NULL;
        xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been logged.  It is
 * called when they are eventually flushed out.  It should remove the buf item
 * from the AIL, and free the buf item.  It is called by
 * xfs_buf_iodone_callbacks() above, which will take care of cleaning up the
 * buffer itself.
 */
void
xfs_buf_iodone(
        struct xfs_buf		*bp,
        struct xfs_log_item	*lip)
{
        struct xfs_ail		*ailp = lip->li_ailp;

        ASSERT(BUF_ITEM(lip)->bli_buf == bp);

        xfs_buf_rele(bp);

        /*
         * If we are forcibly shutting down, this may well be off the AIL
         * already.  That's because we simulate the log-committed callbacks to
         * unpin these buffers, or we may never have put this item on the AIL
         * because the transaction was aborted forcibly.
         * xfs_trans_ail_delete() takes care of these.
         *
         * Either way, the AIL is useless if we're forcing a shutdown.
         */
        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
        xfs_buf_item_free(BUF_ITEM(lip));
}