1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h"
27#include "xfs_buf_item.h"
28#include "xfs_trans_priv.h"
29#include "xfs_error.h"
30#include "xfs_trace.h"
31
32
/* Slab zone used to allocate struct xfs_buf_log_item; initialised elsewhere. */
kmem_zone_t	*xfs_buf_item_zone;
34
/* Convert a generic log item pointer back to its containing buf log item. */
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}
39
/* Forward declaration: run and detach all iodone callbacks attached to a buffer. */
STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
41
42
43
44
45
46
47
48
49
50
51
/*
 * Return the number of log iovecs needed to log the given segment of the
 * buffer: zero if the segment's dirty bitmap is empty, otherwise one iovec
 * for the buf log format structure plus one iovec per contiguous run of
 * dirty chunks that is also contiguous in memory.
 */
STATIC uint
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			nvecs;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return 0;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	nvecs = 2;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			   XFS_BLF_CHUNK)) {
			/*
			 * Bits are adjacent in the bitmap but the backing
			 * memory is not contiguous (e.g. a page boundary in a
			 * multi-page buffer), so a new iovec is required.
			 */
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	return nvecs;
}
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121STATIC uint
122xfs_buf_item_size(
123 struct xfs_log_item *lip)
124{
125 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
126 uint nvecs;
127 int i;
128
129 ASSERT(atomic_read(&bip->bli_refcount) > 0);
130 if (bip->bli_flags & XFS_BLI_STALE) {
131
132
133
134
135
136 trace_xfs_buf_item_size_stale(bip);
137 ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
138 return bip->bli_format_count;
139 }
140
141 ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
142
143
144
145
146
147
148
149
150
151
152 nvecs = 0;
153 for (i = 0; i < bip->bli_format_count; i++) {
154 nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
155 }
156
157 trace_xfs_buf_item_size(bip);
158 return nvecs;
159}
160
/*
 * Fill in the vector of log iovecs for the given segment of the buf log
 * item.  The first iovec carries the buf log format structure; subsequent
 * iovecs each cover one run of chunks that is contiguous both in the dirty
 * bitmap and in memory.  Returns the next free iovec in the caller's array.
 */
static struct xfs_log_iovec *
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_iovec	*vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf	*bp = bip->bli_buf;
	uint		base_size;
	uint		nvecs;
	int		first_bit;
	int		last_bit;
	int		next_bit;
	uint		nbits;
	uint		buffer_offset;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the
	 * in-memory structure.
	 */
	base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));

	nvecs = 0;
	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * Nothing in this segment is dirty: record a size of zero and
		 * do not advance the vector pointer (no iovecs emitted).
		 */
		goto out;
	}

	vecp->i_addr = blfp;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		goto out;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, offset +
					      (next_bit << XFS_BLF_SHIFT)) !=
			   (xfs_buf_offset(bp, offset +
					       (last_bit << XFS_BLF_SHIFT)) +
			    XFS_BLF_CHUNK)) {
			/*
			 * Adjacent bits but the underlying memory is not
			 * contiguous: close the current iovec and start a
			 * new run.
			 */
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
out:
	blfp->blf_size = nvecs;
	return vecp;
}
279
280
281
282
283
284
285
286STATIC void
287xfs_buf_item_format(
288 struct xfs_log_item *lip,
289 struct xfs_log_iovec *vecp)
290{
291 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
292 struct xfs_buf *bp = bip->bli_buf;
293 uint offset = 0;
294 int i;
295
296 ASSERT(atomic_read(&bip->bli_refcount) > 0);
297 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
298 (bip->bli_flags & XFS_BLI_STALE));
299
300
301
302
303
304
305
306
307 if (bip->bli_flags & XFS_BLI_INODE_BUF) {
308 if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
309 xfs_log_item_in_current_chkpt(lip)))
310 bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
311 bip->bli_flags &= ~XFS_BLI_INODE_BUF;
312 }
313
314 for (i = 0; i < bip->bli_format_count; i++) {
315 vecp = xfs_buf_item_format_segment(bip, vecp, offset,
316 &bip->bli_formats[i]);
317 offset += bp->b_maps[i].bm_len;
318 }
319
320
321
322
323 trace_xfs_buf_item_format(bip);
324}
325
326
327
328
329
330
331
332
333
334
335STATIC void
336xfs_buf_item_pin(
337 struct xfs_log_item *lip)
338{
339 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
340
341 ASSERT(atomic_read(&bip->bli_refcount) > 0);
342 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
343 (bip->bli_flags & XFS_BLI_STALE));
344
345 trace_xfs_buf_item_pin(bip);
346
347 atomic_inc(&bip->bli_refcount);
348 atomic_inc(&bip->bli_buf->b_pin_count);
349}
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().  Drops the item
 * reference taken at pin time; if that was the last reference and the item
 * is stale (or @remove requests an abort) the item and buffer are torn down.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	/* wake anybody waiting for the buffer to become unpinned */
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If
			 * we don't, the unlock that occurs later in the
			 * transaction teardown would try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the
			 * buffer, the buffer should no longer refer to the
			 * transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL.  xfs_trans_ail_delete() will
		 * take care of that situation.  Note that
		 * xfs_trans_ail_delete() drops the AIL lock we take here.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * We are aborting: simulate a failed async write completion
		 * so the iodone callbacks run and clean everything up.  The
		 * completion path needs both a lock and an extra hold on the
		 * buffer (the callbacks and the stale processing each consume
		 * a reference), so take them before raising the error.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp, 0);
	}
}
455
456STATIC uint
457xfs_buf_item_push(
458 struct xfs_log_item *lip,
459 struct list_head *buffer_list)
460{
461 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
462 struct xfs_buf *bp = bip->bli_buf;
463 uint rval = XFS_ITEM_SUCCESS;
464
465 if (xfs_buf_ispinned(bp))
466 return XFS_ITEM_PINNED;
467 if (!xfs_buf_trylock(bp)) {
468
469
470
471
472
473
474
475 if (xfs_buf_ispinned(bp))
476 return XFS_ITEM_PINNED;
477 return XFS_ITEM_LOCKED;
478 }
479
480 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
481
482 trace_xfs_buf_item_push(bip);
483
484 if (!xfs_buf_delwri_queue(bp, buffer_list))
485 rval = XFS_ITEM_FLUSHING;
486 xfs_buf_unlock(bp);
487 return rval;
488}
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
/*
 * Release the buffer associated with the buf log item at transaction unlock
 * time.  If there is no dirty logged data recorded in the buf log item, the
 * item is freed and its reference removed from the buffer; otherwise the
 * transaction's reference to the item is dropped.  The buffer itself is only
 * kept locked if XFS_BLI_HOLD was set.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			aborted, clean, i;
	uint			hold;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for a logged
	 * buffer, but we need to handle stale buffers here as well.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * Before possibly freeing the buf item, record whether the caller
	 * asked us to hold the buffer locked (XFS_BLI_HOLD); we must not
	 * reference bip after it may have been freed below.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;

	/* Clear the per-transaction state from the item. */
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.  Aborted items fall through so teardown happens
	 * now.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it; otherwise drop
	 * the reference we hold to it.  The item is only clean when every
	 * segment's dirty bitmap is empty.  On abort, the last reference to
	 * drop also frees the item.
	 */
	clean = 1;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
			    bip->bli_formats[i].blf_map_size)) {
			clean = 0;
			break;
		}
	}
	if (clean)
		xfs_buf_item_relse(bp);
	else if (aborted) {
		if (atomic_dec_and_test(&bip->bli_refcount)) {
			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
			xfs_buf_item_relse(bp);
		}
	} else
		atomic_dec(&bip->bli_refcount);

	/* Release the buffer unless the caller asked to keep it held. */
	if (!hold)
		xfs_buf_relse(bp);
}
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601STATIC xfs_lsn_t
602xfs_buf_item_committed(
603 struct xfs_log_item *lip,
604 xfs_lsn_t lsn)
605{
606 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
607
608 trace_xfs_buf_item_committed(bip);
609
610 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
611 return lip->li_lsn;
612 return lsn;
613}
614
/* Buf log items have nothing to do when the transaction is committing. */
STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}
621
622
623
624
/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing	= xfs_buf_item_committing
};
635
636STATIC int
637xfs_buf_item_get_format(
638 struct xfs_buf_log_item *bip,
639 int count)
640{
641 ASSERT(bip->bli_formats == NULL);
642 bip->bli_format_count = count;
643
644 if (count == 1) {
645 bip->bli_formats = &bip->__bli_format;
646 return 0;
647 }
648
649 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
650 KM_SLEEP);
651 if (!bip->bli_formats)
652 return ENOMEM;
653 return 0;
654}
655
656STATIC void
657xfs_buf_item_free_format(
658 struct xfs_buf_log_item *bip)
659{
660 if (bip->bli_formats != &bip->__bli_format) {
661 kmem_free(bip->bli_formats);
662 bip->bli_formats = NULL;
663 }
664}
665
666
667
668
669
670
671
672
/*
 * Allocate a new buf log item to go with the given buffer.  Set the
 * buffer's b_fspriv field to point to the new buf log item.  If there are
 * other items already attached to the buffer, the new item is put at the
 * front of the list.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for this buffer.
	 * If there is, it is guaranteed to be the first; in that case there
	 * is nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer can be
	 * divided into; map_size is the number of bitmap words needed to
	 * describe those chunks.  Discontiguous buffer support follows the
	 * layout of the underlying buffer - one format structure (and one
	 * bitmap) per buffer map.
	 *
	 * NOTE(review): the error return from xfs_buf_item_get_format() is
	 * only ASSERTed on; with KM_SLEEP the allocation presumably cannot
	 * fail here, but that is not checked on non-debug builds.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged and what
	 * our callers request to be logged.  bli_orig holds a copy of the
	 * original, unlogged contents of the buffer.  bli_logged keeps a
	 * bitmap of the bytes that have been logged.
	 */
	bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
	memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
	bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the buffer
	 * at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}
745
746
747
748
749
750
751void
752xfs_buf_item_log_segment(
753 struct xfs_buf_log_item *bip,
754 uint first,
755 uint last,
756 uint *map)
757{
758 uint first_bit;
759 uint last_bit;
760 uint bits_to_set;
761 uint bits_set;
762 uint word_num;
763 uint *wordp;
764 uint bit;
765 uint end_bit;
766 uint mask;
767
768
769
770
771 first_bit = first >> XFS_BLF_SHIFT;
772 last_bit = last >> XFS_BLF_SHIFT;
773
774
775
776
777 bits_to_set = last_bit - first_bit + 1;
778
779
780
781
782
783 word_num = first_bit >> BIT_TO_WORD_SHIFT;
784 wordp = &map[word_num];
785
786
787
788
789 bit = first_bit & (uint)(NBWORD - 1);
790
791
792
793
794
795
796
797
798
799 if (bit) {
800 end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
801 mask = ((1 << (end_bit - bit)) - 1) << bit;
802 *wordp |= mask;
803 wordp++;
804 bits_set = end_bit - bit;
805 } else {
806 bits_set = 0;
807 }
808
809
810
811
812
813 while ((bits_to_set - bits_set) >= NBWORD) {
814 *wordp |= 0xffffffff;
815 bits_set += NBWORD;
816 wordp++;
817 }
818
819
820
821
822 end_bit = bits_to_set - bits_set;
823 if (end_bit) {
824 mask = (1 << end_bit) - 1;
825 *wordp |= mask;
826 }
827}
828
829
830
831
832
833void
834xfs_buf_item_log(
835 xfs_buf_log_item_t *bip,
836 uint first,
837 uint last)
838{
839 int i;
840 uint start;
841 uint end;
842 struct xfs_buf *bp = bip->bli_buf;
843
844
845
846
847
848 bip->bli_flags |= XFS_BLI_DIRTY;
849
850
851
852
853 start = 0;
854 for (i = 0; i < bip->bli_format_count; i++) {
855 if (start > last)
856 break;
857 end = start + BBTOB(bp->b_maps[i].bm_len);
858 if (first > end) {
859 start += BBTOB(bp->b_maps[i].bm_len);
860 continue;
861 }
862 if (first < start)
863 first = start;
864 if (end > last)
865 end = last;
866
867 xfs_buf_item_log_segment(bip, first, end,
868 &bip->bli_formats[i].blf_data_map[0]);
869
870 start += bp->b_maps[i].bm_len;
871 }
872}
873
874
875
876
877
878
879uint
880xfs_buf_item_dirty(
881 xfs_buf_log_item_t *bip)
882{
883 return (bip->bli_flags & XFS_BLI_DIRTY);
884}
885
/* Free a buf log item and its format array back to the item zone. */
STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif

	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}
898
899
900
901
902
903
904
905
906void
907xfs_buf_item_relse(
908 xfs_buf_t *bp)
909{
910 xfs_buf_log_item_t *bip;
911
912 trace_xfs_buf_item_relse(bp, _RET_IP_);
913
914 bip = bp->b_fspriv;
915 bp->b_fspriv = bip->bli_item.li_bio_list;
916 if (bp->b_fspriv == NULL)
917 bp->b_iodone = NULL;
918
919 xfs_buf_rele(bp);
920 xfs_buf_item_free(bip);
921}
922
923
924
925
926
927
928
929
930
931
932
933void
934xfs_buf_attach_iodone(
935 xfs_buf_t *bp,
936 void (*cb)(xfs_buf_t *, xfs_log_item_t *),
937 xfs_log_item_t *lip)
938{
939 xfs_log_item_t *head_lip;
940
941 ASSERT(xfs_buf_islocked(bp));
942
943 lip->li_cb = cb;
944 head_lip = bp->b_fspriv;
945 if (head_lip) {
946 lip->li_bio_list = head_lip->li_bio_list;
947 head_lip->li_bio_list = lip;
948 } else {
949 bp->b_fspriv = lip;
950 }
951
952 ASSERT(bp->b_iodone == NULL ||
953 bp->b_iodone == xfs_buf_iodone_callbacks);
954 bp->b_iodone = xfs_buf_iodone_callbacks;
955}
956
957
958
959
960
961
962
963
964
965
966
967
968
969STATIC void
970xfs_buf_do_callbacks(
971 struct xfs_buf *bp)
972{
973 struct xfs_log_item *lip;
974
975 while ((lip = bp->b_fspriv) != NULL) {
976 bp->b_fspriv = lip->li_bio_list;
977 ASSERT(lip->li_cb != NULL);
978
979
980
981
982
983
984 lip->li_bio_list = NULL;
985 lip->li_cb(bp, lip);
986 }
987}
988
989
990
991
992
993
994
995
/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  On success or permanent
 * failure it runs the attached callbacks; on a transient async write error
 * it resubmits the buffer instead.
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf	*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	/*
	 * NOTE(review): lasttime/lasttarg are unsynchronized static state
	 * used only to rate-limit the error message below; races here can at
	 * worst suppress or duplicate a log message.
	 */
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!xfs_buf_geterror(bp)))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}

	/* rate-limit the error alert to once per target per 5 seconds */
	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  Clear the error state and write the buffer out again.
	 *
	 * XXX: This helps against transient write errors, but we need to find
	 * a way to shut the filesystem down if the writes keep failing.  In
	 * practice we'll shortly release the buffer and it could be tossed
	 * away or be retried forever.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		ASSERT(bp->b_iodone != NULL);

		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

		if (!XFS_BUF_ISSTALE(bp)) {
			bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
			xfs_buf_iorequest(bp);
		} else {
			xfs_buf_relse(bp);
		}

		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make sure to
	 * return the error to the caller of xfs_bwrite().
	 */
	xfs_buf_stale(bp);
	XFS_BUF_DONE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}
1069
1070
1071
1072
1073
1074
1075
1076
/*
 * This is the iodone() function for buffers which have been logged.  It is
 * called when they are eventually flushed out.  It should remove the buf
 * item from the AIL, and free the buf item.  It is called by
 * xfs_buf_do_callbacks() above which will take care of cleaning up the
 * buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	/* drop the reference the buf log item held on the buffer */
	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already.  That's because we simulate the log-committed callbacks to
	 * unpin these buffers, or we may never have put this item on the AIL
	 * because of an aborted transaction.  xfs_trans_ail_delete() takes
	 * care of both cases, and it also drops the AIL lock taken here.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}
1101