1
2
3
4
5
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_mount.h"
14#include "xfs_trans.h"
15#include "xfs_buf_item.h"
16#include "xfs_trans_priv.h"
17#include "xfs_trace.h"
18#include "xfs_log.h"
19
20
/* Slab cache used for all buf log item allocations. */
kmem_zone_t	*xfs_buf_item_zone;

/* Convert a generic log item back to its containing buf log item. */
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
29
/*
 * Number of bytes needed to log a buf log format structure: the fixed
 * header up to blf_data_map plus only the in-use portion of the dirty
 * bitmap (blf_map_size words).
 */
static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
37
38
39
40
41
42
43
44
45
46
47
/*
 * Compute the number of log iovecs and bytes needed to log the given buf
 * log item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for
 * each stretch of non-contiguous chunks to be logged.  Contiguous chunks
 * are logged in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	/* Nothing to do if no chunks in this segment are dirty. */
	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * Initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop; else if we find a new
		 * set of bits bump the number of vecs; else if the chunks are
		 * not physically contiguous in memory (discontiguous buffer)
		 * also start a new vec; else keep scanning the current set of
		 * bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
/*
 * Compute the number of log iovecs and bytes needed to log the given buf
 * log item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just like would occur if multiple
 * buffers were used instead of a single discontiguous buffer.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are needed at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Each segment
	 * contributes its own format structure plus one vector per run of
	 * dirty chunks.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}
171
172static inline void
173xfs_buf_item_copy_iovec(
174 struct xfs_log_vec *lv,
175 struct xfs_log_iovec **vecp,
176 struct xfs_buf *bp,
177 uint offset,
178 int first_bit,
179 uint nbits)
180{
181 offset += first_bit * XFS_BLF_CHUNK;
182 xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
183 xfs_buf_offset(bp, offset),
184 nbits * XFS_BLF_CHUNK);
185}
186
187static inline bool
188xfs_buf_item_straddle(
189 struct xfs_buf *bp,
190 uint offset,
191 int next_bit,
192 int last_bit)
193{
194 return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
195 (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
196 XFS_BLF_CHUNK);
197}
198
/*
 * Fill in the log iovecs for one segment of the buf log item: first the buf
 * log format structure, then one iovec for each contiguous run of dirty
 * chunks in the segment.
 */
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}


	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
288
289
290
291
292
293
294
/*
 * Fill in the vector of log iovecs for the given log buf item: one format
 * structure per segment, each followed by iovecs pointing at the dirty
 * regions of that segment.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));


	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer this state if
	 * the inode buffer allocation has not yet been committed to the log
	 * as setting the XFS_BLF_INODE_BUF flag will prevent correct replay
	 * of the inode allocation.
	 *
	 * For icreate item based inode allocation (the CRC/v5 case checked
	 * first below), the buffers aren't written to the journal during
	 * allocation, so we should always tag the buffer as an inode buffer
	 * so that correct unlinked list replay occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}
349
350
351
352
353
354
355
356
357
358
/*
 * Pin the buffer associated with the buf log item in memory so it cannot be
 * written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
375
376
377
378
379
380
381
382
383
384
385
386
387
388
/*
 * Unpin the buffer associated with the buf log item which was previously
 * pinned with a call to xfs_buf_item_pin(), and drop the bli reference taken
 * at pin time.
 *
 * If the XFS_BLI_STALE flag is set and we are the last reference, then free
 * up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from the forced-shutdown/uncommit
 * path; if that is true and the reference count on the log item is going to
 * drop to zero we need to detach the item from its transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If
			 * we don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the
			 * buffer, the buffer should no longer refer to the
			 * transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL. xfs_trans_ail_delete() will
		 * take care of that situation. xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_log_item = NULL;
			list_del_init(&bp->b_li_list);
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->ail_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer is being aborted during a forced shutdown.
		 * Simulate an async I/O failure so that the buffer completion
		 * path cleans up the attached log items: lock and hold the
		 * buffer, mark it stale with an EIO error and run I/O
		 * completion.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
	}
}
481
482
483
484
485
486
487
/* Rate limit the "Failing async write" warning: at most 10 per 30 seconds. */
static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
489
/*
 * AIL push callback: try to queue the buffer for delayed writeback and
 * report the item state (pinned, locked, flushing or success) back to the
 * AIL pusher.
 */
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer. Rather
		 * than stalling, return a pinned state so the caller issues
		 * a log force to unpin it as soon as possible.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* Has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
		xfs_warn(bp->b_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
531
532
533
534
535
536
537
538
/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli was freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state - for example, an aborted transaction
	 * that invalidated a dirty bli and cleared the dirty state - so
	 * remove it from the AIL before releasing the buffer reference.
	 */
	if (aborted)
		xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
/*
 * Release the buffer associated with the buf log item at transaction unlock
 * time.
 *
 * We unconditionally drop the transaction's reference to the log item. If
 * the item was logged, then another reference was taken when it was pinned,
 * so we can safely drop the transaction reference now.  This also avoids
 * racing with the unpin code freeing the bli, because we do not touch the
 * bli after dropping the reference.
 *
 * If the XFS_BLI_HOLD flag was set (xfs_trans_bhold()), the buffer stays
 * locked; otherwise it is unlocked here unless it is stale and not yet
 * freed, in which case final unlock happens at unpin.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged
	 * segments except for ordered buffers, where only the bli should be
	 * dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}
639
640STATIC void
641xfs_buf_item_committing(
642 struct xfs_log_item *lip,
643 xfs_lsn_t commit_lsn)
644{
645 return xfs_buf_item_release(lip);
646}
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666STATIC xfs_lsn_t
667xfs_buf_item_committed(
668 struct xfs_log_item *lip,
669 xfs_lsn_t lsn)
670{
671 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
672
673 trace_xfs_buf_item_committed(bip);
674
675 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
676 return lip->li_lsn;
677 return lsn;
678}
679
/* Log item operations vector shared by all buf log items. */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};
690
691STATIC int
692xfs_buf_item_get_format(
693 struct xfs_buf_log_item *bip,
694 int count)
695{
696 ASSERT(bip->bli_formats == NULL);
697 bip->bli_format_count = count;
698
699 if (count == 1) {
700 bip->bli_formats = &bip->__bli_format;
701 return 0;
702 }
703
704 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
705 0);
706 if (!bip->bli_formats)
707 return -ENOMEM;
708 return 0;
709}
710
711STATIC void
712xfs_buf_item_free_format(
713 struct xfs_buf_log_item *bip)
714{
715 if (bip->bli_formats != &bip->__bli_format) {
716 kmem_free(bip->bli_formats);
717 bip->bli_formats = NULL;
718 }
719}
720
721
722
723
724
725
/*
 * Allocate a new buf log item to go with the given buffer and set the
 * buffer's b_log_item field to point to the new buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for this buffer.
	 * If we do already have one, there is nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer can be
	 * divided into. Make sure not to truncate any pieces. map_size is the
	 * size of the bitmap needed to describe the chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer: one format structure per map.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);
	if (error) {	/* unlikely; also silences "unused" warnings */
		kmem_zone_free(xfs_buf_item_zone, bip);
		return error;
	}


	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
786
787
788
789
790
791
/*
 * Mark bytes first through last inclusive as dirty in the given segment's
 * dirty bitmap.  Offsets are relative to the start of the segment.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
868
869
870
871
872
/*
 * Mark bytes first through last inclusive as dirty in the buf item's
 * bitmap(s), walking each buffer map segment in turn.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
915
916
917
918
919
920
921bool
922xfs_buf_item_dirty_format(
923 struct xfs_buf_log_item *bip)
924{
925 int i;
926
927 for (i = 0; i < bip->bli_format_count; i++) {
928 if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
929 bip->bli_formats[i].blf_map_size))
930 return true;
931 }
932
933 return false;
934}
935
936STATIC void
937xfs_buf_item_free(
938 struct xfs_buf_log_item *bip)
939{
940 xfs_buf_item_free_format(bip);
941 kmem_free(bip->bli_item.li_lv_shadow);
942 kmem_zone_free(xfs_buf_item_zone, bip);
943}
944
945
946
947
948
949
950
951
/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 * It frees the buf log item and removes the reference to it in the buffer.
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

	bp->b_log_item = NULL;
	/* Clear the iodone hook only if no other log items need callbacks. */
	if (list_empty(&bp->b_li_list))
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}
968
969
970
971
972
973
974
975
976
/*
 * Add the given log item with its callback to the list of callbacks to be
 * called when the buffer's I/O completes.  The buffer must be locked by the
 * caller.
 */
void
xfs_buf_attach_iodone(
	struct xfs_buf		*bp,
	void			(*cb)(struct xfs_buf *, struct xfs_log_item *),
	struct xfs_log_item	*lip)
{
	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	list_add_tail(&lip->li_bio_list, &bp->b_li_list);

	/* All attached items share the single buffer iodone dispatcher. */
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
/*
 * Run the iodone callback of the buffer's own log item (if attached) and of
 * every log item on the buffer's callback list.  Each list item is removed
 * before its callback is invoked because the callback may free the item.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item *blip = bp->b_log_item;
	struct xfs_log_item	*lip;

	/* If there is a buf_log_item attached, run its callback */
	if (blip) {
		lip = &blip->bli_item;
		lip->li_cb(bp, lip);
	}

	while (!list_empty(&bp->b_li_list)) {
		lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);

		/*
		 * Remove the item from the list, so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		list_del_init(&lip->li_bio_list);
		lip->li_cb(bp, lip);
	}
}
1032
1033
1034
1035
1036
1037
1038
1039
1040
/*
 * Invoke the error state callback for each log item affected by the failed
 * I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run. The
 * error state may need to be propagated to the log items attached to the
 * buffer, however, so the next AIL push of the item knows how to handle it
 * correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp;

	/*
	 * Buffer log item errors are handled directly by xfs_buf_item_push()
	 * and xfs_buf_iodone_callback_error, and they have no IO error
	 * callbacks. Check only for items in b_li_list.
	 */
	if (list_empty(&bp->b_li_list))
		return;

	/* All items share the same AIL; use the first item's lock. */
	lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
			       li_bio_list);
	ailp = lip->li_ailp;
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (lip->li_ops->iop_error)
			lip->li_ops->iop_error(lip, bp);
	}
	spin_unlock(&ailp->ail_lock);
}
1066
/*
 * Decide what to do about a buffer I/O error.  Returns true if the error has
 * been handled (e.g. the write was resubmitted) and the caller should not run
 * the normal completion path; returns false for sync writes and permanent
 * errors, where the (now stale) buffer falls through to normal completion.
 */
static bool
xfs_buf_iodone_callback_error(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_log_item	*lip;
	struct xfs_mount	*mp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;
	struct xfs_error_cfg	*cfg;

	/*
	 * The failed buffer might not have a buf_log_item attached or the
	 * callback list might be empty; get the mount from whichever log
	 * item we can find.
	 */
	lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);
	mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_stale;

	/* Rate-limit the error alert per target: at most one every 5s. */
	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
	ASSERT(bp->b_iodone != NULL);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  If this is the first failure of this type, clear the error
	 * state and write the buffer out again. This means we always retry an
	 * async write failure at least once, but we also need to set the
	 * buffer up to behave correctly now for repeated failures.
	 */
	if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
	     bp->b_last_error != bp->b_error) {
		bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
		bp->b_last_error = bp->b_error;
		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
		    !bp->b_first_retry_time)
			bp->b_first_retry_time = jiffies;

		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return true;
	}

	/*
	 * Repeated failure on an async write. Take action according to the
	 * error configuration we have been set up to use.
	 */
	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
			goto permanent_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
			goto permanent_error;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		goto permanent_error;

	/*
	 * Still a transient error, run IO completion failure callbacks and
	 * let the higher layers retry the buffer.
	 */
	xfs_buf_do_callbacks_fail(bp);
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this action.
	 */
permanent_error:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}
1167
1168
1169
1170
1171
1172
1173
/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone(). It runs each attached log item's
 * callback, detaches everything from the buffer and then finishes normal
 * buffer I/O completion.
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	/*
	 * If there is an error, process it. Some errors require us to run
	 * callbacks after failure processing is done so we detect that and
	 * take appropriate action.
	 */
	if (bp->b_error && xfs_buf_iodone_callback_error(bp))
		return;

	/*
	 * Successful IO or permanent error. Either way, we can clear the
	 * retry state here in preparation for the next error that may occur.
	 */
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;

	xfs_buf_do_callbacks(bp);
	bp->b_log_item = NULL;
	list_del_init(&bp->b_li_list);
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
1200
1201
1202
1203
1204
1205
1206
1207
/*
 * This is the iodone() function for buffers which have been logged.  It is
 * called when they are eventually flushed out.  It should remove the buf
 * item from the AIL, and free the buf item.  It is called by
 * xfs_buf_iodone_callbacks() above which will take care of cleaning up the
 * buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	/* Drop the bli's reference to the buffer. */
	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on AIL
	 * because of the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 * xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
/*
 * Requeue a failed buffer for writeback and clear the failed state on the
 * attached log items.
 *
 * We queue the buffer for IO *before* clearing the log item failed state:
 * the only active references to the buffer may be held by the failed log
 * items, so clearing XFS_LI_FAILED first could drop the reference count to
 * zero and free the buffer, leading to use-after-free in
 * xfs_buf_delwri_queue(). The buffer is locked and we own the buffer list,
 * so the processing order otherwise makes no difference.
 */
bool
xfs_buf_resubmit_failed_buffers(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	struct xfs_log_item	*lip;
	bool			ret;

	ret = xfs_buf_delwri_queue(bp, buffer_list);

	/*
	 * XFS_LI_FAILED set/clear is protected by ail_lock; the caller of
	 * this function is presumed to already hold it (NOTE(review): not
	 * verifiable from this file alone - confirm at the call sites).
	 */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		xfs_clear_li_failed(lip);

	return ret;
}
1271