1
2
3
4
5
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_format.h"
9#include "xfs_log_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_bit.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_trans.h"
15#include "xfs_buf_item.h"
16#include "xfs_trans_priv.h"
17#include "xfs_error.h"
18#include "xfs_trace.h"
19#include "xfs_log.h"
20#include "xfs_inode.h"
21
22
kmem_zone_t	*xfs_buf_item_zone;	/* slab cache for struct xfs_buf_log_item */
24
/* Convert a generic log item to its enclosing buf log item. */
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}
29
/* Forward declaration: run all iodone callbacks attached to a buffer. */
STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
31
32static inline int
33xfs_buf_log_format_size(
34 struct xfs_buf_log_format *blfp)
35{
36 return offsetof(struct xfs_buf_log_format, blf_data_map) +
37 (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
38}
39
40
41
42
43
44
45
46
47
48
49
/*
 * Compute the number of log iovecs and the number of bytes needed to log a
 * single segment (one buf log format) of a buf log item.  Counts one iovec
 * for the format structure plus one per run of contiguous dirty chunks;
 * *nvecs and *nbytes are accumulated into, not overwritten.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;		/* no dirty chunks in this segment */

	/*
	 * Initial count for a dirty segment is two vectors: the buf log
	 * format structure and the first dirty region (one chunk so far).
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * Find the next set bit after the current one; -1 means no
		 * more dirty chunks remain in the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);

		/*
		 * If we run out of bits, leave the loop.  If the next bit is
		 * not adjacent, a new region starts, so bump the vector
		 * count.  Otherwise keep extending the current region.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			/* gap in the bitmap: a new dirty region starts */
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			/*
			 * Adjacent bits, but the chunks live in
			 * discontiguous buffer memory, so they cannot share
			 * a single iovec.
			 */
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
/*
 * Return the number of log iovecs and the space needed to log the given buf
 * log item.  Stale buffers log only their format structures (carrying the
 * cancel flag); ordered buffers log no data at all; otherwise each segment
 * is sized individually.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it - no data.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.  It is not
		 * being included in the transaction commit, so no vectors
		 * are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count and byte total are accumulated per segment from
	 * the dirty bitmap of each buf log format structure.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}
173
174static inline void
175xfs_buf_item_copy_iovec(
176 struct xfs_log_vec *lv,
177 struct xfs_log_iovec **vecp,
178 struct xfs_buf *bp,
179 uint offset,
180 int first_bit,
181 uint nbits)
182{
183 offset += first_bit * XFS_BLF_CHUNK;
184 xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
185 xfs_buf_offset(bp, offset),
186 nbits * XFS_BLF_CHUNK);
187}
188
189static inline bool
190xfs_buf_item_straddle(
191 struct xfs_buf *bp,
192 uint offset,
193 int next_bit,
194 int last_bit)
195{
196 return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
197 (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
198 XFS_BLF_CHUNK);
199}
200
/*
 * Format a single segment of a buf log item into the log vector: first the
 * buf log format structure, then one iovec per run of contiguous dirty
 * chunks.  Stale segments emit only the format structure.
 */
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty then we must not have logged this
		 * segment, so skip it entirely.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it - there is no
		 * data to copy.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1 if
		 * there are no more bits set or the start bit is beyond the
		 * end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);

		/*
		 * If we run out of bits fill in the last iovec and get out
		 * of the loop.  Else if we start a new set of bits then fill
		 * in the iovec for the series we were looking at and start
		 * counting the new one.  Else we are still in the same set
		 * of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
		           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
290
291
292
293
294
295
296
/*
 * This is called to fill in the vector of log iovecs for the given log buf
 * item.  It fills the first entry with a buf log format structure, and the
 * rest with the regions of the buffer that have been logged.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer this state
	 * if the inode buffer allocation has not yet been committed to the
	 * log as setting the XFS_BLI_INODE_BUF flag will prevent replay of
	 * this buffer if the allocation transaction fails to replay - the
	 * chkpt test below appears to implement that ordering.  On v5 (CRC)
	 * filesystems the flag is always set.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}
351
352
353
354
355
356
357
358
359
360
361STATIC void
362xfs_buf_item_pin(
363 struct xfs_log_item *lip)
364{
365 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
366
367 ASSERT(atomic_read(&bip->bli_refcount) > 0);
368 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
369 (bip->bli_flags & XFS_BLI_ORDERED) ||
370 (bip->bli_flags & XFS_BLI_STALE));
371
372 trace_xfs_buf_item_pin(bip);
373
374 atomic_inc(&bip->bli_refcount);
375 atomic_inc(&bip->bli_buf->b_pin_count);
376}
377
378
379
380
381
382
383
384
385
386
387
388
389
390
/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().  It drops the bli
 * reference taken at pin time.
 *
 * If the item is stale and this was the last reference, the bli and its
 * buffer hold are torn down here.  If 'remove' is set we are being called
 * from transaction cancel/abort context and must also detach the item from
 * its transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	/* drop the pin count and wake anyone waiting for it to unpin */
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If
			 * we don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the
			 * buffer, the buffer should no longer refer to the
			 * transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or
		 * may not have the item on the AIL.  xfs_trans_ail_delete()
		 * will take care of that situation.  Stale inode buffers run
		 * their callbacks instead, which remove the items from the
		 * AIL themselves.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_log_item = NULL;
			list_del_init(&bp->b_li_list);
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->ail_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to
		 * simulate an async I/O failure.  We acquired the hold for
		 * this case before the buffer was unpinned, so use the
		 * stale/error path to run the iodone callbacks and release
		 * the log item reference.  NOTE(review): this mirrors an I/O
		 * error completion on a buffer that was never submitted.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
	}
}
483
484
485
486
487
488
489
/* Rate limit async-write-failure warnings: at most 10 every 30 seconds. */
static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
491
/*
 * AIL push handler for buf log items: try to queue the underlying buffer
 * for delayed-write flushing.  Returns PINNED/LOCKED/FLUSHING/SUCCESS for
 * the AIL state machine.
 */
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer.  Check
		 * for the race condition here so xfsaild recognizes the
		 * buffer is pinned and needs a log force to unpin it.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
		xfs_warn(bp->b_target->bt_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
533
534
535
536
537
538
539
540
/*
 * Drop the buffer log item refcount and take appropriate action.  This
 * helper determines whether the bli must be freed or not, since a decrement
 * to zero does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref associated with the unlock operation */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref, but the item may still be dirty and thus
	 * have a reference from an active transaction commit (CIL).  An
	 * aborted item (log I/O error or forced shutdown) is an exception:
	 * no further commit processing will occur, so it must be freed here.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean.  An aborted item may be in the AIL
	 * regardless of dirty state; for example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.  Remove from the AIL if necessary and free it.
	 */
	if (aborted)
		xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
/*
 * Release the buffer associated with the buf log item at transaction unlock
 * time.  Clears the per-transaction bli flags, drops the bli reference, and
 * unlocks/releases the buffer unless it was held (XFS_BLI_HOLD) or is stale
 * and still has outstanding references (stale buffers stay locked until the
 * final unpin).
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_unlock(bip);

	/*
	 * The bli dirty state should match whether the blf has logged
	 * segments except for ordered buffers, where only the bli should be
	 * dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and the
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale.  Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call.  The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660STATIC xfs_lsn_t
661xfs_buf_item_committed(
662 struct xfs_log_item *lip,
663 xfs_lsn_t lsn)
664{
665 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
666
667 trace_xfs_buf_item_committed(bip);
668
669 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
670 return lip->li_lsn;
671 return lsn;
672}
673
/* Buf log items have no work to do when the transaction is committing. */
STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}
680
681
682
683
/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing = xfs_buf_item_committing
};
694
695STATIC int
696xfs_buf_item_get_format(
697 struct xfs_buf_log_item *bip,
698 int count)
699{
700 ASSERT(bip->bli_formats == NULL);
701 bip->bli_format_count = count;
702
703 if (count == 1) {
704 bip->bli_formats = &bip->__bli_format;
705 return 0;
706 }
707
708 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
709 KM_SLEEP);
710 if (!bip->bli_formats)
711 return -ENOMEM;
712 return 0;
713}
714
715STATIC void
716xfs_buf_item_free_format(
717 struct xfs_buf_log_item *bip)
718{
719 if (bip->bli_formats != &bip->__bli_format) {
720 kmem_free(bip->bli_formats);
721 bip->bli_formats = NULL;
722 }
723}
724
725
726
727
728
729
/*
 * Allocate a new buf log item to go with the given buffer.  Set the buffer's
 * b_log_item field to point to the new buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for this buffer.
	 * If we do already have one, there is nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer can
	 * be divided into.  Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the chunks
	 * of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer.  This makes the implementation as simple as possible.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);
	if (error) {	/* to stop gcc throwing set-but-unused warnings */
		kmem_zone_free(xfs_buf_item_zone, bip);
		return error;
	}

	/* initialise one format descriptor per buffer map */
	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	/* attach the bli to the buffer; the bli holds a buffer reference */
	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
790
791
792
793
794
795
/*
 * Mark bytes first through last inclusive as dirty in the given segment's
 * dirty bitmap.  'first' and 'last' are byte offsets relative to the start
 * of this segment; each bitmap bit covers one XFS_BLF_CHUNK of bytes.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	/*
	 * Convert byte offsets to bit numbers in the dirty bitmap.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
872
873
874
875
876
/*
 * Mark bytes first through last inclusive as dirty in the buf item's
 * bitmap, walking each buffer map segment and clamping the range to the
 * portion that falls within it.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment
		 * relative offsets (e.g., the first byte of each segment is
		 * byte 0 of that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
919
920
921
922
923
924
925bool
926xfs_buf_item_dirty_format(
927 struct xfs_buf_log_item *bip)
928{
929 int i;
930
931 for (i = 0; i < bip->bli_format_count; i++) {
932 if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
933 bip->bli_formats[i].blf_map_size))
934 return true;
935 }
936
937 return false;
938}
939
940STATIC void
941xfs_buf_item_free(
942 struct xfs_buf_log_item *bip)
943{
944 xfs_buf_item_free_format(bip);
945 kmem_free(bip->bli_item.li_lv_shadow);
946 kmem_zone_free(xfs_buf_item_zone, bip);
947}
948
949
950
951
952
953
954
955
/*
 * This is called when the buf log item is no longer needed.  It should free
 * the buf log item associated with the given buffer and clear the buffer's
 * pointer to the buf log item.  If there are no more items in the list and
 * the buffer has no dirty items attached, clear the buffer's iodone handler.
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

	bp->b_log_item = NULL;
	if (list_empty(&bp->b_li_list))
		bp->b_iodone = NULL;

	/* drop the buffer reference the bli held and free the bli */
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}
972
973
974
975
976
977
978
979
980
981void
982xfs_buf_attach_iodone(
983 xfs_buf_t *bp,
984 void (*cb)(xfs_buf_t *, xfs_log_item_t *),
985 xfs_log_item_t *lip)
986{
987 ASSERT(xfs_buf_islocked(bp));
988
989 lip->li_cb = cb;
990 list_add_tail(&lip->li_bio_list, &bp->b_li_list);
991
992 ASSERT(bp->b_iodone == NULL ||
993 bp->b_iodone == xfs_buf_iodone_callbacks);
994 bp->b_iodone = xfs_buf_iodone_callbacks;
995}
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
/*
 * Invoke the error state callback (li_cb) for each log item attached to the
 * buffer: first the dedicated buf log item (if any), then every item on the
 * buffer's li_bio_list.  Each callback may free its item, so items are
 * removed from the list before the callback runs.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item *blip = bp->b_log_item;
	struct xfs_log_item	*lip;

	/* If there is a buf_log_item attached, run its callback */
	if (blip) {
		lip = &blip->bli_item;
		lip->li_cb(bp, lip);
	}

	while (!list_empty(&bp->b_li_list)) {
		lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);

		/*
		 * Remove the item from the list, so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its callback,
		 * because it could have freed itself.
		 */
		list_del_init(&lip->li_bio_list);
		lip->li_cb(bp, lip);
	}
}
1036
1037
1038
1039
1040
1041
1042
1043
1044
/*
 * Invoke the error state callback (iop_error) for each log item attached to
 * the buffer on I/O error.  All items share the same AIL, so take its lock
 * once around the whole walk.
 */
STATIC void
xfs_buf_do_callbacks_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp;

	/*
	 * Buffer log item errors are handled directly by xfs_buf_item_push()
	 * and xfs_buf_iodone_callback_error, and they have no IO error
	 * callbacks.  Check only for items in b_li_list.
	 */
	if (list_empty(&bp->b_li_list))
		return;

	lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
			       li_bio_list);
	ailp = lip->li_ailp;
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (lip->li_ops->iop_error)
			lip->li_ops->iop_error(lip, bp);
	}
	spin_unlock(&ailp->ail_lock);
}
1070
/*
 * Decide what to do with a buffer that failed metadata writeback.  Returns
 * true if the error was handled (retry resubmitted or failure state
 * recorded) and the caller should not run the iodone callbacks; false if
 * the error is permanent and the buffer has been staled for normal callback
 * processing.
 */
static bool
xfs_buf_iodone_callback_error(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_log_item	*lip;
	struct xfs_mount	*mp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;
	struct xfs_error_cfg	*cfg;

	/*
	 * The failed buffer may not have a buf_log_item attached, so find
	 * the mount via the first attached log item, falling back to the
	 * bli's mount pointer.
	 */
	lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);
	mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_stale;

	/* rate-limit the error alert to one per target per 5 seconds */
	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
	ASSERT(bp->b_iodone != NULL);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  If this is the first failure of this type, clear the error
	 * state and write the buffer out again.  This means we always retry
	 * an async write failure at least once, but we also need to set the
	 * buffer up to behave correctly now for repeated failures.
	 */
	if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
	     bp->b_last_error != bp->b_error) {
		bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
		bp->b_last_error = bp->b_error;
		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
		    !bp->b_first_retry_time)
			bp->b_first_retry_time = jiffies;

		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return true;
	}

	/*
	 * Repeated failure on an async write.  Take action according to the
	 * error configuration we have been set up to use.
	 */
	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
			goto permanent_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
			goto permanent_error;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		goto permanent_error;

	/*
	 * Still a transient error, run IO completion failure callbacks and
	 * let the higher layers retry the buffer.
	 */
	xfs_buf_do_callbacks_fail(bp);
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this
	 * action.
	 */
permanent_error:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}
1171
1172
1173
1174
1175
1176
1177
/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It runs the attached
 * callbacks, then finishes buffer I/O completion processing.
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	/*
	 * If there is an error, process it.  Some errors require us to run
	 * callbacks after failure processing is done, so detect that
	 * situation here and run callbacks, else get out.
	 */
	if (bp->b_error && xfs_buf_iodone_callback_error(bp))
		return;

	/*
	 * Successful IO or permanent error.  Either way, we can clear the
	 * retry state here in preparation for the next error that may occur.
	 */
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;

	xfs_buf_do_callbacks(bp);
	bp->b_log_item = NULL;
	list_del_init(&bp->b_li_list);
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
1204
1205
1206
1207
1208
1209
1210
1211
/*
 * This is the iodone() function for buffers which have been logged.  It is
 * called when they are eventually flushed out.  It should remove the buf
 * item from the AIL, and free the buf item.  It is called by
 * xfs_buf_iodone_callbacks() above which will take care of cleaning up the
 * buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already.  That's because we simulate the log-committed callbacks
	 * to unpin these buffers.  Or we may never have put this item on
	 * AIL because of the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256bool
1257xfs_buf_resubmit_failed_buffers(
1258 struct xfs_buf *bp,
1259 struct list_head *buffer_list)
1260{
1261 struct xfs_log_item *lip;
1262 bool ret;
1263
1264 ret = xfs_buf_delwri_queue(bp, buffer_list);
1265
1266
1267
1268
1269
1270 list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1271 xfs_clear_li_failed(lip);
1272
1273 return ret;
1274}
1275