1
2
3
4
5
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_format.h"
9#include "xfs_log_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_bit.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_trans.h"
15#include "xfs_buf_item.h"
16#include "xfs_trans_priv.h"
17#include "xfs_error.h"
18#include "xfs_trace.h"
19#include "xfs_log.h"
20#include "xfs_inode.h"
21
22
/* Slab cache for buf log items; created at mount-subsystem init time. */
kmem_zone_t	*xfs_buf_item_zone;
24
/* Convert a generic log item back to its containing buf log item. */
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}
29
30STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
31
32static inline int
33xfs_buf_log_format_size(
34 struct xfs_buf_log_format *blfp)
35{
36 return offsetof(struct xfs_buf_log_format, blf_data_map) +
37 (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
38}
39
40
41
42
43
44
45
46
47
48
49
/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item segment.
 *
 * It calculates this as one iovec for the buf log format structure and one
 * for each stretch of non-contiguous chunks to be logged.  Contiguous chunks
 * are logged in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 *
		 * NOTE(review): the memory-contiguity check below uses
		 * buffer-relative chunk offsets without the segment offset,
		 * unlike xfs_buf_item_straddle() used by the format side —
		 * confirm this is correct for discontiguous (multi-map)
		 * buffers.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged.  This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just like would occur if multiple
 * buffers were used instead of a single discontiguous buffer.  This enables
 * discontiguous buffers to be in-memory constructs, completely transparent
 * to what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures; dirty regions are not relogged for stale buffers.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it - one iovec
		 * carrying the format structure per segment.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it; it is not
		 * included in the transaction commit, so no vectors are
		 * used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in.  This will only be greater than one when we have a
	 * compound buffer with multiple segments dirty.  Each segment
	 * contributes its own format structure plus iovecs for each
	 * contiguous dirty region.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}
173
174static inline void
175xfs_buf_item_copy_iovec(
176 struct xfs_log_vec *lv,
177 struct xfs_log_iovec **vecp,
178 struct xfs_buf *bp,
179 uint offset,
180 int first_bit,
181 uint nbits)
182{
183 offset += first_bit * XFS_BLF_CHUNK;
184 xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
185 xfs_buf_offset(bp, offset),
186 nbits * XFS_BLF_CHUNK);
187}
188
189static inline bool
190xfs_buf_item_straddle(
191 struct xfs_buf *bp,
192 uint offset,
193 int next_bit,
194 int last_bit)
195{
196 return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
197 (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
198 XFS_BLF_CHUNK);
199}
200
/*
 * Format one segment of a buf log item into the log vector, emitting the
 * buf log format header followed by one iovec per contiguous run of dirty
 * chunks.  @offset is the byte offset of this segment within the buffer.
 */
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in-
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
		           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
290
291
292
293
294
295
296
/*
 * This is called to fill in the vector of log iovecs for the given log buf
 * item.  It fills the first entry with a buf log format structure, and then
 * fills in the remaining entries based on the bits set in the bitmap, one
 * segment at a time.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer this state if
	 * the inode buffer allocation has not yet been committed to the log
	 * as setting the XFS_BLF_INODE_BUF flag will prevent correct replay
	 * of the inode allocation.
	 *
	 * On CRC-enabled filesystems (xfs_sb_version_hascrc()), the flag is
	 * always set so that the correct unlinked list replay occurs during
	 * recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}
351
352
353
354
355
356
357
358
359
360
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.  The refcount is bumped to balance the drop in
 * xfs_buf_item_unpin(), and the buffer's pin count keeps writeback at bay.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
377
378
379
380
381
382
383
384
385
386
387
388
389
390
/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().  It drops the bli
 * refcount taken at pin time; if that was the last reference and the buffer
 * is stale, the stale buffer is torn down here.  @remove is set when the
 * unpin is due to a transaction abort.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If
			 * we don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the
			 * buffer, the buffer should no longer refer to the
			 * transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL.  xfs_trans_ail_delete() will
		 * take care of that situation.  xfs_trans_ail_delete() drops
		 * the AIL lock.  For stale inode cluster buffers the callback
		 * chain does the teardown instead.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_log_item = NULL;
			list_del_init(&bp->b_li_list);
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->ail_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure.  We acquire an extra hold here so the
		 * I/O completion's release does not free the buffer out from
		 * under the caller, mark the buffer stale with an -EIO error,
		 * and run I/O completion to abort it.
		 *
		 * NOTE(review): this path simulates a failed async write after
		 * a transaction abort — presumably so log items attached to
		 * the buffer are cleaned up via the iodone callbacks; confirm
		 * against the abort handling in the transaction code.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
	}
}
483
484
485
486
487
488
489
/* Ratelimit async-write-failure warnings: at most 10 every 30 seconds. */
static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
491
/*
 * AIL push handler for buf log items: try to queue the backing buffer for
 * delayed writeback.  Returns XFS_ITEM_PINNED if the buffer cannot be written
 * yet, XFS_ITEM_LOCKED if it is held elsewhere, XFS_ITEM_FLUSHING if it is
 * already queued/flushing, and XFS_ITEM_SUCCESS otherwise.
 */
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer.  Check
		 * for the race condition here so xfsaild recognizes the
		 * buffer is pinned and queues a log force to unpin it.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
		xfs_warn(bp->b_target->bt_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
533
534
535
536
537
538
539
540
/*
 * Drop the buffer log item refcount and take appropriate action if the last
 * reference goes away.  Returns true if the bli has been freed, false
 * otherwise.  The caller must hold the buffer locked.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref associated with the unlock operation */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref, so we need to determine if the bli is dead
	 * or alive.  A dirty bli on a healthy filesystem is still live (it is
	 * tracked by the journal); a clean bli, or any bli on a shut down
	 * filesystem, is dead and must be torn down.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean.  An aborted item may be in the AIL
	 * regardless of dirty state - remove it before freeing.
	 */
	if (aborted)
		xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.  Called
 * when a transaction releases its hold on the buffer: the transaction state
 * flags are cleared here and the bli reference taken at transaction join time
 * is dropped via xfs_buf_item_put().
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
#endif

	trace_xfs_buf_item_unlock(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * the per-transaction state flags from the bli.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale.  Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call.  The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
	xfs_buf_relse(bp);
}
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658STATIC xfs_lsn_t
659xfs_buf_item_committed(
660 struct xfs_log_item *lip,
661 xfs_lsn_t lsn)
662{
663 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
664
665 trace_xfs_buf_item_committed(bip);
666
667 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
668 return lip->li_lsn;
669 return lsn;
670}
671
/* Buf log items need no action when the CIL begins committing them. */
STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}
678
679
680
681
/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing = xfs_buf_item_committing
};
692
693STATIC int
694xfs_buf_item_get_format(
695 struct xfs_buf_log_item *bip,
696 int count)
697{
698 ASSERT(bip->bli_formats == NULL);
699 bip->bli_format_count = count;
700
701 if (count == 1) {
702 bip->bli_formats = &bip->__bli_format;
703 return 0;
704 }
705
706 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
707 KM_SLEEP);
708 if (!bip->bli_formats)
709 return -ENOMEM;
710 return 0;
711}
712
713STATIC void
714xfs_buf_item_free_format(
715 struct xfs_buf_log_item *bip)
716{
717 if (bip->bli_formats != &bip->__bli_format) {
718 kmem_free(bip->bli_formats);
719 bip->bli_formats = NULL;
720 }
721}
722
723
724
725
726
727
/*
 * Allocate a new buf log item to go with the given buffer.  Set the buffer's
 * b_log_item field to point to the new buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for this buffer.
	 * If we do already have one, there is nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer can be
	 * divided into.  Make sure not to truncate any pieces.  map_size is
	 * the size of the bitmap needed to describe the chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer - one format structure per buffer map.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);
	if (error) {	/* KM_SLEEP allocation should not fail in practice */
		kmem_zone_free(xfs_buf_item_zone, bip);
		return error;
	}


	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
788
789
790
791
792
793
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
870
871
872
873
874
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
917
918
919
920
921
922
923bool
924xfs_buf_item_dirty_format(
925 struct xfs_buf_log_item *bip)
926{
927 int i;
928
929 for (i = 0; i < bip->bli_format_count; i++) {
930 if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
931 bip->bli_formats[i].blf_map_size))
932 return true;
933 }
934
935 return false;
936}
937
/* Free a buf log item and everything it owns (formats, lv shadow buffer). */
STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_zone_free(xfs_buf_item_zone, bip);
}
946
947
948
949
950
951
952
953
/*
 * This is called when the buf log item is no longer needed.  It should free
 * the buf log item associated with the given buffer and clear the buffer's
 * pointer to the buf log item.  If there are no more items in the list and
 * the buffer has no callbacks, then free the buf log item and clear the
 * iodone handler too.
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

	bp->b_log_item = NULL;
	if (list_empty(&bp->b_li_list))
		bp->b_iodone = NULL;

	/* drop the buffer reference taken when the bli was attached */
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}
970
971
972
973
974
975
976
977
978
/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  The buffer's iodone
 * handler is pointed at the dispatcher that walks this list.  The
 * buffer must be locked by the caller (asserted below).
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	list_add_tail(&lip->li_bio_list, &bp->b_li_list);

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
/*
 * We will first call the callback of the buf log item (if it exists), then
 * call the callbacks of all the items attached to the buffer's li_bio_list,
 * emptying the list as we go.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf *bp)
{
	struct xfs_buf_log_item *blip = bp->b_log_item;
	struct xfs_log_item	*lip;

	/* If there is a buf_log_item attached, run its callback */
	if (blip) {
		lip = &blip->bli_item;
		lip->li_cb(bp, lip);
	}

	while (!list_empty(&bp->b_li_list)) {
		lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);

		/*
		 * Remove the item from the list, so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		list_del_init(&lip->li_bio_list);
		lip->li_cb(bp, lip);
	}
}
1034
1035
1036
1037
1038
1039
1040
1041
1042
/*
 * Invoke the error state callback (iop_error) for each log item affected by
 * the failed I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run.  The
 * error state may need to be propagated to the log items attached to the
 * buffer, however, so the next AIL push of the item knows how to handle it
 * correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp;

	/*
	 * Buffer log item errors are handled directly by xfs_buf_item_push()
	 * and xfs_buf_iodone_callback_error, and they have no IO error
	 * callbacks.  Check only for items in b_li_list.
	 */
	if (list_empty(&bp->b_li_list))
		return;

	lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
			       li_bio_list);
	ailp = lip->li_ailp;
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (lip->li_ops->iop_error)
			lip->li_ops->iop_error(lip, bp);
	}
	spin_unlock(&ailp->ail_lock);
}
1068
/*
 * Decide what to do with a buffer that failed I/O completion.  Returns true
 * when the error has been handled here (retry submitted, or buffer released
 * for a later retry) and the normal completion path must not run; returns
 * false when the error is permanent and the caller should run the failure
 * callbacks against the now-stale buffer.
 */
static bool
xfs_buf_iodone_callback_error(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_log_item	*lip;
	struct xfs_mount	*mp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;
	struct xfs_error_cfg	*cfg;

	/*
	 * The failed buffer may not have a buf_log_item attached or the
	 * log_item list might be empty.  Get the mount pointer from the
	 * first attached item, falling back to the bli itself.
	 */
	lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
				       li_bio_list);
	mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_stale;

	/* rate-limit the console alert to one per target per 5 seconds */
	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
	ASSERT(bp->b_iodone != NULL);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  If this is the first failure of this type, clear the error
	 * state and write the buffer out again.  This means we always retry
	 * an async write failure at least once, but we also need to set the
	 * buffer up to behave correctly now for repeated failures.
	 */
	if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
	     bp->b_last_error != bp->b_error) {
		bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
		bp->b_last_error = bp->b_error;
		if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
		    !bp->b_first_retry_time)
			bp->b_first_retry_time = jiffies;

		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return true;
	}

	/*
	 * Repeated failure on an async write.  Take action according to the
	 * error configuration we have been set up to use.
	 */
	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
			goto permanent_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
			goto permanent_error;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		goto permanent_error;

	/*
	 * Still a transient error, run IO completion failure callbacks and
	 * let the higher layers retry the buffer.
	 */
	xfs_buf_do_callbacks_fail(bp);
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this action.
	 */
permanent_error:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}
1169
1170
1171
1172
1173
1174
1175
/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by calls to xfs_buf_attach_iodone().  It runs the attached item
 * callbacks, detaches them from the buffer, and finishes buffer I/O.
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	/*
	 * If there is an error, process it.  Some errors require us
	 * to run callbacks after failure processing is done so we
	 * detect that and take appropriate action.
	 */
	if (bp->b_error && xfs_buf_iodone_callback_error(bp))
		return;

	/*
	 * Successful IO or permanent error.  Either way, we can clear the
	 * retry state here in preparation for the next error that may occur.
	 */
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;

	xfs_buf_do_callbacks(bp);
	bp->b_log_item = NULL;
	list_del_init(&bp->b_li_list);
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
1202
1203
1204
1205
1206
1207
1208
1209
/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	/* drop the reference the bli holds on the buffer */
	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already.  That's because we simulate the log-committed callbacks to
	 * unpin these buffers.  Or we may never have put this item on AIL
	 * because of the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 * Note: xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
/*
 * Requeue a failed buffer for writeback.
 *
 * Return true if the buffer has been re-queued properly, false otherwise.
 */
bool
xfs_buf_resubmit_failed_buffers(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	struct xfs_log_item	*lip;
	bool			ret;

	ret = xfs_buf_delwri_queue(bp, buffer_list);

	/*
	 * Clear the failed flag from all items before resubmit.
	 *
	 * NOTE(review): xfs_clear_li_failed() manipulates li_flags shared
	 * with the AIL; presumably the caller already holds the AIL lock
	 * here — confirm against the callers in the inode/dquot flush paths.
	 */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		xfs_clear_li_failed(lip);

	return ret;
}
1273