1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_log.h"
22#include "xfs_trans.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h"
26#include "xfs_error.h"
27#include "xfs_log_priv.h"
28#include "xfs_buf_item.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_alloc_btree.h"
31#include "xfs_ialloc_btree.h"
32#include "xfs_log_recover.h"
33#include "xfs_trans_priv.h"
34#include "xfs_dinode.h"
35#include "xfs_inode.h"
36#include "xfs_trace.h"
37
/* Slab cache used to allocate struct xlog_ticket (set up elsewhere). */
kmem_zone_t	*xfs_log_ticket_zone;

/* Local function prototypes */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void
xlog_state_do_callback(
	struct xlog		*log,
	int			aborted,
	struct xlog_in_core	*iclog);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size);
STATIC void
xlog_state_want_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);

/* local functions to manipulate grant head */
STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);
STATIC void
xlog_ungrant_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);

/* Debug-only consistency checkers; compiled to no-ops in release builds. */
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	char			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog *log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	boolean_t		syncing);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);
137
138static void
139xlog_grant_sub_space(
140 struct xlog *log,
141 atomic64_t *head,
142 int bytes)
143{
144 int64_t head_val = atomic64_read(head);
145 int64_t new, old;
146
147 do {
148 int cycle, space;
149
150 xlog_crack_grant_head_val(head_val, &cycle, &space);
151
152 space -= bytes;
153 if (space < 0) {
154 space += log->l_logsize;
155 cycle--;
156 }
157
158 old = head_val;
159 new = xlog_assign_grant_head_val(cycle, space);
160 head_val = atomic64_cmpxchg(head, old, new);
161 } while (head_val != old);
162}
163
164static void
165xlog_grant_add_space(
166 struct xlog *log,
167 atomic64_t *head,
168 int bytes)
169{
170 int64_t head_val = atomic64_read(head);
171 int64_t new, old;
172
173 do {
174 int tmp;
175 int cycle, space;
176
177 xlog_crack_grant_head_val(head_val, &cycle, &space);
178
179 tmp = log->l_logsize - space;
180 if (tmp > bytes)
181 space += bytes;
182 else {
183 space = bytes - tmp;
184 cycle++;
185 }
186
187 old = head_val;
188 new = xlog_assign_grant_head_val(cycle, space);
189 head_val = atomic64_cmpxchg(head, old, new);
190 } while (head_val != old);
191}
192
193STATIC void
194xlog_grant_head_init(
195 struct xlog_grant_head *head)
196{
197 xlog_assign_grant_head(&head->grant, 1, 0);
198 INIT_LIST_HEAD(&head->waiters);
199 spin_lock_init(&head->lock);
200}
201
202STATIC void
203xlog_grant_head_wake_all(
204 struct xlog_grant_head *head)
205{
206 struct xlog_ticket *tic;
207
208 spin_lock(&head->lock);
209 list_for_each_entry(tic, &head->waiters, t_queue)
210 wake_up_process(tic->t_task);
211 spin_unlock(&head->lock);
212}
213
214static inline int
215xlog_ticket_reservation(
216 struct xlog *log,
217 struct xlog_grant_head *head,
218 struct xlog_ticket *tic)
219{
220 if (head == &log->l_write_head) {
221 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
222 return tic->t_unit_res;
223 } else {
224 if (tic->t_flags & XLOG_TIC_PERM_RESERV)
225 return tic->t_unit_res * tic->t_cnt;
226 else
227 return tic->t_unit_res;
228 }
229}
230
231STATIC bool
232xlog_grant_head_wake(
233 struct xlog *log,
234 struct xlog_grant_head *head,
235 int *free_bytes)
236{
237 struct xlog_ticket *tic;
238 int need_bytes;
239
240 list_for_each_entry(tic, &head->waiters, t_queue) {
241 need_bytes = xlog_ticket_reservation(log, head, tic);
242 if (*free_bytes < need_bytes)
243 return false;
244
245 *free_bytes -= need_bytes;
246 trace_xfs_log_grant_wake_up(log, tic);
247 wake_up_process(tic->t_task);
248 }
249
250 return true;
251}
252
/*
 * Queue the ticket and sleep until enough grant space is available for
 * need_bytes, or until the log is forcibly shut down.
 *
 * Called and returns with head->lock held; the lock is dropped across
 * schedule() so other tasks can free up or hand out space.
 * Returns 0 on success, EIO after a shutdown.
 */
STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		/* push the AIL so the tail moves and frees log space */
		xlog_grant_push_ail(log, need_bytes);

		/* set task state before dropping the lock to avoid lost wakeups */
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return XFS_ERROR(EIO);
}
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
/*
 * Check whether the ticket's reservation can be satisfied from the
 * grant head right now; otherwise queue it and sleep.  The fast path
 * reads the waiter list and free space without the lock and only takes
 * head->lock when it actually needs to wake or queue waiters.
 *
 * On return *need_bytes holds the reservation size computed for the
 * ticket.  Returns 0 on success or an error after log shutdown.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us.  Wake up the first waiters, if we do not wake
	 * all waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						*need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}
341
342static void
343xlog_tic_reset_res(xlog_ticket_t *tic)
344{
345 tic->t_res_num = 0;
346 tic->t_res_arr_sum = 0;
347 tic->t_res_num_ophdrs = 0;
348}
349
350static void
351xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
352{
353 if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
354
355 tic->t_res_o_flow += tic->t_res_arr_sum;
356 tic->t_res_num = 0;
357 tic->t_res_arr_sum = 0;
358 }
359
360 tic->t_res_arr[tic->t_res_num].r_len = len;
361 tic->t_res_arr[tic->t_res_num].r_type = type;
362 tic->t_res_arr_sum += len;
363 tic->t_res_num++;
364}
365
366
367
368
/*
 * Replenish a permanent ticket for the next transaction in a rolling
 * series: bump the transaction id, reset the current reservation to
 * one unit, and only block for write grant space when the ticket has
 * no pre-reserved counts left.  Returns 0 or EIO after shutdown.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (XLOG_FORCED_SHUTDOWN(log))
		return XFS_ERROR(EIO);

	XFS_STATS_INC(xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	/* still holding a reservation count: nothing more to grant */
	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations.  We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
421
422
423
424
425
426
427
428
429
/*
 * Reserve log space for a new transaction and return a ticket through
 * *ticp.  If "permanent" is set the ticket can be re-used for a series
 * of rolling transactions (see xfs_log_regrant).  Pushes on the AIL to
 * make space, then waits for reserve grant space and consumes both the
 * reserve and write grant heads.  Returns 0, ENOMEM, or EIO.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	__uint8_t		client,
	bool			permanent,
	uint			t_type)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return XFS_ERROR(EIO);

	XFS_STATS_INC(xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
				KM_SLEEP | KM_MAYFAIL);
	if (!tic)
		return XFS_ERROR(ENOMEM);

	tic->t_trans_type = t_type;
	*ticp = tic;

	/* push based on the full worst-case reservation for the ticket */
	xlog_grant_push_ail(log, tic->t_unit_res * tic->t_cnt);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	/* a fresh reservation consumes both reserve and write grant space */
	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations.  We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
/*
 * Finish a transaction against the log.
 *
 * Unless the ticket is still in its INITED state (nothing was ever
 * written), a commit record is written and its lsn returned.  On
 * shutdown or commit failure -1 is returned and a permanent ticket is
 * forced into the release path.  Non-permanent tickets (or permanent
 * ones being explicitly released) give their space back to the grant
 * heads and are freed; otherwise the ticket is re-armed for the next
 * transaction in the permanent series.
 */
xfs_lsn_t
xfs_log_done(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	uint			flags)
{
	struct xlog		*log = mp->m_log;
	xfs_lsn_t		lsn = 0;

	if (XLOG_FORCED_SHUTDOWN(log) ||
	    /*
	     * If nothing was ever written, don't write out commit record.
	     * If we get an error, just continue and give back the log ticket.
	     */
	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
		lsn = (xfs_lsn_t) -1;
		if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
			flags |= XFS_LOG_REL_PERM_RESERV;
		}
	}


	if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
	    (flags & XFS_LOG_REL_PERM_RESERV)) {
		trace_xfs_log_done_nonperm(log, ticket);

		/*
		 * Release ticket if not permanent reservation or a specific
		 * request has been made to release a permanent reservation.
		 */
		xlog_ungrant_log_space(log, ticket);
		xfs_log_ticket_put(ticket);
	} else {
		trace_xfs_log_done_perm(log, ticket);

		xlog_regrant_reserve_log_space(log, ticket);
		/*
		 * Flag the ticket as INITED so the next write re-emits the
		 * transaction start record.
		 */
		ticket->t_flags |= XLOG_TIC_INITED;
	}

	return lsn;
}
556
557
558
559
560
561
562
/*
 * Attach a callback to be run when the given iclog has been written to
 * disk.  Callbacks are appended through the tail pointer so they run
 * in the order they were attached.  Returns non-zero (and does not
 * attach the callback) if the iclog is already in an I/O error state.
 */
int
xfs_log_notify(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog,
	xfs_log_callback_t	*cb)
{
	int	abortflg;

	spin_lock(&iclog->ic_callback_lock);
	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
	if (!abortflg) {
		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
		cb->cb_next = NULL;
		/* splice onto the end via the tail pointer */
		*(iclog->ic_callback_tail) = cb;
		iclog->ic_callback_tail = &(cb->cb_next);
	}
	spin_unlock(&iclog->ic_callback_lock);
	return abortflg;
}
583
584int
585xfs_log_release_iclog(
586 struct xfs_mount *mp,
587 struct xlog_in_core *iclog)
588{
589 if (xlog_state_release_iclog(mp->m_log, iclog)) {
590 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
591 return EIO;
592 }
593
594 return 0;
595}
596
597
598
599
600
601
602
603
604
605
606
/*
 * Mount-time log initialisation: allocate the in-core log, initialise
 * the AIL, replay the on-disk log unless the norecovery mount option is
 * set, then clear the active-recovery flag and prime the CIL.
 * Returns 0 or a positive errno.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
		xfs_notice(mp, "Mounting Filesystem");
	else {
		xfs_notice(mp,
"Mounting filesystem in no-recovery mode. Filesystem will be inconsistent.");
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = -PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		/* temporarily clear RDONLY so recovery can write */
		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				error);
			goto out_destroy_ail;
		}
	}

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know were our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}
680
681
682
683
684
685
686
687
688
689int
690xfs_log_mount_finish(xfs_mount_t *mp)
691{
692 int error;
693
694 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
695 error = xlog_recover_finish(mp->m_log);
696 else {
697 error = 0;
698 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
699 }
700
701 return error;
702}
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720int
721xfs_log_unmount_write(xfs_mount_t *mp)
722{
723 struct xlog *log = mp->m_log;
724 xlog_in_core_t *iclog;
725#ifdef DEBUG
726 xlog_in_core_t *first_iclog;
727#endif
728 xlog_ticket_t *tic = NULL;
729 xfs_lsn_t lsn;
730 int error;
731
732
733
734
735
736 if (mp->m_flags & XFS_MOUNT_RDONLY)
737 return 0;
738
739 error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
740 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
741
742#ifdef DEBUG
743 first_iclog = iclog = log->l_iclog;
744 do {
745 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
746 ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
747 ASSERT(iclog->ic_offset == 0);
748 }
749 iclog = iclog->ic_next;
750 } while (iclog != first_iclog);
751#endif
752 if (! (XLOG_FORCED_SHUTDOWN(log))) {
753 error = xfs_log_reserve(mp, 600, 1, &tic,
754 XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
755 if (!error) {
756
757 struct {
758 __uint16_t magic;
759 __uint16_t pad1;
760 __uint32_t pad2;
761 } magic = {
762 .magic = XLOG_UNMOUNT_TYPE,
763 };
764 struct xfs_log_iovec reg = {
765 .i_addr = &magic,
766 .i_len = sizeof(magic),
767 .i_type = XLOG_REG_TYPE_UNMOUNT,
768 };
769 struct xfs_log_vec vec = {
770 .lv_niovecs = 1,
771 .lv_iovecp = ®,
772 };
773
774
775 tic->t_flags = 0;
776 tic->t_curr_res -= sizeof(magic);
777 error = xlog_write(log, &vec, tic, &lsn,
778 NULL, XLOG_UNMOUNT_TRANS);
779
780
781
782
783
784 }
785
786 if (error)
787 xfs_alert(mp, "%s: unmount record failed", __func__);
788
789
790 spin_lock(&log->l_icloglock);
791 iclog = log->l_iclog;
792 atomic_inc(&iclog->ic_refcnt);
793 xlog_state_want_sync(log, iclog);
794 spin_unlock(&log->l_icloglock);
795 error = xlog_state_release_iclog(log, iclog);
796
797 spin_lock(&log->l_icloglock);
798 if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
799 iclog->ic_state == XLOG_STATE_DIRTY)) {
800 if (!XLOG_FORCED_SHUTDOWN(log)) {
801 xlog_wait(&iclog->ic_force_wait,
802 &log->l_icloglock);
803 } else {
804 spin_unlock(&log->l_icloglock);
805 }
806 } else {
807 spin_unlock(&log->l_icloglock);
808 }
809 if (tic) {
810 trace_xfs_log_umount_write(log, tic);
811 xlog_ungrant_log_space(log, tic);
812 xfs_log_ticket_put(tic);
813 }
814 } else {
815
816
817
818
819
820
821
822
823
824
825
826
827
828 spin_lock(&log->l_icloglock);
829 iclog = log->l_iclog;
830 atomic_inc(&iclog->ic_refcnt);
831
832 xlog_state_want_sync(log, iclog);
833 spin_unlock(&log->l_icloglock);
834 error = xlog_state_release_iclog(log, iclog);
835
836 spin_lock(&log->l_icloglock);
837
838 if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE
839 || iclog->ic_state == XLOG_STATE_DIRTY
840 || iclog->ic_state == XLOG_STATE_IOERROR) ) {
841
842 xlog_wait(&iclog->ic_force_wait,
843 &log->l_icloglock);
844 } else {
845 spin_unlock(&log->l_icloglock);
846 }
847 }
848
849 return error;
850}
851
852
853
854
855
856
857
/*
 * Final log teardown at unmount: stop the periodic sync worker, tear
 * down the AIL, then free all in-core log structures.  Order matters:
 * the AIL references the log, so it is destroyed first.
 */
void
xfs_log_unmount(xfs_mount_t *mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	xfs_trans_ail_destroy(mp);
	xlog_dealloc_log(mp->m_log);
}
865
866void
867xfs_log_item_init(
868 struct xfs_mount *mp,
869 struct xfs_log_item *item,
870 int type,
871 const struct xfs_item_ops *ops)
872{
873 item->li_mountp = mp;
874 item->li_ailp = mp->m_ail;
875 item->li_type = type;
876 item->li_ops = ops;
877 item->li_lv = NULL;
878
879 INIT_LIST_HEAD(&item->li_ail);
880 INIT_LIST_HEAD(&item->li_cil);
881}
882
883
884
885
/*
 * Wake up processes waiting on log space after the log tail has moved.
 * Checks both grant heads; each head's lock is only taken when its
 * waiter list is (racily, via list_empty_careful) seen as non-empty.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}
914
915
916
917
918
919
920
921
922
923
924
925
926
927
/*
 * Determine whether the log still needs a covering (dummy) transaction.
 * Advances the two-stage covering state machine when the AIL is empty
 * and all iclogs are idle.  Returns 1 while covering work remains,
 * 0 once the log is idle/covered or the fs isn't writable.
 */
int
xfs_log_need_covered(xfs_mount_t *mp)
{
	int		needed = 0;
	struct xlog	*log = mp->m_log;

	if (!xfs_fs_writable(mp))
		return 0;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (!xfs_ail_min_lsn(log->l_ailp) &&
		    xlog_iclogs_empty(log)) {
			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
				log->l_covered_state = XLOG_STATE_COVER_DONE;
			else
				log->l_covered_state = XLOG_STATE_COVER_DONE2;
		}
		/* FALLTHROUGH: covering is still in progress */
	default:
		needed = 1;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}
960
961
962
963
/*
 * Recompute the log tail lsn from the minimum lsn in the AIL.
 * Caller must hold the AIL lock.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->xa_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL is empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}
987
/* Wrapper that takes the AIL lock around xlog_assign_tail_lsn_locked(). */
xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->xa_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->xa_lock);

	return tail_lsn;
}
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
/*
 * Return the number of free bytes between the log tail and the given
 * grant head, computed from the (cycle, bytes) pair cracked out of
 * each.  A head more than one full cycle ahead of the tail means no
 * space.  A head behind the tail indicates corruption: complain loudly
 * but answer "whole log free" so callers can limp along.
 */
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		free_bytes;
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		return 0;
	else if (tail_cycle < head_cycle) {
		/* head has wrapped; free space is between head and tail */
		ASSERT(tail_cycle == (head_cycle - 1));
		free_bytes = tail_bytes - head_bytes;
	} else {
		/*
		 * The reservation head is behind the tail.  This should
		 * never happen; return the full log size so the caller can
		 * proceed while the corruption gets investigated.
		 */
		xfs_alert(log->l_mp,
			"xlog_space_left: head behind tail\n"
			" tail_cycle = %d, tail_bytes = %d\n"
			" GH cycle = %d, GH bytes = %d",
			tail_cycle, tail_bytes, head_cycle, head_bytes);
		ASSERT(0);
		free_bytes = log->l_logsize;
	}
	return free_bytes;
}
1052
1053
1054
1055
1056
1057
1058
1059
/*
 * Log I/O completion routine (installed as b_iodone on iclog buffers).
 * On I/O error (or injected test error) the buffer is staled and the
 * filesystem shut down; either way the iclog is then moved through the
 * done-syncing state machine.
 */
void
xlog_iodone(xfs_buf_t *bp)
{
	struct xlog_in_core	*iclog = bp->b_fspriv;
	struct xlog		*l = iclog->ic_log;
	int			aborted = 0;

	/*
	 * Race to shutdown the filesystem if we see an error.
	 */
	if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp,
			XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_buf_stale(bp);
		xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
		/*
		 * This flag will be propagated to the trans-committed
		 * callback routines to let them know that the log-commit
		 * didn't succeed.
		 */
		aborted = XFS_LI_ABORTED;
	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
		aborted = XFS_LI_ABORTED;
	}

	/* log I/O is always issued ASYNC */
	ASSERT(XFS_BUF_ISASYNC(bp));
	xlog_state_done_syncing(iclog, aborted);
	/*
	 * NOTE(review): do not touch the buffer after this call —
	 * completion processing may release it.
	 */
}
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
/*
 * Choose the in-core log buffer count and size from the mount options
 * (falling back to compile-time defaults), derive the number of log
 * header blocks needed for v2 logs with large buffers, and mirror the
 * chosen values back into the mount structure.
 */
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	int size;
	int xhdrs;

	if (mp->m_logbufs <= 0)
		log->l_iclog_bufs = XLOG_MAX_ICLOGS;
	else
		log->l_iclog_bufs = mp->m_logbufs;

	/*
	 * Buffer size passed in from mount system call.
	 */
	if (mp->m_logbsize > 0) {
		size = log->l_iclog_size = mp->m_logbsize;
		log->l_iclog_size_log = 0;
		/* compute log2 of the buffer size */
		while (size != 1) {
			log->l_iclog_size_log++;
			size >>= 1;
		}

		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
			/*
			 * v2 logs need one header block per
			 * XLOG_HEADER_CYCLE_SIZE bytes of buffer, rounded up.
			 */
			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
				xhdrs++;
			log->l_iclog_hsize = xhdrs << BBSHIFT;
			log->l_iclog_heads = xhdrs;
		} else {
			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
			log->l_iclog_hsize = BBSIZE;
			log->l_iclog_heads = 1;
		}
		goto done;
	}

	/* no size passed in: use the default big-record buffer size */
	log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
	log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;

	/* default configuration uses a single basic-block header */
	log->l_iclog_hsize = BBSIZE;
	log->l_iclog_heads = 1;

done:
	/* report the chosen values back through the mount structure */
	if (mp->m_logbufs == 0)
		mp->m_logbufs = log->l_iclog_bufs;
	if (mp->m_logbsize == 0)
		mp->m_logbsize = log->l_iclog_size;
}
1162
1163
1164
1165
1166
1167
1168
/*
 * Allocate and initialise the in-core log structure for a mount,
 * including the ring of in-core log (iclog) buffers, the spare buffer
 * used for writes that wrap the physical end of the log, and the CIL.
 * Returns the log, or an ERR_PTR-encoded negative errno on failure.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog=NULL;
	xfs_buf_t		*bp;
	int			i;
	int			error = ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp = mp;
	log->l_targ = log_target;
	log->l_logsize = BBTOB(num_bblks);
	log->l_logBBstart = blk_offset;
	log->l_logBBsize = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	log->l_flags |= XLOG_ACTIVE_RECOVERY;

	log->l_prev_block = -1;
	/* tail and last-sync start at cycle 1, block 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle = 1;	    /* 0 is bad since this is initial value */

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	/* validate the log sector size recorded in the superblock */
	error = EFSCORRUPTED;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* larger sector sizes need a v2 log on an internal device */
		if (log2_size && log->l_logBBstart > 0 &&
		    !xfs_sb_version_haslogv2(&mp->m_sb)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	/* spare buffer used for the second half of wrapped log writes */
	error = ENOMEM;
	bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
	if (!bp)
		goto out_free_log;
	bp->b_iodone = xlog_iodone;
	ASSERT(xfs_buf_islocked(bp));
	log->l_xbuf = bp;

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * Allocate the circularly linked list of in-core log buffers.
	 * Each iclog gets its own uncached buffer; the record header is
	 * initialised with the magic number, version and filesystem uuid
	 * so only the per-write fields need filling in later.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i=0; i < log->l_iclog_bufs; i++) {
		*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
		if (!*iclogp)
			goto out_free_iclog;

		iclog = *iclogp;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
						BTOBB(log->l_iclog_size), 0);
		if (!bp)
			goto out_free_iclog;

		bp->b_iodone = xlog_iodone;
		iclog->ic_bp = bp;
		iclog->ic_data = bp->b_addr;
#ifdef DEBUG
		log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		spin_lock_init(&iclog->ic_callback_lock);
		iclog->ic_callback_tail = &(iclog->ic_callback);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		ASSERT(xfs_buf_islocked(iclog->ic_bp));
		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	error = xlog_cil_init(log);
	if (error)
		goto out_free_iclog;
	return log;

out_free_iclog:
	/* free however many iclogs (and their buffers) were allocated */
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		if (iclog->ic_bp)
			xfs_buf_free(iclog->ic_bp);
		kmem_free(iclog);
	}
	spinlock_destroy(&log->l_icloglock);
	xfs_buf_free(log->l_xbuf);
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(-error);
}	/* xlog_alloc_log */
1323
1324
1325
1326
1327
1328
1329STATIC int
1330xlog_commit_record(
1331 struct xlog *log,
1332 struct xlog_ticket *ticket,
1333 struct xlog_in_core **iclog,
1334 xfs_lsn_t *commitlsnp)
1335{
1336 struct xfs_mount *mp = log->l_mp;
1337 int error;
1338 struct xfs_log_iovec reg = {
1339 .i_addr = NULL,
1340 .i_len = 0,
1341 .i_type = XLOG_REG_TYPE_COMMIT,
1342 };
1343 struct xfs_log_vec vec = {
1344 .lv_niovecs = 1,
1345 .lv_iovecp = ®,
1346 };
1347
1348 ASSERT_ALWAYS(iclog);
1349 error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1350 XLOG_COMMIT_TRANS);
1351 if (error)
1352 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1353 return error;
1354}
1355
1356
1357
1358
1359
1360
1361
1362
/*
 * Push on the AIL so the log tail moves forward when free log space
 * falls below a threshold: the largest of the requested byte count,
 * one quarter of the log, and 256 basic blocks.  The push target is
 * capped at the last synced lsn so the AIL is never asked to push past
 * log data still in flight.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * The threshold is the max of what the caller needs, a quarter of
	 * the log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = MAX(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
						&threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		/* target wraps into the next cycle */
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last log record
	 * known to be on disk.  Use a snapshot of the last sync lsn so that
	 * it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	/*
	 * Get the transaction layer to kick the dirty buffers out to disk
	 * asynchronously.  No point in trying if the filesystem is shutting
	 * down.
	 */
	if (!XLOG_FORCED_SHUTDOWN(log))
		xfs_ail_push(log->l_ailp, threshold_lsn);
}
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427STATIC int
1428xlog_bdstrat(
1429 struct xfs_buf *bp)
1430{
1431 struct xlog_in_core *iclog = bp->b_fspriv;
1432
1433 if (iclog->ic_state & XLOG_STATE_IOERROR) {
1434 xfs_buf_ioerror(bp, EIO);
1435 xfs_buf_stale(bp);
1436 xfs_buf_ioend(bp, 0);
1437
1438
1439
1440
1441
1442 return 0;
1443 }
1444
1445 xfs_buf_iorequest(bp);
1446 return 0;
1447}
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
/*
 * Flush the in-core log (iclog) out to the on-disk log asynchronously.
 *
 * The write is padded out to a stripe unit (v2 logs with a stripe
 * unit) or to a basic-block boundary, and the padding bytes are added
 * back onto the grant heads since they were never charged to a
 * reservation.  If the write runs past the physical end of the log it
 * is split into two I/Os, the wrapped portion issued via the spare
 * l_xbuf buffer.
 */
STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_caddr_t	dptr;
	xfs_buf_t	*bp;
	int		i;
	uint		count;		/* byte count of bwrite */
	uint		count_init;	/* initial count before roundup */
	int		roundoff;	/* padding added by the roundup */
	int		split = 0;	/* bytes wrapped past end of log */
	int		error;
	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);

	XFS_STATS_INC(xs_log_writes);
	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

	/* header plus the data copied in so far */
	count_init = log->l_iclog_hsize + iclog->ic_offset;

	/* round the write size up to a stripe unit or basic block */
	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
	} else {
		count = BBTOB(BTOBB(count_init));
	}
	roundoff = count - count_init;
	ASSERT(roundoff >= 0);
	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
		roundoff < log->l_mp->m_sb.sb_logsunit)
		||
		(log->l_mp->m_sb.sb_logsunit <= 1 &&
		 roundoff < BBTOB(1)));

	/* account for the padding on both grant heads */
	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);

	/* stamp cycle numbers into the data blocks */
	xlog_pack_data(log, iclog, roundoff);

	/* record the real byte length; v2 logs include the padding */
	if (v2) {
		iclog->ic_header.h_len =
			cpu_to_be32(iclog->ic_offset + roundoff);
	} else {
		iclog->ic_header.h_len =
			cpu_to_be32(iclog->ic_offset);
	}

	bp = iclog->ic_bp;
	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));

	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));

	/* split this write into two parts if it wraps the log end */
	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
		iclog->ic_bwritecnt = 2;
	} else {
		iclog->ic_bwritecnt = 1;
	}
	bp->b_io_length = BTOBB(count);
	bp->b_fspriv = iclog;
	XFS_BUF_ZEROFLAGS(bp);
	XFS_BUF_ASYNC(bp);
	bp->b_flags |= XBF_SYNCIO;

	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
		bp->b_flags |= XBF_FUA;

		/*
		 * Flush the data device before flushing the log to make
		 * sure all meta data written back from the AIL actually made
		 * it to disk before stamping the new log tail LSN into the
		 * log buffer.  For an external log we need to issue the
		 * flush explicitly, and unfortunately synchronously here;
		 * for an internal log we can simply use the block layer
		 * state machine for preflushes.
		 */
		if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
			xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
		else
			bp->b_flags |= XBF_FLUSH;
	}

	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

	xlog_verify_iclog(log, iclog, count, B_TRUE);

	/* account for a log that doesn't start at block #0 */
	XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
	/*
	 * Don't call xfs_bwrite here.  We do log syncs even when the
	 * filesystem is shutting down.
	 */
	XFS_BUF_WRITE(bp);

	error = xlog_bdstrat(bp);
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_sync");
		return error;
	}
	if (split) {
		/* write the wrapped remainder from the start of the log */
		bp = iclog->ic_log->l_xbuf;
		XFS_BUF_SET_ADDR(bp, 0);	     /* logical 0 */
		xfs_buf_associate_memory(bp,
				(char *)&iclog->ic_header + count, split);
		bp->b_fspriv = iclog;
		XFS_BUF_ZEROFLAGS(bp);
		XFS_BUF_ASYNC(bp);
		bp->b_flags |= XBF_SYNCIO;
		if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
			bp->b_flags |= XBF_FUA;
		dptr = bp->b_addr;
		/*
		 * Bump the cycle numbers at the start of each block since
		 * this part of the buffer is at the start of a new cycle.
		 * Watch out for the header magic number case, though.
		 */
		for (i = 0; i < split; i += BBSIZE) {
			be32_add_cpu((__be32 *)dptr, 1);
			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
				be32_add_cpu((__be32 *)dptr, 1);
			dptr += BBSIZE;
		}

		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

		/* account for internal log which doesn't start at block #0 */
		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
		XFS_BUF_WRITE(bp);
		error = xlog_bdstrat(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
			return error;
		}
	}
	return 0;
}	/* xlog_sync */
1620
1621
1622
1623
1624
1625STATIC void
1626xlog_dealloc_log(
1627 struct xlog *log)
1628{
1629 xlog_in_core_t *iclog, *next_iclog;
1630 int i;
1631
1632 xlog_cil_destroy(log);
1633
1634
1635
1636
1637
1638 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
1639 xfs_buf_free(log->l_xbuf);
1640
1641 iclog = log->l_iclog;
1642 for (i=0; i<log->l_iclog_bufs; i++) {
1643 xfs_buf_free(iclog->ic_bp);
1644 next_iclog = iclog->ic_next;
1645 kmem_free(iclog);
1646 iclog = next_iclog;
1647 }
1648 spinlock_destroy(&log->l_icloglock);
1649
1650 log->l_mp->m_log = NULL;
1651 kmem_free(log);
1652}
1653
1654
1655
1656
1657
/*
 * Account for a completed copy into an iclog: bump the log operation
 * count in the iclog header by @record_cnt and advance the iclog's
 * write offset by @copy_bytes.  The icloglock serialises these updates
 * with other writers into the same iclog.
 */
static inline void
xlog_state_finish_copy(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			record_cnt,
	int			copy_bytes)
{
	spin_lock(&log->l_icloglock);

	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
	iclog->ic_offset += copy_bytes;

	spin_unlock(&log->l_icloglock);
}
1672
1673
1674
1675
1676
1677
1678
1679
1680void
1681xlog_print_tic_res(
1682 struct xfs_mount *mp,
1683 struct xlog_ticket *ticket)
1684{
1685 uint i;
1686 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1687
1688
1689 static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1690 "bformat",
1691 "bchunk",
1692 "efi_format",
1693 "efd_format",
1694 "iformat",
1695 "icore",
1696 "iext",
1697 "ibroot",
1698 "ilocal",
1699 "iattr_ext",
1700 "iattr_broot",
1701 "iattr_local",
1702 "qformat",
1703 "dquot",
1704 "quotaoff",
1705 "LR header",
1706 "unmount",
1707 "commit",
1708 "trans header"
1709 };
1710 static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1711 "SETATTR_NOT_SIZE",
1712 "SETATTR_SIZE",
1713 "INACTIVE",
1714 "CREATE",
1715 "CREATE_TRUNC",
1716 "TRUNCATE_FILE",
1717 "REMOVE",
1718 "LINK",
1719 "RENAME",
1720 "MKDIR",
1721 "RMDIR",
1722 "SYMLINK",
1723 "SET_DMATTRS",
1724 "GROWFS",
1725 "STRAT_WRITE",
1726 "DIOSTRAT",
1727 "WRITE_SYNC",
1728 "WRITEID",
1729 "ADDAFORK",
1730 "ATTRINVAL",
1731 "ATRUNCATE",
1732 "ATTR_SET",
1733 "ATTR_RM",
1734 "ATTR_FLAG",
1735 "CLEAR_AGI_BUCKET",
1736 "QM_SBCHANGE",
1737 "DUMMY1",
1738 "DUMMY2",
1739 "QM_QUOTAOFF",
1740 "QM_DQALLOC",
1741 "QM_SETQLIM",
1742 "QM_DQCLUSTER",
1743 "QM_QINOCREATE",
1744 "QM_QUOTAOFF_END",
1745 "SB_UNIT",
1746 "FSYNC_TS",
1747 "GROWFSRT_ALLOC",
1748 "GROWFSRT_ZERO",
1749 "GROWFSRT_FREE",
1750 "SWAPEXT"
1751 };
1752
1753 xfs_warn(mp,
1754 "xlog_write: reservation summary:\n"
1755 " trans type = %s (%u)\n"
1756 " unit res = %d bytes\n"
1757 " current res = %d bytes\n"
1758 " total reg = %u bytes (o/flow = %u bytes)\n"
1759 " ophdrs = %u (ophdr space = %u bytes)\n"
1760 " ophdr + reg = %u bytes\n"
1761 " num regions = %u\n",
1762 ((ticket->t_trans_type <= 0 ||
1763 ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
1764 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1765 ticket->t_trans_type,
1766 ticket->t_unit_res,
1767 ticket->t_curr_res,
1768 ticket->t_res_arr_sum, ticket->t_res_o_flow,
1769 ticket->t_res_num_ophdrs, ophdr_spc,
1770 ticket->t_res_arr_sum +
1771 ticket->t_res_o_flow + ophdr_spc,
1772 ticket->t_res_num);
1773
1774 for (i = 0; i < ticket->t_res_num; i++) {
1775 uint r_type = ticket->t_res_arr[i].r_type;
1776 xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
1777 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
1778 "bad-rtype" : res_type_str[r_type-1]),
1779 ticket->t_res_arr[i].r_len);
1780 }
1781
1782 xfs_alert_tag(mp, XFS_PTAG_LOGRES,
1783 "xlog_write: reservation ran out. Need to up reservation");
1784 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1785}
1786
1787
1788
1789
1790
1791static int
1792xlog_write_calc_vec_length(
1793 struct xlog_ticket *ticket,
1794 struct xfs_log_vec *log_vector)
1795{
1796 struct xfs_log_vec *lv;
1797 int headers = 0;
1798 int len = 0;
1799 int i;
1800
1801
1802 if (ticket->t_flags & XLOG_TIC_INITED)
1803 headers++;
1804
1805 for (lv = log_vector; lv; lv = lv->lv_next) {
1806 headers += lv->lv_niovecs;
1807
1808 for (i = 0; i < lv->lv_niovecs; i++) {
1809 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i];
1810
1811 len += vecp->i_len;
1812 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
1813 }
1814 }
1815
1816 ticket->t_res_num_ophdrs += headers;
1817 len += headers * sizeof(struct xlog_op_header);
1818
1819 return len;
1820}
1821
1822
1823
1824
1825
1826static int
1827xlog_write_start_rec(
1828 struct xlog_op_header *ophdr,
1829 struct xlog_ticket *ticket)
1830{
1831 if (!(ticket->t_flags & XLOG_TIC_INITED))
1832 return 0;
1833
1834 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
1835 ophdr->oh_clientid = ticket->t_clientid;
1836 ophdr->oh_len = 0;
1837 ophdr->oh_flags = XLOG_START_TRANS;
1838 ophdr->oh_res2 = 0;
1839
1840 ticket->t_flags &= ~XLOG_TIC_INITED;
1841
1842 return sizeof(struct xlog_op_header);
1843}
1844
1845static xlog_op_header_t *
1846xlog_write_setup_ophdr(
1847 struct xlog *log,
1848 struct xlog_op_header *ophdr,
1849 struct xlog_ticket *ticket,
1850 uint flags)
1851{
1852 ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
1853 ophdr->oh_clientid = ticket->t_clientid;
1854 ophdr->oh_res2 = 0;
1855
1856
1857 ophdr->oh_flags = flags;
1858
1859
1860
1861
1862
1863
1864 switch (ophdr->oh_clientid) {
1865 case XFS_TRANSACTION:
1866 case XFS_VOLUME:
1867 case XFS_LOG:
1868 break;
1869 default:
1870 xfs_warn(log->l_mp,
1871 "Bad XFS transaction clientid 0x%x in ticket 0x%p",
1872 ophdr->oh_clientid, ticket);
1873 return NULL;
1874 }
1875
1876 return ophdr;
1877}
1878
1879
1880
1881
1882
1883
1884
1885static int
1886xlog_write_setup_copy(
1887 struct xlog_ticket *ticket,
1888 struct xlog_op_header *ophdr,
1889 int space_available,
1890 int space_required,
1891 int *copy_off,
1892 int *copy_len,
1893 int *last_was_partial_copy,
1894 int *bytes_consumed)
1895{
1896 int still_to_copy;
1897
1898 still_to_copy = space_required - *bytes_consumed;
1899 *copy_off = *bytes_consumed;
1900
1901 if (still_to_copy <= space_available) {
1902
1903 *copy_len = still_to_copy;
1904 ophdr->oh_len = cpu_to_be32(*copy_len);
1905 if (*last_was_partial_copy)
1906 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
1907 *last_was_partial_copy = 0;
1908 *bytes_consumed = 0;
1909 return 0;
1910 }
1911
1912
1913 *copy_len = space_available;
1914 ophdr->oh_len = cpu_to_be32(*copy_len);
1915 ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
1916 if (*last_was_partial_copy)
1917 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
1918 *bytes_consumed += *copy_len;
1919 (*last_was_partial_copy)++;
1920
1921
1922 ticket->t_curr_res -= sizeof(struct xlog_op_header);
1923 ticket->t_res_num_ophdrs++;
1924
1925 return sizeof(struct xlog_op_header);
1926}
1927
/*
 * Finish off a copy into an iclog and decide what happens to the iclog.
 *
 * If the last copy was partial, the iclog is full: fold the running
 * record/data counters into it and release it so writing can continue
 * in the next iclog.  Otherwise, if the remaining space cannot hold
 * even one more op header, push the iclog towards disk - either by
 * releasing it, or by handing our reference back through @commit_iclog
 * for the caller to release after the commit record is accounted.
 *
 * Returns 0 or an error from xlog_state_release_iclog().
 */
static int
xlog_write_copy_finish(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint			flags,
	int			*record_cnt,
	int			*data_cnt,
	int			*partial_copy,
	int			*partial_copy_len,
	int			log_offset,
	struct xlog_in_core	**commit_iclog)
{
	if (*partial_copy) {
		/*
		 * This iclog filled up mid-region; account what we copied
		 * and release the iclog so the copy continues elsewhere.
		 */
		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
		*record_cnt = 0;
		*data_cnt = 0;
		return xlog_state_release_iclog(log, iclog);
	}

	*partial_copy = 0;
	*partial_copy_len = 0;

	if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
		/* no room for another op header - push this iclog */
		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
		*record_cnt = 0;
		*data_cnt = 0;

		spin_lock(&log->l_icloglock);
		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);

		/*
		 * If the caller wants the commit iclog back, transfer our
		 * reference instead of releasing it here.
		 */
		if (!commit_iclog)
			return xlog_state_release_iclog(log, iclog);
		ASSERT(flags & XLOG_COMMIT_TRANS);
		*commit_iclog = iclog;
	}

	return 0;
}
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013int
2014xlog_write(
2015 struct xlog *log,
2016 struct xfs_log_vec *log_vector,
2017 struct xlog_ticket *ticket,
2018 xfs_lsn_t *start_lsn,
2019 struct xlog_in_core **commit_iclog,
2020 uint flags)
2021{
2022 struct xlog_in_core *iclog = NULL;
2023 struct xfs_log_iovec *vecp;
2024 struct xfs_log_vec *lv;
2025 int len;
2026 int index;
2027 int partial_copy = 0;
2028 int partial_copy_len = 0;
2029 int contwr = 0;
2030 int record_cnt = 0;
2031 int data_cnt = 0;
2032 int error;
2033
2034 *start_lsn = 0;
2035
2036 len = xlog_write_calc_vec_length(ticket, log_vector);
2037
2038
2039
2040
2041
2042
2043 if (ticket->t_flags & XLOG_TIC_INITED)
2044 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2045
2046
2047
2048
2049
2050 if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
2051 ticket->t_curr_res -= sizeof(xlog_op_header_t);
2052
2053 if (ticket->t_curr_res < 0)
2054 xlog_print_tic_res(log->l_mp, ticket);
2055
2056 index = 0;
2057 lv = log_vector;
2058 vecp = lv->lv_iovecp;
2059 while (lv && index < lv->lv_niovecs) {
2060 void *ptr;
2061 int log_offset;
2062
2063 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2064 &contwr, &log_offset);
2065 if (error)
2066 return error;
2067
2068 ASSERT(log_offset <= iclog->ic_size - 1);
2069 ptr = iclog->ic_datap + log_offset;
2070
2071
2072 if (!*start_lsn)
2073 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2074
2075
2076
2077
2078
2079 while (lv && index < lv->lv_niovecs) {
2080 struct xfs_log_iovec *reg = &vecp[index];
2081 struct xlog_op_header *ophdr;
2082 int start_rec_copy;
2083 int copy_len;
2084 int copy_off;
2085
2086 ASSERT(reg->i_len % sizeof(__int32_t) == 0);
2087 ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
2088
2089 start_rec_copy = xlog_write_start_rec(ptr, ticket);
2090 if (start_rec_copy) {
2091 record_cnt++;
2092 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2093 start_rec_copy);
2094 }
2095
2096 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2097 if (!ophdr)
2098 return XFS_ERROR(EIO);
2099
2100 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2101 sizeof(struct xlog_op_header));
2102
2103 len += xlog_write_setup_copy(ticket, ophdr,
2104 iclog->ic_size-log_offset,
2105 reg->i_len,
2106 ©_off, ©_len,
2107 &partial_copy,
2108 &partial_copy_len);
2109 xlog_verify_dest_ptr(log, ptr);
2110
2111
2112 ASSERT(copy_len >= 0);
2113 memcpy(ptr, reg->i_addr + copy_off, copy_len);
2114 xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
2115
2116 copy_len += start_rec_copy + sizeof(xlog_op_header_t);
2117 record_cnt++;
2118 data_cnt += contwr ? copy_len : 0;
2119
2120 error = xlog_write_copy_finish(log, iclog, flags,
2121 &record_cnt, &data_cnt,
2122 &partial_copy,
2123 &partial_copy_len,
2124 log_offset,
2125 commit_iclog);
2126 if (error)
2127 return error;
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141 if (partial_copy)
2142 break;
2143
2144 if (++index == lv->lv_niovecs) {
2145 lv = lv->lv_next;
2146 index = 0;
2147 if (lv)
2148 vecp = lv->lv_iovecp;
2149 }
2150 if (record_cnt == 0) {
2151 if (!lv)
2152 return 0;
2153 break;
2154 }
2155 }
2156 }
2157
2158 ASSERT(len == 0);
2159
2160 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2161 if (!commit_iclog)
2162 return xlog_state_release_iclog(log, iclog);
2163
2164 ASSERT(flags & XLOG_COMMIT_TRANS);
2165 *commit_iclog = iclog;
2166 return 0;
2167}
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
/*
 * Move dirty iclogs back to the active state and drive the log
 * covering state machine forwards.
 *
 * Walks the circular iclog list from l_iclog, reactivating each DIRTY
 * iclog (resetting its offset and header fields) until a non-ACTIVE,
 * non-DIRTY iclog is found.  While doing so it records whether any
 * iclog looked like a covering (dummy) record, and updates
 * l_covered_state accordingly.  Caller holds l_icloglock.
 */
STATIC void
xlog_state_clean_log(
	struct xlog *log)
{
	xlog_in_core_t	*iclog;
	int changed = 0;

	iclog = log->l_iclog;
	do {
		if (iclog->ic_state == XLOG_STATE_DIRTY) {
			iclog->ic_state	= XLOG_STATE_ACTIVE;
			iclog->ic_offset       = 0;
			ASSERT(iclog->ic_callback == NULL);
			/*
			 * An iclog carrying exactly XLOG_COVER_OPS
			 * operations is taken to be a covering (dummy)
			 * record, but only for the first such iclog
			 * seen in this pass (changed == 1).  Anything
			 * else means real work went to the log
			 * (changed == 2).
			 */
			if (!changed &&
			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
			   		XLOG_COVER_OPS)) {
				changed = 1;
			} else {
				/*
				 * We have two dirty iclogs so start
				 * covering again: the second dirty
				 * iclog proves this was not a lone
				 * dummy record.
				 */
				changed = 2;
			}
			iclog->ic_header.h_num_logops = 0;
			memset(iclog->ic_header.h_cycle_data, 0,
			      sizeof(iclog->ic_header.h_cycle_data));
			iclog->ic_header.h_lsn = 0;
		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
			/* do nothing */;
		else
			break;	/* stop cleaning */
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);

	/*
	 * Advance the covering state machine.  A dummy record
	 * (changed == 1) steps DONE -> NEED2 and DONE2 -> IDLE; real
	 * activity (changed == 2) restarts covering from NEED.
	 */
	if (changed) {
		switch (log->l_covered_state) {
		case XLOG_STATE_COVER_IDLE:
		case XLOG_STATE_COVER_NEED:
		case XLOG_STATE_COVER_NEED2:
			log->l_covered_state = XLOG_STATE_COVER_NEED;
			break;

		case XLOG_STATE_COVER_DONE:
			if (changed == 1)
				log->l_covered_state = XLOG_STATE_COVER_NEED2;
			else
				log->l_covered_state = XLOG_STATE_COVER_NEED;
			break;

		case XLOG_STATE_COVER_DONE2:
			if (changed == 1)
				log->l_covered_state = XLOG_STATE_COVER_IDLE;
			else
				log->l_covered_state = XLOG_STATE_COVER_NEED;
			break;

		default:
			ASSERT(0);
		}
	}
}
2264
2265STATIC xfs_lsn_t
2266xlog_get_lowest_lsn(
2267 struct xlog *log)
2268{
2269 xlog_in_core_t *lsn_log;
2270 xfs_lsn_t lowest_lsn, lsn;
2271
2272 lsn_log = log->l_iclog;
2273 lowest_lsn = 0;
2274 do {
2275 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
2276 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
2277 if ((lsn && !lowest_lsn) ||
2278 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
2279 lowest_lsn = lsn;
2280 }
2281 }
2282 lsn_log = lsn_log->ic_next;
2283 } while (lsn_log != log->l_iclog);
2284 return lowest_lsn;
2285}
2286
2287
/*
 * Run the completion callbacks attached to iclogs whose I/O has
 * finished, in strict LSN order, and retire those iclogs to DIRTY.
 *
 * @log:     the log being processed
 * @aborted: passed through to each callback (non-zero on shutdown)
 * @ciclog:  the iclog whose I/O completion triggered this call; may be
 *           marked DO_CALLBACK if an earlier iclog is still in flight
 *
 * The outer loop restarts the scan from l_iclog whenever a pass made
 * progress, because callbacks drop l_icloglock and the list state can
 * change underneath us.  Callbacks themselves run without l_icloglock,
 * under ic_callback_lock, which is dropped around each batch so that
 * callbacks may add new callbacks.
 */
STATIC void
xlog_state_do_callback(
	struct xlog		*log,
	int			aborted,
	struct xlog_in_core	*ciclog)
{
	xlog_in_core_t	   *iclog;
	xlog_in_core_t	   *first_iclog;	/* used to know when we've
						 * processed all iclogs once */
	xfs_log_callback_t *cb, *cb_next;
	int		   flushcnt = 0;
	xfs_lsn_t	   lowest_lsn;
	int		   ioerrors;	/* counter: iclogs with errors */
	int		   loopdidcallbacks; /* flag: inner loop did callbacks*/
	int		   funcdidcallbacks; /* flag: function did callbacks */
	int		   repeats;	/* Repeat counter: warn if we iterate
					 * 5000 times */
	int		   wake = 0;

	spin_lock(&log->l_icloglock);
	first_iclog = iclog = log->l_iclog;
	ioerrors = 0;
	funcdidcallbacks = 0;
	repeats = 0;

	do {
		/*
		 * Scan all iclogs starting with the one pointed to by the
		 * log.  Reset this starting point each time the log is
		 * unlocked (during callbacks).
		 *
		 * Keep looping through iclogs until one full pass is made
		 * without running any callbacks.
		 */
		first_iclog = log->l_iclog;
		iclog = log->l_iclog;
		loopdidcallbacks = 0;
		repeats++;

		do {

			/* skip all iclogs in the ACTIVE & DIRTY states */
			if (iclog->ic_state &
			    (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
				iclog = iclog->ic_next;
				continue;
			}

			/*
			 * Between marking a filesystem SHUTDOWN and stopping
			 * the log, we do flush all iclogs to disk (if there
			 * wasn't a log I/O error). So, we do want things to
			 * go smoothly in case of just a SHUTDOWN  w/o a
			 * LOG_IO_ERROR.
			 */
			if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
				/*
				 * Can only perform callbacks in order.  Since
				 * this iclog is not in the DONE_SYNC or
				 * DO_CALLBACK states, we skip the rest and
				 * just try to clean up.  If we set our iclog
				 * to DO_CALLBACK, we will not process it when
				 * we retry since a previous iclog is in the
				 * CALLBACK and the state cannot change since
				 * we are holding the l_icloglock.
				 */
				if (!(iclog->ic_state &
					(XLOG_STATE_DONE_SYNC |
						 XLOG_STATE_DO_CALLBACK))) {
					if (ciclog && (ciclog->ic_state ==
							XLOG_STATE_DONE_SYNC)) {
						ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
					}
					break;
				}
				/*
				 * We now have an iclog that is in either the
				 * DO_CALLBACK or DONE_SYNC states. The other
				 * states (WANT_SYNC, SYNCING, or CALLBACK were
				 * caught by the above if and are going to
				 * clean (i.e. we aren't doing their callbacks)
				 * see the above if.
				 *
				 * We will do one more check here to see if we
				 * have chased our tail around.  If this is not
				 * the lowest lsn iclog, then we will leave it
				 * for another completion to process.
				 */
				lowest_lsn = xlog_get_lowest_lsn(log);
				if (lowest_lsn &&
				    XFS_LSN_CMP(lowest_lsn,
						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
					iclog = iclog->ic_next;
					continue; /* Leave this iclog for
						   * another thread */
				}

				iclog->ic_state = XLOG_STATE_CALLBACK;

				/*
				 * update the last_sync_lsn before we drop the
				 * icloglock to ensure we are the only one that
				 * can update it.  Only bump it if there are
				 * callbacks attached, since an iclog with no
				 * callbacks needs no ordering guarantee.
				 */
				ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
					be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
				if (iclog->ic_callback)
					atomic64_set(&log->l_last_sync_lsn,
						be64_to_cpu(iclog->ic_header.h_lsn));

			} else
				ioerrors++;

			spin_unlock(&log->l_icloglock);

			/*
			 * Keep processing entries in the callback list until
			 * we come around and it is empty.  We need to
			 * atomically see that the list is empty and change the
			 * state to DIRTY so that we don't miss any more
			 * callbacks being added.
			 */
			spin_lock(&iclog->ic_callback_lock);
			cb = iclog->ic_callback;
			while (cb) {
				iclog->ic_callback_tail = &(iclog->ic_callback);
				iclog->ic_callback = NULL;
				spin_unlock(&iclog->ic_callback_lock);

				/* perform callbacks in the order given */
				for (; cb; cb = cb_next) {
					cb_next = cb->cb_next;
					cb->cb_func(cb->cb_arg, aborted);
				}
				spin_lock(&iclog->ic_callback_lock);
				cb = iclog->ic_callback;
			}

			loopdidcallbacks++;
			funcdidcallbacks++;

			spin_lock(&log->l_icloglock);
			ASSERT(iclog->ic_callback == NULL);
			spin_unlock(&iclog->ic_callback_lock);
			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
				iclog->ic_state = XLOG_STATE_DIRTY;

			/*
			 * Transition from DIRTY to ACTIVE if applicable.
			 * NOP if STATE_IOERROR.
			 */
			xlog_state_clean_log(log);

			/* wake up threads waiting in xfs_log_force() */
			wake_up_all(&iclog->ic_force_wait);

			iclog = iclog->ic_next;
		} while (first_iclog != iclog);

		if (repeats > 5000) {
			flushcnt += repeats;
			repeats = 0;
			xfs_warn(log->l_mp,
				"%s: possible infinite loop (%d iterations)",
				__func__, flushcnt);
		}
	} while (!ioerrors && loopdidcallbacks);

	/*
	 * make one last gasp attempt to see if iclogs are being left in limbo.
	 * If the above loop finds an iclog earlier than the current iclog and
	 * in one of the syncing states, the current iclog is put into
	 * DO_CALLBACK and the callbacks are deferred to the completion of the
	 * earlier iclog. Walk the iclogs in order and make sure that no iclog
	 * is in DO_CALLBACK unless an earlier iclog is in one of the syncing
	 * states.
	 */
#ifdef DEBUG
	if (funcdidcallbacks) {
		first_iclog = iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
			/*
			 * Terminate the loop if iclogs are found in states
			 * which will cause other threads to clean up iclogs.
			 *
			 * SYNCING - i/o completion will go through logs
			 * DONE_SYNC - interrupt thread should be waiting for
			 *              l_icloglock
			 * IOERROR - give up hope all ye who enter here
			 */
			if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
			    iclog->ic_state == XLOG_STATE_SYNCING ||
			    iclog->ic_state == XLOG_STATE_DONE_SYNC ||
			    iclog->ic_state == XLOG_STATE_IOERROR )
				break;
			iclog = iclog->ic_next;
		} while (first_iclog != iclog);
	}
#endif

	if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
		wake = 1;
	spin_unlock(&log->l_icloglock);

	if (wake)
		wake_up_all(&log->l_flush_wait);
}
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
/*
 * I/O completion handler for an iclog write.
 *
 * @iclog:   the iclog whose buffer I/O just finished
 * @aborted: non-zero if the write was aborted; passed to callbacks
 *
 * For a split write (ic_bwritecnt == 2) the first completion only
 * decrements the count and returns; the second completion (or a
 * non-split write) moves the iclog to DONE_SYNC, wakes anyone waiting
 * on the write, and kicks callback processing.
 */
STATIC void
xlog_state_done_syncing(
	xlog_in_core_t	*iclog,
	int		aborted)
{
	struct xlog	   *log = iclog->ic_log;

	spin_lock(&log->l_icloglock);

	ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
	       iclog->ic_state == XLOG_STATE_IOERROR);
	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
	ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);

	/*
	 * If we got an error, either on the first buffer, or in the case
	 * of split log writes, on the second, we mark ALL iclogs STATE_IOERROR
	 * and none should ever be attempted to be written to disk again.
	 */
	if (iclog->ic_state != XLOG_STATE_IOERROR) {
		if (--iclog->ic_bwritecnt == 1) {
			/* first half of a split write done; wait for the
			 * second half's completion */
			spin_unlock(&log->l_icloglock);
			return;
		}
		iclog->ic_state = XLOG_STATE_DONE_SYNC;
	}

	/*
	 * Someone could be sleeping prior to writing out the next
	 * iclog buffer, we wake them all, one will get to do the
	 * I/O, the others get to wait for the result.
	 */
	wake_up_all(&iclog->ic_write_wait);
	spin_unlock(&log->l_icloglock);
	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
}
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
/*
 * Grab @len bytes of space in the current ACTIVE iclog.
 *
 * @log:             the log to write into
 * @len:             total bytes the caller intends to write
 * @iclogp:          out - the iclog to write into (with a reference held)
 * @ticket:          reservation ticket; charged for the record header if
 *                   this write opens a fresh iclog
 * @continued_write: out - set if @len does not fit and the write will
 *                   continue into further iclogs
 * @logoffsetp:      out - byte offset in the iclog at which to write
 *
 * Sleeps on l_flush_wait until the head iclog is ACTIVE.  Returns 0,
 * or EIO if the log has been force-shutdown.
 */
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclogp,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp)
{
	int		  log_offset;
	xlog_rec_header_t *head;
	xlog_in_core_t	  *iclog;
	int		  error;

restart:
	spin_lock(&log->l_icloglock);
	if (XLOG_FORCED_SHUTDOWN(log)) {
		spin_unlock(&log->l_icloglock);
		return XFS_ERROR(EIO);
	}

	iclog = log->l_iclog;
	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
		XFS_STATS_INC(xs_log_noiclogs);

		/* Wait for log writes to complete and retry */
		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
		goto restart;
	}

	head = &iclog->ic_header;

	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
	log_offset = iclog->ic_offset;

	/*
	 * On the 1st write to an iclog, figure out the lsn of the log
	 * record and charge the ticket for the record header space.
	 */
	if (log_offset == 0) {
		ticket->t_curr_res -= log->l_iclog_hsize;
		xlog_tic_add_region(ticket,
				    log->l_iclog_hsize,
				    XLOG_REG_TYPE_LRHEADER);
		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
		head->h_lsn = cpu_to_be64(
			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
		ASSERT(log->l_curr_block >= 0);
	}

	/*
	 * If there is enough room to write everything, then do it.
	 * Otherwise, claim the rest of the region and make sure the
	 * remaining space can hold at least two op headers; if not,
	 * switch to a new iclog and try again.
	 */
	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);

		/*
		 * If we are the only one writing to this iclog, sync it
		 * to disk.  We need to do an atomic compare and decrement
		 * here to avoid racing with concurrent atomic_dec_and_lock()
		 * calls in xlog_state_release_iclog() when there is more
		 * than one reference to the iclog.
		 */
		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
			/* we are the only one */
			spin_unlock(&log->l_icloglock);
			error = xlog_state_release_iclog(log, iclog);
			if (error)
				return error;
		} else {
			spin_unlock(&log->l_icloglock);
		}
		goto restart;
	}

	/*
	 * Do we have enough room to write the full amount in the remainder
	 * of this iclog?  If so, reserve it now; otherwise mark this as a
	 * continued write and switch iclogs - the continuation will claim
	 * its own space when it restarts.
	 */
	if (len <= iclog->ic_size - iclog->ic_offset) {
		*continued_write = 0;
		iclog->ic_offset += len;
	} else {
		*continued_write = 1;
		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
	}
	*iclogp = iclog;

	ASSERT(iclog->ic_offset <= iclog->ic_size);
	spin_unlock(&log->l_icloglock);

	*logoffsetp = log_offset;
	return 0;
}
2681
2682
2683
2684
2685
2686
2687
2688
/*
 * Give back the remainder of the current reservation and refresh the
 * ticket back to its full unit reservation (permanent transactions).
 *
 * The used portion (t_curr_res) is removed from both grant heads; if
 * the ticket still has reservation counts left (t_cnt > 0) it already
 * covers the next cycle, otherwise a fresh unit reservation is added
 * back to the reserve head.
 */
STATIC void
xlog_regrant_reserve_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{
	trace_xfs_log_regrant_reserve_enter(log, ticket);

	if (ticket->t_cnt > 0)
		ticket->t_cnt--;

	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
					ticket->t_curr_res);
	xlog_grant_sub_space(log, &log->l_write_head.grant,
					ticket->t_curr_res);
	ticket->t_curr_res = ticket->t_unit_res;
	xlog_tic_reset_res(ticket);

	trace_xfs_log_regrant_reserve_sub(log, ticket);

	/* just return if we still have some of the pre-reserved space */
	if (ticket->t_cnt > 0)
		return;

	xlog_grant_add_space(log, &log->l_reserve_head.grant,
					ticket->t_unit_res);

	trace_xfs_log_regrant_reserve_exit(log, ticket);

	ticket->t_curr_res = ticket->t_unit_res;
	xlog_tic_reset_res(ticket);
}
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
/*
 * Release all of a ticket's remaining reservation back to the log.
 *
 * Both the unused current reservation and, for permanent tickets, any
 * unused pre-reserved units are removed from both grant heads, and
 * waiters for log space are woken.
 */
STATIC void
xlog_ungrant_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{
	int	bytes;

	if (ticket->t_cnt > 0)
		ticket->t_cnt--;

	trace_xfs_log_ungrant_enter(log, ticket);
	trace_xfs_log_ungrant_sub(log, ticket);

	/*
	 * If this is a permanent reservation ticket, we may be able to free
	 * up more space based on the remaining count.
	 */
	bytes = ticket->t_curr_res;
	if (ticket->t_cnt > 0) {
		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
		bytes += ticket->t_unit_res*ticket->t_cnt;
	}

	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);

	trace_xfs_log_ungrant_exit(log, ticket);

	xfs_log_space_wake(log->l_mp);
}
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
/*
 * Drop a reference to an iclog; if this is the last reference and the
 * iclog has been marked WANT_SYNC, stamp the tail LSN into the header,
 * move it to SYNCING and issue the write via xlog_sync().
 *
 * Returns 0 or EIO if the iclog (or log) is in the error state.
 */
STATIC int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	int		sync = 0;	/* do we sync? */

	if (iclog->ic_state & XLOG_STATE_IOERROR)
		return XFS_ERROR(EIO);

	ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
	/* not the last reference - nothing more to do */
	if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
		return 0;

	/* recheck under the lock: shutdown may have raced in */
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		spin_unlock(&log->l_icloglock);
		return XFS_ERROR(EIO);
	}
	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
	       iclog->ic_state == XLOG_STATE_WANT_SYNC);

	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		/* update tail before writing to iclog */
		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
		sync++;
		iclog->ic_state = XLOG_STATE_SYNCING;
		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
		xlog_verify_tail_lsn(log, iclog, tail_lsn);
		/* cycle incremented when incrementing curr_block */
	}
	spin_unlock(&log->l_icloglock);

	/*
	 * We let the log lock go, so it's possible that we hit a log I/O
	 * error or some other SHUTDOWN condition that marks the iclog
	 * as XLOG_STATE_IOERROR before the release via xlog_sync().
	 * However, this iclog has its own reference to the buffer, so
	 * the write is issued regardless; the completion path handles
	 * the error.
	 */
	if (sync)
		return xlog_sync(log, iclog);
	return 0;
}
2819
2820
2821
2822
2823
2824
2825
2826
2827
/*
 * Move the current ACTIVE iclog into the WANT_SYNC state and advance
 * the log head (l_curr_block/l_curr_cycle) past it, making the next
 * iclog in the ring the new head.
 *
 * @eventual_size is the size the iclog will grow to before it is
 * written; 0 means "use the current offset".  Caller holds
 * l_icloglock.
 */
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size)
{
	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
	if (!eventual_size)
		eventual_size = iclog->ic_offset;
	iclog->ic_state = XLOG_STATE_WANT_SYNC;
	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
	log->l_prev_block = log->l_curr_block;
	log->l_prev_cycle = log->l_curr_cycle;

	/* roll log?: ic_offset changed later */
	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);

	/* Round up to next log-sunit */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
	    log->l_mp->m_sb.sb_logsunit > 1) {
		__uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
	}

	if (log->l_curr_block >= log->l_logBBsize) {
		log->l_curr_cycle++;
		/*
		 * a cycle number equal to the header magic is skipped -
		 * presumably so log data can't be confused with a record
		 * header during recovery; see XLOG_HEADER_MAGIC_NUM use
		 * in xlog_sync()'s split path.
		 */
		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
			log->l_curr_cycle++;
		log->l_curr_block -= log->l_logBBsize;
		ASSERT(log->l_curr_block >= 0);
	}
	ASSERT(iclog == log->l_iclog);
	log->l_iclog = iclog->ic_next;
}
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
/*
 * Force the in-core log to disk.
 *
 * @mp:          mount whose log is to be flushed
 * @flags:       XFS_LOG_SYNC to wait for the flush to complete
 * @log_flushed: out (optional) - set to 1 if a flush was initiated
 *
 * Pushes the CIL first, then ensures the current iclog is on its way
 * to disk.  If the head iclog is ACTIVE with data, we claim a
 * reference, switch it to WANT_SYNC and release it (which issues the
 * write); if it is empty we may need to wait on the previous iclog
 * instead.  With XFS_LOG_SYNC, sleeps on ic_force_wait until the
 * iclog's callbacks have run.  Returns 0 or EIO on shutdown.
 */
int
_xfs_log_force(
	struct xfs_mount	*mp,
	uint			flags,
	int			*log_flushed)
{
	struct xlog		*log = mp->m_log;
	struct xlog_in_core	*iclog;
	xfs_lsn_t		lsn;

	XFS_STATS_INC(xs_log_force);

	xlog_cil_force(log);

	spin_lock(&log->l_icloglock);

	iclog = log->l_iclog;
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		spin_unlock(&log->l_icloglock);
		return XFS_ERROR(EIO);
	}

	/*
	 * If the head iclog is not being written right now, it is either
	 * ACTIVE or DIRTY and we may have work to do to get it to disk.
	 */
	if (iclog->ic_state == XLOG_STATE_ACTIVE ||
	    iclog->ic_state == XLOG_STATE_DIRTY) {
		/*
		 * If the head iclog is dirty, or active but empty, the
		 * interesting iclog is the previous one: if that one is
		 * also quiescent there is nothing to force, otherwise we
		 * may need to wait for its I/O to complete.
		 */
		if (iclog->ic_state == XLOG_STATE_DIRTY ||
		    (atomic_read(&iclog->ic_refcnt) == 0
		     && iclog->ic_offset == 0)) {
			iclog = iclog->ic_prev;
			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
			    iclog->ic_state == XLOG_STATE_DIRTY)
				goto no_sleep;
			else
				goto maybe_sleep;
		} else {
			if (atomic_read(&iclog->ic_refcnt) == 0) {
				/*
				 * We are the only one with access to this
				 * iclog.  Flush it out now.  There should
				 * be a roundoff of zero to show that someone
				 * has already taken care of the roundoff from
				 * the previous sync.
				 */
				atomic_inc(&iclog->ic_refcnt);
				lsn = be64_to_cpu(iclog->ic_header.h_lsn);
				xlog_state_switch_iclogs(log, iclog, 0);
				spin_unlock(&log->l_icloglock);

				if (xlog_state_release_iclog(log, iclog))
					return XFS_ERROR(EIO);

				if (log_flushed)
					*log_flushed = 1;
				spin_lock(&log->l_icloglock);
				/* only sleep if the iclog we released is
				 * still the same record and not yet dirty */
				if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
				    iclog->ic_state != XLOG_STATE_DIRTY)
					goto maybe_sleep;
				else
					goto no_sleep;
			} else {
				/*
				 * Someone else is writing to this iclog.
				 * Use its call to flush out the data.
				 * However, make sure it's flushed.
				 */
				xlog_state_switch_iclogs(log, iclog, 0);
				goto maybe_sleep;
			}
		}
	}

	/*
	 * By the time we come around again, the iclog could've been filled
	 * which would give it another lsn.  If we have a new lsn, just
	 * return because the relevant data has already been flushed.
	 */
maybe_sleep:
	if (flags & XFS_LOG_SYNC) {
		/*
		 * We must check if we're shutting down here, before
		 * we wait, while we're holding the l_icloglock.
		 * Then we check again after waking up, in case our
		 * sleep was disturbed by a bad news.
		 */
		if (iclog->ic_state & XLOG_STATE_IOERROR) {
			spin_unlock(&log->l_icloglock);
			return XFS_ERROR(EIO);
		}
		XFS_STATS_INC(xs_log_force_sleep);
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
		/*
		 * No need to grab the log lock here since we're
		 * only deciding whether or not to return EIO
		 * and the memory read should be atomic.
		 */
		if (iclog->ic_state & XLOG_STATE_IOERROR)
			return XFS_ERROR(EIO);
		if (log_flushed)
			*log_flushed = 1;
	} else {

no_sleep:
		spin_unlock(&log->l_icloglock);
	}
	return 0;
}
3003
3004
3005
3006
3007
3008
3009void
3010xfs_log_force(
3011 xfs_mount_t *mp,
3012 uint flags)
3013{
3014 int error;
3015
3016 trace_xfs_log_force(mp, 0);
3017 error = _xfs_log_force(mp, flags, NULL);
3018 if (error)
3019 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3020}
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
/*
 * Force the in-core log out to disk up to and including @lsn.
 *
 * @mp:          mount whose log is to be flushed
 * @lsn:         LSN that must reach disk; must be non-zero
 * @flags:       XFS_LOG_SYNC to wait for the flush to complete
 * @log_flushed: out (optional) - set to 1 if a flush was initiated
 *
 * Pushes the CIL up to @lsn, then finds the iclog containing that LSN
 * and makes sure it is written.  If that iclog is still ACTIVE and the
 * previous iclog is mid-sync, we sleep once on the previous iclog's
 * write completion before forcing, to give back-to-back forces a
 * chance to coalesce.  Returns 0 or EIO on shutdown.
 */
int
_xfs_log_force_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn,
	uint			flags,
	int			*log_flushed)
{
	struct xlog		*log = mp->m_log;
	struct xlog_in_core	*iclog;
	int			already_slept = 0;

	ASSERT(lsn != 0);

	XFS_STATS_INC(xs_log_force);

	lsn = xlog_cil_force_lsn(log, lsn);
	if (lsn == NULLCOMMITLSN)
		return 0;

try_again:
	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		spin_unlock(&log->l_icloglock);
		return XFS_ERROR(EIO);
	}

	do {
		/* walk the ring looking for the iclog holding @lsn */
		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
			iclog = iclog->ic_next;
			continue;
		}

		if (iclog->ic_state == XLOG_STATE_DIRTY) {
			/* already on disk and completed */
			spin_unlock(&log->l_icloglock);
			return 0;
		}

		if (iclog->ic_state == XLOG_STATE_ACTIVE) {
			/*
			 * We sleep here if we haven't already slept (e.g.
			 * this is the first time we've looked at the correct
			 * iclog buf) and the buffer before us is going to
			 * be sync'ed. The reason for this is that if we
			 * are doing sync transactions here, by waiting for
			 * the previous I/O to complete, we can allow a few
			 * more transactions into this iclog before we close
			 * it down.
			 *
			 * Otherwise, we mark the buffer WANT_SYNC, and bump
			 * up the refcnt so we can release the log (which
			 * drops the ref count).  The state switch keeps new
			 * transaction commits from using this buffer.  When
			 * the current commits finish writing into the buffer,
			 * the refcount will drop to zero and the buffer will
			 * go out then.
			 */
			if (!already_slept &&
			    (iclog->ic_prev->ic_state &
			     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
				ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));

				XFS_STATS_INC(xs_log_force_sleep);

				xlog_wait(&iclog->ic_prev->ic_write_wait,
							&log->l_icloglock);
				if (log_flushed)
					*log_flushed = 1;
				already_slept = 1;
				goto try_again;
			}
			atomic_inc(&iclog->ic_refcnt);
			xlog_state_switch_iclogs(log, iclog, 0);
			spin_unlock(&log->l_icloglock);
			if (xlog_state_release_iclog(log, iclog))
				return XFS_ERROR(EIO);
			if (log_flushed)
				*log_flushed = 1;
			spin_lock(&log->l_icloglock);
		}

		if ((flags & XFS_LOG_SYNC) && /* sleep */
		    !(iclog->ic_state &
		      (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
			/*
			 * Don't wait on completion if we know that we've
			 * gotten a log write error.
			 */
			if (iclog->ic_state & XLOG_STATE_IOERROR) {
				spin_unlock(&log->l_icloglock);
				return XFS_ERROR(EIO);
			}
			XFS_STATS_INC(xs_log_force_sleep);
			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
			/*
			 * No need to grab the log lock here since we're
			 * only deciding whether or not to return EIO
			 * and the memory read should be atomic.
			 */
			if (iclog->ic_state & XLOG_STATE_IOERROR)
				return XFS_ERROR(EIO);

			if (log_flushed)
				*log_flushed = 1;
		} else {		/* just return */
			spin_unlock(&log->l_icloglock);
		}

		return 0;
	} while (iclog != log->l_iclog);

	/* no iclog currently holds @lsn - nothing to force */
	spin_unlock(&log->l_icloglock);
	return 0;
}
3151
3152
3153
3154
3155
3156
3157void
3158xfs_log_force_lsn(
3159 xfs_mount_t *mp,
3160 xfs_lsn_t lsn,
3161 uint flags)
3162{
3163 int error;
3164
3165 trace_xfs_log_force(mp, lsn);
3166 error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
3167 if (error)
3168 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3169}
3170
3171
3172
3173
3174
3175STATIC void
3176xlog_state_want_sync(
3177 struct xlog *log,
3178 struct xlog_in_core *iclog)
3179{
3180 assert_spin_locked(&log->l_icloglock);
3181
3182 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3183 xlog_state_switch_iclogs(log, iclog, 0);
3184 } else {
3185 ASSERT(iclog->ic_state &
3186 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3187 }
3188}
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
/*
 * Drop a reference to a log ticket; the ticket is freed back to its
 * zone when the last reference goes away.
 */
void
xfs_log_ticket_put(
	xlog_ticket_t	*ticket)
{
	ASSERT(atomic_read(&ticket->t_ref) > 0);
	if (atomic_dec_and_test(&ticket->t_ref))
		kmem_zone_free(xfs_log_ticket_zone, ticket);
}
3209
/*
 * Take an additional reference to a log ticket.  Returns the ticket
 * for call-chaining convenience.
 */
xlog_ticket_t *
xfs_log_ticket_get(
	xlog_ticket_t	*ticket)
{
	ASSERT(atomic_read(&ticket->t_ref) > 0);
	atomic_inc(&ticket->t_ref);
	return ticket;
}
3218
3219
3220
3221
/*
 * Allocate and initialise a log ticket.
 *
 * @log:         the log the ticket reserves space in
 * @unit_bytes:  payload bytes the caller asked to reserve per use
 * @cnt:         number of uses (permanent transactions reserve several)
 * @client:      log client id stored in op headers (e.g. transaction)
 * @permanent:   set XLOG_TIC_PERM_RESERV for a permanent reservation
 * @alloc_flags: kmem allocation flags
 *
 * The requested unit is padded with all the per-use overhead: op
 * headers (including one per region and per iclog split), the
 * transaction header, record headers for every iclog the unit can
 * span, and worst-case roundoff.  Returns the ticket with a single
 * reference held, or NULL if allocation failed.
 */
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog		*log,
	int			unit_bytes,
	int			cnt,
	char			client,
	bool			permanent,
	xfs_km_flags_t		alloc_flags)
{
	struct xlog_ticket	*tic;
	uint			num_headers;
	int			iclog_space;

	tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
	if (!tic)
		return NULL;

	/*
	 * Permanent reservations have up to 'cnt'-1 active log operations
	 * in the log.  A unit in this case is the amount of space for one
	 * of these log operations.  Normal reservations have a cnt of 1
	 * and their unit amount is the total amount of space required.
	 * The accounting below pads unit_bytes with every piece of
	 * overhead a single use of the reservation can incur.
	 */

	/* for trans header */
	unit_bytes += sizeof(xlog_op_header_t);
	unit_bytes += sizeof(xfs_trans_header_t);

	/* for start-rec */
	unit_bytes += sizeof(xlog_op_header_t);

	/*
	 * Each region gets its own op header; a region may additionally
	 * be split across iclogs, each split consuming another op header
	 * (see xlog_write_setup_copy()).  The worst case is one extra
	 * op header per iclog the unit can span, computed below.
	 */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	num_headers = howmany(unit_bytes, iclog_space);

	/* for split-recs - ophdrs added when data split over LRs */
	unit_bytes += sizeof(xlog_op_header_t) * num_headers;

	/* add extra header reservations if we overrun */
	while (!num_headers ||
	       howmany(unit_bytes, iclog_space) > num_headers) {
		unit_bytes += sizeof(xlog_op_header_t);
		num_headers++;
	}
	unit_bytes += log->l_iclog_hsize * num_headers;

	/* for commit-rec LR header - note: padding will subsume the ophdr */
	unit_bytes += log->l_iclog_hsize;

	/* for roundoff padding for transaction data and one for commit record */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
	    log->l_mp->m_sb.sb_logsunit > 1) {
		/* log su roundoff */
		unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
	} else {
		/* BB roundoff */
		unit_bytes += 2*BBSIZE;
        }

	atomic_set(&tic->t_ref, 1);
	tic->t_task		= current;
	INIT_LIST_HEAD(&tic->t_queue);
	tic->t_unit_res		= unit_bytes;
	tic->t_curr_res		= unit_bytes;
	tic->t_cnt		= cnt;
	tic->t_ocnt		= cnt;
	tic->t_tid		= random32();
	tic->t_clientid		= client;
	tic->t_flags		= XLOG_TIC_INITED;
	tic->t_trans_type	= 0;
	if (permanent)
		tic->t_flags |= XLOG_TIC_PERM_RESERV;

	xlog_tic_reset_res(tic);

	return tic;
}
3339
3340
3341
3342
3343
3344
3345
3346
3347#if defined(DEBUG)
3348
3349
3350
3351
3352
3353void
3354xlog_verify_dest_ptr(
3355 struct xlog *log,
3356 char *ptr)
3357{
3358 int i;
3359 int good_ptr = 0;
3360
3361 for (i = 0; i < log->l_iclog_bufs; i++) {
3362 if (ptr >= log->l_iclog_bak[i] &&
3363 ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3364 good_ptr++;
3365 }
3366
3367 if (!good_ptr)
3368 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3369}
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
/*
 * Check to make sure the grant write head didn't just overlap the tail.
 * If the cycles are the same, we can't be overlapping.  Otherwise, make
 * sure that the cycles differ by exactly one and check the byte count.
 *
 * This check is run unlocked, so can give false positives.  Rather than
 * assert on failures, use a warn-once flag and a panic tag to allow the
 * admin to determine if they want to panic the machine when such an
 * error occurs.  For debug kernels this will have the same effect as
 * using an assert but, unlike an assert, it can be turned off at
 * runtime.
 */
STATIC void
xlog_verify_grant_tail(
	struct xlog	*log)
{
	int		tail_cycle, tail_blocks;
	int		cycle, space;

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
	if (tail_cycle != cycle) {
		if (cycle - 1 != tail_cycle &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: cycle - 1 != tail_cycle", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}

		if (space > BBTOB(tail_blocks) &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: space > BBTOB(tail_blocks)", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}
	}
}
3407
3408
/* check if it will fit */
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn)
{
	int	blocks;

	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
		/*
		 * Head and tail are in the same cycle: the free region
		 * runs from the previous head block to the end of the
		 * log plus the wrapped region up to the tail block.
		 */
		blocks =
		    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
		if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	} else {
		/* head must be exactly one cycle ahead of the tail */
		ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);

		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);

		/* free space is simply the gap from head up to the tail */
		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
		if (blocks < BTOBB(iclog->ic_offset) + 1)
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	}
}
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. Length in log record header is correct according to the
 *	   individual operation headers within record.
 *
 * When @syncing is true, op headers that happen to straddle a 512-byte
 * basic block boundary have had their first word stashed in the cycle
 * data of the record header (the in-place word was overwritten with the
 * cycle number), so the real value must be fetched from h_cycle_data /
 * the extended headers instead.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	boolean_t		syncing)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	xfs_caddr_t		ptr;
	xfs_caddr_t		base_ptr;
	__psint_t		field_offset;
	__uint8_t		clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i=0; i < log->l_iclog_bufs; i++) {
		if (icptr == NULL)
			xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
		icptr = icptr->ic_next;
	}
	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	/* no basic block in the record may start with the magic number */
	ptr = (xfs_caddr_t) &iclog->ic_header;
	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
	     ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	ptr = iclog->ic_datap;
	base_ptr = ptr;
	ophead = (xlog_op_header_t *)ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = (xlog_op_header_t *)ptr;

		/* clientid is only 1 byte */
		field_offset = (__psint_t)
			       ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
			/* field not on a BB boundary: read it in place */
			clientid = ophead->oh_clientid;
		} else {
			/* field was stashed in the (extended) record header */
			idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op 0x%p offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length */
		field_offset = (__psint_t)
			       ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((__psint_t)&ophead->oh_len -
				    (__psint_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
}
3541#endif
3542
3543
3544
3545
3546STATIC int
3547xlog_state_ioerror(
3548 struct xlog *log)
3549{
3550 xlog_in_core_t *iclog, *ic;
3551
3552 iclog = log->l_iclog;
3553 if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
3554
3555
3556
3557
3558 ic = iclog;
3559 do {
3560 ic->ic_state = XLOG_STATE_IOERROR;
3561 ic = ic->ic_next;
3562 } while (ic != iclog);
3563 return 0;
3564 }
3565
3566
3567
3568 return 1;
3569}
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	b. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up, and be told the bad news.
 *	c. nothing new gets queued up after (a) and (b) are done.
 *	d. if !logerror, flush the iclogs to disk, then seal them off
 *	   for business.
 *
 * Note: for the !logerror case we need to flush the regions held in
 * memory out to the iclogs before flushing them to disk.  This needs to
 * be done before the log is marked as shutdown, otherwise the iclog
 * flush will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	struct xlog	*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			XFS_BUF_DONE(mp->m_sb_bp);
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}
	retval = 0;

	/*
	 * Flush the in memory commit item list before marking the log as
	 * being shut down. We need to do it in this order to ensure that
	 * completed transactions are flushed to disk with the
	 * _xfs_log_force() call below.
	 */
	if (!logerror)
		xlog_cil_force(log);

	/*
	 * Mark the filesystem as being in a shutdown state and wake
	 * everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		XFS_BUF_DONE(mp->m_sb_bp);

	/*
	 * This flag is sort of redundant because of the mount flag, but
	 * it's good to maintain the separation between the log and the
	 * rest of XFS.
	 */
	log->l_flags |= XLOG_IO_ERROR;

	/*
	 * If we hit a log error, we want to mark all the iclogs IOERROR
	 * while we're still holding the loglock.
	 */
	if (logerror)
		retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this.
	 * That means we have to wake up everybody queued up on reserveq
	 * as well as writeq.  Nothing should get enqueued after this
	 * because the shutdown flag is now set.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
		ASSERT(!logerror);
		/*
		 * Force the incore logs to disk before shutting the
		 * log down completely.
		 */
		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);

		spin_lock(&log->l_icloglock);
		retval = xlog_state_ioerror(log);
		spin_unlock(&log->l_icloglock);
	}

	/*
	 * Wake up everybody waiting on xfs_log_force.  Callback all log
	 * item committed functions as if the log writes were completed.
	 */
	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);

#ifdef XFSERRORDEBUG
	{
		xlog_in_core_t	*iclog;

		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_callback == 0);
			iclog = iclog->ic_next;
		} while (iclog != log->l_iclog);
		spin_unlock(&log->l_icloglock);
	}
#endif

	return retval;
}
3698
3699STATIC int
3700xlog_iclogs_empty(
3701 struct xlog *log)
3702{
3703 xlog_in_core_t *iclog;
3704
3705 iclog = log->l_iclog;
3706 do {
3707
3708
3709
3710 if (iclog->ic_header.h_num_logops)
3711 return 0;
3712 iclog = iclog->ic_next;
3713 } while (iclog != log->l_iclog);
3714 return 1;
3715}
3716