/*
 * drbd_req.c -- DRBD request processing
 *
 * Request object life cycle and state changes.
 */
#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"

static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));

	req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
	req->private_bio->bi_private = req;
	req->private_bio->bi_end_io = drbd_request_endio;

	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
		      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one completion reference; dropped at the end of drbd_send_and_submit() */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete.  */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
	    atomic_read(&req->completion_ref) ||
	    (s & RQ_LOCAL_PENDING) ||
	    ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* remove it from the transfer log */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * interval tree */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Special case: we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all. */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* We may only call drbd_al_complete_io() once the request
		 * is RQ_NET_DONE: if this was a mirror write, the extent
		 * could otherwise be dropped from the activity log before
		 * the peer has actually written the data. */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, &drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

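/* must hold resource->req_lock */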
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	m->bio->bi_status = errno_to_blk_status(m->error);
	bio_endio(m->bio);
	dec_ap_bio(device);
}

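/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error. */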
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock),
	 *	not yet acknowledged by the peer,
	 *	not yet completed by the local io subsystem.
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks. */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put it the other way around,
	 * only report failure when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number. */
	if (op_is_write(bio_op(req->master_bio)) &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	bio_end_io_acct(req->master_bio, req->start_jif);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * read-ahead may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * Either way, it no longer waits for master completion. */
	list_del_init(&req->req_pending_master_completion);
}

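/* still holds resource->req_lock */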
static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!put)
		return;

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return;

	drbd_req_complete(req, m);

	/* local completion may still come in later,
	 * we need to keep the req object around. */
	if (req->rq_state & RQ_LOCAL_ABORTED)
		return;

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return;
	}

	kref_put(&req->kref, drbd_req_destroy);
}

static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}

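/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */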
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	kref_get(&req->kref);

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the ack_receiver thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (req->rq_state & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			kref_put(&req->kref, drbd_req_destroy);
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			kref_put(&req->kref, drbd_req_destroy);
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	drbd_req_put_completion_ref(req, m, c_put);
	kref_put(&req->kref, drbd_req_destroy);
}

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
		  (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
		  (unsigned long long)req->i.sector,
		  req->i.size >> 9,
		  bdevname(device->ldev->backing_bdev, b));
}

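/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected),
 * and is it also still "PENDING"?
 * If it is a protocol A write, but no longer RQ_NET_PENDING, a neg-ack
 * came in first, and we must not set RQ_NET_OK. */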
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}

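/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *  enforces that it is all in this one place, where it is easier to audit,
 *  it makes it obvious that whatever "event" "happens" to a request should
 *  be the same for all requests, big or small. */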
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__, __LINE__);
		break;

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		fallthrough;
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_OP_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_destroy() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_destroy() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE: registering the interval and queueing the work item
		 * must happen under the same lock that protects the transfer
		 * log, otherwise the request could be missed during cleanup
		 * after connection loss. */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the sender queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
		fallthrough;
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * We keep the request in the transfer log either way, to
		 * cater for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or
		 * P_RECV_ACK before the connection loss; only the barrier ack
		 * (or the local completion) was missing when we suspended.
		 * For those we fall through to BARRIER_ACKED below. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* not yet acked: queue it (again) for the sender */
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			}
			break;
		}
		fallthrough;	/* to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	}

	return rv;
}

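/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits. */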
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = get_capacity(device->vdisk);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}

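/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */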
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	for (;;) {
		drbd_for_each_overlap(i, &device->write_requests, sector, size) {
			/* Ignore, if already completed to upper layers. */
			if (i->completed)
				continue;
			/* Handle the first found overlap.  After the schedule
			 * we have to restart the tree walk. */
			break;
		}
		if (!i)
			break;

		/* Indicate to wake up device->misc_wait on progress.  */
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}

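/* called within req_lock */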
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}

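/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */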
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

bool drbd_should_do_remote(union drbd_dev_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}

static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. */
}

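/* returns the number of connections expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */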
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes.  Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags)
{
	int err = drbd_issue_discard_or_zero_out(req->device,
				req->i.sector, req->i.size >> 9, flags);
	if (err)
		req->private_bio->bi_status = BLK_STS_IOERR;
	bio_endio(req->private_bio);
}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	unsigned int type;

	if (bio_op(bio) != REQ_OP_READ)
		type = DRBD_FAULT_DT_WR;
	else if (bio->bi_opf & REQ_RAHEAD)
		type = DRBD_FAULT_DT_RA;
	else
		type = DRBD_FAULT_DT_RD;

	bio_set_dev(bio, device->ldev->backing_bdev);

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		if (drbd_insert_fault(device, type))
			bio_io_error(bio);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
			drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT |
			    ((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM));
		else if (bio_op(bio) == REQ_OP_DISCARD)
			drbd_process_discard_or_zeroes_req(req, EE_TRIM);
		else
			submit_bio_noacct(bio);
		put_ldev(device);
	} else
		bio_io_error(bio);
}

static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
	/* do_submit() may sleep internally on al_wait, too */
	wake_up(&device->al_wait);
}

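/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */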
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio->bi_status = BLK_STS_RESOURCE;
		bio_endio(bio);
		return ERR_PTR(-ENOMEM);
	}

	/* Update disk stats */
	req->start_jif = bio_start_io_acct(req->master_bio);

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* process discards always from our submitter thread */
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
	    bio_op(bio) == REQ_OP_DISCARD)
		goto queue_for_submitter_thread;

	if (rw == WRITE && req->private_bio && req->i.size
	&& !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i))
			goto queue_for_submitter_thread;
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}
	return req;

 queue_for_submitter_thread:
	atomic_inc(&device->ap_actlog_cnt);
	drbd_queue_write(device, req);
	return NULL;
}

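/* Require at least one path to current data.
 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
 * We would not allow to read what was written,
 * we would not have bumped the data generation uuids,
 * we would cause data divergence for all the wrong reasons.
 * If we don't see at least one D_UP_TO_DATE, we will fail this request. */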
static bool may_do_writes(struct drbd_device *device)
{
	const union drbd_dev_state s = device->state;
	return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}

struct drbd_plug_cb {
	struct blk_plug_cb cb;
	struct drbd_request *most_recent_req;
};

static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
	struct drbd_resource *resource = plug->cb.data;
	struct drbd_request *req = plug->most_recent_req;

	kfree(cb);
	if (!req)
		return;

	spin_lock_irq(&resource->req_lock);
	/* In case the sender did not process it yet, raise the flag to
	 * have it followed with P_UNPLUG_REMOTE just after. */
	req->rq_state |= RQ_UNPLUG;
	/* but also queue a generic unplug */
	drbd_queue_unplug(req->device);
	kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);
}

static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
{
	/* A lot of text to say
	 * return (struct drbd_plug_cb*)blk_check_plugged(); */
	struct drbd_plug_cb *plug;
	struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));

	if (cb)
		plug = container_of(cb, struct drbd_plug_cb, cb);
	else
		plug = NULL;
	return plug;
}

static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
{
	struct drbd_request *tmp = plug->most_recent_req;
	/* Will be sent to some peer.
	 * Remember to tag it with UNPLUG_REMOTE on unplug */
	kref_get(&req->kref);
	plug->most_recent_req = req;
	if (tmp)
		kref_put(&tmp->kref, drbd_req_destroy);
}

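/* Decide whether this request goes to the local disk, the peer, or both,
 * register it on the relevant lists, and submit any local part.
 * Called with one completion reference held, which is dropped at the end;
 * see drbd_request_prepare(). */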
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_data_dir(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and possibly stop all spinning */
		maybe_pull_ahead(device);
	}

	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ early, if we can not serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size != 0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (req->private_bio && !may_do_writes(device)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
			goto nodata;
		}
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	if (no_remote == false) {
		struct drbd_plug_cb *plug = drbd_check_plugged(resource);
		if (plug)
			drbd_update_plug(plug, req);
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		req->pre_submit_jif = jiffies;
		list_add_tail(&req->req_pending_local,
			&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we can not simply free it, we must go through
		 * drbd_req_put_completion_ref() */
	}

out:
	drbd_req_put_completion_ref(req, &m, 1);
	spin_unlock_irq(&resource->req_lock);

	/* Even though above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit
	 * (e.g. remote read), req may already be invalid now.
	 * That's why we cannot check on req->private_bio. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio)
{
	struct drbd_request *req = drbd_request_prepare(device, bio);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}

static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
	struct blk_plug plug;
	struct drbd_request *req, *tmp;

	blk_start_plug(&plug);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE
		&& req->private_bio && req->i.size
		&& !test_bit(AL_SUSPENDED, &device->flags)) {
			if (!drbd_al_begin_io_fastpath(device, &req->i))
				continue;	/* needs an AL transaction, keep it on the list */

			req->rq_state |= RQ_IN_ACT_LOG;
			req->in_actlog_jif = jiffies;
			atomic_dec(&device->ap_actlog_cnt);
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
	blk_finish_plug(&plug);
}

static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{
	struct drbd_request *req;
	int wake = 0;
	int err;

	spin_lock_irq(&device->al_lock);
	while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
		err = drbd_al_begin_io_nonblock(device, &req->i);
		if (err == -ENOBUFS)	/* transaction full, stop here */
			break;
		if (err == -EBUSY)	/* extent is busy right now */
			wake = 1;
		if (err)
			list_move_tail(&req->tl_requests, later);
		else
			list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&device->al_lock);
	if (wake)
		wake_up(&device->al_wait);
	return !list_empty(pending);
}

static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
	struct blk_plug plug;
	struct drbd_request *req;

	blk_start_plug(&plug);
	while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
		atomic_dec(&device->ap_actlog_cnt);
		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
	blk_finish_plug(&plug);
}

void do_submit(struct work_struct *ws)
{
	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
	LIST_HEAD(incoming);	/* from drbd_queue_write() */
	LIST_HEAD(pending);	/* to be submitted after the next AL transaction commit */
	LIST_HEAD(busy);	/* blocked by resync or an unusable AL extent */

	/* grab new incoming requests */
	spin_lock_irq(&device->resource->req_lock);
	list_splice_tail_init(&device->submit.writes, &incoming);
	spin_unlock_irq(&device->resource->req_lock);

	for (;;) {
		DEFINE_WAIT(wait);

		/* move used-to-be-busy back to front of incoming */
		list_splice_init(&busy, &incoming);
		submit_fast_path(device, &incoming);
		if (list_empty(&incoming))
			break;

		for (;;) {
			prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

			list_splice_init(&busy, &incoming);
			prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
			if (!list_empty(&pending))
				break;

			schedule();

			/* If all currently "hot" activity log extents are kept
			 * busy by incoming requests, we still must not totally
			 * starve new requests to "cold" extents.
			 * Something left on &incoming means there had not been
			 * enough update slots available.
			 * Try again now, without looking for new requests,
			 * effectively blocking all new requests until we made
			 * at least _some_ progress with what we currently have.
			 */
			if (!list_empty(&incoming))
				continue;

			/* Nothing moved to pending, but nothing left
			 * on incoming: all moved to busy!
			 * Grab new and iterate. */
			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &incoming);
			spin_unlock_irq(&device->resource->req_lock);
		}
		finish_wait(&device->al_wait, &wait);

		/* If the transaction was full, before all incoming requests
		 * had been processed, skip ahead to commit, and iterate
		 * without splicing in more incoming requests from upper layers.
		 *
		 * Else, if all incoming have been processed,
		 * they have become either "pending" (to be submitted after
		 * next transaction commit) or "busy" (blocked by resync).
		 *
		 * Maybe more was queued, while we prepared the transaction?
		 * Try to stuff those into this transaction as well.
		 * Be strictly non-blocking here,
		 * we already have something to commit.
		 *
		 * Commit if we don't make any more progress.
		 */
		while (list_empty(&incoming)) {
			LIST_HEAD(more_pending);
			LIST_HEAD(more_incoming);
			bool made_progress;

			/* It is ok to peek outside the spinlock here:
			 * if we race with a new writer, the submit worker
			 * will simply be woken up again. */
			if (list_empty(&device->submit.writes))
				break;

			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &more_incoming);
			spin_unlock_irq(&device->resource->req_lock);

			if (list_empty(&more_incoming))
				break;

			made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

			list_splice_tail_init(&more_pending, &pending);
			list_splice_tail_init(&more_incoming, &incoming);
			if (!made_progress)
				break;
		}

		drbd_al_begin_io_commit(device);
		send_and_submit_pending(device, &pending);
	}
}

blk_qc_t drbd_submit_bio(struct bio *bio)
{
	struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;

	blk_queue_split(&bio);

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

	inc_ap_bio(device);
	__drbd_make_request(device, bio);
	return BLK_QC_T_NONE;
}

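/* Helper for request_timer_fn(): has the oldest peer request been
 * outstanding for longer than the effective network timeout? */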
static bool net_timeout_reached(struct drbd_request *net_req,
		struct drbd_connection *connection,
		unsigned long now, unsigned long ent,
		unsigned int ko_count, unsigned int timeout)
{
	struct drbd_device *device = net_req->device;

	if (!time_after(now, net_req->pre_send_jif + ent))
		return false;

	if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
		return false;

	if (net_req->rq_state & RQ_NET_PENDING) {
		drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return true;
	}

	/* We received an ACK already (or are using protocol A),
	 * but are waiting for the epoch closing barrier ack.
	 * Check if we sent the barrier already.  We should not blame the peer
	 * for being unresponsive, if we did not even ask it yet. */
	if (net_req->epoch == connection->send.current_epoch_nr) {
		drbd_warn(device,
			"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return false;
	}

	/* Worst case: we may have been blocked for whatever reason, then
	 * suddenly are able to send a lot of requests (and epoch separating
	 * barriers) in quick succession.
	 * The timestamp of the net_req may be much too old and not correspond
	 * to the sending time of the relevant unack'ed barrier packet, so
	 * it could trigger a spurious timeout.  Evaluate the time since the
	 * last sent barrier instead. */
	if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
		drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			connection->send.last_sent_barrier_jif, now,
			jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
		return true;
	}
	return false;
}

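/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   resp. the local disk,
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (resp. disk was attached)
 *   for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
 */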
void request_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, request_timer);
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
	struct net_conf *nc;
	unsigned long oldest_submit_jif;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;
	unsigned int ko_count = 0, timeout = 0;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
		ko_count = nc->ko_count;
		timeout = nc->timeout;
	}

	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(device);
	}
	rcu_read_unlock();

	ent = timeout * HZ/10 * ko_count;
	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;
	nt = now + et;

	spin_lock_irq(&device->resource->req_lock);
	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);

	/* maybe the oldest request waiting for the peer is in fact still
	 * blocking in tcp sendmsg.  That's ok, though: that is handled via the
	 * socket send timeout, requesting a ping, and bumping ko-count. */

	/* check the oldest request we did successfully send,
	 * but which is still waiting for an ACK. */
	req_peer = connection->req_ack_pending;

	/* if we don't have such a request (e.g. protocol A)
	 * check the oldest request which is still waiting on its epoch
	 * closing barrier ack. */
	if (!req_peer)
		req_peer = connection->req_not_net_done;

	/* evaluate the oldest peer request only in one timer! */
	if (req_peer && req_peer->device != device)
		req_peer = NULL;

	/* do we have something to evaluate? */
	if (req_peer == NULL && req_write == NULL && req_read == NULL)
		goto out;

	oldest_submit_jif =
		(req_write && req_read)
		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
		: req_write ? req_write->pre_submit_jif
		: req_read ? req_read->pre_submit_jif : now;

	if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);

	if (dt && oldest_submit_jif != now &&
		 time_after(now, oldest_submit_jif + dt) &&
		!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
	}

	/* Reschedule timer for the nearest not already expired timeout.
	 * Fallback to now + min(effective network timeout, disk timeout). */
	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
		? req_peer->pre_send_jif + ent : now + et;
	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
		? oldest_submit_jif + dt : now + et;
	nt = time_before(ent, dt) ? ent : dt;
out:
	spin_unlock_irq(&device->resource->req_lock);
	mod_timer(&device->request_timer, nt);
}