/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
 */
#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

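/* Update disk stats at start of I/O request */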
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
			      &device->vdisk->part0);
}

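/* Update disk stats when completing request upwards */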
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	generic_end_io_acct(bio_data_dir(req->master_bio),
			    &device->vdisk->part0, req->start_jif);
}

static struct drbd_request *drbd_req_new(struct drbd_device *device,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete.  */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
	    atomic_read(&req->completion_ref) ||
	    (s & RQ_LOCAL_PENDING) ||
	    ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}
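
	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may be still empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */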
	list_del_init(&req->tl_requests);

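	/* finally remove the request from the conflict detection
	 * respective block id verification interval tree */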
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

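	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */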
	if (s & RQ_WRITE) {
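		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all. */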
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}
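
		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent. */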
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	m->bio->bi_error = m->error;
	bio_endio(m->bio);
	dec_ap_bio(device);
}
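
/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, m->error is set appropriately as well. */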
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int rw;
	int error, ok;
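
	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock),
	 *	not yet acknowledged by the peer,
	 *	not yet completed by the local io subsystem.
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks. */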
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	rw = bio_rw(req->master_bio);
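
	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */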
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt. */
	if (rw == WRITE &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);
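
	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-submitted from there.
	 * Do not touch it yet. */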
	if (!ok && rw == READ && !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}
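/* still holds resource->req_lock */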
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return 0;

	drbd_req_complete(req, m);

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return 0;
	}

	return 1;
}

static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}
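/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */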
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;
	int k_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the ack_receiver thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (req->rq_state & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		/* local completion may still come in later,
		 * we need to keep the req object around. */
		kref_get(&req->kref);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			++k_put;
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			++k_put;
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	if (k_put || c_put) {
		/* Completion does it's own kref_put.  If we are going to
		 * kref_sub below, we need req to be still around then. */
		int at_least = k_put + !!c_put;
		int refcount = atomic_read(&req->kref.refcount);
		if (refcount < at_least)
			drbd_err(device,
				"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
				s, req->rq_state, refcount, at_least);
	}

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	if (c_put)
		k_put += drbd_req_put_completion_ref(req, m, c_put);
	if (k_put)
		kref_sub(&req->kref, k_put, drbd_req_destroy);
}

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
		  (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
		  (unsigned long long)req->i.sector,
		  req->i.size >> 9,
		  bdevname(device->ldev->backing_bdev, b));
}
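/* Helper for HANDED_OVER_TO_NETWORK.
 * Is it a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected),
 * and still RQ_NET_PENDING? */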
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}
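/* Central request state engine: all state transitions of a drbd_request
 * funnel through this one switch statement, keyed by drbd_req_event.
 * Must be called while holding the resource req_lock. */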
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__, __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the sender queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	}

	return rv;
}
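/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */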
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(device->this_bdev);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}
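/*
 * complete_conflicting_writes  -  wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about.  Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */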
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	i = drbd_find_overlap(&device->write_requests, sector, size);
	if (!i)
		return;

	for (;;) {
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i = drbd_find_overlap(&device->write_requests, sector, size);
		if (!i)
			break;

		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}
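/* called within req_lock */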
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else  /* on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}
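/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */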
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

bool drbd_should_do_remote(union drbd_dev_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}

static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}
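/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */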
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes.  Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	const int rw = bio_rw(bio);

	bio->bi_bdev = device->ldev->backing_bdev;

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		if (drbd_insert_fault(device,
				      rw == WRITE ? DRBD_FAULT_DT_WR
				    : rw == READ  ? DRBD_FAULT_DT_RD
				    :               DRBD_FAULT_DT_RA))
			bio_io_error(bio);
		else
			generic_make_request(bio);
		put_ldev(device);
	} else
		bio_io_error(bio);
}

static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
	/* do_submit() may sleep internally on al_wait, too */
	wake_up(&device->al_wait);
}
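/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */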
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio->bi_error = -ENOMEM;
		bio_endio(bio);
		return ERR_PTR(-ENOMEM);
	}
	req->start_jif = start_jif;

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* Update disk stats */
	_drbd_start_io_acct(device, req);

	if (rw == WRITE && req->private_bio && req->i.size
	&& !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i)) {
			atomic_inc(&device->ap_actlog_cnt);
			drbd_queue_write(device, req);
			return NULL;
		}
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}

	return req;
}

static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_rw(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and potentially stop sending
		 * full data updates, but start sending "dirty bits" only. */
		maybe_pull_ahead(device);
	}

	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ early, if we can not serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size != 0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		req->pre_submit_jif = jiffies;
		list_add_tail(&req->req_pending_local,
			&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	if (drbd_req_put_completion_ref(req, &m, 1))
		kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);

	/* Even though above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit,
	 * we assume it is gone, and cannot reference it any longer. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}

static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
	struct drbd_request *req, *tmp;
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE /* rw != WRITE should not even end up here */
		    && req->private_bio && req->i.size
		    && !test_bit(AL_SUSPENDED, &device->flags)) {
			if (!drbd_al_begin_io_fastpath(device, &req->i))
				continue;

			req->rq_state |= RQ_IN_ACT_LOG;
			req->in_actlog_jif = jiffies;
			atomic_dec(&device->ap_actlog_cnt);
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}

static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{
	struct drbd_request *req, *tmp;
	int wake = 0;
	int err;

	spin_lock_irq(&device->al_lock);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		err = drbd_al_begin_io_nonblock(device, &req->i);
		if (err == -ENOBUFS)
			break;
		if (err == -EBUSY)
			wake = 1;
		if (err)
			list_move_tail(&req->tl_requests, later);
		else
			list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&device->al_lock);
	if (wake)
		wake_up(&device->al_wait);
	return !list_empty(pending);
}

void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
	struct drbd_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, pending, tl_requests) {
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
		atomic_dec(&device->ap_actlog_cnt);
		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}

void do_submit(struct work_struct *ws)
{
	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
	LIST_HEAD(incoming);	/* from drbd_make_request() */
	LIST_HEAD(pending);	/* to be submitted after next AL-transaction commit */
	LIST_HEAD(busy);	/* blocked by resync requests */

	/* grab new incoming requests */
	spin_lock_irq(&device->resource->req_lock);
	list_splice_tail_init(&device->submit.writes, &incoming);
	spin_unlock_irq(&device->resource->req_lock);

	for (;;) {
		DEFINE_WAIT(wait);

		/* move used-to-be-busy back to front of incoming */
		list_splice_init(&busy, &incoming);
		submit_fast_path(device, &incoming);
		if (list_empty(&incoming))
			break;

		for (;;) {
			prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

			list_splice_init(&busy, &incoming);
			prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
			if (!list_empty(&pending))
				break;

			schedule();

			/* If all currently "hot" activity log extents are kept busy by
			 * incoming requests, we still must not totally starve new
			 * requests to "cold" extents.
			 * Something left on &incoming means there had not been
			 * enough update slots available, and the activity log
			 * has been marked as "starving".
			 *
			 * Try again now, without looking for new requests,
			 * effectively blocking all new requests until we made
			 * at least _some_ progress with what we currently have.
			 */
			if (!list_empty(&incoming))
				continue;

			/* Nothing moved to pending, but nothing left
			 * on incoming: all moved to busy!
			 * Grab new and iterate. */
			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &incoming);
			spin_unlock_irq(&device->resource->req_lock);
		}
		finish_wait(&device->al_wait, &wait);

		/* If the transaction was full, before all incoming requests
		 * had been processed, skip ahead to commit, and iterate
		 * without splicing in more incoming requests from upper layers.
		 *
		 * Else, if all incoming have been processed,
		 * they have become either "pending" (to be submitted after
		 * next transaction commit) or "busy" (blocked by resync).
		 *
		 * Maybe more was queued, while we prepared the transaction?
		 * Try to stuff those into this transaction as well.
		 * Be strictly non-blocking here,
		 * no wait_event() in this loop anymore. */
		while (list_empty(&incoming)) {
			LIST_HEAD(more_pending);
			LIST_HEAD(more_incoming);
			bool made_progress;

			/* It is ok to look outside the lock,
			 * as long as we do the final splice in while holding the lock. */
			if (list_empty(&device->submit.writes))
				break;

			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &more_incoming);
			spin_unlock_irq(&device->resource->req_lock);

			if (list_empty(&more_incoming))
				break;

			made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

			list_splice_tail_init(&more_pending, &pending);
			list_splice_tail_init(&more_incoming, &incoming);
			if (!made_progress)
				break;
		}

		drbd_al_begin_io_commit(device);
		send_and_submit_pending(device, &pending);
	}
}

blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_device *device = (struct drbd_device *) q->queuedata;
	unsigned long start_jif;

	blk_queue_split(q, &bio, q->bio_split);

	start_jif = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

	inc_ap_bio(device);
	__drbd_make_request(device, bio, start_jif);
	return BLK_QC_T_NONE;
}

static bool net_timeout_reached(struct drbd_request *net_req,
		struct drbd_connection *connection,
		unsigned long now, unsigned long ent,
		unsigned int ko_count, unsigned int timeout)
{
	struct drbd_device *device = net_req->device;

	if (!time_after(now, net_req->pre_send_jif + ent))
		return false;

	if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
		return false;

	if (net_req->rq_state & RQ_NET_PENDING) {
		drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return true;
	}

	/* We received an ACK already (or are using protocol A),
	 * but are waiting for the epoch closing barrier ack.
	 * Check if we sent the barrier already.  We should not blame the peer
	 * for being unresponsive, if we did not even ask it yet. */
	if (net_req->epoch == connection->send.current_epoch_nr) {
		drbd_warn(device,
			"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return false;
	}

	/* Worst case: we may have been blocked for whatever reason, then
	 * suddenly are able to send lots of requests (and epoch separating
	 * barriers) in quick succession.
	 * The timestamp of the net_req may be much too old and not correspond
	 * to the sending time of the relevant unack'ed barrier packet, so
	 * would trigger a spurious timeout.  The latest barrier packet may
	 * have a too recent timestamp to trigger the timeout, potentially miss
	 * the next timeout check. */
	if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
		drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			connection->send.last_sent_barrier_jif, now,
			jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
		return true;
	}
	return false;
}
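/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   resp. the local disk,
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (resp. disk was attached)
 *   for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
 *
 * Side effect: once per 32bit wrap-around interval, which means every
 * ~198 days with 250 HZ, we have a window where the timeout would need
 * to expire twice (worst case) to become effective. Good enough.
 */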
void request_timer_fn(unsigned long data)
{
	struct drbd_device *device = (struct drbd_device *) data;
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
	struct net_conf *nc;
	unsigned long oldest_submit_jif;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;
	unsigned int ko_count = 0, timeout = 0;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
		ko_count = nc->ko_count;
		timeout = nc->timeout;
	}

	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(device);
	}
	rcu_read_unlock();

	ent = timeout * HZ/10 * ko_count;
	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;
	nt = now + et;

	spin_lock_irq(&device->resource->req_lock);
	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);

	/* maybe the oldest request waiting for the peer is in fact still
	 * blocking in tcp sendmsg */
	req_peer = connection->req_ack_pending;

	/* if we don't have such request (e.g. protocol A)
	 * check the oldest requests which is still waiting on its epoch
	 * closing barrier ack. */
	if (!req_peer)
		req_peer = connection->req_not_net_done;

	/* evaluate the oldest peer request only in one timer! */
	if (req_peer && req_peer->device != device)
		req_peer = NULL;

	/* do we have something to evaluate? */
	if (req_peer == NULL && req_write == NULL && req_read == NULL)
		goto out;

	oldest_submit_jif =
		(req_write && req_read)
		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
		: req_write ? req_write->pre_submit_jif
		: req_read ? req_read->pre_submit_jif : now;

	if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);

	if (dt && oldest_submit_jif != now &&
		 time_after(now, oldest_submit_jif + dt) &&
		!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
	}

	/* Reschedule timer for the nearest not already expired timeout.
	 * Fallback to now + min(effective network timeout, disk timeout). */
	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
		? req_peer->pre_send_jif + ent : now + et;
	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
		? oldest_submit_jif + dt : now + et;
	nt = time_before(ent, dt) ? ent : dt;
out:
	spin_unlock_irq(&device->resource->req_lock);
	mod_timer(&device->request_timer, nt);
}