#include "socklnd.h"

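/*
 * Allocate a tx descriptor of 'size' bytes. NOOP txs are recycled
 * through ksnd_idle_noop_txs under ksnd_tx_lock; everything else is
 * allocated fresh.
 */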
ksock_tx_t *
ksocknal_alloc_tx(int type, int size)
{
	ksock_tx_t *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
					ksock_tx_t, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (tx == NULL)
		LIBCFS_ALLOC(tx, size);

	if (tx == NULL)
		return NULL;

	atomic_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}

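/*
 * Allocate a pre-formatted NOOP tx carrying a zero-copy ACK cookie.
 */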
ksock_tx_t *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	ksock_tx_t *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (tx == NULL) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = NULL;
	tx->tx_kiov = NULL;
	tx->tx_nkiov = 0;
	tx->tx_iov = tx->tx_frags.virt.iov;
	tx->tx_niov = 1;
	tx->tx_nonblk = nonblk;

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}

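/*
 * Release a tx: NOOP descriptors go back on the freelist, anything
 * else is freed outright.
 */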
void
ksocknal_free_tx(ksock_tx_t *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
}

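/*
 * Send as much of the virtual-memory fragment list as the socket will
 * take, then advance tx_iov/tx_niov past whatever went out.
 */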
static int
ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
	struct kvec *iov = tx->tx_iov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	rc = ksocknal_lib_send_iov(conn, tx);

	if (rc <= 0)
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	do {
		LASSERT(tx->tx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_base = (void *)((char *)iov->iov_base + nob);
			iov->iov_len -= nob;
			return rc;
		}

		nob -= iov->iov_len;
		tx->tx_iov = ++iov;
		tx->tx_niov--;
	} while (nob != 0);

	return rc;
}

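/*
 * As above, but for the page (kiov) fragments; only used once all
 * virtual-memory fragments have been sent.
 */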
static int
ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
	lnet_kiov_t *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov == 0);
	LASSERT(tx->tx_nkiov > 0);

	rc = ksocknal_lib_send_kiov(conn, tx);

	if (rc <= 0)
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return rc;
		}

		nob -= (int)kiov->kiov_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob != 0);

	return rc;
}

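/*
 * Push tx until it completes or the socket blocks. Returns 0 when the
 * whole tx went out, -EAGAIN if the socket is full, -ENOMEM under
 * memory pressure, -ESHUTDOWN if the conn is closing, or another
 * negative errno on a fatal socket error.
 */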
static int
ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx != 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
	}

	LASSERT(tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov != 0) {
			rc = ksocknal_send_iov(conn, tx);
		} else {
			rc = ksocknal_send_kiov(conn, tx);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0)
			conn->ksnc_tx_bufnob += rc;

		if (bufnob < conn->ksnc_tx_bufnob) {
			conn->ksnc_tx_deadline =
				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
			conn->ksnc_tx_bufnob = bufnob;
			mb();
		}

		if (rc <= 0) {
			if (rc == 0)
				rc = -EAGAIN;

			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		atomic_sub(rc, &conn->ksnc_tx_nob);
		rc = 0;

	} while (tx->tx_resid != 0);

	ksocknal_connsock_decref(conn);
	return rc;
}

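/*
 * Receive into the current virtual-memory fragments and advance the
 * rx descriptor; also refreshes the rx deadline and the peer's
 * last-alive stamp. Returns -EAGAIN while these fragments still want
 * more bytes.
 */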
static int
ksocknal_recv_iov(ksock_conn_t *conn)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	rc = ksocknal_lib_recv_iov(conn);

	if (rc <= 0)
		return rc;

	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb();
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob != 0);

	return rc;
}

static int
ksocknal_recv_kiov(ksock_conn_t *conn)
{
	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_nkiov > 0);

	rc = ksocknal_lib_recv_kiov(conn);

	if (rc <= 0)
		return rc;

	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb();
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return -EAGAIN;
		}

		nob -= kiov->kiov_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob != 0);

	return 1;
}

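/*
 * Pull whatever the socket has into the conn's rx descriptor.
 * Returns 1 on progress (or if the socket would block), 0 on a clean
 * EOF before the current message started, -EPROTO on EOF mid-message,
 * -ESHUTDOWN if the conn is closing, or another negative errno.
 */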
static int
ksocknal_receive(ksock_conn_t *conn)
{
	int rc;

	if (ksocknal_data.ksnd_stall_rx != 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
	}

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		if (conn->ksnc_rx_niov != 0)
			rc = ksocknal_recv_iov(conn);
		else
			rc = ksocknal_recv_kiov(conn);

		if (rc <= 0) {
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (rc == 0 && conn->ksnc_rx_started) {
				rc = -EPROTO;
			}
			break;
		}

		if (conn->ksnc_rx_nob_wanted == 0) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}

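/*
 * Finalise a tx: drop the conn ref it holds and complete the LNet
 * message, with -EIO if the tx didn't go out whole or a zero-copy
 * send was aborted.
 */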
void
ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
{
	lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
	int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;

	LASSERT(ni != NULL || tx->tx_conn != NULL);

	if (tx->tx_conn != NULL)
		ksocknal_conn_decref(tx->tx_conn);

	if (ni == NULL && tx->tx_conn != NULL)
		ni = tx->tx_conn->ksnc_peer->ksnp_ni;

	ksocknal_free_tx(tx);
	if (lnetmsg != NULL)
		lnet_finalize(ni, lnetmsg, rc);
}

void
ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
	ksock_tx_t *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, ksock_tx_t, tx_list);

		if (error && tx->tx_lnetmsg != NULL) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		LASSERT(atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx);
	}
}

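/*
 * Register tx on the peer's zero-copy request list so the eventual
 * ZC-ACK can complete it; takes an extra tx ref which the ACK (or
 * ksocknal_uncheck_zc_req()) releases. Skipped for v1 protocol peers
 * and conns that didn't advertise zero-copy support.
 */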
static void
ksocknal_check_zc_req(ksock_tx_t *tx)
{
	ksock_conn_t *conn = tx->tx_conn;
	ksock_peer_t *peer = conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	ksocknal_tx_addref(tx);

	spin_lock(&peer->ksnp_lock);

	tx->tx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

	LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;

	if (peer->ksnp_zc_next_cookie == 0)
		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);

	spin_unlock(&peer->ksnp_lock);
}

static void
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
{
	ksock_peer_t *peer = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer->ksnp_lock);

	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
		spin_unlock(&peer->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer->ksnp_lock);

	ksocknal_tx_decref(tx);
}

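/*
 * Try to send tx on conn. On -ENOMEM the conn is parked on the
 * reaper's enomem list for a delayed retry; on any other fatal error
 * the conn and its siblings are closed.
 */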
static int
ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
	int rc;

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx);

	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (tx->tx_resid == 0) {
		LASSERT(rc == 0);
		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++;
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p\n", counter, conn);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
						   SOCKNAL_ENOMEM_RETRY),
				      ksocknal_data.ksnd_reaper_waketime))
			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
		return rc;
	}

	LASSERT(rc < 0);

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
				      &conn->ksnc_ipaddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
		       conn, rc,
		       libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_ipaddr,
		       conn->ksnc_port);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	ksocknal_close_conn_and_siblings(conn,
					 (conn->ksnc_closing) ? 0 : rc);

	return rc;
}

static void
ksocknal_launch_connection_locked(ksock_route_t *route)
{
	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

	route->ksnr_scheduled = 1;
	ksocknal_route_addref(route);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}

void
ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
{
	ksock_route_t *route;

	for (;;) {
		route = ksocknal_find_connectable_route_locked(peer);
		if (route == NULL)
			return;

		ksocknal_launch_connection_locked(route);
	}
}

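/*
 * Pick the conn to send tx on: prefer a protocol-matched conn
 * (SOCKNAL_MATCH_YES) with the fewest queued bytes, falling back to a
 * "may" match; the optional round-robin tunable breaks ties in favour
 * of the conn least recently posted to. Caller holds the global lock.
 */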
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
{
	struct list_head *tmp;
	ksock_conn_t *conn;
	ksock_conn_t *typed = NULL;
	ksock_conn_t *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each(tmp, &peer->ksnp_conns) {
		ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
		int nob = atomic_read(&c->ksnc_tx_nob) +
			  c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto != NULL &&
			c->ksnc_proto->pro_match_tx != NULL);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO:
			continue;

		case SOCKNAL_MATCH_YES:
			if (typed == NULL || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY:
			if (fallback == NULL || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	conn = (typed != NULL) ? typed : fallback;

	if (conn != NULL)
		conn->ksnc_tx_last_post = cfs_time_current();

	return conn;
}

void
ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
{
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn);
	tx->tx_conn = conn;
}

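/*
 * Queue tx on conn and wake the scheduler. Called holding the global
 * lock. A NOOP tx made redundant by piggybacked ZC-ACK handling comes
 * back as 'ztx' and is parked on the scheduler's zombie list for it
 * to reap.
 */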
void
ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
{
	ksock_sched_t *sched = conn->ksnc_scheduler;
	ksock_msg_t *msg = &tx->tx_msg;
	ksock_tx_t *ztx = NULL;
	int bufnob = 0;

	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_ipaddr,
	       conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
	       KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
		conn->ksnc_tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
		if (conn->ksnc_tx_bufnob > 0)
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
		conn->ksnc_tx_bufnob = 0;
		mb();
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		LASSERT(msg->ksm_zc_cookies[1] != 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx;
	} else {
		LASSERT(msg->ksm_zc_cookies[1] == 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
	}

	if (ztx != NULL) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready &&
	    !conn->ksnc_tx_scheduled) {
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}

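/*
 * Find a route that is not already being connected, still has route
 * types to establish, and whose reconnection backoff has expired.
 */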
ksock_route_t *
ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
{
	unsigned long now = cfs_time_current();
	struct list_head *tmp;
	ksock_route_t *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			continue;

		if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
			continue;

		if (!(route->ksnr_retry_interval == 0 ||
		      cfs_time_aftereq(now, route->ksnr_timeout))) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       cfs_duration_sec(route->ksnr_timeout - now));
			continue;
		}

		return route;
	}

	return NULL;
}

ksock_route_t *
ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
{
	struct list_head *tmp;
	ksock_route_t *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
}

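/*
 * Attach tx to a conn for 'id', creating the peer and launching
 * connections as required. Returns 0 if the tx was queued (possibly
 * waiting on a connection attempt), or a negative errno if no
 * connection can be established.
 */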
int
ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
{
	ksock_peer_t *peer;
	ksock_conn_t *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(tx->tx_conn == NULL);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer = ksocknal_find_peer_locked(ni, id);
		if (peer != NULL) {
			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
				if (conn != NULL) {
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer = ksocknal_find_peer_locked(ni, id);
		if (peer != NULL)
			break;

		write_unlock_bh(g_lock);

		if ((id.pid & LNET_PID_USERFLAG) != 0) {
			CERROR("Refusing to create a connection to userspace process %s\n",
			       libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc != 0) {
			CERROR("Can't add peer %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer);

	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
	if (conn != NULL) {
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked(peer) != NULL) {
		tx->tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	return -EHOSTUNREACH;
}

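/*
 * lnd_send() handler: wrap lntmsg in a tx descriptor, marking it
 * zero-copy capable when a paged payload meets the zc_min_payload
 * threshold, and launch it at its target.
 */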
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
	int mpflag = 1;
	int type = lntmsg->msg_type;
	lnet_process_id_t target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	ksock_tx_t *tx;
	int desc_size;
	int rc;

	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(payload_nob == 0 || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);
	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
	LASSERT(!in_interrupt());

	if (payload_iov != NULL)
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.paged.kiov[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (tx == NULL) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			cfs_memory_pressure_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = lntmsg;

	if (payload_iov != NULL) {
		tx->tx_kiov = NULL;
		tx->tx_nkiov = 0;
		tx->tx_iov = tx->tx_frags.virt.iov;
		tx->tx_niov = 1 +
			      lnet_extract_iov(payload_niov, &tx->tx_iov[1],
					       payload_niov, payload_iov,
					       payload_offset, payload_nob);
	} else {
		tx->tx_niov = 1;
		tx->tx_iov = &tx->tx_frags.paged.iov;
		tx->tx_kiov = tx->tx_frags.paged.kiov;
		tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
						 payload_niov, payload_kiov,
						 payload_offset, payload_nob);

		if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
			tx->tx_zc_capable = 1;
	}

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);

	rc = ksocknal_launch_packet(ni, tx, target);
	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);

	if (rc == 0)
		return 0;

	ksocknal_free_tx(tx);
	return -EIO;
}

int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads++;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return 0;
}

void
ksocknal_thread_fini(void)
{
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads--;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}

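/*
 * Set up the rx descriptor for the next incoming packet, or to skip
 * 'nob_to_skip' bytes of slop through a static scratch buffer.
 * Returns 1 when ready to read a new packet, 0 while slop still has
 * to be skipped.
 */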
int
ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];

	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto != NULL);

	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0)
		ksocknal_lib_eager_ack(conn);

	if (nob_to_skip == 0) {
		conn->ksnc_rx_started = 0;
		mb();

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
			break;

		case KSOCK_PROTO_V1:
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
			conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;

	} while (nob_to_skip != 0 &&
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}

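/*
 * Advance the rx state machine: read the ksock and LNet headers,
 * verify checksums, process piggybacked ZC-ACKs, and hand payloads to
 * LNet via lnet_parse(). Returns 0 when a packet completes or the
 * conn must wait for lnet_recv(), -EAGAIN when socket data ran out
 * mid-packet, or a negative errno after closing the conn.
 */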
static int
ksocknal_process_receive(ksock_conn_t *conn)
{
	lnet_hdr_t *lhdr;
	lnet_process_id_t *id;
	int rc;

	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

	LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn);

		if (rc <= 0) {
			LASSERT(rc != -EAGAIN);

			if (rc == 0)
				CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
				       conn,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
				       conn, rc,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);

			ksocknal_close_conn_and_siblings(conn,
							 (conn->ksnc_closing) ? 0 : rc);
			return (rc == 0 ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted != 0)
			return -EAGAIN;
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum != 0 &&
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
			__u64 cookie = 0;

			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
								conn->ksnc_msg.ksm_zc_cookies[1]);

			if (rc != 0) {
				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
			ksocknal_new_packet(conn, 0);
			return 0;
		}

		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
		conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);

		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
		conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;

		goto again;

	case SOCKNAL_RX_LNET_HEADER:
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn);

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->ksnc_peer->ksnp_id.nid, conn, 0);
		if (rc < 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;

		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		rc = 0;

		if (conn->ksnc_rx_nob_left == 0 &&
		    conn->ksnc_msg.ksm_csum != 0 &&
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			rc = conn->ksnc_proto->pro_handle_zcreq(conn,
					conn->ksnc_msg.ksm_zc_cookies[0],
					*ksocknal_tunables.ksnd_nonblk_zcack ||
					le64_to_cpu(lhdr->src_nid) != id->nid);
		}

		lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);

		if (rc != 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			return -EPROTO;
		}
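		/* Fall through */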
	case SOCKNAL_RX_SLOP:
		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
			return 0;
		goto again;

	default:
		break;
	}

	LBUG();
	return -EINVAL;
}

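/*
 * lnd_recv() handler: map the LNet buffers for this message into the
 * conn's rx descriptor and reschedule the conn if the scheduler was
 * waiting on lnet_parse().
 */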
int
ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	      unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
	      unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	ksock_conn_t *conn = private;
	ksock_sched_t *sched = conn->ksnc_scheduler;

	LASSERT(mlen <= rlen);
	LASSERT(niov <= LNET_MAX_IOV);

	conn->ksnc_cookie = msg;
	conn->ksnc_rx_nob_wanted = mlen;
	conn->ksnc_rx_nob_left = rlen;

	if (mlen == 0 || iov != NULL) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov =
			lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
					 niov, iov, offset, mlen);
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					  niov, kiov, offset, mlen);
	}

	LASSERT(mlen ==
		lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
		lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

	LASSERT(conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
		LASSERT(conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
}

static inline int
ksocknal_sched_cansleep(ksock_sched_t *sched)
{
	int rc;

	spin_lock_bh(&sched->kss_lock);

	rc = !ksocknal_data.ksnd_shuttingdown &&
	     list_empty(&sched->kss_rx_conns) &&
	     list_empty(&sched->kss_tx_conns);

	spin_unlock_bh(&sched->kss_lock);
	return rc;
}

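/*
 * Scheduler thread body: alternately services conns with rx data
 * ready and conns with tx queued, rescheduling every SOCKNAL_RESCHED
 * loops so one busy conn cannot starve the rest.
 */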
int ksocknal_scheduler(void *arg)
{
	struct ksock_sched_info *info;
	ksock_sched_t *sched;
	ksock_conn_t *conn;
	ksock_tx_t *tx;
	int rc;
	int nloops = 0;
	long id = (long)arg;

	info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
	sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];

	cfs_block_allsigs();

	rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
	if (rc != 0) {
		CERROR("Can't set CPT affinity to %d: %d\n",
		       info->ksi_cpt, rc);
	}

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		int did_something = 0;

		if (!list_empty(&sched->kss_rx_conns)) {
			conn = list_entry(sched->kss_rx_conns.next,
					  ksock_conn_t, ksnc_rx_list);
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn);

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_rx_scheduled);

			if (rc == 0)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				list_add_tail(&conn->ksnc_rx_list,
					      &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}

		if (!list_empty(&sched->kss_tx_conns)) {
			LIST_HEAD(zlist);

			if (!list_empty(&sched->kss_zombie_noop_txs)) {
				list_add(&zlist,
					 &sched->kss_zombie_noop_txs);
				list_del_init(&sched->kss_zombie_noop_txs);
			}

			conn = list_entry(sched->kss_tx_conns.next,
					  ksock_conn_t, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_entry(conn->ksnc_tx_queue.next,
					ksock_tx_t, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			list_del(&tx->tx_list);

			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist))
				ksocknal_txlist_done(NULL, &zlist, 0);

			rc = ksocknal_process_transmit(conn, tx);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list,
					 &conn->ksnc_tx_queue);
			} else {
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);

				conn->ksnc_tx_ready = 1;
			}

			if (rc != -ENOMEM) {
				if (conn->ksnc_tx_ready &&
				    !list_empty(&conn->ksnc_tx_queue)) {
					list_add_tail(&conn->ksnc_tx_list,
						      &sched->kss_tx_conns);
				} else {
					conn->ksnc_tx_scheduled = 0;
					ksocknal_conn_decref(conn);
				}
			}

			did_something = 1;
		}
		if (!did_something ||
		    ++nloops == SOCKNAL_RESCHED) {
			spin_unlock_bh(&sched->kss_lock);

			nloops = 0;

			if (!did_something) {
				rc = wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT(rc == 0);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_thread_fini();
	return 0;
}

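/*
 * Socket read callback: flag the conn rx-ready and hand it to its
 * scheduler.
 */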
void ksocknal_read_callback(ksock_conn_t *conn)
{
	ksock_sched_t *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) {
		list_add_tail(&conn->ksnc_rx_list,
			      &sched->kss_rx_conns);
		conn->ksnc_rx_scheduled = 1;
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}
	spin_unlock_bh(&sched->kss_lock);
}

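/*
 * Socket write callback: flag the conn tx-ready and, if it has txs
 * queued, hand it to its scheduler.
 */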
void ksocknal_write_callback(ksock_conn_t *conn)
{
	ksock_sched_t *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&
	    !list_empty(&conn->ksnc_tx_queue)) {
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}

static ksock_proto_t *
ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
{
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		if (version == KSOCK_PROTO_V2)
			return &ksocknal_protocol_v2x;

		if (version == KSOCK_PROTO_V3)
			return &ksocknal_protocol_v3x;

		return NULL;
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;

		CLASSERT(sizeof(lnet_magicversion_t) ==
			 offsetof(ksock_hello_msg_t, kshm_src_nid));

		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
}

int
ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
		    lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
	ksock_net_t *net = (ksock_net_t *)ni->ni_data;

	LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
	LASSERT(conn->ksnc_proto != NULL);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
}

static int
ksocknal_invert_type(int type)
{
	switch (type) {
	case SOCKLND_CONN_ANY:
	case SOCKLND_CONN_CONTROL:
		return type;
	case SOCKLND_CONN_BULK_IN:
		return SOCKLND_CONN_BULK_OUT;
	case SOCKLND_CONN_BULK_OUT:
		return SOCKLND_CONN_BULK_IN;
	default:
		return SOCKLND_CONN_NONE;
	}
}

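/*
 * Read and validate a HELLO from the peer. Returns 0 on success,
 * positive EALREADY if a connection race was lost on a matching
 * protocol, positive EPROTO on a survivable protocol mismatch, or a
 * negative errno on fatal error.
 */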
int
ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
		    ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
		    __u64 *incarnation)
{
	struct socket *sock = conn->ksnc_sock;
	int active = (conn->ksnc_proto != NULL);
	int timeout;
	int proto_match;
	int rc;
	ksock_proto_t *proto;
	lnet_process_id_t recv_id;

	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

	timeout = active ? *ksocknal_tunables.ksnd_timeout :
			   lnet_acceptor_timeout();

	rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
		       __cpu_to_le32(hello->kshm_magic),
		       LNET_PROTO_TCP_MAGIC,
		       &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	rc = lnet_sock_read(sock, &hello->kshm_version,
			    sizeof(hello->kshm_version), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (proto == NULL) {
		if (!active) {
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
		}

		CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
		       conn->ksnc_proto->pro_version,
		       &conn->ksnc_ipaddr);

		return -EPROTO;
	}

	proto_match = (conn->ksnc_proto == proto);
	conn->ksnc_proto = proto;

	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc != 0) {
		CERROR("Error %d reading or checking hello from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	*incarnation = hello->kshm_src_incarnation;

	if (hello->kshm_src_nid == LNET_NID_ANY) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
		       &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	if (!active &&
	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
	}

	if (!active) {
		*peerid = recv_id;

		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
			CERROR("Unexpected type %d from %s ip %pI4h\n",
			       hello->kshm_ctype, libcfs_id2str(*peerid),
			       &conn->ksnc_ipaddr);
			return -EPROTO;
		}

		return 0;
	}

	if (peerid->pid != recv_id.pid ||
	    peerid->nid != recv_id.nid) {
		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
				   libcfs_id2str(*peerid),
				   &conn->ksnc_ipaddr,
				   libcfs_id2str(recv_id));
		return -EPROTO;
	}

	if (hello->kshm_ctype == SOCKLND_CONN_NONE)
		return proto_match ? EALREADY : EPROTO;

	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
		CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
		       conn->ksnc_type, libcfs_id2str(*peerid),
		       &conn->ksnc_ipaddr,
		       hello->kshm_ctype);
		return -EPROTO;
	}

	return 0;
}

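/*
 * Establish the connections a route still needs. Returns nonzero if
 * the route should be retried later (e.g. after losing a connection
 * race); on failure the retry interval is backed off and any stranded
 * txs queued on the peer are completed with an error.
 */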
static int
ksocknal_connect(ksock_route_t *route)
{
	LIST_HEAD(zombies);
	ksock_peer_t *peer = route->ksnr_peer;
	int type;
	int wanted;
	struct socket *sock;
	unsigned long deadline;
	int retry_later = 0;
	int rc = 0;

	deadline = cfs_time_add(cfs_time_current(),
				cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	LASSERT(route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);

	route->ksnr_connecting = 1;

	for (;;) {
		wanted = ksocknal_route_mask() & ~route->ksnr_connected;

		if (peer->ksnp_closing || route->ksnr_deleted ||
		    wanted == 0) {
			retry_later = 0;
			break;
		}

		if (peer->ksnp_accepting > 0) {
			CDEBUG(D_NET,
			       "peer %s(%d) already connecting to me, retry later.\n",
			       libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
			retry_later = 1;
		}

		if (retry_later)
			break;

		if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
			type = SOCKLND_CONN_ANY;
		} else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
			type = SOCKLND_CONN_CONTROL;
		} else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
			type = SOCKLND_CONN_BULK_IN;
		} else {
			LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
			type = SOCKLND_CONN_BULK_OUT;
		}

		write_unlock_bh(&ksocknal_data.ksnd_global_lock);

		if (cfs_time_aftereq(cfs_time_current(), deadline)) {
			rc = -ETIMEDOUT;
			lnet_connect_console_error(rc, peer->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		rc = lnet_connect(&sock, peer->ksnp_id.nid,
				  route->ksnr_myipaddr,
				  route->ksnr_ipaddr, route->ksnr_port);
		if (rc != 0)
			goto failed;

		rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
		if (rc < 0) {
			lnet_connect_console_error(rc, peer->ksnp_id.nid,
						   route->ksnr_ipaddr,
						   route->ksnr_port);
			goto failed;
		}

		retry_later = (rc != 0);
		if (retry_later)
			CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
			       libcfs_nid2str(peer->ksnp_id.nid));

		write_lock_bh(&ksocknal_data.ksnd_global_lock);
	}

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	if (retry_later) {
		if (rc == EALREADY ||
		    (rc == 0 && peer->ksnp_accepting > 0)) {
			route->ksnr_retry_interval =
				cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
			route->ksnr_timeout = cfs_time_add(cfs_time_current(),
							   route->ksnr_retry_interval);
		}

		ksocknal_launch_connection_locked(route);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return retry_later;

 failed:
	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;

	route->ksnr_retry_interval *= 2;
	route->ksnr_retry_interval =
		max(route->ksnr_retry_interval,
		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
	route->ksnr_retry_interval =
		min(route->ksnr_retry_interval,
		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);

	LASSERT(route->ksnr_retry_interval != 0);
	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
					   route->ksnr_retry_interval);

	if (!list_empty(&peer->ksnp_tx_queue) &&
	    peer->ksnp_accepting == 0 &&
	    ksocknal_find_connecting_route_locked(peer) == NULL) {
		ksock_conn_t *conn;

		if (!list_empty(&peer->ksnp_conns)) {
			conn = list_entry(peer->ksnp_conns.next,
					  ksock_conn_t, ksnc_list);
			LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
		}

		list_splice_init(&peer->ksnp_tx_queue, &zombies);
	}

#if 0
	if (!route->ksnr_deleted) {
		list_del(&route->ksnr_list);
		list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
	}
#endif
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_peer_failed(peer);
	ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
	return 0;
}

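/*
 * Start a new connd if the pool is busy and below the configured
 * maximum. Called holding ksnd_connd_lock; returns 1 (after dropping
 * and retaking the lock) if it tried to start one, 0 otherwise.
 */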
static int
ksocknal_connd_check_start(time64_t sec, long *timeout)
{
	char name[16];
	int rc;
	int total = ksocknal_data.ksnd_connd_starting +
		    ksocknal_data.ksnd_connd_running;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL))
		return 0;

	if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
	    total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV)
		return 0;

	if (list_empty(&ksocknal_data.ksnd_connd_routes))
		return 0;

	if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
		*timeout = cfs_time_seconds(1);
		return 0;
	}

	if (ksocknal_data.ksnd_connd_starting > 0)
		return 0;

	ksocknal_data.ksnd_connd_starting_stamp = sec;
	ksocknal_data.ksnd_connd_starting++;
	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

	snprintf(name, sizeof(name), "socknal_cd%02d", total);
	rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
	if (rc == 0)
		return 1;

	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();

	return 1;
}

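/*
 * Decide whether this connd should exit because the pool has been
 * idle for longer than SOCKNAL_CONND_TIMEOUT and is larger than
 * configured. Called holding ksnd_connd_lock; returns 1 if the
 * calling thread should stop.
 */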
static int
ksocknal_connd_check_stop(time64_t sec, long *timeout)
{
	int val;

	if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL))
		return 0;

	if (ksocknal_data.ksnd_connd_starting > 0)
		return 0;

	if (ksocknal_data.ksnd_connd_running <=
	    *ksocknal_tunables.ksnd_nconnds)
		return 0;

	val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
		    SOCKNAL_CONND_TIMEOUT - sec);

	*timeout = (val > 0) ? cfs_time_seconds(val) :
			       cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
	if (val > 0)
		return 0;

	return ksocknal_data.ksnd_connd_running >
	       ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}

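/*
 * Find a route on the connd queue that is ready to connect now, or
 * shrink *timeout_p so the caller wakes when the earliest backoff
 * expires.
 */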
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
	ksock_route_t *route;
	unsigned long now;

	now = cfs_time_current();

	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
			    ksnr_connd_list) {
		if (route->ksnr_retry_interval == 0 ||
		    cfs_time_aftereq(now, route->ksnr_timeout))
			return route;

		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
		    (int)*timeout_p > (int)(route->ksnr_timeout - now))
			*timeout_p = (int)(route->ksnr_timeout - now);
	}

	return NULL;
}

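/*
 * Connection daemon: accepts incoming connection requests and
 * initiates outgoing connections for queued routes, growing and
 * shrinking the connd pool as load changes.
 */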
int
ksocknal_connd(void *arg)
{
	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
	ksock_connreq_t *cr;
	wait_queue_t wait;
	int nloops = 0;
	int cons_retry = 0;

	cfs_block_allsigs();

	init_waitqueue_entry(&wait, current);

	spin_lock_bh(connd_lock);

	LASSERT(ksocknal_data.ksnd_connd_starting > 0);
	ksocknal_data.ksnd_connd_starting--;
	ksocknal_data.ksnd_connd_running++;

	while (!ksocknal_data.ksnd_shuttingdown) {
		ksock_route_t *route = NULL;
		time64_t sec = ktime_get_real_seconds();
		long timeout = MAX_SCHEDULE_TIMEOUT;
		int dropped_lock = 0;

		if (ksocknal_connd_check_stop(sec, &timeout)) {
			wake_up(&ksocknal_data.ksnd_connd_waitq);
			break;
		}

		if (ksocknal_connd_check_start(sec, &timeout))
			dropped_lock = 1;

		if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
					ksock_connreq_t, ksncr_list);

			list_del(&cr->ksncr_list);
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			ksocknal_create_conn(cr->ksncr_ni, NULL,
					     cr->ksncr_sock, SOCKLND_CONN_NONE);
			lnet_ni_decref(cr->ksncr_ni);
			LIBCFS_FREE(cr, sizeof(*cr));

			spin_lock_bh(connd_lock);
		}

		if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
		    ksocknal_data.ksnd_connd_running)
			route = ksocknal_connd_get_route_locked(&timeout);

		if (route != NULL) {
			list_del(&route->ksnr_connd_list);
			ksocknal_data.ksnd_connd_connecting++;
			spin_unlock_bh(connd_lock);
			dropped_lock = 1;

			if (ksocknal_connect(route)) {
				if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
					CWARN("massive consecutive re-connecting to %pI4h\n",
					      &route->ksnr_ipaddr);
					cons_retry = 0;
				}
			} else {
				cons_retry = 0;
			}

			ksocknal_route_decref(route);

			spin_lock_bh(connd_lock);
			ksocknal_data.ksnd_connd_connecting--;
		}

		if (dropped_lock) {
			if (++nloops < SOCKNAL_RESCHED)
				continue;
			spin_unlock_bh(connd_lock);
			nloops = 0;
			cond_resched();
			spin_lock_bh(connd_lock);
			continue;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_unlock_bh(connd_lock);

		nloops = 0;
		schedule_timeout(timeout);

		remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
		spin_lock_bh(connd_lock);
	}
	ksocknal_data.ksnd_connd_running--;
	spin_unlock_bh(connd_lock);

	ksocknal_thread_fini();
	return 0;
}

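/*
 * Look for a conn on the peer that has hit a fatal socket error or
 * blown an rx/tx deadline; returns it with a ref held, or NULL.
 */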
static ksock_conn_t *
ksocknal_find_timed_out_conn(ksock_peer_t *peer)
{
	ksock_conn_t *conn;
	struct list_head *ctmp;

	list_for_each(ctmp, &peer->ksnp_conns) {
		int error;

		conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

		LASSERT(!conn->ksnc_closing);

		error = conn->ksnc_sock->sk->sk_err;
		if (error != 0) {
			ksocknal_conn_addref(conn);

			switch (error) {
			case ECONNRESET:
				CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
					libcfs_id2str(peer->ksnp_id),
					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			case ETIMEDOUT:
				CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
					libcfs_id2str(peer->ksnp_id),
					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			default:
				CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d)\n",
					error,
					libcfs_id2str(peer->ksnp_id),
					&conn->ksnc_ipaddr,
					conn->ksnc_port);
				break;
			}

			return conn;
		}

		if (conn->ksnc_rx_started &&
		    cfs_time_aftereq(cfs_time_current(),
				     conn->ksnc_rx_deadline)) {
			ksocknal_conn_addref(conn);
			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
				libcfs_id2str(peer->ksnp_id),
				&conn->ksnc_ipaddr,
				conn->ksnc_port,
				conn->ksnc_rx_state,
				conn->ksnc_rx_nob_wanted,
				conn->ksnc_rx_nob_left);
			return conn;
		}

		if ((!list_empty(&conn->ksnc_tx_queue) ||
		     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
		    cfs_time_aftereq(cfs_time_current(),
				     conn->ksnc_tx_deadline)) {
			ksocknal_conn_addref(conn);
			CNETERR("Timeout sending data to %s (%pI4h:%d); the network or that node may be down.\n",
				libcfs_id2str(peer->ksnp_id),
				&conn->ksnc_ipaddr,
				conn->ksnc_port);
			return conn;
		}
	}

	return NULL;
}

static inline void
ksocknal_flush_stale_txs(ksock_peer_t *peer)
{
	ksock_tx_t *tx;
	LIST_HEAD(stale_txs);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	while (!list_empty(&peer->ksnp_tx_queue)) {
		tx = list_entry(peer->ksnp_tx_queue.next,
				ksock_tx_t, tx_list);

		if (!cfs_time_aftereq(cfs_time_current(),
				      tx->tx_deadline))
			break;

		list_del(&tx->tx_list);
		list_add_tail(&tx->tx_list, &stale_txs);
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
}

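/*
 * Queue a NOOP as a keepalive probe if the v3 peer has been silent
 * longer than the keepalive interval. Called (and returns) holding
 * the global read lock, dropping and retaking it around allocation;
 * returns 1 if a probe was launched, 0 if none was needed, or a
 * negative errno on failure.
 */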
static int
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
{
	ksock_sched_t *sched;
	ksock_conn_t *conn;
	ksock_tx_t *tx;

	if (list_empty(&peer->ksnp_conns))
		return 0;

	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
		return 0;

	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
	    time_before(cfs_time_current(),
			cfs_time_add(peer->ksnp_last_alive,
				     cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
		return 0;

	if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
		return 0;

	peer->ksnp_send_keepalive = cfs_time_shift(10);

	conn = ksocknal_find_conn_locked(peer, NULL, 1);
	if (conn != NULL) {
		sched = conn->ksnc_scheduler;

		spin_lock_bh(&sched->kss_lock);
		if (!list_empty(&conn->ksnc_tx_queue)) {
			spin_unlock_bh(&sched->kss_lock);
			return 0;
		}

		spin_unlock_bh(&sched->kss_lock);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);

	tx = ksocknal_alloc_tx_noop(1, 1);
	if (tx == NULL) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return -ENOMEM;
	}

	if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
		read_lock(&ksocknal_data.ksnd_global_lock);
		return 1;
	}

	ksocknal_free_tx(tx);
	read_lock(&ksocknal_data.ksnd_global_lock);

	return -EIO;
}

static void
ksocknal_check_peer_timeouts(int idx)
{
	struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
	ksock_peer_t *peer;
	ksock_conn_t *conn;
	ksock_tx_t *tx;

 again:
	read_lock(&ksocknal_data.ksnd_global_lock);

	list_for_each_entry(peer, peers, ksnp_list) {
		unsigned long deadline = 0;
		int resid = 0;
		int n = 0;

		if (ksocknal_send_keepalive_locked(peer) != 0) {
			read_unlock(&ksocknal_data.ksnd_global_lock);
			goto again;
		}

		conn = ksocknal_find_timed_out_conn(peer);

		if (conn != NULL) {
			read_unlock(&ksocknal_data.ksnd_global_lock);

			ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);

			ksocknal_conn_decref(conn);
			goto again;
		}

		if (!list_empty(&peer->ksnp_tx_queue)) {
			ksock_tx_t *tx =
				list_entry(peer->ksnp_tx_queue.next,
					   ksock_tx_t, tx_list);

			if (cfs_time_aftereq(cfs_time_current(),
					     tx->tx_deadline)) {
				ksocknal_peer_addref(peer);
				read_unlock(&ksocknal_data.ksnd_global_lock);

				ksocknal_flush_stale_txs(peer);

				ksocknal_peer_decref(peer);
				goto again;
			}
		}

		if (list_empty(&peer->ksnp_zc_req_list))
			continue;

		spin_lock(&peer->ksnp_lock);
		list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
			if (!cfs_time_aftereq(cfs_time_current(),
					      tx->tx_deadline))
				break;

			if (tx->tx_conn->ksnc_closing)
				continue;
			n++;
		}

		if (n == 0) {
			spin_unlock(&peer->ksnp_lock);
			continue;
		}

		tx = list_entry(peer->ksnp_zc_req_list.next,
				ksock_tx_t, tx_zc_list);
		deadline = tx->tx_deadline;
		resid = tx->tx_resid;
		conn = tx->tx_conn;
		ksocknal_conn_addref(conn);

		spin_unlock(&peer->ksnp_lock);
		read_unlock(&ksocknal_data.ksnd_global_lock);

		CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
		       n, libcfs_nid2str(peer->ksnp_id.nid), tx,
		       cfs_duration_sec(cfs_time_current() - deadline),
		       resid, conn->ksnc_sock->sk->sk_wmem_queued);

		ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
		ksocknal_conn_decref(conn);
		goto again;
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}

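/*
 * Reaper thread: tears down closing conns, reposts conns parked after
 * ENOMEM, and sweeps a slice of the peer hash table for timeouts on
 * each pass.
 */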
int
ksocknal_reaper(void *arg)
{
	wait_queue_t wait;
	ksock_conn_t *conn;
	ksock_sched_t *sched;
	struct list_head enomem_conns;
	int nenomem_conns;
	long timeout;
	int i;
	int peer_index = 0;
	unsigned long deadline = cfs_time_current();

	cfs_block_allsigs();

	INIT_LIST_HEAD(&enomem_conns);
	init_waitqueue_entry(&wait, current);

	spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
					  ksock_conn_t, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_terminate_conn(conn);
			ksocknal_conn_decref(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
					  ksock_conn_t, ksnc_list);
			list_del(&conn->ksnc_list);

			spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

			ksocknal_destroy_conn(conn);

			spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
			continue;
		}

		if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
			list_add(&enomem_conns,
				 &ksocknal_data.ksnd_enomem_conns);
			list_del_init(&ksocknal_data.ksnd_enomem_conns);
		}

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

		nenomem_conns = 0;
		while (!list_empty(&enomem_conns)) {
			conn = list_entry(enomem_conns.next,
					  ksock_conn_t, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			sched = conn->ksnc_scheduler;

			spin_lock_bh(&sched->kss_lock);

			LASSERT(conn->ksnc_tx_scheduled);
			conn->ksnc_tx_ready = 1;
			list_add_tail(&conn->ksnc_tx_list,
				      &sched->kss_tx_conns);
			wake_up(&sched->kss_waitq);

			spin_unlock_bh(&sched->kss_lock);
			nenomem_conns++;
		}

		while ((timeout = cfs_time_sub(deadline,
					       cfs_time_current())) <= 0) {
			const int n = 4;
			const int p = 1;
			int chunk = ksocknal_data.ksnd_peer_hash_size;

			if (*ksocknal_tunables.ksnd_timeout > n * p)
				chunk = (chunk * n * p) /
					*ksocknal_tunables.ksnd_timeout;
			if (chunk == 0)
				chunk = 1;

			for (i = 0; i < chunk; i++) {
				ksocknal_check_peer_timeouts(peer_index);
				peer_index = (peer_index + 1) %
					     ksocknal_data.ksnd_peer_hash_size;
			}

			deadline = cfs_time_add(deadline, cfs_time_seconds(p));
		}

		if (nenomem_conns != 0)
			timeout = SOCKNAL_ENOMEM_RETRY;

		ksocknal_data.ksnd_reaper_waketime =
			cfs_time_add(cfs_time_current(), timeout);

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		if (!ksocknal_data.ksnd_shuttingdown &&
		    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
		    list_empty(&ksocknal_data.ksnd_zombie_conns))
			schedule_timeout(timeout);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);

		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
	}

	spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

	ksocknal_thread_fini();
	return 0;
}