#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"

void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr)
{
	int i;

	atomic_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;

	for (i = 0; i < RDS_RX_MAX_TRACES; i++)
		inc->i_rx_lat_trace[i] = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);

void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       __be32 saddr)
{
	atomic_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = cp->cp_conn;
	inc->i_conn_path = cp;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);

static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	atomic_inc(&inc->i_refcount);
}

void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	if (atomic_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);

static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;
	if (delta > 0)
		rds_stats_add(s_recv_bytes_added_to_socket, delta);
	else
		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d now_cong %d delta %d\n",
		 rs, &rs->rs_bound_addr,
		 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
		 rds_sk_rcvbuf(rs), now_congested, delta);

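	/* wasn't congested -> now congested: advertise our port in the map */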
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
120
121
122
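	/* was congested -> no longer congested: only clear our bit once the
	 * socket has drained below half its rcvbuf, so the congestion state
	 * doesn't bounce on and off too often.
	 */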
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs) / 2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}
}

static void rds_conn_peer_gen_update(struct rds_connection *conn,
				     u32 peer_gen_num)
{
	int i;
	struct rds_message *rm, *tmp;
	unsigned long flags;

	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
	if (peer_gen_num != 0) {
		if (conn->c_peer_gen_num != 0 &&
		    peer_gen_num != conn->c_peer_gen_num) {
			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				struct rds_conn_path *cp;

				cp = &conn->c_path[i];
				spin_lock_irqsave(&cp->cp_lock, flags);
				cp->cp_next_tx_seq = 1;
				cp->cp_next_rx_seq = 0;
				list_for_each_entry_safe(rm, tmp,
							 &cp->cp_retrans,
							 m_conn_item) {
					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
				}
				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
		conn->c_peer_gen_num = peer_gen_num;
	}
}
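/*
 * Process all extension headers that came in with this message.
 */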
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;

		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
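			/* Stash the peer's rkey and offset as a cookie so a
			 * later RDS_CMSG_RDMA_DEST cmsg can hand it to
			 * userspace.
			 */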
			inc->i_rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
			break;
		}
	}
}

static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		u16 rds_npaths;
		u32 rds_gen_num;
	} buffer;
	u32 new_peer_gen_num = 0;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;

		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       buffer.rds_npaths);
			break;
		case RDS_EXTHDR_GEN_NUM:
			new_peer_gen_num = buffer.rds_gen_num;
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type 0x%x\n",
					    type);
		}
	}

	conn->c_npaths = max_t(int, conn->c_npaths, 1);
	rds_conn_peer_gen_update(conn, new_peer_gen_num);
}
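/*
 * rds_start_mprds() kicks off the additional paths of a multipath
 * connection once c_npaths has been negotiated via the handshake
 * extension headers.  Only the side with the numerically smaller IP
 * address initiates the extra paths; the peer just accepts them.
 */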
static void rds_start_mprds(struct rds_connection *conn)
{
	int i;
	struct rds_conn_path *cp;

	if (conn->c_npaths > 1 && conn->c_laddr < conn->c_faddr) {
		for (i = 1; i < conn->c_npaths; i++) {
			cp = &conn->c_path[i];
			rds_conn_path_connect_if_down(cp);
		}
	}
}
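/*
 * Deliver an incoming message to the bound socket, if any.
 *
 * The transport must serialize calls to this function against other
 * rx and connection-reset processing on this specific conn; it may be
 * called from softirq context.  Only one fragmented message is
 * reassembled per connection at a time, which is why reassembly state
 * lives in the conn rather than per flow.
 */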
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;
	struct rds_conn_path *cp;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;
	if (conn->c_trans->t_mp_capable)
		cp = inc->i_conn_path;
	else
		cp = &conn->c_path[0];

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u flags 0x%x rx_jiffies %lu\n",
		 conn,
		 (unsigned long long)cp->cp_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);
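	/* Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They can
	 * be dropped, though, if the sending socket is closed before they
	 * hit the wire, so sequence numbers can skip forward under normal
	 * operation.  They can also drop back in the conn failover case
	 * as previously sent messages are resent down the new instance of
	 * a conn.  We drop those, since they only claim to be retransmits.
	 */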
	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		if (inc->i_hdr.h_sport == 0) {
			rdsdebug("ignore ping with 0 sport from %pI4\n",
				 &saddr);
			goto out;
		}
		rds_stats_inc(s_recv_ping);
		rds_send_pong(cp, inc->i_hdr.h_sport);
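		/* if this is a handshake ping, start multipath if necessary */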
		if (RDS_HS_PROBE(inc->i_hdr.h_sport, inc->i_hdr.h_dport)) {
			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
			rds_start_mprds(cp->cp_conn);
		}
		goto out;
	}

	if (inc->i_hdr.h_dport == RDS_FLAG_PROBE_PORT &&
	    inc->i_hdr.h_sport == 0) {
		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
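		/* if this is a handshake pong, start multipath if necessary */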
		rds_start_mprds(cp->cp_conn);
		wake_up(&cp->cp_conn->c_hs_waitq);
		goto out;
	}

	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

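	/* Process extension headers */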
	rds_recv_incoming_exthdrs(inc, rs);

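	/* We can be racing with rds_release() which marks the socket dead. */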
	sk = rds_rs_to_sk(rs);

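	/* serialize with rds_release -> sock_orphan */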
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		rds_inc_addref(inc);
		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);
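/*
 * Be very careful here.  This is used as the condition in
 * wait_event_interruptible_timeout() in rds_recvmsg(), so it has to
 * cope with being called many times.
 */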
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}

static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
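			/* charge the message's bytes back against the
			 * socket's rcvbuf before dropping it.
			 */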
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}
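/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */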
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg = { 0 };
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

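	/* put_cmsg() copies to user space and thus may sleep, so we can't
	 * call it with rs_lock held.  First grab as many notifications as
	 * will fit in the user-provided cmsg buffer, then copy them out
	 * after dropping the lock.
	 */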
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				      struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

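	/* If we bailed out because of an error in put_cmsg, we may be
	 * left with one or more notifications that we didn't process.
	 * Return them to the head of the list.
	 */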
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}

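/*
 * Queue a congestion notification
 */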
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
		       sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}

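/*
 * Receive any control messages.
 */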
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
			       sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
		if (ret)
			goto out;
	}

	if ((inc->i_rx_tstamp.tv_sec != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
			       sizeof(struct timeval),
			       &inc->i_rx_tstamp);
		if (ret)
			goto out;
	}

	if (rs->rs_rx_traces) {
		struct rds_cmsg_rx_trace t;
		int i, j;

		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
		t.rx_traces = rs->rs_rx_traces;
		for (i = 0; i < rs->rs_rx_traces; i++) {
			j = rs->rs_rx_trace[i];
			t.rx_trace_pos[i] = j;
			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
					inc->i_rx_lat_trace[j];
		}

		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
			       sizeof(t), &t);
		if (ret)
			goto out;
	}

out:
	return ret;
}

int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

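	/* like udp_recvmsg(), read the timeout without the socket lock */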
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;

	while (1) {
		struct iov_iter save;

		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				ret = -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}

		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		save = msg->msg_iter;
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;
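		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue, then someone else raced us to return it; try
		 * to get the next message.
		 */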
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			msg->msg_iter = save;
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			goto out;
		}

		rds_stats_inc(s_recv_delivered);

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = inc->i_hdr.h_sport;
			sin->sin_addr.s_addr = inc->i_saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			msg->msg_namelen = sizeof(*sin);
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}
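/*
 * The socket is being shut down and we're asked to drop messages that
 * were queued for recvmsg.  The caller has unbound the socket, so the
 * receive path won't queue any more incoming messages on it.
 */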
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
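/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path; the caller passes in the addresses explicitly.
 */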
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}