#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlock watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
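
/* A usage sketch (illustrative): mode 0444 makes the parameter read-only at
 * runtime, so it can only be set when the module loads, or on the kernel
 * command line when RDS is built in, e.g.
 *
 *	modprobe rds send_batch_count=2048
 *	rds.send_batch_count=2048
 */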

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_path_reset);

static int acquire_in_xmit(struct rds_conn_path *cp)
{
	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
}

static void release_in_xmit(struct rds_conn_path *cp)
{
	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&cp->cp_waitq))
		wake_up_all(&cp->cp_waitq);
}
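
/* Illustrative only: RDS_IN_XMIT is effectively a per-path transmit lock.
 * The winner of acquire_in_xmit() owns cp until release_in_xmit(), which
 * also wakes anyone sleeping on cp_waitq (e.g. a shutdown path waiting for
 * the bit to clear):
 *
 *	if (acquire_in_xmit(cp)) {
 *		... push queued messages down the transport ...
 *		release_in_xmit(cp);
 *	}
 */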

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead than datagram rds
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;
	int same_rm = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(cp)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (rds_destroy_pending(cp->cp_conn)) {
		release_in_xmit(cp);
		ret = -ENETUNREACH; /* don't requeue send work */
		goto out;
	}

	/*
	 * We record the send generation after doing the xmit acquire.
	 * If someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	send_gen = READ_ONCE(cp->cp_send_gen) + 1;
	WRITE_ONCE(cp->cp_send_gen, send_gen);

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_path_up(cp)) {
		release_in_xmit(cp);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_path_prepare)
		conn->c_trans->xmit_path_prepare(cp);

	/*
	 * Spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = cp->cp_xmit_rm;

		if (!rm) {
			same_rm = 0;
		} else {
			same_rm++;
			if (same_rm >= 4096) {
				rds_stats_inc(s_send_stuck_rm);
				ret = -EAGAIN;
				break;
			}
		}

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;
			rm->m_inc.i_conn_path = cp;
			rm->m_inc.i_conn = cp->cp_conn;

			cp->cp_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * cp_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, lets back off and see
			 * if someone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&cp->cp_lock, flags);

			if (!list_empty(&cp->cp_send_queue)) {
				rm = list_entry(cp->cp_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the
				 * retransmit list right away.
				 */
				list_move_tail(&rm->m_conn_item,
					       &cp->cp_retrans);
			}

			spin_unlock_irqrestore(&cp->cp_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
			    (rm->rdma.op_active &&
			     test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
				spin_lock_irqsave(&cp->cp_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&cp->cp_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (cp->cp_unacked_packets == 0 ||
			    cp->cp_unacked_bytes < len) {
				set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				cp->cp_unacked_packets =
					rds_sysctl_max_unacked_packets;
				cp->cp_unacked_bytes =
					rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				cp->cp_unacked_bytes -= len;
				cp->cp_unacked_packets--;
			}

			cp->cp_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
			rm->m_final_op = &rm->data;

			ret = conn->c_trans->xmit(conn, rm,
						  cp->cp_xmit_hdr_off,
						  cp->cp_xmit_sg,
						  cp->cp_xmit_data_off);
			if (ret <= 0)
				break;

			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    cp->cp_xmit_hdr_off);
				cp->cp_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[cp->cp_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      cp->cp_xmit_data_off);
				cp->cp_xmit_data_off += tmp;
				ret -= tmp;
				if (cp->cp_xmit_data_off == sg->length) {
					cp->cp_xmit_data_off = 0;
					sg++;
					cp->cp_xmit_sg++;
					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
					       rm->data.op_nents);
				}
			}

			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
			    (cp->cp_xmit_sg == rm->data.op_nents))
				cp->cp_xmit_data_sent = 1;
		}

		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op. Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
			cp->cp_xmit_rm = NULL;
			cp->cp_xmit_sg = 0;
			cp->cp_xmit_hdr_off = 0;
			cp->cp_xmit_data_off = 0;
			cp->cp_xmit_rdma_sent = 0;
			cp->cp_xmit_atomic_sent = 0;
			cp->cp_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_path_complete)
		conn->c_trans->xmit_path_complete(cp);
	release_in_xmit(cp);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto
	 */
	if (ret == 0) {
		bool raced;

		smp_mb();
		raced = send_gen != READ_ONCE(cp->cp_send_gen);

		if ((test_bit(0, &conn->c_map_queued) ||
		    !list_empty(&cp->cp_send_queue)) && !raced) {
			if (batch_count < send_batch_count)
				goto restart;
			rcu_read_lock();
			if (rds_destroy_pending(cp->cp_conn))
				ret = -ENETUNREACH;
			else
				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
			rcu_read_unlock();
		} else if (raced) {
			rds_stats_inc(s_send_lock_queue_raced);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);
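
/* Return-value contract, as exercised by rds_sendmsg() below (a summary,
 * not new behavior): 0 means the queue was drained or another sender took
 * over; -ENOMEM and -EAGAIN mean "couldn't finish, reschedule me", which
 * callers honor by queueing the path's send work:
 *
 *	ret = rds_send_xmit(cpath);
 *	if (ret == -ENOMEM || ret == -EAGAIN)
 *		queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
 */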

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
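
/* Default-rule illustration (hypothetical values): with
 * h_sequence == cpu_to_be64(5), rds_send_is_acked(rm, 5, NULL) is true
 * while rds_send_is_acked(rm, 4, NULL) is false.  Transports whose ack
 * numbering isn't the RDS sequence space (e.g. TCP) pass their own
 * is_acked callback instead of NULL.
 */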

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does that by keeping socket
		 * assignments on the same socket.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit before calling is_acked
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
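
/* A sketch of how a transport's ACK path is expected to use this (the
 * callback name is illustrative; the real TCP transport supplies its own
 * predicate that checks RDS_MSG_HAS_ACK_SEQ before trusting m_ack_seq):
 *
 *	static int my_trans_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		return test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags) &&
 *		       rm->m_ack_seq <= ack;
 *	}
 *	...
 *	rds_send_path_drop_acked(cp, acked_seq, my_trans_is_acked);
 */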

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	struct rds_conn_path *cp;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest &&
		    (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
		     dest->sin6_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;
		if (conn->c_trans->t_mp_capable)
			cp = rm->m_inc.i_conn_path;
		else
			cp = &conn->c_path[0];

		spin_lock_irqsave(&cp->cp_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&cp->cp_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&cp->cp_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the callers 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_conn_path *cp,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		sock_hold(rds_rs_to_sk(rs));
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rm->m_inc.i_conn_path = cp;
		rds_message_addref(rm);

		spin_lock(&cp->cp_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&cp->cp_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
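
/* The *queued out-parameter makes this safe to reuse as a wait condition:
 * once it fires, later calls return 1 without queueing the message twice.
 * This is the pattern rds_sendmsg() uses (a summary, not new code):
 *
 *	while (!rds_send_queue_rm(rs, conn, cpath, rm, sport, dport, &queued))
 *		wait_event_interruptible_timeout(*sk_sleep(sk),
 *				rds_send_queue_rm(...), timeo);
 */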

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how much memory the message will
 * consume once we allocate everything.
 */
static int rds_rm_size(struct msghdr *msg, int num_sgs,
		       struct rds_iov_vector_arr *vct)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;
	bool zcopy_cookie = false;
	struct rds_iov_vector *iov, *tmp_iov;

	if (num_sgs < 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			if (vct->indx >= vct->len) {
				vct->len += vct->incr;
				tmp_iov =
					krealloc(vct->vec,
						 vct->len *
						 sizeof(struct rds_iov_vector),
						 GFP_KERNEL);
				if (!tmp_iov) {
					vct->len -= vct->incr;
					return -ENOMEM;
				}
				vct->vec = tmp_iov;
			}
			iov = &vct->vec[vct->indx];
			memset(iov, 0, sizeof(struct rds_iov_vector));
			vct->indx++;
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_ZCOPY_COOKIE:
			zcopy_cookie = true;
			fallthrough;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
		return -EINVAL;

	size += num_sgs * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
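
/* Worked example (illustrative): a 3-page data payload plus one
 * RDS_CMSG_ATOMIC_FADD cmsg gives
 *
 *	size = sizeof(struct scatterlist)	// the atomic op
 *	     + 3 * sizeof(struct scatterlist)	// num_sgs for the payload
 *
 * which rds_sendmsg() then hands to rds_message_alloc() as one allocation.
 */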

static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	u32 *cookie;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
	    !rm->data.op_mmp_znotifier)
		return -EINVAL;
	cookie = CMSG_DATA(cmsg);
	rm->data.op_mmp_znotifier->z_cookie = *cookie;
	return 0;
}
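
/* The 32-bit cookie stashed in z_cookie ties this send to a later
 * zerocopy completion notification (RDS_CMSG_ZCOPY_COMPLETION); how that
 * notification is delivered is recv-path policy, not decided here.  The
 * check above only validates the cmsg length and that the copy path
 * actually attached a zerocopy notifier.
 */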

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr,
			 struct rds_iov_vector_arr *vct)
{
	struct cmsghdr *cmsg;
	int ret = 0, ind = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			if (ind >= vct->indx)
				return -ENOMEM;
			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
			ind++;
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			else if (ret == -ENODEV)
				/* Accommodate the get_mr() case which can fail
				 * if connection isn't established yet.
				 */
				ret = -EAGAIN;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		case RDS_CMSG_ZCOPY_COOKIE:
			ret = rds_cmsg_zcopy(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

static int rds_send_mprds_hash(struct rds_sock *rs,
			       struct rds_connection *conn, int nonblock)
{
	int hash;

	if (conn->c_npaths == 0)
		hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
	else
		hash = RDS_MPATH_HASH(rs, conn->c_npaths);
	if (conn->c_npaths == 0 && hash != 0) {
		rds_send_ping(conn, 0);

		/* The underlying connection is not up yet.  Need to wait
		 * until it is up to be sure that the non-zero c_path can be
		 * used.  But if we are interrupted, we have to use the zero
		 * c_path in case the connection ends up being non-MP capable.
		 */
		if (conn->c_npaths == 0) {
			/* Cannot wait for the connection be made, so just use
			 * the base c_path.
			 */
			if (nonblock)
				return 0;
			if (wait_event_interruptible(conn->c_hs_waitq,
						     conn->c_npaths != 0))
				hash = 0;
		}
		if (conn->c_npaths == 1)
			hash = 0;
	}
	return hash;
}
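
/* For intuition: until a handshake probe teaches us the peer's path count,
 * c_npaths is 0, so a socket that would hash onto a non-zero lane must
 * either wait for the handshake or fall back to lane 0.  Once c_npaths is
 * known (say 8; the number is illustrative), a socket whose RDS_MPATH_HASH
 * value is 3 consistently selects conn->c_path[3].
 */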

static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
{
	struct rds_rdma_args *args;
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
			if (cmsg->cmsg_len <
			    CMSG_LEN(sizeof(struct rds_rdma_args)))
				return -EINVAL;
			args = CMSG_DATA(cmsg);
			*rdma_bytes += args->remote_vec.bytes;
		}
	}
	return 0;
}

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	struct rds_conn_path *cpath;
	struct in6_addr daddr;
	__u32 scope_id = 0;
	size_t total_payload_len = payload_len, rdma_payload_len = 0;
	bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
		      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
	int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
	int namelen;
	struct rds_iov_vector_arr vct;
	int ind;

	memset(&vct, 0, sizeof(vct));

	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
	vct.incr = 1;

	/* Mirror Linux UDP's handling of BSD error-message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	namelen = msg->msg_namelen;
	if (namelen != 0) {
		if (namelen < sizeof(*usin)) {
			ret = -EINVAL;
			goto out;
		}
		switch (usin->sin_family) {
		case AF_INET:
			if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
			    usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
			    ipv4_is_multicast(usin->sin_addr.s_addr)) {
				ret = -EINVAL;
				goto out;
			}
			ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr);
			dport = usin->sin_port;
			break;

#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6: {
			int addr_type;

			if (namelen < sizeof(*sin6)) {
				ret = -EINVAL;
				goto out;
			}
			addr_type = ipv6_addr_type(&sin6->sin6_addr);
			if (!(addr_type & IPV6_ADDR_UNICAST)) {
				__be32 addr4;

				if (!(addr_type & IPV6_ADDR_MAPPED)) {
					ret = -EINVAL;
					goto out;
				}

				/* It is a mapped address.  Need to do some
				 * sanity checks.
				 */
				addr4 = sin6->sin6_addr.s6_addr32[3];
				if (addr4 == htonl(INADDR_ANY) ||
				    addr4 == htonl(INADDR_BROADCAST) ||
				    ipv4_is_multicast(addr4)) {
					ret = -EINVAL;
					goto out;
				}
			}
			if (addr_type & IPV6_ADDR_LINKLOCAL) {
				if (sin6->sin6_scope_id == 0) {
					ret = -EINVAL;
					goto out;
				}
				scope_id = sin6->sin6_scope_id;
			}

			daddr = sin6->sin6_addr;
			dport = sin6->sin6_port;
			break;
		}
#endif

		default:
			ret = -EINVAL;
			goto out;
		}
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		scope_id = rs->rs_bound_scope_id;
		release_sock(sk);
	}

	lock_sock(sk);
	if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
		release_sock(sk);
		ret = -ENOTCONN;
		goto out;
	} else if (namelen != 0) {
		/* Cannot send to an IPv4 address using an IPv6 source
		 * address and vice versa.
		 */
		if (ipv6_addr_v4mapped(&daddr) ^
		    ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
			release_sock(sk);
			ret = -EOPNOTSUPP;
			goto out;
		}
		/* If the socket is already bound to a link local address,
		 * it can only send to peers on the same link.  But allow
		 * communicating between link local and non-link local address.
		 */
		if (scope_id != rs->rs_bound_scope_id) {
			if (!scope_id) {
				scope_id = rs->rs_bound_scope_id;
			} else if (rs->rs_bound_scope_id) {
				release_sock(sk);
				ret = -EINVAL;
				goto out;
			}
		}
	}
	release_sock(sk);

	ret = rds_rdma_bytes(msg, &rdma_payload_len);
	if (ret)
		goto out;

	total_payload_len += rdma_payload_len;
	if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
		ret = -EMSGSIZE;
		goto out;
	}

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	if (zcopy) {
		if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, num_sgs, &vct);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
		if (IS_ERR(rm->data.op_sg)) {
			ret = PTR_ERR(rm->data.op_sg);
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
	    rs->rs_tos == rs->rs_conn->c_tos) {
		conn = rs->rs_conn;
	} else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						&rs->rs_bound_addr, &daddr,
						rs->rs_transport, rs->rs_tos,
						sock->sk->sk_allocation,
						scope_id);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	if (conn->c_trans->t_mp_capable)
		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
	else
		cpath = &conn->c_path[0];

	rm->m_conn_path = cpath;

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
	if (ret) {
		/* Trigger connection so that it's ready for the next retry */
		if (ret == -EAGAIN)
			rds_conn_connect_if_down(conn);
		goto out;
	}

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rds_destroy_pending(conn)) {
		ret = -EAGAIN;
		goto out;
	}

	if (rds_conn_path_down(cpath))
		rds_check_all_paths(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, cpath, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(cpath);
	if (ret == -ENOMEM || ret == -EAGAIN) {
		ret = 0;
		rcu_read_lock();
		if (rds_destroy_pending(cpath->cp_conn))
			ret = -ENETUNREACH;
		else
			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
		rcu_read_unlock();
	}
	if (ret)
		goto out;
	rds_message_put(rm);

	for (ind = 0; ind < vct.indx; ind++)
		kfree(vct.vec[ind].iov);
	kfree(vct.vec);

	return payload_len;

out:
	for (ind = 0; ind < vct.indx; ind++)
		kfree(vct.vec[ind].iov);
	kfree(vct.vec);

	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
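
/* A hedged userspace sketch of driving this path (field values are
 * illustrative; SOL_RDS, RDS_CMSG_RDMA_ARGS and struct rds_rdma_args come
 * from <linux/rds.h>):
 *
 *	char buf[64], ctl[CMSG_SPACE(sizeof(struct rds_rdma_args))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct sockaddr_in dst = { .sin_family = AF_INET, ... };
 *	struct msghdr mh = {
 *		.msg_name = &dst, .msg_namelen = sizeof(dst),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	... fill a cmsg with cmsg_level = SOL_RDS, cmsg_type =
 *	    RDS_CMSG_RDMA_ARGS and an rds_rdma_args payload ...
 *	sendmsg(rds_fd, &mh, 0);
 */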

/*
 * send out a probe. Can be shared by rds_send_ping,
 * rds_send_pong, rds_send_hb.
 * rds_send_hb should use h_flags
 *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
 * or
 *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
 */
static int
rds_send_probe(struct rds_conn_path *cp, __be16 sport,
	       __be16 dport, u8 h_flags)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = cp->cp_conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_path_connect_if_down(cp);

	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&cp->cp_lock, flags);
	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = cp->cp_conn;
	rm->m_inc.i_conn_path = cp;

	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
				    cp->cp_next_tx_seq);
	rm->m_inc.i_hdr.h_flags |= h_flags;
	cp->cp_next_tx_seq++;

	if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
	    cp->cp_conn->c_trans->t_mp_capable) {
		u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
		u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);

		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_NPATHS, &npaths,
					  sizeof(npaths));
		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_GEN_NUM,
					  &my_gen_num,
					  sizeof(u32));
	}
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	rcu_read_lock();
	if (!rds_destroy_pending(cp->cp_conn))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
	rcu_read_unlock();

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}

int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
	return rds_send_probe(cp, 0, dport, 0);
}

void
rds_send_ping(struct rds_connection *conn, int cp_index)
{
	unsigned long flags;
	struct rds_conn_path *cp = &conn->c_path[cp_index];

	spin_lock_irqsave(&cp->cp_lock, flags);
	if (conn->c_ping_triggered) {
		spin_unlock_irqrestore(&cp->cp_lock, flags);
		return;
	}
	conn->c_ping_triggered = 1;
	spin_unlock_irqrestore(&cp->cp_lock, flags);
	rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
}
EXPORT_SYMBOL_GPL(rds_send_ping);