/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU.  Otherwise the softlockup
 * watchdog will fire.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit.
 * Setting it to 0 will restore the old behavior (where we looped until
 * we had drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport.  This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
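
/*
 * RDS_IN_XMIT acts as a per-connection trylock: only one task may work
 * the connection's send queue at a time.  acquire_in_xmit() returns
 * nonzero on success; release_in_xmit() clears the bit and wakes anyone
 * (e.g. rds_conn_shutdown()) waiting for the slot to drain.
 */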
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't have to worry
	 * about flush_workqueue(), because rds_conn_shutdown() won't let
	 * rds_conn_destroy() proceed until it's done with the work struct.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead than heavyweight headers
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
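/*
 * Push messages from the send queue down to the transport until it can't
 * make forward progress or we've worked through send_batch_count messages.
 * Serialized per connection via RDS_IN_XMIT; returns -ENOMEM to ask the
 * caller to retry when another task already holds the slot.
 */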
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We record the send generation after doing the xmit acquire.
	 * If someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	conn->c_send_gen++;
	send_gen = conn->c_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT;
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending
		 * congestion map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding
		 * RDS_IN_XMIT; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the
				 * retransmit list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state.  We could possibly
			 * recover from that, but right now we drop the
			 * connection.  Therefore, we never retransmit
			 * messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue.
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue.
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent &&
			    !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}
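		/*
		 * Hand the header and data to the transport.  A partial send
		 * is recorded in c_xmit_hdr_off, c_xmit_sg and
		 * c_xmit_data_off so the next pass resumes where this one
		 * left off.
		 */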
		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op.  Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd see the bit
	 * still set and not try to send their newly queued message.  We need
	 * to check the send queue after having cleared RDS_IN_XMIT so that
	 * their message doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&conn->c_send_queue)) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
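
/*
 * A message counts as acked either when the transport supplies its own
 * is_acked callback, or by default once its sequence number is at or
 * below the acknowledged sequence number.
 */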
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * Queue a message's RDMA notifier on its socket's notify queue as soon
 * as the transport signals completion, provided the message is still on
 * the socket and notification was requested.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except this looks at the atomic op.
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it.  It does that by keeping valid
		 * the rm->m_rs while it's locked.  Once they've dropped m_rs,
		 * we should see the flag is clear.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue.  This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
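
/*
 * Drop messages queued to 'dest' (or to all destinations when dest is
 * NULL), completing any pending notifiers with RDS_RDMA_CANCELED.  This
 * backs RDS_CANCEL_SENT_TO and socket release.
 */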
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * We only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If the send buffer is almost full but not quite, we would queue
	 * nothing and return -EAGAIN while poll() still signalled send room,
	 * which can make userspace spin.  So we test the *old* value of
	 * rs_snd_bytes: the last message may overshoot the buffer, and from
	 * then on poll() knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go.  This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
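
/*
 * Apply each SOL_RDS control message to the rds_message, stopping at the
 * first one that fails.  RDS_CMSG_RDMA_MAP reports back that an MR was
 * allocated so the caller can clean it up on error.
 */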
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
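
/*
 * Build an rds_message from the user's msghdr, queue it on the socket and
 * connection, and kick the transmit path.
 */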
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's handling of unsupported flags */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		release_sock(sk);
		ret = -ENOTCONN;
		goto out;
	}
	release_sock(sk);

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
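	/*
	 * Queue the message on the socket and connection.  If the send
	 * buffer is full we either fail with -EAGAIN (nonblocking) or sleep
	 * until acks free space or the timeout expires.
	 */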
	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(conn);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR.  If it fails with
	 * EAGAIN or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}