/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");

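/*
 * A usage sketch (the value is illustrative): with perms 0444 the parameter
 * is read-only at runtime, visible under /sys/module/rds/parameters/, and
 * can only be set at module load time, e.g. "modprobe rds send_batch_count=64".
 */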
static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_reset);

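/*
 * RDS_IN_XMIT works as a simple bit lock: only the caller that wins the
 * test_and_set_bit() below may walk the send queue, giving us one sender
 * per connection without taking a sleeping lock on this hot path.
 */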
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We record the send generation after doing the xmit acquire.
	 * If someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	conn->c_send_gen++;
	send_gen = conn->c_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * Spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending
		 * congestion map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the
				 * retransmit list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.  Therefore, we never retransmit
			 * messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue.
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_rdma_sent = 1;
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue.
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_atomic_sent = 1;
		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op. Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd have failed
	 * acquire_in_xmit() and backed off without sending it.  We need to
	 * check the send queue after having cleared RDS_IN_XMIT so that their
	 * message doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&conn->c_send_queue)) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

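/*
 * Drop a message's payload from the socket's send-buffer accounting; the
 * caller must already hold rs->rs_lock.
 */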
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

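/*
 * A message counts as acked either when the transport supplies an is_acked
 * callback that says so, or, absent a callback, when its sequence number
 * falls at or below the acked sequence number.
 */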
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * a RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does that by keeping us
		 * from being able to get a hold of the message m_rs_lock.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

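/*
 * Drop every queued message addressed to @dest, or every queued message on
 * the socket when @dest is NULL.  This backs RDS_CANCEL_SENT_TO and socket
 * teardown; each dropped message is completed with RDS_RDMA_CANCELED.
 */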
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here;
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * We only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}
	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

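/*
 * Walk the user's control messages a second time, this time attaching each
 * RDMA, MR, or atomic request to the message whose size rds_rm_size()
 * computed above.
 */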
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

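/*
 * sendmsg(2) entry point for RDS sockets: resolve the destination, size and
 * build the rds_message, attach any cmsg ops, queue the message on the
 * socket and connection, then kick the transmit path.
 */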
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP mirror of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		release_sock(sk);
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	release_sock(sk);

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(conn);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}