linux/net/rds/send.c
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/moduleparam.h>
  35#include <linux/gfp.h>
  36#include <net/sock.h>
  37#include <linux/in.h>
  38#include <linux/list.h>
  39#include <linux/ratelimit.h>
  40#include <linux/export.h>
  41#include <linux/sizes.h>
  42
  43#include "rds.h"
  44
  45/* When transmitting messages in rds_send_xmit, we need to emerge from
   46 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
  47 * will kick our shin.
  48 * Also, it seems fairer to not let one busy connection stall all the
  49 * others.
  50 *
  51 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  52 * it to 0 will restore the old behavior (where we looped until we had
  53 * drained the queue).
  54 */
  55static int send_batch_count = SZ_1K;
  56module_param(send_batch_count, int, 0444);
  57MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
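     /* send_batch_count is read-only at runtime (perms 0444); it can only be
      * set at module load time, e.g. "modprobe rds send_batch_count=2048" or
      * rds.send_batch_count= on the kernel command line.
      */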
  58
  59static void rds_send_remove_from_sock(struct list_head *messages, int status);
  60
  61/*
  62 * Reset the send state.  Callers must ensure that this doesn't race with
  63 * rds_send_xmit().
  64 */
  65void rds_send_reset(struct rds_connection *conn)
  66{
  67        struct rds_message *rm, *tmp;
  68        unsigned long flags;
  69
  70        if (conn->c_xmit_rm) {
  71                rm = conn->c_xmit_rm;
  72                conn->c_xmit_rm = NULL;
  73                /* Tell the user the RDMA op is no longer mapped by the
  74                 * transport. This isn't entirely true (it's flushed out
  75                 * independently) but as the connection is down, there's
  76                 * no ongoing RDMA to/from that memory */
  77                rds_message_unmapped(rm);
  78                rds_message_put(rm);
  79        }
  80
  81        conn->c_xmit_sg = 0;
  82        conn->c_xmit_hdr_off = 0;
  83        conn->c_xmit_data_off = 0;
  84        conn->c_xmit_atomic_sent = 0;
  85        conn->c_xmit_rdma_sent = 0;
  86        conn->c_xmit_data_sent = 0;
  87
  88        conn->c_map_queued = 0;
  89
  90        conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
  91        conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
  92
  93        /* Mark messages as retransmissions, and move them to the send q */
  94        spin_lock_irqsave(&conn->c_lock, flags);
  95        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
  96                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  97                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  98        }
  99        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
 100        spin_unlock_irqrestore(&conn->c_lock, flags);
 101}
 102
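     /*
      * acquire_in_xmit()/release_in_xmit() form a simple per-connection
      * transmit lock: only the task that atomically sets RDS_IN_XMIT may
      * push messages down the connection, and clearing the bit wakes anyone
      * sleeping on c_waitq for the transmit path to go quiet.
      */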
 103static int acquire_in_xmit(struct rds_connection *conn)
 104{
 105        return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
 106}
 107
 108static void release_in_xmit(struct rds_connection *conn)
 109{
 110        clear_bit(RDS_IN_XMIT, &conn->c_flags);
 111        smp_mb__after_atomic();
 112        /*
 113         * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 114         * hot path and finding waiters is very rare.  We don't want to walk
 115         * the system-wide hashed waitqueue buckets in the fast path only to
 116         * almost never find waiters.
 117         */
 118        if (waitqueue_active(&conn->c_waitq))
 119                wake_up_all(&conn->c_waitq);
 120}
 121
 122/*
 123 * We're making the conscious trade-off here to only send one message
 124 * down the connection at a time.
 125 *   Pro:
 126 *      - tx queueing is a simple fifo list
 127 *      - reassembly is optional and easily done by transports per conn
 128 *      - no per flow rx lookup at all, straight to the socket
 129 *      - less per-frag memory and wire overhead
 130 *   Con:
 131 *      - queued acks can be delayed behind large messages
 132 *   Depends:
 133 *      - small message latency is higher behind queued large messages
 134 *      - large message latency isn't starved by intervening small sends
 135 */
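     /*
      * Returns 0 when it has made what progress it can, -ENOMEM when another
      * task already holds RDS_IN_XMIT, or an error from the transport;
      * rds_sendmsg() requeues the send worker when it sees -ENOMEM or -EAGAIN.
      */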
 136int rds_send_xmit(struct rds_connection *conn)
 137{
 138        struct rds_message *rm;
 139        unsigned long flags;
 140        unsigned int tmp;
 141        struct scatterlist *sg;
 142        int ret = 0;
 143        LIST_HEAD(to_be_dropped);
 144        int batch_count;
 145        unsigned long send_gen = 0;
 146
 147restart:
 148        batch_count = 0;
 149
 150        /*
 151         * sendmsg calls here after having queued its message on the send
 152         * queue.  We only have one task feeding the connection at a time.  If
 153         * another thread is already feeding the queue then we back off.  This
 154         * avoids blocking the caller and trading per-connection data between
 155         * caches per message.
 156         */
 157        if (!acquire_in_xmit(conn)) {
 158                rds_stats_inc(s_send_lock_contention);
 159                ret = -ENOMEM;
 160                goto out;
 161        }
 162
 163        /*
 164         * we record the send generation after doing the xmit acquire.
 165         * if someone else manages to jump in and do some work, we'll use
 166         * this to avoid a goto restart farther down.
 167         *
 168         * The acquire_in_xmit() check above ensures that only one
 169         * caller can increment c_send_gen at any time.
 170         */
 171        conn->c_send_gen++;
 172        send_gen = conn->c_send_gen;
 173
 174        /*
 175         * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 176         * we do the opposite to avoid races.
 177         */
 178        if (!rds_conn_up(conn)) {
 179                release_in_xmit(conn);
 180                ret = 0;
 181                goto out;
 182        }
 183
 184        if (conn->c_trans->xmit_prepare)
 185                conn->c_trans->xmit_prepare(conn);
 186
 187        /*
 188         * spin trying to push headers and data down the connection until
 189         * the connection doesn't make forward progress.
 190         */
 191        while (1) {
 192
 193                rm = conn->c_xmit_rm;
 194
 195                /*
  196                 * If we are between sending messages, we can send a pending congestion
 197                 * map update.
 198                 */
 199                if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 200                        rm = rds_cong_update_alloc(conn);
 201                        if (IS_ERR(rm)) {
 202                                ret = PTR_ERR(rm);
 203                                break;
 204                        }
 205                        rm->data.op_active = 1;
 206
 207                        conn->c_xmit_rm = rm;
 208                }
 209
 210                /*
 211                 * If not already working on one, grab the next message.
 212                 *
 213                 * c_xmit_rm holds a ref while we're sending this message down
  214                 * the connection.  We can use this ref while holding the
  215                 * send_sem; rds_send_reset() is serialized with it.
 216                 */
 217                if (!rm) {
 218                        unsigned int len;
 219
 220                        batch_count++;
 221
 222                        /* we want to process as big a batch as we can, but
 223                         * we also want to avoid softlockups.  If we've been
  224                         * through a lot of messages, let's back off and see
 225                         * if anyone else jumps in
 226                         */
 227                        if (batch_count >= send_batch_count)
 228                                goto over_batch;
 229
 230                        spin_lock_irqsave(&conn->c_lock, flags);
 231
 232                        if (!list_empty(&conn->c_send_queue)) {
 233                                rm = list_entry(conn->c_send_queue.next,
 234                                                struct rds_message,
 235                                                m_conn_item);
 236                                rds_message_addref(rm);
 237
 238                                /*
 239                                 * Move the message from the send queue to the retransmit
 240                                 * list right away.
 241                                 */
 242                                list_move_tail(&rm->m_conn_item, &conn->c_retrans);
 243                        }
 244
 245                        spin_unlock_irqrestore(&conn->c_lock, flags);
 246
 247                        if (!rm)
 248                                break;
 249
 250                        /* Unfortunately, the way Infiniband deals with
 251                         * RDMA to a bad MR key is by moving the entire
  252                         * queue pair to error state. We could possibly
 253                         * recover from that, but right now we drop the
 254                         * connection.
 255                         * Therefore, we never retransmit messages with RDMA ops.
 256                         */
 257                        if (rm->rdma.op_active &&
 258                            test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 259                                spin_lock_irqsave(&conn->c_lock, flags);
 260                                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 261                                        list_move(&rm->m_conn_item, &to_be_dropped);
 262                                spin_unlock_irqrestore(&conn->c_lock, flags);
 263                                continue;
 264                        }
 265
 266                        /* Require an ACK every once in a while */
 267                        len = ntohl(rm->m_inc.i_hdr.h_len);
 268                        if (conn->c_unacked_packets == 0 ||
 269                            conn->c_unacked_bytes < len) {
 270                                __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 271
 272                                conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
 273                                conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
 274                                rds_stats_inc(s_send_ack_required);
 275                        } else {
 276                                conn->c_unacked_bytes -= len;
 277                                conn->c_unacked_packets--;
 278                        }
 279
 280                        conn->c_xmit_rm = rm;
 281                }
 282
 283                /* The transport either sends the whole rdma or none of it */
 284                if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 285                        rm->m_final_op = &rm->rdma;
 286                        /* The transport owns the mapped memory for now.
 287                         * You can't unmap it while it's on the send queue
 288                         */
 289                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 290                        ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 291                        if (ret) {
 292                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 293                                wake_up_interruptible(&rm->m_flush_wait);
 294                                break;
 295                        }
 296                        conn->c_xmit_rdma_sent = 1;
 297
 298                }
 299
 300                if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 301                        rm->m_final_op = &rm->atomic;
 302                        /* The transport owns the mapped memory for now.
 303                         * You can't unmap it while it's on the send queue
 304                         */
 305                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 306                        ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 307                        if (ret) {
 308                                clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 309                                wake_up_interruptible(&rm->m_flush_wait);
 310                                break;
 311                        }
 312                        conn->c_xmit_atomic_sent = 1;
 313
 314                }
 315
 316                /*
 317                 * A number of cases require an RDS header to be sent
 318                 * even if there is no data.
 319                 * We permit 0-byte sends; rds-ping depends on this.
 320                 * However, if there are exclusively attached silent ops,
 321                 * we skip the hdr/data send, to enable silent operation.
 322                 */
 323                if (rm->data.op_nents == 0) {
 324                        int ops_present;
 325                        int all_ops_are_silent = 1;
 326
 327                        ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 328                        if (rm->atomic.op_active && !rm->atomic.op_silent)
 329                                all_ops_are_silent = 0;
 330                        if (rm->rdma.op_active && !rm->rdma.op_silent)
 331                                all_ops_are_silent = 0;
 332
  333                        if (ops_present && all_ops_are_silent &&
  334                            !rm->m_rdma_cookie)
 335                                rm->data.op_active = 0;
 336                }
 337
 338                if (rm->data.op_active && !conn->c_xmit_data_sent) {
 339                        rm->m_final_op = &rm->data;
 340                        ret = conn->c_trans->xmit(conn, rm,
 341                                                  conn->c_xmit_hdr_off,
 342                                                  conn->c_xmit_sg,
 343                                                  conn->c_xmit_data_off);
 344                        if (ret <= 0)
 345                                break;
 346
 347                        if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
 348                                tmp = min_t(int, ret,
 349                                            sizeof(struct rds_header) -
 350                                            conn->c_xmit_hdr_off);
 351                                conn->c_xmit_hdr_off += tmp;
 352                                ret -= tmp;
 353                        }
 354
 355                        sg = &rm->data.op_sg[conn->c_xmit_sg];
 356                        while (ret) {
 357                                tmp = min_t(int, ret, sg->length -
 358                                                      conn->c_xmit_data_off);
 359                                conn->c_xmit_data_off += tmp;
 360                                ret -= tmp;
 361                                if (conn->c_xmit_data_off == sg->length) {
 362                                        conn->c_xmit_data_off = 0;
 363                                        sg++;
 364                                        conn->c_xmit_sg++;
 365                                        BUG_ON(ret != 0 &&
 366                                               conn->c_xmit_sg == rm->data.op_nents);
 367                                }
 368                        }
 369
 370                        if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
 371                            (conn->c_xmit_sg == rm->data.op_nents))
 372                                conn->c_xmit_data_sent = 1;
 373                }
 374
 375                /*
  376                 * An rm will only take multiple passes through this loop
 377                 * if there is a data op. Thus, if the data is sent (or there was
 378                 * none), then we're done with the rm.
 379                 */
 380                if (!rm->data.op_active || conn->c_xmit_data_sent) {
 381                        conn->c_xmit_rm = NULL;
 382                        conn->c_xmit_sg = 0;
 383                        conn->c_xmit_hdr_off = 0;
 384                        conn->c_xmit_data_off = 0;
 385                        conn->c_xmit_rdma_sent = 0;
 386                        conn->c_xmit_atomic_sent = 0;
 387                        conn->c_xmit_data_sent = 0;
 388
 389                        rds_message_put(rm);
 390                }
 391        }
 392
 393over_batch:
 394        if (conn->c_trans->xmit_complete)
 395                conn->c_trans->xmit_complete(conn);
 396        release_in_xmit(conn);
 397
 398        /* Nuke any messages we decided not to retransmit. */
 399        if (!list_empty(&to_be_dropped)) {
 400                /* irqs on here, so we can put(), unlike above */
 401                list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 402                        rds_message_put(rm);
 403                rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 404        }
 405
 406        /*
 407         * Other senders can queue a message after we last test the send queue
 408         * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 409         * not try and send their newly queued message.  We need to check the
 410         * send queue after having cleared RDS_IN_XMIT so that their message
 411         * doesn't get stuck on the send queue.
 412         *
  413         * If the transport cannot continue (i.e., ret != 0), then it must
 414         * call us when more room is available, such as from the tx
 415         * completion handler.
 416         *
 417         * We have an extra generation check here so that if someone manages
 418         * to jump in after our release_in_xmit, we'll see that they have done
  419         * some work and we will skip our goto.
 420         */
 421        if (ret == 0) {
 422                smp_mb();
 423                if ((test_bit(0, &conn->c_map_queued) ||
 424                     !list_empty(&conn->c_send_queue)) &&
 425                    send_gen == conn->c_send_gen) {
 426                        rds_stats_inc(s_send_lock_queue_raced);
 427                        if (batch_count < send_batch_count)
 428                                goto restart;
 429                        queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 430                }
 431        }
 432out:
 433        return ret;
 434}
 435EXPORT_SYMBOL_GPL(rds_send_xmit);
 436
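     /* Give the bytes of a completed or dropped message back to the socket's
      * send buffer accounting.  Called with rs_lock held.
      */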
 437static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 438{
 439        u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 440
 441        assert_spin_locked(&rs->rs_lock);
 442
 443        BUG_ON(rs->rs_snd_bytes < len);
 444        rs->rs_snd_bytes -= len;
 445
 446        if (rs->rs_snd_bytes == 0)
 447                rds_stats_inc(s_send_queue_empty);
 448}
 449
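     /*
      * A message is acked once its sequence number is covered by the peer's
      * ack, unless the transport provides its own is_acked callback (RDS/TCP
      * does; m_ack_seq may not be assigned yet, see the comment above
      * rds_send_drop_acked()).
      */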
 450static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 451                                    is_acked_func is_acked)
 452{
 453        if (is_acked)
 454                return is_acked(rm, ack);
 455        return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 456}
 457
 458/*
 459 * This is pretty similar to what happens below in the ACK
 460 * handling code - except that we call here as soon as we get
 461 * the IB send completion on the RDMA op and the accompanying
 462 * message.
 463 */
 464void rds_rdma_send_complete(struct rds_message *rm, int status)
 465{
 466        struct rds_sock *rs = NULL;
 467        struct rm_rdma_op *ro;
 468        struct rds_notifier *notifier;
 469        unsigned long flags;
 470
 471        spin_lock_irqsave(&rm->m_rs_lock, flags);
 472
 473        ro = &rm->rdma;
 474        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 475            ro->op_active && ro->op_notify && ro->op_notifier) {
 476                notifier = ro->op_notifier;
 477                rs = rm->m_rs;
 478                sock_hold(rds_rs_to_sk(rs));
 479
 480                notifier->n_status = status;
 481                spin_lock(&rs->rs_lock);
 482                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 483                spin_unlock(&rs->rs_lock);
 484
 485                ro->op_notifier = NULL;
 486        }
 487
 488        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 489
 490        if (rs) {
 491                rds_wake_sk_sleep(rs);
 492                sock_put(rds_rs_to_sk(rs));
 493        }
 494}
 495EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 496
 497/*
 498 * Just like above, except looks at atomic op
 499 */
 500void rds_atomic_send_complete(struct rds_message *rm, int status)
 501{
 502        struct rds_sock *rs = NULL;
 503        struct rm_atomic_op *ao;
 504        struct rds_notifier *notifier;
 505        unsigned long flags;
 506
 507        spin_lock_irqsave(&rm->m_rs_lock, flags);
 508
 509        ao = &rm->atomic;
  510        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
  511            ao->op_active && ao->op_notify && ao->op_notifier) {
 512                notifier = ao->op_notifier;
 513                rs = rm->m_rs;
 514                sock_hold(rds_rs_to_sk(rs));
 515
 516                notifier->n_status = status;
 517                spin_lock(&rs->rs_lock);
 518                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 519                spin_unlock(&rs->rs_lock);
 520
 521                ao->op_notifier = NULL;
 522        }
 523
 524        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 525
 526        if (rs) {
 527                rds_wake_sk_sleep(rs);
 528                sock_put(rds_rs_to_sk(rs));
 529        }
 530}
 531EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 532
 533/*
 534 * This is the same as rds_rdma_send_complete except we
 535 * don't do any locking - we have all the ingredients (message,
 536 * socket, socket lock) and can just move the notifier.
 537 */
 538static inline void
 539__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 540{
 541        struct rm_rdma_op *ro;
 542        struct rm_atomic_op *ao;
 543
 544        ro = &rm->rdma;
 545        if (ro->op_active && ro->op_notify && ro->op_notifier) {
 546                ro->op_notifier->n_status = status;
 547                list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 548                ro->op_notifier = NULL;
 549        }
 550
 551        ao = &rm->atomic;
 552        if (ao->op_active && ao->op_notify && ao->op_notifier) {
 553                ao->op_notifier->n_status = status;
 554                list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 555                ao->op_notifier = NULL;
 556        }
 557
 558        /* No need to wake the app - caller does this */
 559}
 560
 561/*
 562 * This is called from the IB send completion when we detect
  563 * an RDMA operation that failed with a remote access error.
 564 * So speed is not an issue here.
 565 */
 566struct rds_message *rds_send_get_message(struct rds_connection *conn,
 567                                         struct rm_rdma_op *op)
 568{
 569        struct rds_message *rm, *tmp, *found = NULL;
 570        unsigned long flags;
 571
 572        spin_lock_irqsave(&conn->c_lock, flags);
 573
 574        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 575                if (&rm->rdma == op) {
 576                        atomic_inc(&rm->m_refcount);
 577                        found = rm;
 578                        goto out;
 579                }
 580        }
 581
 582        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 583                if (&rm->rdma == op) {
 584                        atomic_inc(&rm->m_refcount);
 585                        found = rm;
 586                        break;
 587                }
 588        }
 589
 590out:
 591        spin_unlock_irqrestore(&conn->c_lock, flags);
 592
 593        return found;
 594}
 595EXPORT_SYMBOL_GPL(rds_send_get_message);
 596
 597/*
 598 * This removes messages from the socket's list if they're on it.  The list
 599 * argument must be private to the caller, we must be able to modify it
 600 * without locks.  The messages must have a reference held for their
 601 * position on the list.  This function will drop that reference after
  602 * removing the messages from the 'messages' list regardless of whether it found
 603 * the messages on the socket list or not.
 604 */
 605static void rds_send_remove_from_sock(struct list_head *messages, int status)
 606{
 607        unsigned long flags;
 608        struct rds_sock *rs = NULL;
 609        struct rds_message *rm;
 610
 611        while (!list_empty(messages)) {
 612                int was_on_sock = 0;
 613
 614                rm = list_entry(messages->next, struct rds_message,
 615                                m_conn_item);
 616                list_del_init(&rm->m_conn_item);
 617
 618                /*
 619                 * If we see this flag cleared then we're *sure* that someone
 620                 * else beat us to removing it from the sock.  If we race
 621                 * with their flag update we'll get the lock and then really
 622                 * see that the flag has been cleared.
 623                 *
 624                 * The message spinlock makes sure nobody clears rm->m_rs
 625                 * while we're messing with it. It does not prevent the
 626                 * message from being removed from the socket, though.
 627                 */
 628                spin_lock_irqsave(&rm->m_rs_lock, flags);
 629                if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 630                        goto unlock_and_drop;
 631
 632                if (rs != rm->m_rs) {
 633                        if (rs) {
 634                                rds_wake_sk_sleep(rs);
 635                                sock_put(rds_rs_to_sk(rs));
 636                        }
 637                        rs = rm->m_rs;
 638                        if (rs)
 639                                sock_hold(rds_rs_to_sk(rs));
 640                }
 641                if (!rs)
 642                        goto unlock_and_drop;
 643                spin_lock(&rs->rs_lock);
 644
 645                if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 646                        struct rm_rdma_op *ro = &rm->rdma;
 647                        struct rds_notifier *notifier;
 648
 649                        list_del_init(&rm->m_sock_item);
 650                        rds_send_sndbuf_remove(rs, rm);
 651
 652                        if (ro->op_active && ro->op_notifier &&
 653                               (ro->op_notify || (ro->op_recverr && status))) {
 654                                notifier = ro->op_notifier;
 655                                list_add_tail(&notifier->n_list,
 656                                                &rs->rs_notify_queue);
 657                                if (!notifier->n_status)
 658                                        notifier->n_status = status;
 659                                rm->rdma.op_notifier = NULL;
 660                        }
 661                        was_on_sock = 1;
 662                        rm->m_rs = NULL;
 663                }
 664                spin_unlock(&rs->rs_lock);
 665
 666unlock_and_drop:
 667                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 668                rds_message_put(rm);
 669                if (was_on_sock)
 670                        rds_message_put(rm);
 671        }
 672
 673        if (rs) {
 674                rds_wake_sk_sleep(rs);
 675                sock_put(rds_rs_to_sk(rs));
 676        }
 677}
 678
 679/*
 680 * Transports call here when they've determined that the receiver queued
 681 * messages up to, and including, the given sequence number.  Messages are
 682 * moved to the retrans queue when rds_send_xmit picks them off the send
 683 * queue. This means that in the TCP case, the message may not have been
 684 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 685 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 686 */
 687void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 688                         is_acked_func is_acked)
 689{
 690        struct rds_message *rm, *tmp;
 691        unsigned long flags;
 692        LIST_HEAD(list);
 693
 694        spin_lock_irqsave(&conn->c_lock, flags);
 695
 696        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 697                if (!rds_send_is_acked(rm, ack, is_acked))
 698                        break;
 699
 700                list_move(&rm->m_conn_item, &list);
 701                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 702        }
 703
 704        /* order flag updates with spin locks */
 705        if (!list_empty(&list))
 706                smp_mb__after_atomic();
 707
 708        spin_unlock_irqrestore(&conn->c_lock, flags);
 709
 710        /* now remove the messages from the sock list as needed */
 711        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 712}
 713EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 714
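     /*
      * Drop messages queued by a socket, optionally only those addressed to
      * @dest.  Each message is unhooked from its connection, completed with
      * RDS_RDMA_CANCELED and then released.
      */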
 715void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 716{
 717        struct rds_message *rm, *tmp;
 718        struct rds_connection *conn;
 719        unsigned long flags;
 720        LIST_HEAD(list);
 721
 722        /* get all the messages we're dropping under the rs lock */
 723        spin_lock_irqsave(&rs->rs_lock, flags);
 724
 725        list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 726                if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 727                             dest->sin_port != rm->m_inc.i_hdr.h_dport))
 728                        continue;
 729
 730                list_move(&rm->m_sock_item, &list);
 731                rds_send_sndbuf_remove(rs, rm);
 732                clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 733        }
 734
 735        /* order flag updates with the rs lock */
 736        smp_mb__after_atomic();
 737
 738        spin_unlock_irqrestore(&rs->rs_lock, flags);
 739
 740        if (list_empty(&list))
 741                return;
 742
 743        /* Remove the messages from the conn */
 744        list_for_each_entry(rm, &list, m_sock_item) {
 745
 746                conn = rm->m_inc.i_conn;
 747
 748                spin_lock_irqsave(&conn->c_lock, flags);
 749                /*
 750                 * Maybe someone else beat us to removing rm from the conn.
 751                 * If we race with their flag update we'll get the lock and
 752                 * then really see that the flag has been cleared.
 753                 */
 754                if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 755                        spin_unlock_irqrestore(&conn->c_lock, flags);
 756                        spin_lock_irqsave(&rm->m_rs_lock, flags);
 757                        rm->m_rs = NULL;
 758                        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 759                        continue;
 760                }
 761                list_del_init(&rm->m_conn_item);
 762                spin_unlock_irqrestore(&conn->c_lock, flags);
 763
 764                /*
 765                 * Couldn't grab m_rs_lock in top loop (lock ordering),
 766                 * but we can now.
 767                 */
 768                spin_lock_irqsave(&rm->m_rs_lock, flags);
 769
 770                spin_lock(&rs->rs_lock);
 771                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 772                spin_unlock(&rs->rs_lock);
 773
 774                rm->m_rs = NULL;
 775                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 776
 777                rds_message_put(rm);
 778        }
 779
 780        rds_wake_sk_sleep(rs);
 781
 782        while (!list_empty(&list)) {
 783                rm = list_entry(list.next, struct rds_message, m_sock_item);
 784                list_del_init(&rm->m_sock_item);
 785                rds_message_wait(rm);
 786
 787                /* just in case the code above skipped this message
  788                 * because RDS_MSG_ON_CONN wasn't set, run it again here.
  789                 * Taking m_rs_lock is the only thing that keeps us
 790                 * from racing with ack processing.
 791                 */
 792                spin_lock_irqsave(&rm->m_rs_lock, flags);
 793
 794                spin_lock(&rs->rs_lock);
 795                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 796                spin_unlock(&rs->rs_lock);
 797
 798                rm->m_rs = NULL;
 799                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 800
 801                rds_message_put(rm);
 802        }
 803}
 804
 805/*
  806 * we only want this to fire once so we use the caller's 'queued'.  It's
 807 * possible that another thread can race with us and remove the
 808 * message from the flow with RDS_CANCEL_SENT_TO.
 809 */
 810static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 811                             struct rds_message *rm, __be16 sport,
 812                             __be16 dport, int *queued)
 813{
 814        unsigned long flags;
 815        u32 len;
 816
 817        if (*queued)
 818                goto out;
 819
 820        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 821
 822        /* this is the only place which holds both the socket's rs_lock
 823         * and the connection's c_lock */
 824        spin_lock_irqsave(&rs->rs_lock, flags);
 825
 826        /*
 827         * If there is a little space in sndbuf, we don't queue anything,
 828         * and userspace gets -EAGAIN. But poll() indicates there's send
 829         * room. This can lead to bad behavior (spinning) if snd_bytes isn't
 830         * freed up by incoming acks. So we check the *old* value of
 831         * rs_snd_bytes here to allow the last msg to exceed the buffer,
 832         * and poll() now knows no more data can be sent.
 833         */
 834        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 835                rs->rs_snd_bytes += len;
 836
 837                /* let recv side know we are close to send space exhaustion.
 838                 * This is probably not the optimal way to do it, as this
 839                 * means we set the flag on *all* messages as soon as our
 840                 * throughput hits a certain threshold.
 841                 */
 842                if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 843                        __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 844
 845                list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 846                set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 847                rds_message_addref(rm);
 848                rm->m_rs = rs;
 849
 850                /* The code ordering is a little weird, but we're
 851                   trying to minimize the time we hold c_lock */
 852                rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 853                rm->m_inc.i_conn = conn;
 854                rds_message_addref(rm);
 855
 856                spin_lock(&conn->c_lock);
 857                rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
 858                list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
 859                set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 860                spin_unlock(&conn->c_lock);
 861
 862                rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 863                         rm, len, rs, rs->rs_snd_bytes,
 864                         (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 865
 866                *queued = 1;
 867        }
 868
 869        spin_unlock_irqrestore(&rs->rs_lock, flags);
 870out:
 871        return *queued;
 872}
 873
 874/*
 875 * rds_message is getting to be quite complicated, and we'd like to allocate
 876 * it all in one go. This figures out how big it needs to be up front.
 877 */
 878static int rds_rm_size(struct msghdr *msg, int data_len)
 879{
 880        struct cmsghdr *cmsg;
 881        int size = 0;
 882        int cmsg_groups = 0;
 883        int retval;
 884
 885        for_each_cmsghdr(cmsg, msg) {
 886                if (!CMSG_OK(msg, cmsg))
 887                        return -EINVAL;
 888
 889                if (cmsg->cmsg_level != SOL_RDS)
 890                        continue;
 891
 892                switch (cmsg->cmsg_type) {
 893                case RDS_CMSG_RDMA_ARGS:
 894                        cmsg_groups |= 1;
 895                        retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 896                        if (retval < 0)
 897                                return retval;
 898                        size += retval;
 899
 900                        break;
 901
 902                case RDS_CMSG_RDMA_DEST:
 903                case RDS_CMSG_RDMA_MAP:
 904                        cmsg_groups |= 2;
  905                        /* these are valid but do not add any size */
 906                        break;
 907
 908                case RDS_CMSG_ATOMIC_CSWP:
 909                case RDS_CMSG_ATOMIC_FADD:
 910                case RDS_CMSG_MASKED_ATOMIC_CSWP:
 911                case RDS_CMSG_MASKED_ATOMIC_FADD:
 912                        cmsg_groups |= 1;
 913                        size += sizeof(struct scatterlist);
 914                        break;
 915
 916                default:
 917                        return -EINVAL;
 918                }
 919
 920        }
 921
 922        size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
 923
 924        /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 925        if (cmsg_groups == 3)
 926                return -EINVAL;
 927
 928        return size;
 929}
 930
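     /*
      * Apply the SOL_RDS control messages to the message being built: RDMA
      * transfer args, RDMA cookies/mappings and atomic ops.  *allocated_mr is
      * set when RDS_CMSG_RDMA_MAP created an MR on the fly, so the caller can
      * tear it down again if the send fails.
      */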
 931static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 932                         struct msghdr *msg, int *allocated_mr)
 933{
 934        struct cmsghdr *cmsg;
 935        int ret = 0;
 936
 937        for_each_cmsghdr(cmsg, msg) {
 938                if (!CMSG_OK(msg, cmsg))
 939                        return -EINVAL;
 940
 941                if (cmsg->cmsg_level != SOL_RDS)
 942                        continue;
 943
 944                /* As a side effect, RDMA_DEST and RDMA_MAP will set
 945                 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 946                 */
 947                switch (cmsg->cmsg_type) {
 948                case RDS_CMSG_RDMA_ARGS:
 949                        ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 950                        break;
 951
 952                case RDS_CMSG_RDMA_DEST:
 953                        ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 954                        break;
 955
 956                case RDS_CMSG_RDMA_MAP:
 957                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 958                        if (!ret)
 959                                *allocated_mr = 1;
 960                        break;
 961                case RDS_CMSG_ATOMIC_CSWP:
 962                case RDS_CMSG_ATOMIC_FADD:
 963                case RDS_CMSG_MASKED_ATOMIC_CSWP:
 964                case RDS_CMSG_MASKED_ATOMIC_FADD:
 965                        ret = rds_cmsg_atomic(rs, rm, cmsg);
 966                        break;
 967
 968                default:
 969                        return -EINVAL;
 970                }
 971
 972                if (ret)
 973                        break;
 974        }
 975
 976        return ret;
 977}
 978
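     /*
      * sendmsg() entry point: size and allocate the rds_message, copy in the
      * payload, parse any control messages, look up (or create) the
      * connection for the destination, queue the message on the socket and
      * the connection, and finally kick rds_send_xmit().
      */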
 979int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 980{
 981        struct sock *sk = sock->sk;
 982        struct rds_sock *rs = rds_sk_to_rs(sk);
 983        DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
 984        __be32 daddr;
 985        __be16 dport;
 986        struct rds_message *rm = NULL;
 987        struct rds_connection *conn;
 988        int ret = 0;
 989        int queued = 0, allocated_mr = 0;
 990        int nonblock = msg->msg_flags & MSG_DONTWAIT;
 991        long timeo = sock_sndtimeo(sk, nonblock);
 992
  993        /* Mirror Linux UDP's mirroring of BSD error message compatibility */
 994        /* XXX: Perhaps MSG_MORE someday */
 995        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
 996                ret = -EOPNOTSUPP;
 997                goto out;
 998        }
 999
1000        if (msg->msg_namelen) {
1001                /* XXX fail non-unicast destination IPs? */
1002                if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
1003                        ret = -EINVAL;
1004                        goto out;
1005                }
1006                daddr = usin->sin_addr.s_addr;
1007                dport = usin->sin_port;
1008        } else {
1009                /* We only care about consistency with ->connect() */
1010                lock_sock(sk);
1011                daddr = rs->rs_conn_addr;
1012                dport = rs->rs_conn_port;
1013                release_sock(sk);
1014        }
1015
1016        lock_sock(sk);
1017        if (daddr == 0 || rs->rs_bound_addr == 0) {
1018                release_sock(sk);
1019                ret = -ENOTCONN; /* XXX not a great errno */
1020                goto out;
1021        }
1022        release_sock(sk);
1023
1024        if (payload_len > rds_sk_sndbuf(rs)) {
1025                ret = -EMSGSIZE;
1026                goto out;
1027        }
1028
1029        /* size of rm including all sgs */
1030        ret = rds_rm_size(msg, payload_len);
1031        if (ret < 0)
1032                goto out;
1033
1034        rm = rds_message_alloc(ret, GFP_KERNEL);
1035        if (!rm) {
1036                ret = -ENOMEM;
1037                goto out;
1038        }
1039
1040        /* Attach data to the rm */
1041        if (payload_len) {
1042                rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
1043                if (!rm->data.op_sg) {
1044                        ret = -ENOMEM;
1045                        goto out;
1046                }
1047                ret = rds_message_copy_from_user(rm, &msg->msg_iter);
1048                if (ret)
1049                        goto out;
1050        }
1051        rm->data.op_active = 1;
1052
1053        rm->m_daddr = daddr;
1054
1055        /* rds_conn_create has a spinlock that runs with IRQ off.
1056         * Caching the conn in the socket helps a lot. */
1057        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
1058                conn = rs->rs_conn;
1059        else {
1060                conn = rds_conn_create_outgoing(sock_net(sock->sk),
1061                                                rs->rs_bound_addr, daddr,
1062                                        rs->rs_transport,
1063                                        sock->sk->sk_allocation);
1064                if (IS_ERR(conn)) {
1065                        ret = PTR_ERR(conn);
1066                        goto out;
1067                }
1068                rs->rs_conn = conn;
1069        }
1070
1071        /* Parse any control messages the user may have included. */
1072        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1073        if (ret)
1074                goto out;
1075
1076        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1077                printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1078                               &rm->rdma, conn->c_trans->xmit_rdma);
1079                ret = -EOPNOTSUPP;
1080                goto out;
1081        }
1082
1083        if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1084                printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1085                               &rm->atomic, conn->c_trans->xmit_atomic);
1086                ret = -EOPNOTSUPP;
1087                goto out;
1088        }
1089
1090        rds_conn_connect_if_down(conn);
1091
1092        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1093        if (ret) {
1094                rs->rs_seen_congestion = 1;
1095                goto out;
1096        }
1097
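             /* Queue the message, waiting for send buffer space to open up
              * unless the caller asked for a non-blocking send.
              */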
1098        while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1099                                  dport, &queued)) {
1100                rds_stats_inc(s_send_queue_full);
1101
1102                if (nonblock) {
1103                        ret = -EAGAIN;
1104                        goto out;
1105                }
1106
1107                timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1108                                        rds_send_queue_rm(rs, conn, rm,
1109                                                          rs->rs_bound_port,
1110                                                          dport,
1111                                                          &queued),
1112                                        timeo);
1113                rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1114                if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1115                        continue;
1116
1117                ret = timeo;
1118                if (ret == 0)
1119                        ret = -ETIMEDOUT;
1120                goto out;
1121        }
1122
1123        /*
1124         * By now we've committed to the send.  We reuse rds_send_worker()
1125         * to retry sends in the rds thread if the transport asks us to.
1126         */
1127        rds_stats_inc(s_send_queued);
1128
1129        ret = rds_send_xmit(conn);
1130        if (ret == -ENOMEM || ret == -EAGAIN)
1131                queue_delayed_work(rds_wq, &conn->c_send_w, 1);
1132
1133        rds_message_put(rm);
1134        return payload_len;
1135
1136out:
1137        /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
1138         * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1139         * or in any other way, we need to destroy the MR again */
1140        if (allocated_mr)
1141                rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1142
1143        if (rm)
1144                rds_message_put(rm);
1145        return ret;
1146}
1147
1148/*
1149 * Reply to a ping packet.
1150 */
1151int
1152rds_send_pong(struct rds_connection *conn, __be16 dport)
1153{
1154        struct rds_message *rm;
1155        unsigned long flags;
1156        int ret = 0;
1157
1158        rm = rds_message_alloc(0, GFP_ATOMIC);
1159        if (!rm) {
1160                ret = -ENOMEM;
1161                goto out;
1162        }
1163
1164        rm->m_daddr = conn->c_faddr;
1165        rm->data.op_active = 1;
1166
1167        rds_conn_connect_if_down(conn);
1168
1169        ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1170        if (ret)
1171                goto out;
1172
1173        spin_lock_irqsave(&conn->c_lock, flags);
1174        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1175        set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1176        rds_message_addref(rm);
1177        rm->m_inc.i_conn = conn;
1178
1179        rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1180                                    conn->c_next_tx_seq);
1181        conn->c_next_tx_seq++;
1182        spin_unlock_irqrestore(&conn->c_lock, flags);
1183
1184        rds_stats_inc(s_send_queued);
1185        rds_stats_inc(s_send_pong);
1186
1187        /* schedule the send work on rds_wq */
1188        queue_delayed_work(rds_wq, &conn->c_send_w, 1);
1189
1190        rds_message_put(rm);
1191        return 0;
1192
1193out:
1194        if (rm)
1195                rds_message_put(rm);
1196        return ret;
1197}
1198