linux/net/rds/recv.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"

void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
                  __be32 saddr)
{
        int i;

        atomic_set(&inc->i_refcount, 1);
        INIT_LIST_HEAD(&inc->i_item);
        inc->i_conn = conn;
        inc->i_saddr = saddr;
        inc->i_rdma_cookie = 0;
        inc->i_rx_tstamp.tv_sec = 0;
        inc->i_rx_tstamp.tv_usec = 0;

        for (i = 0; i < RDS_RX_MAX_TRACES; i++)
                inc->i_rx_lat_trace[i] = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);

void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
                       __be32 saddr)
{
        atomic_set(&inc->i_refcount, 1);
        INIT_LIST_HEAD(&inc->i_item);
        inc->i_conn = cp->cp_conn;
        inc->i_conn_path = cp;
        inc->i_saddr = saddr;
        inc->i_rdma_cookie = 0;
        inc->i_rx_tstamp.tv_sec = 0;
        inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);

static void rds_inc_addref(struct rds_incoming *inc)
{
        rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
        atomic_inc(&inc->i_refcount);
}

void rds_inc_put(struct rds_incoming *inc)
{
        rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
        if (atomic_dec_and_test(&inc->i_refcount)) {
                BUG_ON(!list_empty(&inc->i_item));

                inc->i_conn->c_trans->inc_free(inc);
        }
}
EXPORT_SYMBOL_GPL(rds_inc_put);

static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
                                  struct rds_cong_map *map,
                                  int delta, __be16 port)
{
        int now_congested;

        if (delta == 0)
                return;

        rs->rs_rcv_bytes += delta;
        if (delta > 0)
                rds_stats_add(s_recv_bytes_added_to_socket, delta);
        else
                rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
        now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

        rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
          "now_cong %d delta %d\n",
          rs, &rs->rs_bound_addr,
          ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
          rds_sk_rcvbuf(rs), now_congested, delta);

        /* wasn't -> am congested */
        if (!rs->rs_congested && now_congested) {
                rs->rs_congested = 1;
                rds_cong_set_bit(map, port);
                rds_cong_queue_updates(map);
        }
        /* was -> aren't congested */
        /* Require more free space before reporting uncongested to prevent
           bouncing cong/uncong state too often */
        else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
                rs->rs_congested = 0;
                rds_cong_clear_bit(map, port);
                rds_cong_queue_updates(map);
        }

        /* do nothing if no change in cong state */
}
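
/* A worked example of the hysteresis in rds_recv_rcvbuf_delta() above (the
 * numbers are illustrative, not taken from the source): if rds_sk_rcvbuf()
 * returns 64KB, the socket is marked congested once rs_rcv_bytes exceeds
 * 64KB, but is only reported uncongested again once rs_rcv_bytes drops
 * below 32KB (half the buffer), so a reader draining only a few bytes at a
 * time does not bounce the congestion map between states.
 */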

static void rds_conn_peer_gen_update(struct rds_connection *conn,
                                     u32 peer_gen_num)
{
        int i;
        struct rds_message *rm, *tmp;
        unsigned long flags;

        WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
        if (peer_gen_num != 0) {
                if (conn->c_peer_gen_num != 0 &&
                    peer_gen_num != conn->c_peer_gen_num) {
                        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                                struct rds_conn_path *cp;

                                cp = &conn->c_path[i];
                                spin_lock_irqsave(&cp->cp_lock, flags);
                                cp->cp_next_tx_seq = 1;
                                cp->cp_next_rx_seq = 0;
                                list_for_each_entry_safe(rm, tmp,
                                                         &cp->cp_retrans,
                                                         m_conn_item) {
                                        set_bit(RDS_MSG_FLUSH, &rm->m_flags);
                                }
                                spin_unlock_irqrestore(&cp->cp_lock, flags);
                        }
                }
                conn->c_peer_gen_num = peer_gen_num;
        }
}

/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
        struct rds_header *hdr = &inc->i_hdr;
        unsigned int pos = 0, type, len;
        union {
                struct rds_ext_header_version version;
                struct rds_ext_header_rdma rdma;
                struct rds_ext_header_rdma_dest rdma_dest;
        } buffer;

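        /* A hedged note on the iteration contract, inferred from the call
         * sites here rather than stated in this file: len is in/out for
         * rds_message_next_extension() (reset to sizeof(buffer) before each
         * call, updated to the number of bytes copied into buffer), pos
         * advances through the header's extension area, and the walk ends
         * when RDS_EXTHDR_NONE is returned.
         */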
        while (1) {
                len = sizeof(buffer);
                type = rds_message_next_extension(hdr, &pos, &buffer, &len);
                if (type == RDS_EXTHDR_NONE)
                        break;
                /* Process extension header here */
                switch (type) {
                case RDS_EXTHDR_RDMA:
                        rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
                        break;

                case RDS_EXTHDR_RDMA_DEST:
                        /* We ignore the size for now. We could stash it
                         * somewhere and use it for error checking. */
                        inc->i_rdma_cookie = rds_rdma_make_cookie(
                                        be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
                                        be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

                        break;
                }
        }
}

static void rds_recv_hs_exthdrs(struct rds_header *hdr,
                                struct rds_connection *conn)
{
        unsigned int pos = 0, type, len;
        union {
                struct rds_ext_header_version version;
                u16 rds_npaths;
                u32 rds_gen_num;
        } buffer;
        u32 new_peer_gen_num = 0;

        while (1) {
                len = sizeof(buffer);
                type = rds_message_next_extension(hdr, &pos, &buffer, &len);
                if (type == RDS_EXTHDR_NONE)
                        break;
                /* Process extension header here */
                switch (type) {
                case RDS_EXTHDR_NPATHS:
                        conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
                                               buffer.rds_npaths);
                        break;
                case RDS_EXTHDR_GEN_NUM:
                        new_peer_gen_num = buffer.rds_gen_num;
                        break;
                default:
                        pr_warn_ratelimited("ignoring unknown exthdr type "
                                             "0x%x\n", type);
                }
        }
        /* if RDS_EXTHDR_NPATHS was not found, default to a single path */
        conn->c_npaths = max_t(int, conn->c_npaths, 1);
        rds_conn_peer_gen_update(conn, new_peer_gen_num);
}

/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths)
 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
 *    mprds_paths.
 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
 * 4. when cp_index > 0, rds_connect_worker must only trigger
 *    a connection if laddr < faddr.
 * 5. sender may end up queuing the packet on the cp; it will be sent out
 *    later, when the connection is completed.
 */
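/* A worked example of the negotiation above (the values are illustrative):
 * if the sender probes with s_npaths = 8 and the receiver only supports
 * r_npaths = 2, both ends converge on min(8, 2) = 2 paths, with each side
 * additionally capped at RDS_MPATH_WORKERS in rds_recv_hs_exthdrs(); per
 * rule 4, only the endpoint with the smaller IP address brings up the
 * paths with index > 0.
 */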
static void rds_start_mprds(struct rds_connection *conn)
{
        int i;
        struct rds_conn_path *cp;

        if (conn->c_npaths > 1 && conn->c_laddr < conn->c_faddr) {
                for (i = 1; i < conn->c_npaths; i++) {
                        cp = &conn->c_path[i];
                        rds_conn_path_connect_if_down(cp);
                }
        }
}

/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, who only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
                       struct rds_incoming *inc, gfp_t gfp)
{
        struct rds_sock *rs = NULL;
        struct sock *sk;
        unsigned long flags;
        struct rds_conn_path *cp;

        inc->i_conn = conn;
        inc->i_rx_jiffies = jiffies;
        if (conn->c_trans->t_mp_capable)
                cp = inc->i_conn_path;
        else
                cp = &conn->c_path[0];

        rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
                 "flags 0x%x rx_jiffies %lu\n", conn,
                 (unsigned long long)cp->cp_next_rx_seq,
                 inc,
                 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
                 be32_to_cpu(inc->i_hdr.h_len),
                 be16_to_cpu(inc->i_hdr.h_sport),
                 be16_to_cpu(inc->i_hdr.h_dport),
                 inc->i_hdr.h_flags,
                 inc->i_rx_jiffies);

        /*
         * Sequence numbers should only increase.  Messages get their
         * sequence number as they're queued in a sending conn.  They
         * can be dropped, though, if the sending socket is closed before
         * they hit the wire.  So sequence numbers can skip forward
         * under normal operation.  They can also drop back in the conn
         * failover case as previously sent messages are resent down the
         * new instance of a conn.  We drop those, otherwise we have
         * to assume that the next valid seq does not come after a
         * hole in the fragment stream.
         *
         * The headers don't give us a way to realize if fragments of
         * a message have been dropped.  We assume that frags that arrive
         * to a flow are part of the current message on the flow that is
         * being reassembled.  This means that senders can't drop messages
         * from the sending conn until all their frags are sent.
         *
         * XXX we could spend more on the wire to get more robust failure
         * detection, arguably worth it to avoid data corruption.
         */
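        /* Note on the check below: an old sequence number alone is not
         * grounds for a drop; the frame is only discarded when the sender
         * has also flagged it RDS_FLAG_RETRANSMITTED.
         */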
        if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
            (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
                rds_stats_inc(s_recv_drop_old_seq);
                goto out;
        }
        cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

        if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
                if (inc->i_hdr.h_sport == 0) {
                        rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
                        goto out;
                }
                rds_stats_inc(s_recv_ping);
                rds_send_pong(cp, inc->i_hdr.h_sport);
                /* if this is a handshake ping, start multipath if necessary */
                if (RDS_HS_PROBE(inc->i_hdr.h_sport, inc->i_hdr.h_dport)) {
                        rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
                        rds_start_mprds(cp->cp_conn);
                }
                goto out;
        }

        if (inc->i_hdr.h_dport == RDS_FLAG_PROBE_PORT &&
            inc->i_hdr.h_sport == 0) {
                rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
                /* if this is a handshake pong, start multipath if necessary */
                rds_start_mprds(cp->cp_conn);
                wake_up(&cp->cp_conn->c_hs_waitq);
                goto out;
        }

        rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
        if (!rs) {
                rds_stats_inc(s_recv_drop_no_sock);
                goto out;
        }

        /* Process extension headers */
        rds_recv_incoming_exthdrs(inc, rs);

        /* We can be racing with rds_release() which marks the socket dead. */
        sk = rds_rs_to_sk(rs);

        /* serialize with rds_release -> sock_orphan */
        write_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!sock_flag(sk, SOCK_DEAD)) {
                rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
                rds_stats_inc(s_recv_queued);
                rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                      be32_to_cpu(inc->i_hdr.h_len),
                                      inc->i_hdr.h_dport);
                if (sock_flag(sk, SOCK_RCVTSTAMP))
                        do_gettimeofday(&inc->i_rx_tstamp);
                rds_inc_addref(inc);
                inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
                list_add_tail(&inc->i_item, &rs->rs_recv_queue);
                __rds_wake_sk_sleep(sk);
        } else {
                rds_stats_inc(s_recv_drop_dead_sock);
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
        if (rs)
                rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);

/*
 * Be very careful here: this is used as the condition in wait_event_*(),
 * so it must cope with being called many times.
 */
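/* On success, *inc holds a reference taken via rds_inc_addref() that the
 * caller is expected to release with rds_inc_put(), as rds_recvmsg() does.
 */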
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
        unsigned long flags;

        if (!*inc) {
                read_lock_irqsave(&rs->rs_recv_lock, flags);
                if (!list_empty(&rs->rs_recv_queue)) {
                        *inc = list_entry(rs->rs_recv_queue.next,
                                          struct rds_incoming,
                                          i_item);
                        rds_inc_addref(*inc);
                }
                read_unlock_irqrestore(&rs->rs_recv_lock, flags);
        }

        return *inc != NULL;
}

static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
                            int drop)
{
        struct sock *sk = rds_rs_to_sk(rs);
        int ret = 0;
        unsigned long flags;

        write_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!list_empty(&inc->i_item)) {
                ret = 1;
                if (drop) {
                        /* XXX make sure this i_conn is reliable */
                        rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                              -be32_to_cpu(inc->i_hdr.h_len),
                                              inc->i_hdr.h_dport);
                        list_del_init(&inc->i_item);
                        rds_inc_put(inc);
                }
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);

        rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
        return ret;
}

/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
        struct rds_notifier *notifier;
        struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
        int err = 0;

        /* put_cmsg copies to user space and thus may sleep. We can't do this
         * with rs_lock held, so first grab as many notifications as we can stuff
         * in the user provided cmsg buffer. We don't try to copy more, to avoid
         * losing notifications - except when the buffer is so small that it wouldn't
         * even hold a single notification. Then we give the caller as much of this
         * single msg as we can squeeze in, and set MSG_CTRUNC.
         */
        if (msghdr) {
                max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
                if (!max_messages)
                        max_messages = 1;
        }

        spin_lock_irqsave(&rs->rs_lock, flags);
        while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
                notifier = list_entry(rs->rs_notify_queue.next,
                                struct rds_notifier, n_list);
                list_move(&notifier->n_list, &copy);
                count++;
        }
        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (!count)
                return 0;

        while (!list_empty(&copy)) {
                notifier = list_entry(copy.next, struct rds_notifier, n_list);

                if (msghdr) {
                        cmsg.user_token = notifier->n_user_token;
                        cmsg.status = notifier->n_status;

                        err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
                                       sizeof(cmsg), &cmsg);
                        if (err)
                                break;
                }

                list_del_init(&notifier->n_list);
                kfree(notifier);
        }

        /* If we bailed out because of an error in put_cmsg,
         * we may be left with one or more notifications that we
         * didn't process. Return them to the head of the list. */
        if (!list_empty(&copy)) {
                spin_lock_irqsave(&rs->rs_lock, flags);
                list_splice(&copy, &rs->rs_notify_queue);
                spin_unlock_irqrestore(&rs->rs_lock, flags);
        }

        return err;
}

/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
        uint64_t notify = rs->rs_cong_notify;
        unsigned long flags;
        int err;

        err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
                        sizeof(notify), &notify);
        if (err)
                return err;

        spin_lock_irqsave(&rs->rs_lock, flags);
        rs->rs_cong_notify &= ~notify;
        spin_unlock_irqrestore(&rs->rs_lock, flags);

        return 0;
}

/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
                         struct rds_sock *rs)
{
        int ret = 0;

        if (inc->i_rdma_cookie) {
                ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
                                sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
                if (ret)
                        goto out;
        }

        if ((inc->i_rx_tstamp.tv_sec != 0) &&
            sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
                ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
                               sizeof(struct timeval),
                               &inc->i_rx_tstamp);
                if (ret)
                        goto out;
        }

        if (rs->rs_rx_traces) {
                struct rds_cmsg_rx_trace t;
                int i, j;

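                /* Each latency reported below is the delta between two
                 * consecutive trace points in i_rx_lat_trace[], i.e.
                 * rx_trace[i] is the time spent in the rx-path stage that
                 * rs_rx_trace[i] selects.
                 */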
                inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
                t.rx_traces = rs->rs_rx_traces;
                for (i = 0; i < rs->rs_rx_traces; i++) {
                        j = rs->rs_rx_trace[i];
                        t.rx_trace_pos[i] = j;
                        t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
                                          inc->i_rx_lat_trace[j];
                }

                ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
                               sizeof(t), &t);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                int msg_flags)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        long timeo;
        int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct rds_incoming *inc = NULL;

        /* udp_recvmsg()->sock_recvtimeo() gets away without locking too. */
        timeo = sock_rcvtimeo(sk, nonblock);

        rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

        if (msg_flags & MSG_OOB)
                goto out;

        while (1) {
                struct iov_iter save;
                /* If there are pending notifications, do those - and nothing else */
                if (!list_empty(&rs->rs_notify_queue)) {
                        ret = rds_notify_queue_get(rs, msg);
                        break;
                }

                if (rs->rs_cong_notify) {
                        ret = rds_notify_cong(rs, msg);
                        break;
                }

                if (!rds_next_incoming(rs, &inc)) {
                        if (nonblock) {
                                ret = -EAGAIN;
                                break;
                        }

                        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                        (!list_empty(&rs->rs_notify_queue) ||
                                         rs->rs_cong_notify ||
                                         rds_next_incoming(rs, &inc)), timeo);
                        rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
                                 timeo);
                        if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                                continue;

                        ret = timeo;
                        if (ret == 0)
                                ret = -ETIMEDOUT;
                        break;
                }

                rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
                         &inc->i_conn->c_faddr,
                         ntohs(inc->i_hdr.h_sport));
                save = msg->msg_iter;
                ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
                if (ret < 0)
                        break;

                /*
                 * if the message we just copied isn't at the head of the
                 * recv queue then someone else raced us to return it, try
                 * to get the next message.
                 */
                if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
                        rds_inc_put(inc);
                        inc = NULL;
                        rds_stats_inc(s_recv_deliver_raced);
                        msg->msg_iter = save;
                        continue;
                }

                if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
                        if (msg_flags & MSG_TRUNC)
                                ret = be32_to_cpu(inc->i_hdr.h_len);
                        msg->msg_flags |= MSG_TRUNC;
                }

                if (rds_cmsg_recv(inc, msg, rs)) {
                        ret = -EFAULT;
                        goto out;
                }

                rds_stats_inc(s_recv_delivered);

                if (sin) {
                        sin->sin_family = AF_INET;
                        sin->sin_port = inc->i_hdr.h_sport;
                        sin->sin_addr.s_addr = inc->i_saddr;
                        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                        msg->msg_namelen = sizeof(*sin);
                }
                break;
        }

        if (inc)
                rds_inc_put(inc);

out:
        return ret;
}
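
/* A hedged userspace sketch (illustrative only, not part of this file) of
 * how the delivery path above is typically consumed: the application reads
 * one RDS datagram with recvmsg() and can then walk the control messages
 * that rds_cmsg_recv() attached (e.g. RDS_CMSG_RDMA_DEST). The descriptor
 * name and buffer sizes below are assumptions made for the example.
 *
 *      char buf[8192], cbuf[CMSG_SPACE(64)];
 *      struct sockaddr_in from;
 *      struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *      struct msghdr msg = {
 *              .msg_name = &from, .msg_namelen = sizeof(from),
 *              .msg_iov = &iov, .msg_iovlen = 1,
 *              .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *      };
 *      ssize_t n = recvmsg(rds_fd, &msg, 0);   // rds_fd: a PF_RDS socket
 *      if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
 *              // the datagram was larger than buf, matching the MSG_TRUNC
 *              // handling in rds_recvmsg() above
 */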

/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
        struct sock *sk = rds_rs_to_sk(rs);
        struct rds_incoming *inc, *tmp;
        unsigned long flags;

        write_lock_irqsave(&rs->rs_recv_lock, flags);
        list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
                rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                      -be32_to_cpu(inc->i_hdr.h_len),
                                      inc->i_hdr.h_dport);
                list_del_init(&inc->i_item);
                rds_inc_put(inc);
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}

/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
                       struct rds_info_iterator *iter,
                       __be32 saddr, __be32 daddr, int flip)
{
        struct rds_info_message minfo;

        minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
        minfo.len = be32_to_cpu(inc->i_hdr.h_len);

        if (flip) {
                minfo.laddr = daddr;
                minfo.faddr = saddr;
                minfo.lport = inc->i_hdr.h_dport;
                minfo.fport = inc->i_hdr.h_sport;
        } else {
                minfo.laddr = saddr;
                minfo.faddr = daddr;
                minfo.lport = inc->i_hdr.h_sport;
                minfo.fport = inc->i_hdr.h_dport;
        }

        minfo.flags = 0;

        rds_info_copy(iter, &minfo, sizeof(minfo));
}