linux/net/rxrpc/output.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

struct rxrpc_ack_buffer {
        struct rxrpc_wire_header whdr;
        struct rxrpc_ackpacket ack;
        u8 acks[255];
        u8 pad[3];
        struct rxrpc_ackinfo ackinfo;
};

struct rxrpc_abort_buffer {
        struct rxrpc_wire_header whdr;
        __be32 abort_code;
};
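
/* Both buffers mirror the on-wire layout: a wire header followed by the
 * type-specific payload.  The ACK buffer additionally reserves room for up
 * to 255 soft-ACK/NACK bytes, three bytes of padding and the trailing
 * ackinfo; only the bytes actually filled in get transmitted (see
 * rxrpc_send_ack_packet(), which sends the ackinfo as a separate iovec).
 */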

static const char rxrpc_keepalive_string[] = "";

/*
 * Increase Tx backoff on transmission failure and clear it on success.
 */
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
        if (ret < 0) {
                u16 tx_backoff = READ_ONCE(call->tx_backoff);

                if (tx_backoff < HZ)
                        WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
        } else {
                WRITE_ONCE(call->tx_backoff, 0);
        }
}
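
/* Net effect: each consecutive failure adds one jiffy of extra delay, capped
 * at HZ (roughly one second), and any successful transmission clears the
 * backoff again.
 */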

/*
 * Arrange for a keepalive ping a certain time after we last transmitted.  This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call)
{
        unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;

        keepalive_at += now;
        WRITE_ONCE(call->keepalive_at, keepalive_at);
        rxrpc_reduce_call_timer(call, keepalive_at, now,
                                rxrpc_timer_set_for_keepalive);
}
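
/* For illustration (assumed figure): with next_rx_timo equivalent to 30
 * seconds, the keepalive ping is scheduled 5 seconds after the last
 * transmission, i.e. at one sixth of the expected-receive timeout.
 */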

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
                                 struct rxrpc_call *call,
                                 struct rxrpc_ack_buffer *pkt,
                                 rxrpc_seq_t *_hard_ack,
                                 rxrpc_seq_t *_top,
                                 u8 reason)
{
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        int ix;
        u32 mtu, jmax;
        u8 *ackp = pkt->acks;

        /* Barrier against rxrpc_input_data(). */
        serial = call->ackr_serial;
        hard_ack = READ_ONCE(call->rx_hard_ack);
        top = smp_load_acquire(&call->rx_top);
        *_hard_ack = hard_ack;
        *_top = top;

        pkt->ack.bufferSpace    = htons(8);
        pkt->ack.maxSkew        = htons(0);
        pkt->ack.firstPacket    = htonl(hard_ack + 1);
        pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
        pkt->ack.serial         = htonl(serial);
        pkt->ack.reason         = reason;
        pkt->ack.nAcks          = top - hard_ack;

        if (reason == RXRPC_ACK_PING)
                pkt->whdr.flags |= RXRPC_REQUEST_ACK;

        if (after(top, hard_ack)) {
                seq = hard_ack + 1;
                do {
                        ix = seq & RXRPC_RXTX_BUFF_MASK;
                        if (call->rxtx_buffer[ix])
                                *ackp++ = RXRPC_ACK_TYPE_ACK;
                        else
                                *ackp++ = RXRPC_ACK_TYPE_NACK;
                        seq++;
                } while (before_eq(seq, top));
        }

        mtu = conn->params.peer->if_mtu;
        mtu -= conn->params.peer->hdrsize;
        jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
        pkt->ackinfo.rxMTU      = htonl(rxrpc_rx_mtu);
        pkt->ackinfo.maxMTU     = htonl(mtu);
        pkt->ackinfo.rwind      = htonl(call->rx_winsize);
        pkt->ackinfo.jumbo_max  = htonl(jmax);

        *ackp++ = 0;
        *ackp++ = 0;
        *ackp++ = 0;
        return top - hard_ack + 3;
}
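
/* Worked example (values chosen for illustration): with rx_hard_ack == 3 and
 * rx_top == 6, firstPacket is 4, nAcks is 3, and the loop above emits one
 * ACK or NACK byte each for sequences 4, 5 and 6 depending on whether the
 * slot is still occupied in the Rx/Tx ring.  Three zero bytes are then
 * appended and 6 is returned, which the caller uses to size its first iovec.
 */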

/*
 * Send an ACK call packet.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                          rxrpc_serial_t *_serial)
{
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_ack_buffer *pkt;
        struct msghdr msg;
        struct kvec iov[2];
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
        size_t len, n;
        int ret;
        u8 reason;

        spin_lock_bh(&call->lock);
        if (call->conn)
                conn = rxrpc_get_connection_maybe(call->conn);
        spin_unlock_bh(&call->lock);
        if (!conn)
                return -ECONNRESET;

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt) {
                rxrpc_put_connection(conn);
                return -ENOMEM;
        }

        msg.msg_name    = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;

        pkt->whdr.epoch         = htonl(conn->proto.epoch);
        pkt->whdr.cid           = htonl(call->cid);
        pkt->whdr.callNumber    = htonl(call->call_id);
        pkt->whdr.seq           = 0;
        pkt->whdr.type          = RXRPC_PACKET_TYPE_ACK;
        pkt->whdr.flags         = RXRPC_SLOW_START_OK | conn->out_clientflag;
        pkt->whdr.userStatus    = 0;
        pkt->whdr.securityIndex = call->security_ix;
        pkt->whdr._rsvd         = 0;
        pkt->whdr.serviceId     = htons(call->service_id);

        spin_lock_bh(&call->lock);
        if (ping) {
                reason = RXRPC_ACK_PING;
        } else {
                reason = call->ackr_reason;
                if (!call->ackr_reason) {
                        spin_unlock_bh(&call->lock);
                        ret = 0;
                        goto out;
                }
                call->ackr_reason = 0;
        }
        n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);

        spin_unlock_bh(&call->lock);

        iov[0].iov_base = pkt;
        iov[0].iov_len  = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
        iov[1].iov_base = &pkt->ackinfo;
        iov[1].iov_len  = sizeof(pkt->ackinfo);
        len = iov[0].iov_len + iov[1].iov_len;

        serial = atomic_inc_return(&conn->serial);
        pkt->whdr.serial = htonl(serial);
        trace_rxrpc_tx_ack(call->debug_id, serial,
                           ntohl(pkt->ack.firstPacket),
                           ntohl(pkt->ack.serial),
                           pkt->ack.reason, pkt->ack.nAcks);
        if (_serial)
                *_serial = serial;

        if (ping) {
                call->ping_serial = serial;
                smp_wmb();
                /* We need to stick a time in before we send the packet in case
                 * the reply gets back before kernel_sendmsg() completes - but
                 * asking UDP to send the packet can take a relatively long
                 * time.
                 */
                call->ping_time = ktime_get_real();
                set_bit(RXRPC_CALL_PINGING, &call->flags);
                trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
        }

        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
        conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_point_call_ack);
        else
                trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
                                      rxrpc_tx_point_call_ack);
        rxrpc_tx_backoff(call, ret);

        if (call->state < RXRPC_CALL_COMPLETE) {
                if (ret < 0) {
                        if (ping)
                                clear_bit(RXRPC_CALL_PINGING, &call->flags);
                        rxrpc_propose_ACK(call, pkt->ack.reason,
                                          ntohl(pkt->ack.serial),
                                          false, true,
                                          rxrpc_propose_ack_retry_tx);
                } else {
                        spin_lock_bh(&call->lock);
                        if (after(hard_ack, call->ackr_consumed))
                                call->ackr_consumed = hard_ack;
                        if (after(top, call->ackr_seen))
                                call->ackr_seen = top;
                        spin_unlock_bh(&call->lock);
                }

                rxrpc_set_keepalive(call);
        }

out:
        rxrpc_put_connection(conn);
        kfree(pkt);
        return ret;
}
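
/* Usage sketch (hypothetical caller; it must hold its own reference on the
 * call): transmit an RTT-probing ping and, optionally, retrieve the serial
 * number that was allocated to it:
 *
 *        rxrpc_serial_t ping_serial;
 *        int ret = rxrpc_send_ack_packet(call, true, &ping_serial);
 *
 * With ping == false, whatever ACK is currently proposed in ackr_reason is
 * sent instead, and 0 is returned without transmitting anything if no ACK
 * is pending.  _serial may be NULL if the serial number isn't needed.
 */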

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_abort_buffer pkt;
        struct msghdr msg;
        struct kvec iov[1];
        rxrpc_serial_t serial;
        int ret;

        /* Don't bother sending aborts for a client call once the server has
         * hard-ACK'd all of its request data.  After that point, we're not
         * going to stop the operation proceeding, and whilst we might limit
         * the reply, it's not worth it if we can send a new call on the same
         * channel instead, thereby closing off this call.
         */
        if (rxrpc_is_client_call(call) &&
            test_bit(RXRPC_CALL_TX_LAST, &call->flags))
                return 0;

        spin_lock_bh(&call->lock);
        if (call->conn)
                conn = rxrpc_get_connection_maybe(call->conn);
        spin_unlock_bh(&call->lock);
        if (!conn)
                return -ECONNRESET;

        msg.msg_name    = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;

        pkt.whdr.epoch          = htonl(conn->proto.epoch);
        pkt.whdr.cid            = htonl(call->cid);
        pkt.whdr.callNumber     = htonl(call->call_id);
        pkt.whdr.seq            = 0;
        pkt.whdr.type           = RXRPC_PACKET_TYPE_ABORT;
        pkt.whdr.flags          = conn->out_clientflag;
        pkt.whdr.userStatus     = 0;
        pkt.whdr.securityIndex  = call->security_ix;
        pkt.whdr._rsvd          = 0;
        pkt.whdr.serviceId      = htons(call->service_id);
        pkt.abort_code          = htonl(call->abort_code);

        iov[0].iov_base = &pkt;
        iov[0].iov_len  = sizeof(pkt);

        serial = atomic_inc_return(&conn->serial);
        pkt.whdr.serial = htonl(serial);

        ret = kernel_sendmsg(conn->params.local->socket,
                             &msg, iov, 1, sizeof(pkt));
        conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_point_call_abort);
        else
                trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
                                      rxrpc_tx_point_call_abort);
        rxrpc_tx_backoff(call, ret);

        rxrpc_put_connection(conn);
        return ret;
}

/*
 * send a packet through the transport endpoint
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                           bool retrans)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_wire_header whdr;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct msghdr msg;
        struct kvec iov[2];
        rxrpc_serial_t serial;
        size_t len;
        int ret, opt;

        _enter(",{%d}", skb->len);

        /* Each transmission of a Tx packet needs a new serial number */
        serial = atomic_inc_return(&conn->serial);

        whdr.epoch      = htonl(conn->proto.epoch);
        whdr.cid        = htonl(call->cid);
        whdr.callNumber = htonl(call->call_id);
        whdr.seq        = htonl(sp->hdr.seq);
        whdr.serial     = htonl(serial);
        whdr.type       = RXRPC_PACKET_TYPE_DATA;
        whdr.flags      = sp->hdr.flags;
        whdr.userStatus = 0;
        whdr.securityIndex = call->security_ix;
        whdr._rsvd      = htons(sp->hdr._rsvd);
        whdr.serviceId  = htons(call->service_id);

        if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
            sp->hdr.seq == 1)
                whdr.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;

        iov[0].iov_base = &whdr;
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = skb->head;
        iov[1].iov_len = skb->len;
        len = iov[0].iov_len + iov[1].iov_len;

        msg.msg_name = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        /* If our RTT cache needs working on, request an ACK.  Also request
         * ACKs if a DATA packet appears to have been lost.
         *
         * However, we mustn't request an ACK on the last reply packet of a
         * service call, lest OpenAFS incorrectly send us an ACK with some
         * soft-ACKs in it and then never follow up with a proper hard ACK.
         */
        if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
             rxrpc_to_server(sp)
             ) &&
            (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
             retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
             (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
             ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
                          ktime_get_real())))
                whdr.flags |= RXRPC_REQUEST_ACK;

        if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
                static int lose;
                if ((lose++ & 7) == 7) {
                        ret = 0;
                        trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
                                            whdr.flags, retrans, true);
                        goto done;
                }
        }

        trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
                            false);

        /* send the packet with the don't fragment bit set if we currently
         * think it's small enough */
        if (iov[1].iov_len >= call->peer->maxdata)
                goto send_fragmentable;

        down_read(&conn->params.local->defrag_sem);

        sp->hdr.serial = serial;
        smp_wmb(); /* Set serial before timestamp */
        skb->tstamp = ktime_get_real();

        /* Send the packet by UDP.
         * - Returns -EMSGSIZE if UDP would have to fragment the packet to go
         *   out of the interface, in which case we'll have processed the
         *   ICMP error message and updated the peer record.
         */
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
        conn->params.peer->last_tx_at = ktime_get_seconds();

        up_read(&conn->params.local->defrag_sem);
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_point_call_data_nofrag);
        else
                trace_rxrpc_tx_packet(call->debug_id, &whdr,
                                      rxrpc_tx_point_call_data_nofrag);
        rxrpc_tx_backoff(call, ret);
        if (ret == -EMSGSIZE)
                goto send_fragmentable;

done:
        if (ret >= 0) {
                if (whdr.flags & RXRPC_REQUEST_ACK) {
                        call->peer->rtt_last_req = skb->tstamp;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
                        if (call->peer->rtt_usage > 1) {
                                unsigned long nowj = jiffies, ack_lost_at;

                                ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
                                if (ack_lost_at < 1)
                                        ack_lost_at = 1;

                                ack_lost_at += nowj;
                                WRITE_ONCE(call->ack_lost_at, ack_lost_at);
                                rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
                                                        rxrpc_timer_set_for_lost_ack);
                        }
                }

                if (sp->hdr.seq == 1 &&
                    !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
                                      &call->flags)) {
                        unsigned long nowj = jiffies, expect_rx_by;

                        expect_rx_by = nowj + call->next_rx_timo;
                        WRITE_ONCE(call->expect_rx_by, expect_rx_by);
                        rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
                                                rxrpc_timer_set_for_normal);
                }

                rxrpc_set_keepalive(call);
        } else {
                /* Cancel the call if the initial transmission fails,
                 * particularly if that's due to network routing issues that
                 * aren't going away anytime soon.  The layer above can arrange
                 * the retransmission.
                 */
                if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
                        rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                                  RX_USER_ABORT, ret);
        }

        _leave(" = %d [%u]", ret, call->peer->maxdata);
        return ret;

send_fragmentable:
        /* attempt to send this message with fragmentation enabled */
        _debug("send fragment");

        down_write(&conn->params.local->defrag_sem);

        sp->hdr.serial = serial;
        smp_wmb(); /* Set serial before timestamp */
        skb->tstamp = ktime_get_real();

        switch (conn->params.local->srx.transport.family) {
        case AF_INET:
                opt = IP_PMTUDISC_DONT;
                ret = kernel_setsockopt(conn->params.local->socket,
                                        SOL_IP, IP_MTU_DISCOVER,
                                        (char *)&opt, sizeof(opt));
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
                        conn->params.peer->last_tx_at = ktime_get_seconds();

                        opt = IP_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket, SOL_IP,
                                          IP_MTU_DISCOVER,
                                          (char *)&opt, sizeof(opt));
                }
                break;

#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                opt = IPV6_PMTUDISC_DONT;
                ret = kernel_setsockopt(conn->params.local->socket,
                                        SOL_IPV6, IPV6_MTU_DISCOVER,
                                        (char *)&opt, sizeof(opt));
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
                        conn->params.peer->last_tx_at = ktime_get_seconds();

                        opt = IPV6_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket,
                                          SOL_IPV6, IPV6_MTU_DISCOVER,
                                          (char *)&opt, sizeof(opt));
                }
                break;
#endif

        default:
                BUG();
        }

        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_point_call_data_frag);
        else
                trace_rxrpc_tx_packet(call->debug_id, &whdr,
                                      rxrpc_tx_point_call_data_frag);
        rxrpc_tx_backoff(call, ret);

        up_write(&conn->params.local->defrag_sem);
        goto done;
}
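
/* Note on the two paths above: a DATA packet that fits within peer->maxdata
 * is sent as-is, leaving path-MTU discovery in whatever state the local
 * endpoint configured (assumed to be "discover", given that the fragmentable
 * path restores IP_PMTUDISC_DO/IPV6_PMTUDISC_DO afterwards).  An oversized
 * payload, or a send that fails with -EMSGSIZE, goes through
 * send_fragmentable, which temporarily selects IP_PMTUDISC_DONT so that the
 * stack may fragment the datagram, then switches discovery back on.
 */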

/*
 * reject packets through the local endpoint
 */
void rxrpc_reject_packets(struct rxrpc_local *local)
{
        struct sockaddr_rxrpc srx;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_wire_header whdr;
        struct sk_buff *skb;
        struct msghdr msg;
        struct kvec iov[2];
        size_t size;
        __be32 code;
        int ret, ioc;

        _enter("%d", local->debug_id);

        iov[0].iov_base = &whdr;
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = &code;
        iov[1].iov_len = sizeof(code);

        msg.msg_name = &srx.transport;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        memset(&whdr, 0, sizeof(whdr));

        while ((skb = skb_dequeue(&local->reject_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_seen);
                sp = rxrpc_skb(skb);

                switch (skb->mark) {
                case RXRPC_SKB_MARK_REJECT_BUSY:
                        whdr.type = RXRPC_PACKET_TYPE_BUSY;
                        size = sizeof(whdr);
                        ioc = 1;
                        break;
                case RXRPC_SKB_MARK_REJECT_ABORT:
                        whdr.type = RXRPC_PACKET_TYPE_ABORT;
                        code = htonl(skb->priority);
                        size = sizeof(whdr) + sizeof(code);
                        ioc = 2;
                        break;
                default:
                        rxrpc_free_skb(skb, rxrpc_skb_freed);
                        continue;
                }

                if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
                        msg.msg_namelen = srx.transport_len;

                        whdr.epoch      = htonl(sp->hdr.epoch);
                        whdr.cid        = htonl(sp->hdr.cid);
                        whdr.callNumber = htonl(sp->hdr.callNumber);
                        whdr.serviceId  = htons(sp->hdr.serviceId);
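                        /* Flip and then isolate the CLIENT_INITIATED bit so
                         * that the reply travels in the opposite direction
                         * to the packet being rejected; all other header
                         * flags are cleared.
                         */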
                        whdr.flags      = sp->hdr.flags;
                        whdr.flags      ^= RXRPC_CLIENT_INITIATED;
                        whdr.flags      &= RXRPC_CLIENT_INITIATED;

                        ret = kernel_sendmsg(local->socket, &msg,
                                             iov, ioc, size);
                        if (ret < 0)
                                trace_rxrpc_tx_fail(local->debug_id, 0, ret,
                                                    rxrpc_tx_point_reject);
                        else
                                trace_rxrpc_tx_packet(local->debug_id, &whdr,
                                                      rxrpc_tx_point_reject);
                }

                rxrpc_free_skb(skb, rxrpc_skb_freed);
        }

        _leave("");
}

/*
 * Send a VERSION reply to a peer as a keepalive.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
        struct rxrpc_wire_header whdr;
        struct msghdr msg;
        struct kvec iov[2];
        size_t len;
        int ret;

        _enter("");

        msg.msg_name    = &peer->srx.transport;
        msg.msg_namelen = peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;

        whdr.epoch      = htonl(peer->local->rxnet->epoch);
        whdr.cid        = 0;
        whdr.callNumber = 0;
        whdr.seq        = 0;
        whdr.serial     = 0;
        whdr.type       = RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
        whdr.flags      = RXRPC_LAST_PACKET;
        whdr.userStatus = 0;
        whdr.securityIndex = 0;
        whdr._rsvd      = 0;
        whdr.serviceId  = 0;

        iov[0].iov_base = &whdr;
        iov[0].iov_len  = sizeof(whdr);
        iov[1].iov_base = (char *)rxrpc_keepalive_string;
        iov[1].iov_len  = sizeof(rxrpc_keepalive_string);

        len = iov[0].iov_len + iov[1].iov_len;

        _proto("Tx VERSION (keepalive)");

        ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
        if (ret < 0)
                trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
                                    rxrpc_tx_point_version_keepalive);
        else
                trace_rxrpc_tx_packet(peer->debug_id, &whdr,
                                      rxrpc_tx_point_version_keepalive);

        peer->last_tx_at = ktime_get_seconds();
        _leave("");
}