linux/net/rxrpc/call_event.c
/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Set the timer
 */
void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
                       ktime_t now)
{
        unsigned long t_j, now_j = jiffies;
        ktime_t t;
        bool queue = false;

        if (call->state < RXRPC_CALL_COMPLETE) {
                t = call->expire_at;
                if (!ktime_after(t, now)) {
                        trace_rxrpc_timer(call, why, now, now_j);
                        queue = true;
                        goto out;
                }

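                /* If any of the deferred events (resend, soft-ACK, ping) is
                 * already due, push its deadline out to the call expiry time
                 * and raise the corresponding event for the work queue to
                 * handle; otherwise remember the earliest pending deadline.
                 */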
                if (!ktime_after(call->resend_at, now)) {
                        call->resend_at = call->expire_at;
                        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
                                queue = true;
                } else if (ktime_before(call->resend_at, t)) {
                        t = call->resend_at;
                }

                if (!ktime_after(call->ack_at, now)) {
                        call->ack_at = call->expire_at;
                        if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
                                queue = true;
                } else if (ktime_before(call->ack_at, t)) {
                        t = call->ack_at;
                }

                if (!ktime_after(call->ping_at, now)) {
                        call->ping_at = call->expire_at;
                        if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
                                queue = true;
                } else if (ktime_before(call->ping_at, t)) {
                        t = call->ping_at;
                }

                t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
                t_j += jiffies;

                /* We have to make sure that the calculated jiffies value falls
                 * at or after the nsec value, or we may loop ceaselessly
                 * because the timer times out, but we haven't reached the nsec
                 * timeout yet.
                 */
                t_j++;

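                /* Only rearm the timer if the expiry time has actually
                 * changed or the timer isn't currently pending, so we don't
                 * issue a needless mod_timer() every time through.
                 */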
                if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
                        mod_timer(&call->timer, t_j);
                        trace_rxrpc_timer(call, why, now, now_j);
                }
        }

out:
        if (queue)
                rxrpc_queue_call(call);
}

/*
 * Set the timer, taking a read lock on the call state.
 */
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
                     ktime_t now)
{
        read_lock_bh(&call->state_lock);
        __rxrpc_set_timer(call, why, now);
        read_unlock_bh(&call->state_lock);
}

/*
 * Propose a PING ACK be sent.
 */
static void rxrpc_propose_ping(struct rxrpc_call *call,
                               bool immediate, bool background)
{
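        /* An immediate, background ping is queued for the work item to send;
         * otherwise we merely bring the ping timer forward if the new
         * deadline is sooner than the one already set.
         */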
        if (immediate) {
                if (background &&
                    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
                        rxrpc_queue_call(call);
        } else {
                ktime_t now = ktime_get_real();
                ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);

                if (ktime_before(ping_at, call->ping_at)) {
                        call->ping_at = ping_at;
                        rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
                }
        }
}

/*
 * propose an ACK be sent
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                                u16 skew, u32 serial, bool immediate,
                                bool background,
                                enum rxrpc_propose_ack_trace why)
{
        enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
        unsigned int expiry = rxrpc_soft_ack_delay;
        ktime_t now, ack_at;
        s8 prior = rxrpc_ack_priority[ack_reason];

        /* Pings are handled specially because we don't want to accidentally
         * lose a ping response by subsuming it into a ping.
         */
        if (ack_reason == RXRPC_ACK_PING) {
                rxrpc_propose_ping(call, immediate, background);
                goto trace;
        }

        /* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
         * numbers, but we don't alter the timeout.
         */
        _debug("prior %u %u vs %u %u",
               ack_reason, prior,
               call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
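        /* A proposal with the same reason as the one already pending may just
         * update the recorded serial and skew; a higher-priority reason
         * replaces the pending one, and a lower-priority reason is subsumed
         * by it.
         */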
        if (ack_reason == call->ackr_reason) {
                if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
                        outcome = rxrpc_propose_ack_update;
                        call->ackr_serial = serial;
                        call->ackr_skew = skew;
                }
                if (!immediate)
                        goto trace;
        } else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
                call->ackr_reason = ack_reason;
                call->ackr_serial = serial;
                call->ackr_skew = skew;
        } else {
                outcome = rxrpc_propose_ack_subsume;
        }

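        /* Pick the delay appropriate to the ACK reason; reasons without an
         * explicit delay are sent immediately.
         */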
        switch (ack_reason) {
        case RXRPC_ACK_REQUESTED:
                if (rxrpc_requested_ack_delay < expiry)
                        expiry = rxrpc_requested_ack_delay;
                if (serial == 1)
                        immediate = false;
                break;

        case RXRPC_ACK_DELAY:
                if (rxrpc_soft_ack_delay < expiry)
                        expiry = rxrpc_soft_ack_delay;
                break;

        case RXRPC_ACK_IDLE:
                if (rxrpc_idle_ack_delay < expiry)
                        expiry = rxrpc_idle_ack_delay;
                break;

        default:
                immediate = true;
                break;
        }

        if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
                _debug("already scheduled");
        } else if (immediate || expiry == 0) {
                _debug("immediate ACK %lx", call->events);
                if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
                    background)
                        rxrpc_queue_call(call);
        } else {
                now = ktime_get_real();
                ack_at = ktime_add_ms(now, expiry);
                if (ktime_before(ack_at, call->ack_at)) {
                        call->ack_at = ack_at;
                        rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
                }
        }

trace:
        trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
                                background, outcome);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                       u16 skew, u32 serial, bool immediate, bool background,
                       enum rxrpc_propose_ack_trace why)
{
        spin_lock_bh(&call->lock);
        __rxrpc_propose_ACK(call, ack_reason, skew, serial,
                            immediate, background, why);
        spin_unlock_bh(&call->lock);
}

/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
        set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}

/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        rxrpc_seq_t cursor, seq, top;
        ktime_t max_age, oldest, ack_ts;
        int ix;
        u8 annotation, anno_type, retrans = 0, unacked = 0;

        _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

        max_age = ktime_sub_ms(now, rxrpc_resend_timeout);

        spin_lock_bh(&call->lock);

        cursor = call->tx_hard_ack;
        top = call->tx_top;
        ASSERT(before_eq(cursor, top));
        if (cursor == top)
                goto out_unlock;

        /* Scan the packet list without dropping the lock and decide which of
         * the packets in the Tx buffer we're going to resend and what the new
         * resend timeout will be.
         */
        oldest = now;
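        /* Each annotation holds the packet's transmission state (ACK'd,
         * unACK'd, NAK'd, awaiting retransmission) in the bits covered by
         * RXRPC_TX_ANNO_MASK; bits outside the mask, such as
         * RXRPC_TX_ANNO_RESENT, are flags that are preserved across state
         * changes.
         */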
        for (seq = cursor + 1; before_eq(seq, top); seq++) {
                ix = seq & RXRPC_RXTX_BUFF_MASK;
                annotation = call->rxtx_annotations[ix];
                anno_type = annotation & RXRPC_TX_ANNO_MASK;
                annotation &= ~RXRPC_TX_ANNO_MASK;
                if (anno_type == RXRPC_TX_ANNO_ACK)
                        continue;

                skb = call->rxtx_buffer[ix];
                rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
                sp = rxrpc_skb(skb);

                if (anno_type == RXRPC_TX_ANNO_UNACK) {
                        if (ktime_after(skb->tstamp, max_age)) {
                                if (ktime_before(skb->tstamp, oldest))
                                        oldest = skb->tstamp;
                                continue;
                        }
                        if (!(annotation & RXRPC_TX_ANNO_RESENT))
                                unacked++;
                }

                /* Okay, we need to retransmit a packet. */
                call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
                retrans++;
                trace_rxrpc_retransmit(call, seq, annotation | anno_type,
                                       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
        }

        call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);

        if (unacked)
                rxrpc_congestion_timeout(call);

        /* If there was nothing that needed retransmission then it's likely
         * that an ACK got lost somewhere.  Send a ping to find out instead of
         * retransmitting data.
         */
        if (!retrans) {
                rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
                spin_unlock_bh(&call->lock);
                ack_ts = ktime_sub(now, call->acks_latest_ts);
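                /* Don't ping if the last ACK arrived less than one RTT ago;
                 * its successor may still be in flight.
                 */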
                if (ktime_to_ns(ack_ts) < call->peer->rtt)
                        goto out;
                rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
                                  rxrpc_propose_ack_ping_for_lost_ack);
                rxrpc_send_ack_packet(call, true);
                goto out;
        }

        /* Now go through the Tx window and perform the retransmissions.  We
         * have to drop the lock for each send.  If an ACK comes in whilst the
         * lock is dropped, it may clear some of the retransmission markers for
         * packets that it soft-ACKs.
         */
        for (seq = cursor + 1; before_eq(seq, top); seq++) {
                ix = seq & RXRPC_RXTX_BUFF_MASK;
                annotation = call->rxtx_annotations[ix];
                anno_type = annotation & RXRPC_TX_ANNO_MASK;
                if (anno_type != RXRPC_TX_ANNO_RETRANS)
                        continue;

                skb = call->rxtx_buffer[ix];
                rxrpc_get_skb(skb, rxrpc_skb_tx_got);
                spin_unlock_bh(&call->lock);

                if (rxrpc_send_data_packet(call, skb, true) < 0) {
                        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
                        return;
                }

                if (rxrpc_is_client_call(call))
                        rxrpc_expose_client_call(call);

                rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
                spin_lock_bh(&call->lock);

                /* We need to clear the retransmit state, but there are two
                 * things we need to be aware of: A new ACK/NAK might have been
                 * received and the packet might have been hard-ACK'd (in which
                 * case it will no longer be in the buffer).
                 */
                if (after(seq, call->tx_hard_ack)) {
                        annotation = call->rxtx_annotations[ix];
                        anno_type = annotation & RXRPC_TX_ANNO_MASK;
                        if (anno_type == RXRPC_TX_ANNO_RETRANS ||
                            anno_type == RXRPC_TX_ANNO_NAK) {
                                annotation &= ~RXRPC_TX_ANNO_MASK;
                                annotation |= RXRPC_TX_ANNO_UNACK;
                        }
                        annotation |= RXRPC_TX_ANNO_RESENT;
                        call->rxtx_annotations[ix] = annotation;
                }

                if (after(call->tx_hard_ack, seq))
                        seq = call->tx_hard_ack;
        }

out_unlock:
        spin_unlock_bh(&call->lock);
out:
        _leave("");
}

/*
 * Handle retransmission and deferred ACK/abort generation.
 */
void rxrpc_process_call(struct work_struct *work)
{
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, processor);
        ktime_t now;

        rxrpc_see_call(call);

        //printk("\n--------------------\n");
        _enter("{%d,%s,%lx}",
               call->debug_id, rxrpc_call_states[call->state], call->events);

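        /* Each handler below jumps back here because dealing with one event
         * may raise further events or complete the call, so the whole set of
         * conditions has to be rechecked.
         */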
recheck_state:
        if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
                rxrpc_send_abort_packet(call);
                goto recheck_state;
        }

        if (call->state == RXRPC_CALL_COMPLETE) {
                del_timer_sync(&call->timer);
                rxrpc_notify_socket(call);
                goto out_put;
        }

        now = ktime_get_real();
        if (ktime_before(call->expire_at, now)) {
                rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                goto recheck_state;
        }

        if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
                if (call->ackr_reason) {
                        rxrpc_send_ack_packet(call, false);
                        goto recheck_state;
                }
        }

        if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
                rxrpc_send_ack_packet(call, true);
                goto recheck_state;
        }

        if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
                rxrpc_resend(call, now);
                goto recheck_state;
        }

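        /* All pending events have now been handled; make sure the timer is
         * armed for whichever deadline comes next.
         */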
        rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);

        /* other events may have been raised since we started checking */
        if (call->events && call->state < RXRPC_CALL_COMPLETE) {
                __rxrpc_queue_call(call);
                goto out;
        }

out_put:
        rxrpc_put_call(call, rxrpc_call_put);
out:
        _leave("");
}