/*
 *  net/dccp/minisocks.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"

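/*
 * Global TIME_WAIT bookkeeping for DCCP, shared with the generic
 * inet_timewait machinery: the bucket limit, the slow "hangman" timer,
 * the twkill worker and the short-interval timewait calendar below all
 * hang off this single structure.
 */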
struct inet_timewait_death_row dccp_death_row = {
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .period         = DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
        .death_lock     = __SPIN_LOCK_UNLOCKED(dccp_death_row.death_lock),
        .hashinfo       = &dccp_hashinfo,
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&dccp_death_row),
        .twkill_work    = __WORK_INITIALIZER(dccp_death_row.twkill_work,
                                             inet_twdr_twkill_work),
        /* Short-time timewait calendar */
        .twcal_hand     = -1,
        .twcal_timer    = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
                                            (unsigned long)&dccp_death_row),
};

EXPORT_SYMBOL_GPL(dccp_death_row);

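/*
 * Seed a new connection with the sysctl-controlled feature defaults
 * (Sequence Window, RX/TX CCIDs, Ack Ratio, Ack Vector, NDP count).
 * Feature negotiation may later replace these values.
 */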
void dccp_minisock_init(struct dccp_minisock *dmsk)
{
        dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window;
        dmsk->dccpms_rx_ccid         = sysctl_dccp_feat_rx_ccid;
        dmsk->dccpms_tx_ccid         = sysctl_dccp_feat_tx_ccid;
        dmsk->dccpms_ack_ratio       = sysctl_dccp_feat_ack_ratio;
        dmsk->dccpms_send_ack_vector = sysctl_dccp_feat_send_ack_vector;
        dmsk->dccpms_send_ndp_count  = sysctl_dccp_feat_send_ndp_count;
}

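/*
 * Move a closing socket into TIME_WAIT, handing the remaining work over
 * to the generic inet_timewait machinery.  If no timewait bucket can be
 * allocated (table full or out of memory), the connection is closed
 * without the TIME_WAIT grace period.
 */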
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
        struct inet_timewait_sock *tw = NULL;

        if (dccp_death_row.tw_count < dccp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);

        if (tw != NULL) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                if (tw->tw_family == PF_INET6) {
                        const struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;

                        tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
                        tw6 = inet6_twsk((struct sock *)tw);
                        ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
                        ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
                        tw->tw_ipv6only = np->ipv6only;
                }
#endif
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                tw->tw_timeout = DCCP_TIMEWAIT_LEN;
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;

                inet_twsk_schedule(tw, &dccp_death_row, timeo,
                                   DCCP_TIMEWAIT_LEN);
                inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                DCCP_WARN("time wait bucket table overflow\n");
        }

        dccp_done(sk);
}

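/*
 * Clone the listening socket for a completed handshake (RFC 4340,
 * "Step 3: Process LISTEN state").  The child inherits the parent's
 * negotiated features, allocates its own CCIDs and, if negotiated, an
 * Ack Vector, and takes its initial sequence-number state from the
 * request_sock.
 */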
struct sock *dccp_create_openreq_child(struct sock *sk,
                                       const struct request_sock *req,
                                       const struct sk_buff *skb)
{
        /*
         * Step 3: Process LISTEN state
         *
         *   (* Generate a new socket and switch to that socket *)
         *   Set S := new socket for this port pair
         */
        struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

        if (newsk != NULL) {
                const struct dccp_request_sock *dreq = dccp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct dccp_sock *newdp = dccp_sk(newsk);
                struct dccp_minisock *newdmsk = dccp_msk(newsk);

                newdp->dccps_role          = DCCP_ROLE_SERVER;
                newdp->dccps_hc_rx_ackvec  = NULL;
                newdp->dccps_service_list  = NULL;
                newdp->dccps_service       = dreq->dreq_service;
                newicsk->icsk_rto          = DCCP_TIMEOUT_INIT;

                if (dccp_feat_clone(sk, newsk))
                        goto out_free;

                if (newdmsk->dccpms_send_ack_vector) {
                        newdp->dccps_hc_rx_ackvec =
                                                dccp_ackvec_alloc(GFP_ATOMIC);
                        if (unlikely(newdp->dccps_hc_rx_ackvec == NULL))
                                goto out_free;
                }

                newdp->dccps_hc_rx_ccid =
                            ccid_hc_rx_new(newdmsk->dccpms_rx_ccid,
                                           newsk, GFP_ATOMIC);
                newdp->dccps_hc_tx_ccid =
                            ccid_hc_tx_new(newdmsk->dccpms_tx_ccid,
                                           newsk, GFP_ATOMIC);
                if (unlikely(newdp->dccps_hc_rx_ccid == NULL ||
                             newdp->dccps_hc_tx_ccid == NULL)) {
                        dccp_ackvec_free(newdp->dccps_hc_rx_ackvec);
                        ccid_hc_rx_delete(newdp->dccps_hc_rx_ccid, newsk);
                        ccid_hc_tx_delete(newdp->dccps_hc_tx_ccid, newsk);
out_free:
                        /* Still a raw copy of the parent, so invalidate the
                         * destructor and free it with a plain sk_free(). */
                        newsk->sk_destruct = NULL;
                        sk_free(newsk);
                        return NULL;
                }

                /*
                 * Step 3: Process LISTEN state
                 *
                 *    Choose S.ISS (initial seqno) or set from Init Cookies
                 *    Initialize S.GAR := S.ISS
                 *    Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
                 */

                /* See dccp_v4_conn_request */
                newdmsk->dccpms_sequence_window = req->rcv_wnd;

                newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
                dccp_update_gsr(newsk, dreq->dreq_isr);

                newdp->dccps_iss = dreq->dreq_iss;
                dccp_update_gss(newsk, dreq->dreq_iss);

                /*
                 * SWL and AWL are initially adjusted so that they are not less than
                 * the initial Sequence Numbers received and sent, respectively:
                 *      SWL := max(GSR + 1 - floor(W/4), ISR),
                 *      AWL := max(GSS - W' + 1, ISS).
                 * These adjustments MUST be applied only at the beginning of the
                 * connection.
                 */
                dccp_set_seqno(&newdp->dccps_swl,
                               max48(newdp->dccps_swl, newdp->dccps_isr));
                dccp_set_seqno(&newdp->dccps_awl,
                               max48(newdp->dccps_awl, newdp->dccps_iss));

                dccp_init_xmit_timers(newsk);

                DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);

/*
 * Process an incoming packet for RESPOND sockets represented as a
 * request_sock.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                            struct request_sock *req,
                            struct request_sock **prev)
{
        struct sock *child = NULL;

        /* Check for retransmitted REQUEST */
        if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
                struct dccp_request_sock *dreq = dccp_rsk(req);

                if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) {
                        dccp_pr_debug("Retransmitted REQUEST\n");
                        dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
                        /*
                         * Send another RESPONSE packet.
                         * To protect against Request floods, increment the
                         * retransmission counter (backoff, monitored by
                         * dccp_response_timer).
                         */
                        req->retrans++;
                        req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                }
                /* Network Duplicate, discard packet */
                return NULL;
        }

        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
            dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
                goto drop;

        /* Invalid ACK */
        if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dccp_rsk(req)->dreq_iss) {
                dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
                              "dreq_iss=%llu\n",
                              (unsigned long long)
                              DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              (unsigned long long)
                              dccp_rsk(req)->dreq_iss);
                goto drop;
        }

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                goto listen_overflow;

        /* FIXME: deal with options */

        inet_csk_reqsk_queue_unlink(sk, req, prev);
        inet_csk_reqsk_queue_removed(sk, req);
        inet_csk_reqsk_queue_add(sk, req, child);
out:
        return child;
listen_overflow:
        dccp_pr_debug("listen_overflow!\n");
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
                req->rsk_ops->send_reset(sk, skb);

        inet_csk_reqsk_queue_drop(sk, req, prev);
        goto out;
}

EXPORT_SYMBOL_GPL(dccp_check_req);

/*
 *  Queue the segment on the new socket's backlog if that socket is
 *  currently locked by its owner; otherwise process it right away and
 *  continue with the new socket.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
                       struct sk_buff *skb)
{
        int ret = 0;
        const int state = child->sk_state;

        if (!sock_owned_by_user(child)) {
                ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
                                             skb->len);

                /* Wakeup parent, send SIGIO */
                if (state == DCCP_RESPOND && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
        } else {
                /* Alas, this is possible again: we look the child up in the
                 * main socket hash table, so the lock on the listening
                 * socket no longer protects us.
                 */
                sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);

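/*
 * Unlike TCP, DCCP does not send bare Acks from the LISTEN/RESPOND
 * states, so this request_sock callback only exists to flag misuse.
 */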
void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk)
{
        DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}

EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);

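/*
 * Initialise the address-family-independent fields of a new
 * request_sock: the client's port from the REQUEST header and the
 * default Sequence Window, later copied into the child by
 * dccp_create_openreq_child().
 */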
void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb)
{
        inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
        inet_rsk(req)->acked    = 0;
        req->rcv_wnd            = sysctl_dccp_feat_sequence_window;
}

EXPORT_SYMBOL_GPL(dccp_reqsk_init);
