linux/net/dccp/ipv4.c
   1/*
   2 *  net/dccp/ipv4.c
   3 *
   4 *  An implementation of the DCCP protocol
   5 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   6 *
   7 *      This program is free software; you can redistribute it and/or
   8 *      modify it under the terms of the GNU General Public License
   9 *      as published by the Free Software Foundation; either version
  10 *      2 of the License, or (at your option) any later version.
  11 */
  12
  13#include <linux/dccp.h>
  14#include <linux/icmp.h>
  15#include <linux/module.h>
  16#include <linux/skbuff.h>
  17#include <linux/random.h>
  18
  19#include <net/icmp.h>
  20#include <net/inet_common.h>
  21#include <net/inet_hashtables.h>
  22#include <net/inet_sock.h>
  23#include <net/protocol.h>
  24#include <net/sock.h>
  25#include <net/timewait_sock.h>
  26#include <net/tcp_states.h>
  27#include <net/xfrm.h>
  28
  29#include "ackvec.h"
  30#include "ccid.h"
  31#include "dccp.h"
  32#include "feat.h"
  33
   34/*
   35 * This is the global socket used for responding to Out-of-the-blue
   36 * (OOTB) packets. A control sock is created for this socket at
   37 * initialization time.
   38 */
  39static struct socket *dccp_v4_ctl_socket;
  40
  41static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
  42{
  43        return inet_csk_get_port(&dccp_hashinfo, sk, snum,
  44                                 inet_csk_bind_conflict);
  45}
  46
  47int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
  48{
  49        struct inet_sock *inet = inet_sk(sk);
  50        struct dccp_sock *dp = dccp_sk(sk);
  51        const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
  52        struct rtable *rt;
  53        __be32 daddr, nexthop;
  54        int tmp;
  55        int err;
  56
  57        dp->dccps_role = DCCP_ROLE_CLIENT;
  58
  59        if (addr_len < sizeof(struct sockaddr_in))
  60                return -EINVAL;
  61
  62        if (usin->sin_family != AF_INET)
  63                return -EAFNOSUPPORT;
  64
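             /* If a source route is set on the socket, route via the first
              * hop (inet->opt->faddr); daddr remains the final destination.
              */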
  65        nexthop = daddr = usin->sin_addr.s_addr;
  66        if (inet->opt != NULL && inet->opt->srr) {
  67                if (daddr == 0)
  68                        return -EINVAL;
  69                nexthop = inet->opt->faddr;
  70        }
  71
  72        tmp = ip_route_connect(&rt, nexthop, inet->saddr,
  73                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
  74                               IPPROTO_DCCP,
  75                               inet->sport, usin->sin_port, sk, 1);
  76        if (tmp < 0)
  77                return tmp;
  78
  79        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
  80                ip_rt_put(rt);
  81                return -ENETUNREACH;
  82        }
  83
  84        if (inet->opt == NULL || !inet->opt->srr)
  85                daddr = rt->rt_dst;
  86
  87        if (inet->saddr == 0)
  88                inet->saddr = rt->rt_src;
  89        inet->rcv_saddr = inet->saddr;
  90
  91        inet->dport = usin->sin_port;
  92        inet->daddr = daddr;
  93
  94        inet_csk(sk)->icsk_ext_hdr_len = 0;
  95        if (inet->opt != NULL)
  96                inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
   97        /*
   98         * Socket identity is still unknown (sport may be zero).
   99         * However we set state to DCCP_REQUESTING and, without releasing
  100         * the socket lock, select a source port, enter ourselves into the
  101         * hash tables and complete initialization after this.
  102         */
 103        dccp_set_state(sk, DCCP_REQUESTING);
 104        err = inet_hash_connect(&dccp_death_row, sk);
 105        if (err != 0)
 106                goto failure;
 107
 108        err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
 109                                sk);
 110        if (err != 0)
 111                goto failure;
 112
 113        /* OK, now commit destination to socket.  */
 114        sk_setup_caps(sk, &rt->u.dst);
 115
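             /* Derive the initial sequence number from the connection 4-tuple,
              * analogous to TCP's secure ISN selection.
              */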
 116        dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr,
 117                                                    inet->sport, inet->dport);
 118        inet->id = dp->dccps_iss ^ jiffies;
 119
 120        err = dccp_connect(sk);
 121        rt = NULL;
 122        if (err != 0)
 123                goto failure;
 124out:
 125        return err;
 126failure:
 127        /*
 128         * This unhashes the socket and releases the local port, if necessary.
 129         */
 130        dccp_set_state(sk, DCCP_CLOSED);
 131        ip_rt_put(rt);
 132        sk->sk_route_caps = 0;
 133        inet->dport = 0;
 134        goto out;
 135}
 136
 137EXPORT_SYMBOL_GPL(dccp_v4_connect);
 138
 139/*
 140 * This routine does path mtu discovery as defined in RFC1191.
 141 */
 142static inline void dccp_do_pmtu_discovery(struct sock *sk,
 143                                          const struct iphdr *iph,
 144                                          u32 mtu)
 145{
 146        struct dst_entry *dst;
 147        const struct inet_sock *inet = inet_sk(sk);
 148        const struct dccp_sock *dp = dccp_sk(sk);
 149
  150        /* We are not interested in DCCP_LISTEN and request socks (RESPONSEs
  151         * sent out by Linux are always < 576 bytes, so they should go through
  152         * unfragmented).
  153         */
 154        if (sk->sk_state == DCCP_LISTEN)
 155                return;
 156
  157        /* We don't check in the dst entry if pmtu discovery is forbidden
  158         * on this route. We just assume that no packet-too-big packets
  159         * are sent back when pmtu discovery is not active.
  160         * There is a small race when the user changes this flag in the
  161         * route, but I think that's acceptable.
  162         */
 163        if ((dst = __sk_dst_check(sk, 0)) == NULL)
 164                return;
 165
 166        dst->ops->update_pmtu(dst, mtu);
 167
  168        /* Something is about to go wrong... Remember the soft error
  169         * in case this connection does not manage to recover.
  170         */
 171        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
 172                sk->sk_err_soft = EMSGSIZE;
 173
 174        mtu = dst_mtu(dst);
 175
 176        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
 177            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
 178                dccp_sync_mss(sk, mtu);
 179
 180                /*
 181                 * From RFC 4340, sec. 14.1:
 182                 *
 183                 *      DCCP-Sync packets are the best choice for upward
 184                 *      probing, since DCCP-Sync probes do not risk application
 185                 *      data loss.
 186                 */
 187                dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
 188        } /* else let the usual retransmit timer handle it */
 189}
 190
  191/*
  192 * This routine is called by the ICMP module when it gets some sort of error
  193 * condition. If err < 0 then the socket should be closed and the error
  194 * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
  195 * After adjustment, the header points to the first 8 bytes of the DCCP
  196 * header. We need to find the appropriate port.
  197 *
  198 * The locking strategy used here is very "optimistic". When someone else
  199 * accesses the socket, the ICMP is just dropped and for some paths there is
  200 * no check at all. A more general error queue to queue errors for later
  201 * handling is probably better.
  202 */
 203static void dccp_v4_err(struct sk_buff *skb, u32 info)
 204{
 205        const struct iphdr *iph = (struct iphdr *)skb->data;
 206        const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data +
 207                                                        (iph->ihl << 2));
 208        struct dccp_sock *dp;
 209        struct inet_sock *inet;
 210        const int type = icmp_hdr(skb)->type;
 211        const int code = icmp_hdr(skb)->code;
 212        struct sock *sk;
 213        __u64 seq;
 214        int err;
 215
 216        if (skb->len < (iph->ihl << 2) + 8) {
 217                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
 218                return;
 219        }
 220
 221        sk = inet_lookup(&dccp_hashinfo, iph->daddr, dh->dccph_dport,
 222                         iph->saddr, dh->dccph_sport, inet_iif(skb));
 223        if (sk == NULL) {
 224                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
 225                return;
 226        }
 227
 228        if (sk->sk_state == DCCP_TIME_WAIT) {
 229                inet_twsk_put(inet_twsk(sk));
 230                return;
 231        }
 232
 233        bh_lock_sock(sk);
 234        /* If too many ICMPs get dropped on busy
 235         * servers this needs to be solved differently.
 236         */
 237        if (sock_owned_by_user(sk))
 238                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
 239
 240        if (sk->sk_state == DCCP_CLOSED)
 241                goto out;
 242
 243        dp = dccp_sk(sk);
 244        seq = dccp_hdr_seq(dh);
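             /* Outside of REQUESTING/LISTEN, only act on ICMP errors whose quoted
              * sequence number falls within the current sequence window.
              */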
 245        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 246            !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
 247                NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
 248                goto out;
 249        }
 250
 251        switch (type) {
 252        case ICMP_SOURCE_QUENCH:
 253                /* Just silently ignore these. */
 254                goto out;
 255        case ICMP_PARAMETERPROB:
 256                err = EPROTO;
 257                break;
 258        case ICMP_DEST_UNREACH:
 259                if (code > NR_ICMP_UNREACH)
 260                        goto out;
 261
 262                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
 263                        if (!sock_owned_by_user(sk))
 264                                dccp_do_pmtu_discovery(sk, iph, info);
 265                        goto out;
 266                }
 267
 268                err = icmp_err_convert[code].errno;
 269                break;
 270        case ICMP_TIME_EXCEEDED:
 271                err = EHOSTUNREACH;
 272                break;
 273        default:
 274                goto out;
 275        }
 276
 277        switch (sk->sk_state) {
  278                struct request_sock *req, **prev;
 279        case DCCP_LISTEN:
 280                if (sock_owned_by_user(sk))
 281                        goto out;
 282                req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
 283                                          iph->daddr, iph->saddr);
 284                if (!req)
 285                        goto out;
 286
 287                /*
 288                 * ICMPs are not backlogged, hence we cannot get an established
 289                 * socket here.
 290                 */
 291                BUG_TRAP(!req->sk);
 292
 293                if (seq != dccp_rsk(req)->dreq_iss) {
 294                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
 295                        goto out;
 296                }
 297                /*
 298                 * Still in RESPOND, just remove it silently.
 299                 * There is no good way to pass the error to the newly
 300                 * created socket, and POSIX does not want network
 301                 * errors returned from accept().
 302                 */
 303                inet_csk_reqsk_queue_drop(sk, req, prev);
 304                goto out;
 305
 306        case DCCP_REQUESTING:
 307        case DCCP_RESPOND:
 308                if (!sock_owned_by_user(sk)) {
 309                        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
 310                        sk->sk_err = err;
 311
 312                        sk->sk_error_report(sk);
 313
 314                        dccp_done(sk);
 315                } else
 316                        sk->sk_err_soft = err;
 317                goto out;
 318        }
 319
  320        /* If we've already connected we will keep trying
  321         * until we time out, or the user gives up.
  322         *
  323         * RFC 1122, 4.2.3.9 allows us to consider only PROTO_UNREACH and
  324         * PORT_UNREACH as hard errors (well, FRAG_FAILED too, but it is
  325         * obsoleted by pmtu discovery).
  326         *
  327         * Note that in the modern internet, where routing is unreliable
  328         * and broken firewalls sit in every dark corner sending random
  329         * errors ordered by their masters, even these two messages finally
  330         * lose their original sense (even Linux sends invalid PORT_UNREACHs).
  331         *
  332         * Now we are in compliance with RFCs.
  333         *                                                      --ANK (980905)
  334         */
 335
 336        inet = inet_sk(sk);
 337        if (!sock_owned_by_user(sk) && inet->recverr) {
 338                sk->sk_err = err;
 339                sk->sk_error_report(sk);
 340        } else /* Only an error on timeout */
 341                sk->sk_err_soft = err;
 342out:
 343        bh_unlock_sock(sk);
 344        sock_put(sk);
 345}
 346
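     /* Fold the partial checksum with the IPv4 pseudo-header (addresses,
      * length and protocol), as is done for TCP and UDP.
      */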
 347static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
 348                                      __be32 src, __be32 dst)
 349{
 350        return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
 351}
 352
 353void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb)
 354{
 355        const struct inet_sock *inet = inet_sk(sk);
 356        struct dccp_hdr *dh = dccp_hdr(skb);
 357
 358        dccp_csum_outgoing(skb);
 359        dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr);
 360}
 361
 362EXPORT_SYMBOL_GPL(dccp_v4_send_check);
 363
 364static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
 365{
 366        return secure_dccp_sequence_number(ip_hdr(skb)->daddr,
 367                                           ip_hdr(skb)->saddr,
 368                                           dccp_hdr(skb)->dccph_dport,
 369                                           dccp_hdr(skb)->dccph_sport);
 370}
 371
 372/*
 373 * The three way handshake has completed - we got a valid ACK or DATAACK -
 374 * now create the new socket.
 375 *
 376 * This is the equivalent of TCP's tcp_v4_syn_recv_sock
 377 */
 378struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 379                                       struct request_sock *req,
 380                                       struct dst_entry *dst)
 381{
 382        struct inet_request_sock *ireq;
 383        struct inet_sock *newinet;
 384        struct sock *newsk;
 385
 386        if (sk_acceptq_is_full(sk))
 387                goto exit_overflow;
 388
 389        if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
 390                goto exit;
 391
 392        newsk = dccp_create_openreq_child(sk, req, skb);
 393        if (newsk == NULL)
 394                goto exit;
 395
 396        sk_setup_caps(newsk, dst);
 397
 398        newinet            = inet_sk(newsk);
 399        ireq               = inet_rsk(req);
 400        newinet->daddr     = ireq->rmt_addr;
 401        newinet->rcv_saddr = ireq->loc_addr;
 402        newinet->saddr     = ireq->loc_addr;
 403        newinet->opt       = ireq->opt;
 404        ireq->opt          = NULL;
 405        newinet->mc_index  = inet_iif(skb);
 406        newinet->mc_ttl    = ip_hdr(skb)->ttl;
 407        newinet->id        = jiffies;
 408
 409        dccp_sync_mss(newsk, dst_mtu(dst));
 410
 411        __inet_hash(&dccp_hashinfo, newsk, 0);
 412        __inet_inherit_port(&dccp_hashinfo, sk, newsk);
 413
 414        return newsk;
 415
 416exit_overflow:
 417        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
 418exit:
 419        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
 420        dst_release(dst);
 421        return NULL;
 422}
 423
 424EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
 425
 426static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 427{
 428        const struct dccp_hdr *dh = dccp_hdr(skb);
 429        const struct iphdr *iph = ip_hdr(skb);
 430        struct sock *nsk;
 431        struct request_sock **prev;
 432        /* Find possible connection requests. */
 433        struct request_sock *req = inet_csk_search_req(sk, &prev,
 434                                                       dh->dccph_sport,
 435                                                       iph->saddr, iph->daddr);
 436        if (req != NULL)
 437                return dccp_check_req(sk, skb, req, prev);
 438
 439        nsk = inet_lookup_established(&dccp_hashinfo,
 440                                      iph->saddr, dh->dccph_sport,
 441                                      iph->daddr, dh->dccph_dport,
 442                                      inet_iif(skb));
 443        if (nsk != NULL) {
 444                if (nsk->sk_state != DCCP_TIME_WAIT) {
 445                        bh_lock_sock(nsk);
 446                        return nsk;
 447                }
 448                inet_twsk_put(inet_twsk(nsk));
 449                return NULL;
 450        }
 451
 452        return sk;
 453}
 454
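     /* Build a route for replying to 'skb': addresses and ports are swapped
      * with respect to the received packet.
      */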
 455static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
 456                                           struct sk_buff *skb)
 457{
 458        struct rtable *rt;
 459        struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif,
 460                            .nl_u = { .ip4_u =
 461                                      { .daddr = ip_hdr(skb)->saddr,
 462                                        .saddr = ip_hdr(skb)->daddr,
 463                                        .tos = RT_CONN_FLAGS(sk) } },
 464                            .proto = sk->sk_protocol,
 465                            .uli_u = { .ports =
 466                                       { .sport = dccp_hdr(skb)->dccph_dport,
 467                                         .dport = dccp_hdr(skb)->dccph_sport }
 468                                     }
 469                          };
 470
 471        security_skb_classify_flow(skb, &fl);
 472        if (ip_route_output_flow(&rt, &fl, sk, 0)) {
 473                IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
 474                return NULL;
 475        }
 476
 477        return &rt->u.dst;
 478}
 479
 480static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
 481                                 struct dst_entry *dst)
 482{
 483        int err = -1;
 484        struct sk_buff *skb;
 485
 486        /* First, grab a route. */
 487
 488        if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
 489                goto out;
 490
 491        skb = dccp_make_response(sk, dst, req);
 492        if (skb != NULL) {
 493                const struct inet_request_sock *ireq = inet_rsk(req);
 494                struct dccp_hdr *dh = dccp_hdr(skb);
 495
 496                dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
 497                                                              ireq->rmt_addr);
 498                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 499                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 500                                            ireq->rmt_addr,
 501                                            ireq->opt);
 502                err = net_xmit_eval(err);
 503        }
 504
 505out:
 506        dst_release(dst);
 507        return err;
 508}
 509
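     /* Send a Reset in reply to rxskb; it is routed and transmitted via the
      * control socket rather than a connection socket.
      */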
 510static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 511{
 512        int err;
 513        const struct iphdr *rxiph;
 514        struct sk_buff *skb;
 515        struct dst_entry *dst;
 516
 517        /* Never send a reset in response to a reset. */
 518        if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
 519                return;
 520
 521        if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
 522                return;
 523
 524        dst = dccp_v4_route_skb(dccp_v4_ctl_socket->sk, rxskb);
 525        if (dst == NULL)
 526                return;
 527
 528        skb = dccp_ctl_make_reset(dccp_v4_ctl_socket, rxskb);
 529        if (skb == NULL)
 530                goto out;
 531
 532        rxiph = ip_hdr(rxskb);
 533        dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
 534                                                                 rxiph->daddr);
 535        skb->dst = dst_clone(dst);
 536
 537        bh_lock_sock(dccp_v4_ctl_socket->sk);
 538        err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
 539                                    rxiph->daddr, rxiph->saddr, NULL);
 540        bh_unlock_sock(dccp_v4_ctl_socket->sk);
 541
 542        if (net_xmit_eval(err) == 0) {
 543                DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
 544                DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
 545        }
 546out:
  547        dst_release(dst);
 548}
 549
 550static void dccp_v4_reqsk_destructor(struct request_sock *req)
 551{
 552        kfree(inet_rsk(req)->opt);
 553}
 554
 555static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
 556        .family         = PF_INET,
 557        .obj_size       = sizeof(struct dccp_request_sock),
 558        .rtx_syn_ack    = dccp_v4_send_response,
 559        .send_ack       = dccp_reqsk_send_ack,
 560        .destructor     = dccp_v4_reqsk_destructor,
 561        .send_reset     = dccp_v4_ctl_send_reset,
 562};
 563
 564int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 565{
 566        struct inet_request_sock *ireq;
 567        struct request_sock *req;
 568        struct dccp_request_sock *dreq;
 569        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
 570        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 571
  572        /* Never answer to DCCP_PKT_REQUESTs sent to broadcast or multicast */
 573        if (((struct rtable *)skb->dst)->rt_flags &
 574            (RTCF_BROADCAST | RTCF_MULTICAST))
 575                return 0;       /* discard, don't send a reset here */
 576
 577        if (dccp_bad_service_code(sk, service)) {
 578                dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
 579                goto drop;
 580        }
  581        /*
  582         * TW buckets are converted to open requests without
  583         * limitations: they conserve resources and the peer is
  584         * evidently a real one.
  585         */
 586        dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 587        if (inet_csk_reqsk_queue_is_full(sk))
 588                goto drop;
 589
  590        /*
  591         * Accept backlog is full. If we have already queued enough
  592         * warm entries in the syn queue, drop this request. It is better
  593         * than clogging the syn queue with openreqs with exponentially
  594         * increasing timeout.
  595         */
 596        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 597                goto drop;
 598
 599        req = reqsk_alloc(&dccp_request_sock_ops);
 600        if (req == NULL)
 601                goto drop;
 602
 603        if (dccp_parse_options(sk, skb))
 604                goto drop_and_free;
 605
 606        dccp_reqsk_init(req, skb);
 607
 608        if (security_inet_conn_request(sk, skb, req))
 609                goto drop_and_free;
 610
 611        ireq = inet_rsk(req);
 612        ireq->loc_addr = ip_hdr(skb)->daddr;
 613        ireq->rmt_addr = ip_hdr(skb)->saddr;
 614        ireq->opt       = NULL;
 615
 616        /*
 617         * Step 3: Process LISTEN state
 618         *
 619         * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
 620         *
 621         * In fact we defer setting S.GSR, S.SWL, S.SWH to
 622         * dccp_create_openreq_child.
 623         */
 624        dreq = dccp_rsk(req);
 625        dreq->dreq_isr     = dcb->dccpd_seq;
 626        dreq->dreq_iss     = dccp_v4_init_sequence(skb);
 627        dreq->dreq_service = service;
 628
 629        if (dccp_v4_send_response(sk, req, NULL))
 630                goto drop_and_free;
 631
 632        inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
 633        return 0;
 634
 635drop_and_free:
 636        reqsk_free(req);
 637drop:
 638        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
 639        return -1;
 640}
 641
 642EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
 643
 644int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 645{
 646        struct dccp_hdr *dh = dccp_hdr(skb);
 647
 648        if (sk->sk_state == DCCP_OPEN) { /* Fast path */
 649                if (dccp_rcv_established(sk, skb, dh, skb->len))
 650                        goto reset;
 651                return 0;
 652        }
 653
 654        /*
 655         *  Step 3: Process LISTEN state
 656         *       If P.type == Request or P contains a valid Init Cookie option,
 657         *            (* Must scan the packet's options to check for Init
 658         *               Cookies.  Only Init Cookies are processed here,
 659         *               however; other options are processed in Step 8.  This
 660         *               scan need only be performed if the endpoint uses Init
 661         *               Cookies *)
 662         *            (* Generate a new socket and switch to that socket *)
 663         *            Set S := new socket for this port pair
 664         *            S.state = RESPOND
 665         *            Choose S.ISS (initial seqno) or set from Init Cookies
 666         *            Initialize S.GAR := S.ISS
 667         *            Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
 668         *            Continue with S.state == RESPOND
 669         *            (* A Response packet will be generated in Step 11 *)
 670         *       Otherwise,
 671         *            Generate Reset(No Connection) unless P.type == Reset
 672         *            Drop packet and return
 673         *
 674         * NOTE: the check for the packet types is done in
 675         *       dccp_rcv_state_process
 676         */
 677        if (sk->sk_state == DCCP_LISTEN) {
 678                struct sock *nsk = dccp_v4_hnd_req(sk, skb);
 679
 680                if (nsk == NULL)
 681                        goto discard;
 682
 683                if (nsk != sk) {
 684                        if (dccp_child_process(sk, nsk, skb))
 685                                goto reset;
 686                        return 0;
 687                }
 688        }
 689
 690        if (dccp_rcv_state_process(sk, skb, dh, skb->len))
 691                goto reset;
 692        return 0;
 693
 694reset:
 695        dccp_v4_ctl_send_reset(sk, skb);
 696discard:
 697        kfree_skb(skb);
 698        return 0;
 699}
 700
 701EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
 702
 703/**
 704 *      dccp_invalid_packet  -  check for malformed packets
 705 *      Implements RFC 4340, 8.5:  Step 1: Check header basics
 706 *      Packets that fail these checks are ignored and do not receive Resets.
 707 */
 708int dccp_invalid_packet(struct sk_buff *skb)
 709{
 710        const struct dccp_hdr *dh;
 711        unsigned int cscov;
 712
 713        if (skb->pkt_type != PACKET_HOST)
 714                return 1;
 715
 716        /* If the packet is shorter than 12 bytes, drop packet and return */
 717        if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
 718                DCCP_WARN("pskb_may_pull failed\n");
 719                return 1;
 720        }
 721
 722        dh = dccp_hdr(skb);
 723
 724        /* If P.type is not understood, drop packet and return */
 725        if (dh->dccph_type >= DCCP_PKT_INVALID) {
 726                DCCP_WARN("invalid packet type\n");
 727                return 1;
 728        }
 729
 730        /*
 731         * If P.Data Offset is too small for packet type, drop packet and return
 732         */
 733        if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
 734                DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
 735                return 1;
 736        }
  737        /*
  738         * If P.Data Offset is too large for the packet, drop packet and return
  739         */
 740        if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
 741                DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
 742                return 1;
 743        }
 744
 745        /*
 746         * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
 747         * has short sequence numbers), drop packet and return
 748         */
  749        if ((dh->dccph_type < DCCP_PKT_DATA ||
  750             dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0)  {
 751                DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
 752                          dccp_packet_name(dh->dccph_type));
 753                return 1;
 754        }
 755
 756        /*
 757         * If P.CsCov is too large for the packet size, drop packet and return.
 758         * This must come _before_ checksumming (not as RFC 4340 suggests).
 759         */
 760        cscov = dccp_csum_coverage(skb);
 761        if (cscov > skb->len) {
 762                DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
 763                          dh->dccph_cscov, skb->len);
 764                return 1;
 765        }
 766
 767        /* If header checksum is incorrect, drop packet and return.
 768         * (This step is completed in the AF-dependent functions.) */
 769        skb->csum = skb_checksum(skb, 0, cscov, 0);
 770
 771        return 0;
 772}
 773
 774EXPORT_SYMBOL_GPL(dccp_invalid_packet);
 775
  776/* Entry point for incoming DCCP packets, handed to us by the IP layer */
 777static int dccp_v4_rcv(struct sk_buff *skb)
 778{
 779        const struct dccp_hdr *dh;
 780        const struct iphdr *iph;
 781        struct sock *sk;
 782        int min_cov;
 783
 784        /* Step 1: Check header basics */
 785
 786        if (dccp_invalid_packet(skb))
 787                goto discard_it;
 788
 789        iph = ip_hdr(skb);
 790        /* Step 1: If header checksum is incorrect, drop packet and return */
 791        if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
 792                DCCP_WARN("dropped packet with invalid checksum\n");
 793                goto discard_it;
 794        }
 795
 796        dh = dccp_hdr(skb);
 797
 798        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
 799        DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
 800
 801        dccp_pr_debug("%8.8s "
 802                      "src=%u.%u.%u.%u@%-5d "
 803                      "dst=%u.%u.%u.%u@%-5d seq=%llu",
 804                      dccp_packet_name(dh->dccph_type),
 805                      NIPQUAD(iph->saddr), ntohs(dh->dccph_sport),
 806                      NIPQUAD(iph->daddr), ntohs(dh->dccph_dport),
 807                      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
 808
 809        if (dccp_packet_without_ack(skb)) {
 810                DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
 811                dccp_pr_debug_cat("\n");
 812        } else {
 813                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
 814                dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long)
 815                                  DCCP_SKB_CB(skb)->dccpd_ack_seq);
 816        }
 817
 818        /* Step 2:
 819         *      Look up flow ID in table and get corresponding socket */
 820        sk = __inet_lookup(&dccp_hashinfo,
 821                           iph->saddr, dh->dccph_sport,
 822                           iph->daddr, dh->dccph_dport, inet_iif(skb));
 823        /*
 824         * Step 2:
 825         *      If no socket ...
 826         */
 827        if (sk == NULL) {
 828                dccp_pr_debug("failed to look up flow ID in table and "
 829                              "get corresponding socket\n");
 830                goto no_dccp_socket;
 831        }
 832
 833        /*
 834         * Step 2:
 835         *      ... or S.state == TIMEWAIT,
 836         *              Generate Reset(No Connection) unless P.type == Reset
 837         *              Drop packet and return
 838         */
 839        if (sk->sk_state == DCCP_TIME_WAIT) {
 840                dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
 841                inet_twsk_put(inet_twsk(sk));
 842                goto no_dccp_socket;
 843        }
 844
 845        /*
 846         * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 847         *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
 848         *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
 849         */
 850        min_cov = dccp_sk(sk)->dccps_pcrlen;
 851        if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov))  {
 852                dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
 853                              dh->dccph_cscov, min_cov);
 854                /* FIXME: "Such packets SHOULD be reported using Data Dropped
 855                 *         options (Section 11.7) with Drop Code 0, Protocol
 856                 *         Constraints."                                     */
 857                goto discard_and_relse;
 858        }
 859
 860        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 861                goto discard_and_relse;
 862        nf_reset(skb);
 863
 864        return sk_receive_skb(sk, skb, 1);
 865
 866no_dccp_socket:
 867        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 868                goto discard_it;
 869        /*
 870         * Step 2:
 871         *      If no socket ...
 872         *              Generate Reset(No Connection) unless P.type == Reset
 873         *              Drop packet and return
 874         */
 875        if (dh->dccph_type != DCCP_PKT_RESET) {
 876                DCCP_SKB_CB(skb)->dccpd_reset_code =
 877                                        DCCP_RESET_CODE_NO_CONNECTION;
 878                dccp_v4_ctl_send_reset(sk, skb);
 879        }
 880
 881discard_it:
 882        kfree_skb(skb);
 883        return 0;
 884
 885discard_and_relse:
 886        sock_put(sk);
 887        goto discard_it;
 888}
 889
 890static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
 891        .queue_xmit        = ip_queue_xmit,
 892        .send_check        = dccp_v4_send_check,
 893        .rebuild_header    = inet_sk_rebuild_header,
 894        .conn_request      = dccp_v4_conn_request,
 895        .syn_recv_sock     = dccp_v4_request_recv_sock,
 896        .net_header_len    = sizeof(struct iphdr),
 897        .setsockopt        = ip_setsockopt,
 898        .getsockopt        = ip_getsockopt,
 899        .addr2sockaddr     = inet_csk_addr2sockaddr,
 900        .sockaddr_len      = sizeof(struct sockaddr_in),
 901#ifdef CONFIG_COMPAT
 902        .compat_setsockopt = compat_ip_setsockopt,
 903        .compat_getsockopt = compat_ip_getsockopt,
 904#endif
 905};
 906
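     /* The very first DCCPv4 socket created is the control socket;
      * dccp_init_sock() uses the flag below to skip parts of the
      * initialization (e.g. feature negotiation state) for it.
      */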
 907static int dccp_v4_init_sock(struct sock *sk)
 908{
 909        static __u8 dccp_v4_ctl_sock_initialized;
 910        int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);
 911
 912        if (err == 0) {
 913                if (unlikely(!dccp_v4_ctl_sock_initialized))
 914                        dccp_v4_ctl_sock_initialized = 1;
 915                inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
 916        }
 917
 918        return err;
 919}
 920
 921static struct timewait_sock_ops dccp_timewait_sock_ops = {
 922        .twsk_obj_size  = sizeof(struct inet_timewait_sock),
 923};
 924
 925DEFINE_PROTO_INUSE(dccp_v4)
 926
 927static struct proto dccp_v4_prot = {
 928        .name                   = "DCCP",
 929        .owner                  = THIS_MODULE,
 930        .close                  = dccp_close,
 931        .connect                = dccp_v4_connect,
 932        .disconnect             = dccp_disconnect,
 933        .ioctl                  = dccp_ioctl,
 934        .init                   = dccp_v4_init_sock,
 935        .setsockopt             = dccp_setsockopt,
 936        .getsockopt             = dccp_getsockopt,
 937        .sendmsg                = dccp_sendmsg,
 938        .recvmsg                = dccp_recvmsg,
 939        .backlog_rcv            = dccp_v4_do_rcv,
 940        .hash                   = dccp_hash,
 941        .unhash                 = dccp_unhash,
 942        .accept                 = inet_csk_accept,
 943        .get_port               = dccp_v4_get_port,
 944        .shutdown               = dccp_shutdown,
 945        .destroy                = dccp_destroy_sock,
 946        .orphan_count           = &dccp_orphan_count,
 947        .max_header             = MAX_DCCP_HEADER,
 948        .obj_size               = sizeof(struct dccp_sock),
 949        .rsk_prot               = &dccp_request_sock_ops,
 950        .twsk_prot              = &dccp_timewait_sock_ops,
 951#ifdef CONFIG_COMPAT
 952        .compat_setsockopt      = compat_dccp_setsockopt,
 953        .compat_getsockopt      = compat_dccp_getsockopt,
 954#endif
 955        REF_PROTO_INUSE(dccp_v4)
 956};
 957
 958static struct net_protocol dccp_v4_protocol = {
 959        .handler        = dccp_v4_rcv,
 960        .err_handler    = dccp_v4_err,
 961        .no_policy      = 1,
 962};
 963
 964static const struct proto_ops inet_dccp_ops = {
 965        .family            = PF_INET,
 966        .owner             = THIS_MODULE,
 967        .release           = inet_release,
 968        .bind              = inet_bind,
 969        .connect           = inet_stream_connect,
 970        .socketpair        = sock_no_socketpair,
 971        .accept            = inet_accept,
 972        .getname           = inet_getname,
 973        /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
 974        .poll              = dccp_poll,
 975        .ioctl             = inet_ioctl,
 976        /* FIXME: work on inet_listen to rename it to sock_common_listen */
 977        .listen            = inet_dccp_listen,
 978        .shutdown          = inet_shutdown,
 979        .setsockopt        = sock_common_setsockopt,
 980        .getsockopt        = sock_common_getsockopt,
 981        .sendmsg           = inet_sendmsg,
 982        .recvmsg           = sock_common_recvmsg,
 983        .mmap              = sock_no_mmap,
 984        .sendpage          = sock_no_sendpage,
 985#ifdef CONFIG_COMPAT
 986        .compat_setsockopt = compat_sock_common_setsockopt,
 987        .compat_getsockopt = compat_sock_common_getsockopt,
 988#endif
 989};
 990
 991static struct inet_protosw dccp_v4_protosw = {
 992        .type           = SOCK_DCCP,
 993        .protocol       = IPPROTO_DCCP,
 994        .prot           = &dccp_v4_prot,
 995        .ops            = &inet_dccp_ops,
 996        .capability     = -1,
 997        .no_check       = 0,
 998        .flags          = INET_PROTOSW_ICSK,
 999};
1000
1001static int __init dccp_v4_init(void)
1002{
1003        int err = proto_register(&dccp_v4_prot, 1);
1004
1005        if (err != 0)
1006                goto out;
1007
1008        err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1009        if (err != 0)
1010                goto out_proto_unregister;
1011
1012        inet_register_protosw(&dccp_v4_protosw);
1013
1014        err = inet_csk_ctl_sock_create(&dccp_v4_ctl_socket, PF_INET,
1015                                       SOCK_DCCP, IPPROTO_DCCP);
1016        if (err)
1017                goto out_unregister_protosw;
1018out:
1019        return err;
1020out_unregister_protosw:
1021        inet_unregister_protosw(&dccp_v4_protosw);
1022        inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1023out_proto_unregister:
1024        proto_unregister(&dccp_v4_prot);
1025        goto out;
1026}
1027
1028static void __exit dccp_v4_exit(void)
1029{
1030        inet_unregister_protosw(&dccp_v4_protosw);
1031        inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
1032        proto_unregister(&dccp_v4_prot);
1033}
1034
1035module_init(dccp_v4_init);
1036module_exit(dccp_v4_exit);
1037
 1038/*
 1039 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP
 1040 * (33) values directly. Also cover the case where the protocol is not
 1041 * specified, i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
 1042 */
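     /*
      * For example, a userspace socket(PF_INET, SOCK_DCCP, IPPROTO_DCCP) call
      * can trigger automatic loading of this module through these aliases.
      */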
1043MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6);
1044MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6);
1045MODULE_LICENSE("GPL");
1046MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1047MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
1048