linux/net/ipv4/netfilter/nf_nat_helper.c
   1/* ip_nat_helper.c - generic support functions for NAT helpers
   2 *
   3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
   4 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/module.h>
  11#include <linux/kmod.h>
  12#include <linux/types.h>
  13#include <linux/timer.h>
  14#include <linux/skbuff.h>
  15#include <linux/tcp.h>
  16#include <linux/udp.h>
  17#include <net/checksum.h>
  18#include <net/tcp.h>
  19#include <net/route.h>
  20
  21#include <linux/netfilter_ipv4.h>
  22#include <net/netfilter/nf_conntrack.h>
  23#include <net/netfilter/nf_conntrack_helper.h>
  24#include <net/netfilter/nf_conntrack_ecache.h>
  25#include <net/netfilter/nf_conntrack_expect.h>
  26#include <net/netfilter/nf_nat.h>
  27#include <net/netfilter/nf_nat_protocol.h>
  28#include <net/netfilter/nf_nat_core.h>
  29#include <net/netfilter/nf_nat_helper.h>
  30
  31#define DUMP_OFFSET(x) \
  32        pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
   33                 x->offset_before, x->offset_after, x->correction_pos)
  34
  35static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
  36
  37/* Setup TCP sequence correction given this change at this sequence */
  38static inline void
  39adjust_tcp_sequence(u32 seq,
  40                    int sizediff,
  41                    struct nf_conn *ct,
  42                    enum ip_conntrack_info ctinfo)
  43{
  44        int dir;
  45        struct nf_nat_seq *this_way, *other_way;
  46        struct nf_conn_nat *nat = nfct_nat(ct);
  47
   48        pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n", seq, sizediff);
  49
  50        dir = CTINFO2DIR(ctinfo);
  51
  52        this_way = &nat->seq[dir];
  53        other_way = &nat->seq[!dir];
  54
   55        pr_debug("adjust_tcp_sequence: Seq_offset before: ");
  56        DUMP_OFFSET(this_way);
  57
  58        spin_lock_bh(&nf_nat_seqofs_lock);
  59
  60        /* SYN adjust. If it's uninitialized, or this is after last
  61         * correction, record it: we don't handle more than one
  62         * adjustment in the window, but do deal with common case of a
  63         * retransmit */
  64        if (this_way->offset_before == this_way->offset_after ||
  65            before(this_way->correction_pos, seq)) {
   66                this_way->correction_pos = seq;
   67                this_way->offset_before = this_way->offset_after;
   68                this_way->offset_after += sizediff;
  69        }
  70        spin_unlock_bh(&nf_nat_seqofs_lock);
  71
   72        pr_debug("adjust_tcp_sequence: Seq_offset after: ");
  73        DUMP_OFFSET(this_way);
  74}
  75
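/* Editorial worked example (not part of the original file): for a fresh
 * connection both offsets start at 0, so if a helper grows a segment whose
 * sequence number is 1000 by 4 bytes, adjust_tcp_sequence(1000, 4, ...)
 * records correction_pos = 1000, offset_before = 0, offset_after = 4 for
 * that direction.  A retransmit of the same segment changes nothing, since
 * neither condition above holds any more.  Later segments with seq after
 * 1000 are then shifted by offset_after, earlier ones by offset_before;
 * see nf_nat_seq_adjust() below.
 */
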
  76/* Get the offset value, for conntrack */
  77s16 nf_nat_get_offset(const struct nf_conn *ct,
  78                      enum ip_conntrack_dir dir,
  79                      u32 seq)
  80{
  81        struct nf_conn_nat *nat = nfct_nat(ct);
  82        struct nf_nat_seq *this_way;
  83        s16 offset;
  84
  85        if (!nat)
  86                return 0;
  87
  88        this_way = &nat->seq[dir];
  89        spin_lock_bh(&nf_nat_seqofs_lock);
  90        offset = after(seq, this_way->correction_pos)
  91                 ? this_way->offset_after : this_way->offset_before;
  92        spin_unlock_bh(&nf_nat_seqofs_lock);
  93
  94        return offset;
  95}
  96EXPORT_SYMBOL_GPL(nf_nat_get_offset);
  97
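/* Editorial sketch (not part of this file): in this kernel series the
 * conntrack core keeps an RCU-protected function pointer, nf_ct_nat_offset,
 * which nf_nat_core points at nf_nat_get_offset().  TCP window tracking
 * then queries it roughly as below.  This is a paraphrase of the consumer
 * side; names and availability depend on the kernel version.
 */
static inline s16 sketch_nat_offset(const struct nf_conn *ct,
                                    enum ip_conntrack_dir dir,
                                    u32 seq)
{
        typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);

        /* Without a NAT module loaded there is no offset to compensate for. */
        return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
}
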
  98/* Frobs data inside this packet, which is linear. */
  99static void mangle_contents(struct sk_buff *skb,
 100                            unsigned int dataoff,
 101                            unsigned int match_offset,
 102                            unsigned int match_len,
 103                            const char *rep_buffer,
 104                            unsigned int rep_len)
 105{
 106        unsigned char *data;
 107
 108        BUG_ON(skb_is_nonlinear(skb));
 109        data = skb_network_header(skb) + dataoff;
 110
 111        /* move post-replacement */
 112        memmove(data + match_offset + rep_len,
 113                data + match_offset + match_len,
 114                skb->tail - (skb->network_header + dataoff +
 115                             match_offset + match_len));
 116
 117        /* insert data from buffer */
 118        memcpy(data + match_offset, rep_buffer, rep_len);
 119
 120        /* update skb info */
 121        if (rep_len > match_len) {
 122                pr_debug("nf_nat_mangle_packet: Extending packet by "
 123                         "%u from %u bytes\n", rep_len - match_len, skb->len);
 124                skb_put(skb, rep_len - match_len);
 125        } else {
  126                pr_debug("nf_nat_mangle_packet: Shrinking packet by "
  127                         "%u from %u bytes\n", match_len - rep_len, skb->len);
 128                __skb_trim(skb, skb->len + rep_len - match_len);
 129        }
 130
 131        /* fix IP hdr checksum information */
 132        ip_hdr(skb)->tot_len = htons(skb->len);
 133        ip_send_check(ip_hdr(skb));
 134}
 135
 136/* Unusual, but possible case. */
 137static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
 138{
 139        if (skb->len + extra > 65535)
 140                return 0;
 141
 142        if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
 143                return 0;
 144
 145        return 1;
 146}
 147
 148/* Generic function for mangling variable-length address changes inside
 149 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 150 * command in FTP).
 151 *
  152 * Takes care of all the nasty sequence number changes, checksumming,
  153 * skb enlargement, ...
  154 *
  155 */
 156int
 157nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 158                         struct nf_conn *ct,
 159                         enum ip_conntrack_info ctinfo,
 160                         unsigned int match_offset,
 161                         unsigned int match_len,
 162                         const char *rep_buffer,
 163                         unsigned int rep_len)
 164{
 165        struct rtable *rt = skb_rtable(skb);
 166        struct iphdr *iph;
 167        struct tcphdr *tcph;
 168        int oldlen, datalen;
 169
 170        if (!skb_make_writable(skb, skb->len))
 171                return 0;
 172
 173        if (rep_len > match_len &&
 174            rep_len - match_len > skb_tailroom(skb) &&
 175            !enlarge_skb(skb, rep_len - match_len))
 176                return 0;
 177
 178        SKB_LINEAR_ASSERT(skb);
 179
 180        iph = ip_hdr(skb);
 181        tcph = (void *)iph + iph->ihl*4;
 182
 183        oldlen = skb->len - iph->ihl*4;
 184        mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
 185                        match_offset, match_len, rep_buffer, rep_len);
 186
 187        datalen = skb->len - iph->ihl*4;
 188        if (skb->ip_summed != CHECKSUM_PARTIAL) {
 189                if (!(rt->rt_flags & RTCF_LOCAL) &&
 190                    skb->dev->features & NETIF_F_V4_CSUM) {
 191                        skb->ip_summed = CHECKSUM_PARTIAL;
 192                        skb->csum_start = skb_headroom(skb) +
 193                                          skb_network_offset(skb) +
 194                                          iph->ihl * 4;
 195                        skb->csum_offset = offsetof(struct tcphdr, check);
 196                        tcph->check = ~tcp_v4_check(datalen,
 197                                                    iph->saddr, iph->daddr, 0);
 198                } else {
 199                        tcph->check = 0;
 200                        tcph->check = tcp_v4_check(datalen,
 201                                                   iph->saddr, iph->daddr,
 202                                                   csum_partial(tcph,
 203                                                                datalen, 0));
 204                }
 205        } else
 206                inet_proto_csum_replace2(&tcph->check, skb,
 207                                         htons(oldlen), htons(datalen), 1);
 208
 209        if (rep_len != match_len) {
 210                set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
 211                adjust_tcp_sequence(ntohl(tcph->seq),
 212                                    (int)rep_len - (int)match_len,
 213                                    ct, ctinfo);
 214                nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
 215        }
 216        return 1;
 217}
 218EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
 219
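/* Editorial sketch (not part of this file): roughly how a NAT helper such
 * as the FTP one rewrites an address/port string in the payload.  The
 * conntrack helper locates the original string (matchoff/matchlen), the NAT
 * side builds the translated replacement and lets nf_nat_mangle_tcp_packet()
 * handle resizing, checksums and sequence-number bookkeeping.  Function and
 * buffer names here are illustrative only.
 */
static int sketch_mangle_port_cmd(struct sk_buff *skb, struct nf_conn *ct,
                                  enum ip_conntrack_info ctinfo,
                                  unsigned int matchoff, unsigned int matchlen,
                                  __be32 newip, u_int16_t port)
{
        char buffer[sizeof("255,255,255,255,255,255")];
        const unsigned char *p = (const unsigned char *)&newip;

        /* FTP PORT-style argument: a,b,c,d,p_hi,p_lo */
        sprintf(buffer, "%u,%u,%u,%u,%u,%u",
                p[0], p[1], p[2], p[3], port >> 8, port & 0xff);

        return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, matchlen,
                                        buffer, strlen(buffer));
}
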
 220/* Generic function for mangling variable-length address changes inside
 221 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 222 * command in the Amanda protocol)
 223 *
  224 * Takes care of all the nasty checksum changes and skb enlargement
  225 * (UDP has no sequence numbers to adjust).
 226 *
 227 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 228 *       should be fairly easy to do.
 229 */
 230int
 231nf_nat_mangle_udp_packet(struct sk_buff *skb,
 232                         struct nf_conn *ct,
 233                         enum ip_conntrack_info ctinfo,
 234                         unsigned int match_offset,
 235                         unsigned int match_len,
 236                         const char *rep_buffer,
 237                         unsigned int rep_len)
 238{
 239        struct rtable *rt = skb_rtable(skb);
 240        struct iphdr *iph;
 241        struct udphdr *udph;
 242        int datalen, oldlen;
 243
 244        /* UDP helpers might accidentally mangle the wrong packet */
 245        iph = ip_hdr(skb);
 246        if (skb->len < iph->ihl*4 + sizeof(*udph) +
 247                               match_offset + match_len)
 248                return 0;
 249
 250        if (!skb_make_writable(skb, skb->len))
 251                return 0;
 252
 253        if (rep_len > match_len &&
 254            rep_len - match_len > skb_tailroom(skb) &&
 255            !enlarge_skb(skb, rep_len - match_len))
 256                return 0;
 257
 258        iph = ip_hdr(skb);
 259        udph = (void *)iph + iph->ihl*4;
 260
 261        oldlen = skb->len - iph->ihl*4;
 262        mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
 263                        match_offset, match_len, rep_buffer, rep_len);
 264
 265        /* update the length of the UDP packet */
 266        datalen = skb->len - iph->ihl*4;
 267        udph->len = htons(datalen);
 268
 269        /* fix udp checksum if udp checksum was previously calculated */
 270        if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
 271                return 1;
 272
 273        if (skb->ip_summed != CHECKSUM_PARTIAL) {
 274                if (!(rt->rt_flags & RTCF_LOCAL) &&
 275                    skb->dev->features & NETIF_F_V4_CSUM) {
 276                        skb->ip_summed = CHECKSUM_PARTIAL;
 277                        skb->csum_start = skb_headroom(skb) +
 278                                          skb_network_offset(skb) +
 279                                          iph->ihl * 4;
 280                        skb->csum_offset = offsetof(struct udphdr, check);
 281                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 282                                                         datalen, IPPROTO_UDP,
 283                                                         0);
 284                } else {
 285                        udph->check = 0;
 286                        udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
 287                                                        datalen, IPPROTO_UDP,
 288                                                        csum_partial(udph,
 289                                                                     datalen, 0));
 290                        if (!udph->check)
 291                                udph->check = CSUM_MANGLED_0;
 292                }
 293        } else
 294                inet_proto_csum_replace2(&udph->check, skb,
 295                                         htons(oldlen), htons(datalen), 1);
 296
 297        return 1;
 298}
 299EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
 300
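/* Editorial sketch (not part of this file): the UDP variant is used the
 * same way, e.g. by the Amanda helper to rewrite a decimal port number in
 * the command it tracked; note that the packet being mangled belongs to the
 * expectation's master connection.  Names are illustrative only.
 */
static int sketch_mangle_amanda_port(struct sk_buff *skb,
                                     struct nf_conntrack_expect *exp,
                                     enum ip_conntrack_info ctinfo,
                                     unsigned int matchoff,
                                     unsigned int matchlen,
                                     u_int16_t port)
{
        char buffer[sizeof("65535")];

        /* Replace the old decimal port with the NATed one. */
        sprintf(buffer, "%u", port);
        return nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
                                        matchoff, matchlen,
                                        buffer, strlen(buffer));
}
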
 301/* Adjust one found SACK option including checksum correction */
 302static void
 303sack_adjust(struct sk_buff *skb,
 304            struct tcphdr *tcph,
 305            unsigned int sackoff,
 306            unsigned int sackend,
 307            struct nf_nat_seq *natseq)
 308{
 309        while (sackoff < sackend) {
 310                struct tcp_sack_block_wire *sack;
 311                __be32 new_start_seq, new_end_seq;
 312
 313                sack = (void *)skb->data + sackoff;
 314                if (after(ntohl(sack->start_seq) - natseq->offset_before,
 315                          natseq->correction_pos))
 316                        new_start_seq = htonl(ntohl(sack->start_seq)
 317                                        - natseq->offset_after);
 318                else
 319                        new_start_seq = htonl(ntohl(sack->start_seq)
 320                                        - natseq->offset_before);
 321
 322                if (after(ntohl(sack->end_seq) - natseq->offset_before,
 323                          natseq->correction_pos))
 324                        new_end_seq = htonl(ntohl(sack->end_seq)
 325                                      - natseq->offset_after);
 326                else
 327                        new_end_seq = htonl(ntohl(sack->end_seq)
 328                                      - natseq->offset_before);
 329
  330                pr_debug("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",
  331                         ntohl(sack->start_seq), ntohl(new_start_seq),
  332                         ntohl(sack->end_seq), ntohl(new_end_seq));
 333
 334                inet_proto_csum_replace4(&tcph->check, skb,
 335                                         sack->start_seq, new_start_seq, 0);
 336                inet_proto_csum_replace4(&tcph->check, skb,
 337                                         sack->end_seq, new_end_seq, 0);
 338                sack->start_seq = new_start_seq;
 339                sack->end_seq = new_end_seq;
 340                sackoff += sizeof(*sack);
 341        }
 342}
 343
 344/* TCP SACK sequence number adjustment */
 345static inline unsigned int
 346nf_nat_sack_adjust(struct sk_buff *skb,
 347                   struct tcphdr *tcph,
 348                   struct nf_conn *ct,
 349                   enum ip_conntrack_info ctinfo)
 350{
 351        unsigned int dir, optoff, optend;
 352        struct nf_conn_nat *nat = nfct_nat(ct);
 353
 354        optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
 355        optend = ip_hdrlen(skb) + tcph->doff * 4;
 356
 357        if (!skb_make_writable(skb, optend))
 358                return 0;
 359
 360        dir = CTINFO2DIR(ctinfo);
 361
 362        while (optoff < optend) {
 363                /* Usually: option, length. */
 364                unsigned char *op = skb->data + optoff;
 365
 366                switch (op[0]) {
 367                case TCPOPT_EOL:
 368                        return 1;
 369                case TCPOPT_NOP:
 370                        optoff++;
 371                        continue;
 372                default:
 373                        /* no partial options */
 374                        if (optoff + 1 == optend ||
 375                            optoff + op[1] > optend ||
 376                            op[1] < 2)
 377                                return 0;
 378                        if (op[0] == TCPOPT_SACK &&
 379                            op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
 380                            ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
 381                                sack_adjust(skb, tcph, optoff+2,
 382                                            optoff+op[1], &nat->seq[!dir]);
 383                        optoff += op[1];
 384                }
 385        }
 386        return 1;
 387}
 388
 389/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
 390int
 391nf_nat_seq_adjust(struct sk_buff *skb,
 392                  struct nf_conn *ct,
 393                  enum ip_conntrack_info ctinfo)
 394{
 395        struct tcphdr *tcph;
 396        int dir;
 397        __be32 newseq, newack;
 398        s16 seqoff, ackoff;
 399        struct nf_conn_nat *nat = nfct_nat(ct);
 400        struct nf_nat_seq *this_way, *other_way;
 401
 402        dir = CTINFO2DIR(ctinfo);
 403
 404        this_way = &nat->seq[dir];
 405        other_way = &nat->seq[!dir];
 406
 407        if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
 408                return 0;
 409
 410        tcph = (void *)skb->data + ip_hdrlen(skb);
 411        if (after(ntohl(tcph->seq), this_way->correction_pos))
 412                seqoff = this_way->offset_after;
 413        else
 414                seqoff = this_way->offset_before;
 415
 416        if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
 417                  other_way->correction_pos))
 418                ackoff = other_way->offset_after;
 419        else
 420                ackoff = other_way->offset_before;
 421
 422        newseq = htonl(ntohl(tcph->seq) + seqoff);
 423        newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 424
 425        inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
 426        inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
 427
 428        pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
 429                 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
 430                 ntohl(newack));
 431
 432        tcph->seq = newseq;
 433        tcph->ack_seq = newack;
 434
 435        return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
 436}
 437
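/* Editorial sketch (not part of this file): roughly how the standalone NAT
 * hooks drive this function.  Once a helper has mangled a payload,
 * IPS_SEQ_ADJUST_BIT stays set on the conntrack and every subsequent TCP
 * packet of that connection is run through nf_nat_seq_adjust().  This is a
 * paraphrase; details differ between kernel versions.
 */
static unsigned int sketch_nat_adjust_hook(struct sk_buff *skb)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

        if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
                /* Drop the packet if we cannot fix its sequence numbers. */
                if (!nf_nat_seq_adjust(skb, ct, ctinfo))
                        return NF_DROP;
        }
        return NF_ACCEPT;
}
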
 438/* Setup NAT on this expected conntrack so it follows master. */
 439/* If we fail to get a free NAT slot, we'll get dropped on confirm */
 440void nf_nat_follow_master(struct nf_conn *ct,
 441                          struct nf_conntrack_expect *exp)
 442{
 443        struct nf_nat_range range;
 444
 445        /* This must be a fresh one. */
 446        BUG_ON(ct->status & IPS_NAT_DONE_MASK);
 447
 448        /* Change src to where master sends to */
 449        range.flags = IP_NAT_RANGE_MAP_IPS;
 450        range.min_ip = range.max_ip
 451                = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
 452        nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
 453
 454        /* For DST manip, map port here to where it's expected. */
 455        range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
 456        range.min = range.max = exp->saved_proto;
 457        range.min_ip = range.max_ip
 458                = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
 459        nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
 460}
 461EXPORT_SYMBOL(nf_nat_follow_master);
 462
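/* Editorial sketch (not part of this file): the pattern helpers such as the
 * FTP NAT helper use when registering the expectation for the secondary
 * (data) connection.  nf_nat_follow_master() is installed as the
 * expectation's expectfn so the child connection inherits the master's NAT
 * mapping once it shows up.  Condensed and illustrative only.
 */
static int sketch_register_expect(struct nf_conntrack_expect *exp, int dir)
{
        u_int16_t port;

        exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
        exp->dir = !dir;
        exp->expectfn = nf_nat_follow_master;

        /* Try the original port first, then search for a free one. */
        for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
                exp->tuple.dst.u.tcp.port = htons(port);
                if (nf_ct_expect_related(exp) == 0)
                        return port;
        }
        return 0;
}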