linux/net/netfilter/nf_nat_core.c
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

struct nf_nat_conn_key {
	const struct net *net;
	const struct nf_conntrack_tuple *tuple;
	const struct nf_conntrack_zone *zone;
};

static struct rhltable nf_nat_bysource_table;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);
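
/* Usage sketch (added commentary, not part of the original file): both
 * lookup helpers return RCU-protected pointers, so a caller is expected
 * to hold rcu_read_lock() across the lookup and any use of the ops:
 *
 *	rcu_read_lock();
 *	l3proto = __nf_nat_l3proto_find(NFPROTO_IPV4);
 *	l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, IPPROTO_TCP);
 *	if (l3proto && l4proto)
 *		... use the ops ...
 *	rcu_read_unlock();
 *
 * get_unique_tuple() below follows exactly this pattern.
 */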

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		return;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
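
/* Context note (added commentary, not in the original): nf_xfrm_me_harder()
 * exists because NAT rewrites the addresses that the earlier IPsec policy
 * lookup was keyed on; after the mangle, the flow is re-decoded and the
 * xfrm route re-resolved so the packet picks up (or drops) a
 * transformation matching its new addresses.
 */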
#endif /* CONFIG_XFRM */

static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
{
	const struct nf_conntrack_tuple *t;
	const struct nf_conn *ct = data;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	/* Original src, to ensure we map it consistently if possible. */

	seed ^= net_hash_mix(nf_ct_net(ct));
	return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
		      t->dst.protonum ^ seed);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
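
/* Worked example (added commentary, not in the original): for a candidate
 * outgoing tuple 10.0.0.2:1024 -> 198.51.100.1:53/udp, the inverted reply
 * is 198.51.100.1:53 -> 10.0.0.2:1024/udp. If conntrack already holds that
 * reply tuple for a different connection, the candidate mapping would
 * collide, so nf_nat_used_tuple() reports it as taken.
 */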

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
			       const void *obj)
{
	const struct nf_nat_conn_key *key = arg->key;
	const struct nf_conn *ct = obj;

	if (!same_src(ct, key->tuple) ||
	    !net_eq(nf_ct_net(ct), key->net) ||
	    !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
		return 1;

	return 0;
}

static struct rhashtable_params nf_nat_bysource_params = {
	.head_offset = offsetof(struct nf_conn, nat_bysource),
	.obj_hashfn = nf_nat_bysource_hash,
	.obj_cmpfn = nf_nat_bysource_cmp,
	.nelem_hint = 256,
	.min_size = 1024,
};
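
/* Design note (added commentary, not in the original): an rhltable rather
 * than a plain rhashtable is used because many conntracks can legitimately
 * share one source key (same source address/port and protocol, towards
 * different destinations), and all of them must remain reachable under
 * that key for find_appropriate_src() below.
 */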

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	const struct nf_conn *ct;
	struct nf_nat_conn_key key = {
		.net = net,
		.tuple = tuple,
		.zone = zone
	};
	struct rhlist_head *hl, *h;

	hl = rhltable_lookup(&nf_nat_bysource_table, &key,
			     nf_nat_bysource_params);

	rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
		nf_ct_invert_tuplepr(result,
				     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
		result->dst = tuple->dst;

		if (in_range(l3proto, l4proto, result, range))
			return 1;
	}

	return 0;
}
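
/* Worked example (added commentary, not in the original): if host
 * 10.0.0.2 already has a connection SNAT'ed to 203.0.113.7, a new
 * connection from the same 10.0.0.2 source finds that conntrack here and
 * reuses 203.0.113.7, provided the reused mapping still falls inside the
 * range the current rule specifies.
 */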

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
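
/* Worked example (added commentary, not in the original): for an IPv4
 * range 203.0.113.1-203.0.113.4, dist is 4, and reciprocal_scale(j, 4)
 * maps the 32-bit hash j into an offset in [0, 3], roughly
 * offset = ((u64)j * 4) >> 32. Because j is derived from the source
 * address, the same client deterministically lands on the same one of
 * the four addresses.
 */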

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips are not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
		     maniptype == NF_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		struct nf_nat_conn_key key = {
			.net = nf_ct_net(ct),
			.tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			.zone = nf_ct_zone(ct),
		};
		int err;

		err = rhltable_insert_key(&nf_nat_bysource_table,
					  &key,
					  &ct->nat_bysource,
					  nf_nat_bysource_params);
		if (err)
			return NF_DROP;
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
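
/* Usage sketch (added commentary, not part of the original file): a
 * hook-side caller such as an SNAT target builds an nf_nat_range and
 * hands it to nf_nat_setup_info() on the first packet of a connection.
 * The function name and address below are made-up examples; error
 * handling is elided.
 */
#if 0
static unsigned int example_snat_target(struct nf_conn *ct)
{
	struct nf_nat_range range = {
		.flags    = NF_NAT_RANGE_MAP_IPS,
		.min_addr = { .ip = htonl(0xcb00710a) },	/* 203.0.113.10 */
		.max_addr = { .ip = htonl(0xcb00710a) },
	};

	/* Called from NF_INET_POST_ROUTING, hence NF_NAT_MANIP_SRC. */
	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}
#endif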

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
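
/* Context note (added commentary, not in the original): a "null binding"
 * maps the connection onto its own address, so no address actually
 * changes; it still reserves a unique tuple (possibly rewriting the
 * per-protocol part) so that later NAT'ed connections cannot clash
 * with this one.
 */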

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
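
/* Worked example (added commentary, not in the original): for an SNAT'ed
 * connection only IPS_SRC_NAT is set. An original-direction packet at
 * POST_ROUTING computes statusbit = IPS_SRC_NAT, matches, and has its
 * source rewritten. A reply-direction packet at PRE_ROUTING starts with
 * IPS_DST_NAT, which the XOR with IPS_NAT_MASK flips to IPS_SRC_NAT, so
 * it also matches and the reply's destination is rewritten back to the
 * internal address.
 */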

struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	if ((ct->status & IPS_SRC_NAT_DONE) == 0)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
			nf_nat_bysource_params);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
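
/* Usage sketch (added commentary, not part of the original file): a NAT
 * protocol module registers its ops for one l3 family at init time and
 * mirrors that with unregister on exit. example_l4proto_ops is a
 * hypothetical struct nf_nat_l4proto instance standing in for a real
 * one such as the GRE ops in nf_nat_proto_gre.c.
 */
#if 0
static int __init example_proto_init(void)
{
	return nf_nat_l4proto_register(NFPROTO_IPV4, &example_l4proto_ops);
}

static void __exit example_proto_exit(void)
{
	nf_nat_l4proto_unregister(NFPROTO_IPV4, &example_l4proto_ops);
}
#endif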

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
#ifdef CONFIG_NF_NAT_PROTO_DCCP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
			 &nf_nat_l4proto_dccp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_SCTP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
			 &nf_nat_l4proto_sctp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
			 &nf_nat_l4proto_udplite);
#endif
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
				nf_nat_bysource_params);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
			       protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
	 * attach the null binding, otherwise this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
	int ret;

	ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
	if (ret)
		return ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		rhltable_destroy(&nf_nat_bysource_table);
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};
	unsigned int i;

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	synchronize_rcu();

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);

	rhltable_destroy(&nf_nat_bysource_table);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);