linux/net/netfilter/nf_conntrack_core.c
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION    "0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      enum nf_nat_manip_type manip,
                                      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

static struct kmem_cache *nf_conntrack_cachep __read_mostly;

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
{
        unsigned int n;
        u_int32_t h;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        h = jhash2((u32 *)tuple, n,
                   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
                          tuple->dst.protonum));

        return ((u64)h * size) >> 32;
}
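
/*
 * Note on the return above: instead of "h % size", the 32-bit hash is
 * scaled onto [0, size) with a multiply and a shift, avoiding a division.
 * Worked example (illustrative numbers, not from this file): for
 * h = 0x80000000 and size = 16384, ((u64)h * size) >> 32 == 8192, i.e.
 * the middle of the table.
 */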

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, nf_conntrack_htable_size,
                                nf_conntrack_hash_rnd);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
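
/*
 * Worked example of tuple inversion (illustrative addresses, not from
 * this file): the original direction of an HTTP connection
 *
 *      src = 192.0.2.1:1025,  dst = 198.51.100.2:80,  proto = TCP
 *
 * inverts to the reply direction
 *
 *      src = 198.51.100.2:80, dst = 192.0.2.1:1025,   proto = TCP
 *
 * with dst.dir flipped, so both directions of one flow can be found in
 * the same hash table.
 */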

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        spin_lock_bh(&nf_conntrack_lock);
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before the connection is in the list, so we need to clean here,
         * too. */
        nf_ct_remove_expectations(ct);

        /* We overload the first tuple to link into the unconfirmed list. */
        if (!nf_ct_is_confirmed(ct)) {
                BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
                hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        }

        NF_CT_STAT_INC(net, delete);
        spin_unlock_bh(&nf_conntrack_lock);

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        nf_ct_helper_destroy(ct);
        spin_lock_bh(&nf_conntrack_lock);
        /* Inside lock so preempt is disabled on module removal path.
         * Otherwise we can get spurious warnings. */
        NF_CT_STAT_INC(net, delete_list);
        clean_from_lists(ct);
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);

static void death_by_event(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;
        struct net *net = nf_ct_net(ct);

        if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
                /* bad luck, let's retry */
                ct->timeout.expires = jiffies +
                        (random32() % net->ct.sysctl_events_retry_timeout);
                add_timer(&ct->timeout);
                return;
        }
        /* we've got the event delivered, now it's dying */
        set_bit(IPS_DYING_BIT, &ct->status);
        spin_lock(&nf_conntrack_lock);
        hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&nf_conntrack_lock);
        nf_ct_put(ct);
}

void nf_ct_insert_dying_list(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        /* add this conntrack to the dying list */
        spin_lock_bh(&nf_conntrack_lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &net->ct.dying);
        spin_unlock_bh(&nf_conntrack_lock);
        /* set a new timer to retry event delivery */
        setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
        ct->timeout.expires = jiffies +
                (random32() % net->ct.sysctl_events_retry_timeout);
        add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);

static void death_by_timeout(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;

        if (!test_bit(IPS_DYING_BIT, &ct->status) &&
            unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
                nf_ct_insert_dying_list(ct);
                return;
        }
        set_bit(IPS_DYING_BIT, &ct->status);
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
}

/*
 * Warning:
 * - Caller must take a reference on the returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 * OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
begin:
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(net, found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(net, searched);
        }
        /*
         * If the nulls value we got at the end of this lookup is
         * not the expected one, we must restart the lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != hash)
                goto begin;
        local_bh_enable();

        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = __nf_conntrack_find(net, tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
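
/*
 * nf_conntrack_find_get() above is the canonical user of the warning on
 * __nf_conntrack_find(): with a SLAB_DESTROY_BY_RCU cache, an entry found
 * under rcu_read_lock() may be freed and recycled for another connection
 * at any moment.  Hence the atomic_inc_not_zero() (refuse an object whose
 * refcount already hit zero) followed by the tuple re-check, restarting
 * the lookup if the object was reused in between.
 */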

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash)
{
        struct net *net = nf_ct_net(ct);

        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &net->ct.hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                           &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
        unsigned int hash, repl_hash;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        __nf_conntrack_hash_insert(ct, hash, repl_hash);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        unsigned int hash, repl_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in the other direction.  The actual packet
           which created the connection will be IP_CT_NEW or, for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns.  But packet copies and
           REJECT will give spurious warnings here. */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
           confirmed us. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);

        spin_lock_bh(&nf_conntrack_lock);

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple))
                        goto out;

        /* Remove from unconfirmed list */
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        set_bit(IPS_CONFIRMED_BIT, &ct->status);

        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        NF_CT_STAT_INC(net, insert);
        spin_unlock_bh(&nf_conntrack_lock);

        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);

        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        NF_CT_STAT_INC(net, insert_failed);
        spin_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
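
/*
 * Example of the race handled by the "out:" path above (a sketch, not a
 * trace from this code): two packets of the same brand-new flow arrive
 * on two CPUs at once.  Each creates its own unconfirmed conntrack;
 * whichever CPU confirms first wins the hash insertion, and the loser
 * finds the tuple already present, jumps to "out:" and returns NF_DROP,
 * so exactly one entry per flow reaches the hash table.
 */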

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int hash = hash_conntrack(tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(net, searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_nulls_node *n;
        unsigned int i, cnt = 0;
        int dropped = 0;

        rcu_read_lock();
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
                                ct = tmp;
                        cnt++;
                }

                if (ct && unlikely(nf_ct_is_dying(ct) ||
                                   !atomic_inc_not_zero(&ct->ct_general.use)))
                        ct = NULL;
                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
                hash = (hash + 1) % nf_conntrack_htable_size;
        }
        rcu_read_unlock();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC_ATOMIC(net, early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}
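
/*
 * Why the scan above is only "roughly LRU": entries are inserted at the
 * head of each chain, so the entry closest to the tail is the oldest.
 * ct keeps being overwritten with each non-assured entry met, leaving it
 * pointing at the oldest evictable entry of the chain; the walk visits
 * consecutive buckets until it has seen about NF_CT_EVICTION_RANGE
 * entries (a started chain is always finished) or found a victim.
 */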

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        struct nf_conn *ct;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd,
                                sizeof(nf_conntrack_hash_rnd));
                nf_conntrack_hash_rnd_initted = 1;
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                unsigned int hash = hash_conntrack(orig);
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&net->ct.count);
                return ERR_PTR(-ENOMEM);
        }
        /*
         * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
         * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
         */
        memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
               sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
#ifdef CONFIG_NET_NS
        ct->ct_net = net;
#endif

        /*
         * changes to lookup keys must be done before setting refcnt to 1
         */
        smp_wmb();
        atomic_set(&ct->ct_general.use, 1);
        return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
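
/*
 * The ordering above is dictated by SLAB_DESTROY_BY_RCU: a freed conntrack
 * can be recycled for a new connection while a lockless reader still holds
 * a pointer to it.  Leaving hnnode.next untouched keeps the chain
 * well-formed for such a reader (the nulls end value then reveals a
 * wrong-chain ending, see __nf_conntrack_find()), and the smp_wmb() makes
 * sure the new tuples are visible before the refcount is set to 1, so no
 * CPU can take a reference to a half-initialized entry.
 */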

void nf_conntrack_free(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        nf_ct_ext_destroy(ct);
        atomic_dec(&net->ct.count);
        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
        if (IS_ERR(ct)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)ct;
        }

        if (!l4proto->new(ct, skb, dataoff)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_ecache_ext_add(ct, GFP_ATOMIC);

        spin_lock_bh(&nf_conntrack_lock);
        exp = nf_ct_find_expectation(net, tuple);
        if (exp) {
                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                         ct, exp);
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &ct->status);
                ct->master = exp->master;
                if (exp->helper) {
                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, exp->helper);
                }

#ifdef CONFIG_NF_CONNTRACK_MARK
                ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                ct->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&ct->master->ct_general);
                NF_CT_STAT_INC(net, expect_new);
        } else {
                __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
                NF_CT_STAT_INC(net, new);
        }

        /* Overload the tuple linked list to put us in the unconfirmed list. */
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                       &net->ct.unconfirmed);

        spin_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(net, &tuple);
        if (!h) {
                h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Please set the reply bit if this packet is OK */
                *set_reply = 1;
        } else {
                /* Once we've had two-way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)?  Ignore. */
        if (skb->nfct) {
                NF_CT_STAT_INC_ATOMIC(net, ignore);
                return NF_ACCEPT;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(net, error);
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                return -ret;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet, error, unclean...
         * the inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
                ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
                        return -ret;
                }
        }

        ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
                return NF_DROP;
        }

        NF_CT_ASSERT(skb->nfct);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(net, drop);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, ct);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
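
/*
 * A note on the "-ret" convention in nf_conntrack_in() (inferred from the
 * call sites above): l4proto->error() and l4proto->packet() report a
 * netfilter verdict as its negation, e.g. -NF_ACCEPT for "stop tracking
 * but let the packet through", so a non-positive return value is negated
 * to recover the verdict handed back to the netfilter core.
 */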

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout. Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ)
                        mod_timer_pending(&ct->timeout, newtime);
        }

acct:
        if (do_acct) {
                struct nf_conn_counter *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        spin_lock_bh(&ct->lock);
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                        spin_unlock_bh(&ct->lock);
                }
        }
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
        if (do_acct) {
                struct nf_conn_counter *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        spin_lock_bh(&ct->lock);
                        acct[CTINFO2DIR(ctinfo)].packets++;
                        acct[CTINFO2DIR(ctinfo)].bytes +=
                                skb->len - skb_network_offset(skb);
                        spin_unlock_bh(&ct->lock);
                }
        }

        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
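
/*
 * The del_timer() idiom above: del_timer() returns true only if it
 * removed a pending timer before it fired, i.e. we beat a concurrent
 * expiry.  Only then is the timeout handler (death_by_timeout) invoked
 * synchronously, guaranteeing it runs exactly once; if we lose, the
 * timer code is already running the handler and we must not call it.
 */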

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
        NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
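
/*
 * The seemingly unreachable "nla_put_failure:" label above is reached
 * through the NLA_PUT_BE16() macros: in this kernel generation they
 * expand to a put that does "goto nla_put_failure" when the skb runs out
 * of tailroom, which is why every NLA_PUT_* user must supply the label.
 */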

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
        return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;

        spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
                }
        }
        hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (iter(ct, data))
                        set_bit(IPS_DYING_BIT, &ct->status);
        }
        spin_unlock_bh(&nf_conntrack_lock);
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock_bh(&nf_conntrack_lock);
        return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

struct __nf_ct_flush_report {
        u32 pid;
        int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
        struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;

        /* If we fail to deliver the event, death_by_timeout() will retry */
        if (nf_conntrack_event_report(IPCT_DESTROY, i,
                                      fr->pid, fr->report) < 0)
                return 1;

        /* Avoid the delivery of the destroy event in death_by_timeout(). */
        set_bit(IPS_DYING_BIT, &i->status);
        return 1;
}

static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
{
        if (vmalloced)
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
        struct __nf_ct_flush_report fr = {
                .pid    = pid,
                .report = report,
        };
        nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);

static void nf_ct_release_dying_list(struct net *net)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;

        spin_lock_bh(&nf_conntrack_lock);
        hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                /* never fails to remove them, no listeners at this point */
                nf_ct_kill(ct);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}

static void nf_conntrack_cleanup_init_net(void)
{
        nf_conntrack_helper_fini();
        nf_conntrack_proto_fini();
        kmem_cache_destroy(nf_conntrack_cachep);
}

static void nf_conntrack_cleanup_net(struct net *net)
{
 i_see_dead_people:
        nf_ct_iterate_cleanup(net, kill_all, NULL);
        nf_ct_release_dying_list(net);
        if (atomic_read(&net->ct.count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
        /* wait until all references to nf_conntrack_untracked are dropped */
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();

        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
        nf_conntrack_ecache_fini(net);
        nf_conntrack_acct_fini(net);
        nf_conntrack_expect_fini(net);
        free_percpu(net->ct.stat);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
        if (net_eq(net, &init_net))
                rcu_assign_pointer(ip_ct_attach, NULL);

        /* This makes sure all current packets have passed through
           netfilter framework.  Roll on, two-stage module
           delete... */
        synchronize_net();

        nf_conntrack_cleanup_net(net);

        if (net_eq(net, &init_net)) {
                rcu_assign_pointer(nf_ct_destroy, NULL);
                nf_conntrack_cleanup_init_net();
        }
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
{
        struct hlist_nulls_head *hash;
        unsigned int nr_slots, i;
        size_t sz;

        *vmalloced = 0;

        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
        }

        if (hash && nulls)
                for (i = 0; i < nr_slots; i++)
                        INIT_HLIST_NULLS_HEAD(&hash[i], i);

        return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
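
/*
 * Sizing note (worked example with assumed parameters, not from this
 * file): the roundup() above makes the table fill whole pages.  On a
 * 64-bit machine with 4 KiB pages a bucket head is one 8-byte pointer,
 * so 512 buckets fit per page and a request for 1000 buckets becomes
 * 1024.  Each bucket is initialized with its own index as the nulls
 * value, which is what lets a lockless lookup detect that it ended up
 * walking the wrong chain.
 */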

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
        int i, bucket, vmalloced, old_vmalloced;
        unsigned int hashsize, old_size;
        int rnd;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);

        hashsize = simple_strtoul(val, NULL, 0);
        if (!hashsize)
                return -EINVAL;

        hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
        if (!hash)
                return -ENOMEM;

        /* We have to rehash for the new table anyway, so we also can
         * use a new random seed */
        get_random_bytes(&rnd, sizeof(rnd));

        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though, since that requires taking the lock.
         */
        spin_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
                        h = hlist_nulls_entry(init_net.ct.hash[i].first,
                                        struct nf_conntrack_tuple_hash, hnnode);
                        hlist_nulls_del_rcu(&h->hnnode);
                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_vmalloced = init_net.ct.hash_vmalloc;
        old_hash = init_net.ct.hash;

        nf_conntrack_htable_size = hashsize;
        init_net.ct.hash_vmalloc = vmalloced;
        init_net.ct.hash = hash;
        nf_conntrack_hash_rnd = rnd;
        spin_unlock_bh(&nf_conntrack_lock);

        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);
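
/*
 * The module_param_call() above makes the table size tunable at runtime.
 * Usage sketch (paths as conventionally exposed for module parameters;
 * verify on the target system):
 *
 *      # cat /sys/module/nf_conntrack/parameters/hashsize
 *      16384
 *      # echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * Writes are routed through nf_conntrack_set_hashsize(), which rehashes
 * every existing entry into the new table under nf_conntrack_lock; mode
 * 0600 restricts access to root.
 */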

static int nf_conntrack_init_init_net(void)
{
        int max_factor = 8;
        int ret;

        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
                        = (((totalram_pages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
                if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;

                /* Use a max. factor of four by default to get the same max as
                 * with the old struct list_heads. When a table size is given
                 * we use the old value of 8 to avoid reducing the max.
                 * entries. */
                max_factor = 4;
        }
        nf_conntrack_max = max_factor * nf_conntrack_htable_size;
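
        /*
         * Worked example of the default sizing (illustrative, using the
         * i386 numbers from the comment above): a 32 MB machine has 2^25
         * bytes; 2^25 / 16384 = 2048 bytes of bucket storage, and with
         * 4-byte list heads that is 512 buckets.  The default max_factor
         * of 4 then allows nf_conntrack_max = 2048 tracked connections.
         */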

        printk("nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);

        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                0, SLAB_DESTROY_BY_RCU, NULL);
        if (!nf_conntrack_cachep) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                ret = -ENOMEM;
                goto err_cache;
        }

        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_proto;

        ret = nf_conntrack_helper_init();
        if (ret < 0)
                goto err_helper;

        return 0;

err_helper:
        nf_conntrack_proto_fini();
err_proto:
        kmem_cache_destroy(nf_conntrack_cachep);
err_cache:
        return ret;
}

/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
#define DYING_NULLS_VAL         ((1<<30)+1)
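
/*
 * Why (1<<30): regular chains use their bucket index as the nulls end
 * value (see nf_ct_alloc_hashtable()), and bucket indices stay far below
 * 2^30, so the unconfirmed and dying lists get end markers that can never
 * be confused with a real bucket's.
 */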

static int nf_conntrack_init_net(struct net *net)
{
        int ret;

        atomic_set(&net->ct.count, 0);
        INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
        INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
        if (!net->ct.stat) {
                ret = -ENOMEM;
                goto err_stat;
        }
        net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
                                             &net->ct.hash_vmalloc, 1);
        if (!net->ct.hash) {
                ret = -ENOMEM;
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_hash;
        }
        ret = nf_conntrack_expect_init(net);
        if (ret < 0)
                goto err_expect;
        ret = nf_conntrack_acct_init(net);
        if (ret < 0)
                goto err_acct;
        ret = nf_conntrack_ecache_init(net);
        if (ret < 0)
                goto err_ecache;

        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
        nf_conntrack_untracked.ct_net = &init_net;
#endif
        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
        /*  - and make it look like a confirmed connection */
        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

        return 0;

err_ecache:
        nf_conntrack_acct_fini(net);
err_acct:
        nf_conntrack_expect_fini(net);
err_expect:
        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
                             nf_conntrack_htable_size);
err_hash:
        free_percpu(net->ct.stat);
err_stat:
        return ret;
}

s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
                        enum ip_conntrack_dir dir,
                        u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);

int nf_conntrack_init(struct net *net)
{
        int ret;

        if (net_eq(net, &init_net)) {
                ret = nf_conntrack_init_init_net();
                if (ret < 0)
                        goto out_init_net;
        }
        ret = nf_conntrack_init_net(net);
        if (ret < 0)
                goto out_net;

        if (net_eq(net, &init_net)) {
                /* For use by REJECT target */
                rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
                rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

                /* How to get NAT offsets */
                rcu_assign_pointer(nf_ct_nat_offset, NULL);
        }
        return 0;

out_net:
        if (net_eq(net, &init_net))
                nf_conntrack_cleanup_init_net();
out_init_net:
        return ret;
}