linux/net/netfilter/nf_conntrack_core.c
   1/* Connection state tracking for netfilter.  This is separated from,
   2   but required by, the NAT layer; it can also be used by an iptables
   3   extension. */
   4
   5/* (C) 1999-2001 Paul `Rusty' Russell
   6 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
   7 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
   8 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/types.h>
  18#include <linux/netfilter.h>
  19#include <linux/module.h>
  20#include <linux/sched.h>
  21#include <linux/skbuff.h>
  22#include <linux/proc_fs.h>
  23#include <linux/vmalloc.h>
  24#include <linux/stddef.h>
  25#include <linux/slab.h>
  26#include <linux/random.h>
  27#include <linux/jhash.h>
  28#include <linux/err.h>
  29#include <linux/percpu.h>
  30#include <linux/moduleparam.h>
  31#include <linux/notifier.h>
  32#include <linux/kernel.h>
  33#include <linux/netdevice.h>
  34#include <linux/socket.h>
  35#include <linux/mm.h>
  36#include <linux/nsproxy.h>
  37#include <linux/rculist_nulls.h>
  38
  39#include <net/netfilter/nf_conntrack.h>
  40#include <net/netfilter/nf_conntrack_l3proto.h>
  41#include <net/netfilter/nf_conntrack_l4proto.h>
  42#include <net/netfilter/nf_conntrack_expect.h>
  43#include <net/netfilter/nf_conntrack_helper.h>
  44#include <net/netfilter/nf_conntrack_seqadj.h>
  45#include <net/netfilter/nf_conntrack_core.h>
  46#include <net/netfilter/nf_conntrack_extend.h>
  47#include <net/netfilter/nf_conntrack_acct.h>
  48#include <net/netfilter/nf_conntrack_ecache.h>
  49#include <net/netfilter/nf_conntrack_zones.h>
  50#include <net/netfilter/nf_conntrack_timestamp.h>
  51#include <net/netfilter/nf_conntrack_timeout.h>
  52#include <net/netfilter/nf_conntrack_labels.h>
  53#include <net/netfilter/nf_conntrack_synproxy.h>
  54#include <net/netfilter/nf_nat.h>
  55#include <net/netfilter/nf_nat_core.h>
  56#include <net/netfilter/nf_nat_helper.h>
  57#include <net/netns/hash.h>
  58
  59#define NF_CONNTRACK_VERSION    "0.5.0"
  60
  61int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
  62                                      enum nf_nat_manip_type manip,
  63                                      const struct nlattr *attr) __read_mostly;
  64EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
  65
  66__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
  67EXPORT_SYMBOL_GPL(nf_conntrack_locks);
  68
  69__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
  70EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
  71
  72struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
  73EXPORT_SYMBOL_GPL(nf_conntrack_hash);
  74
  75struct conntrack_gc_work {
  76        struct delayed_work     dwork;
  77        u32                     last_bucket;
  78        bool                    exiting;
  79        long                    next_gc_run;
  80};
  81
  82static __read_mostly struct kmem_cache *nf_conntrack_cachep;
  83static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
  84static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
  85static __read_mostly bool nf_conntrack_locks_all;
  86
  87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
  88#define GC_MAX_BUCKETS_DIV      128u
  89/* upper bound of full table scan */
  90#define GC_MAX_SCAN_JIFFIES     (16u * HZ)
  91/* desired ratio of entries found to be expired */
  92#define GC_EVICT_RATIO  50u
  93
  94static struct conntrack_gc_work conntrack_gc_work;
  95
  96void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
  97{
  98        spin_lock(lock);
  99        while (unlikely(nf_conntrack_locks_all)) {
 100                spin_unlock(lock);
 101
 102                /*
 103                 * Order the 'nf_conntrack_locks_all' load vs. the
 104                 * spin_unlock_wait() loads below, to ensure
 105                 * that 'nf_conntrack_locks_all_lock' is indeed held:
 106                 */
 107                smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
 108                spin_unlock_wait(&nf_conntrack_locks_all_lock);
 109                spin_lock(lock);
 110        }
 111}
 112EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 113
 114static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
 115{
 116        h1 %= CONNTRACK_LOCKS;
 117        h2 %= CONNTRACK_LOCKS;
 118        spin_unlock(&nf_conntrack_locks[h1]);
 119        if (h1 != h2)
 120                spin_unlock(&nf_conntrack_locks[h2]);
 121}
 122
 123/* return true if we need to recompute hashes (in case hash table was resized) */
 124static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
 125                                     unsigned int h2, unsigned int sequence)
 126{
 127        h1 %= CONNTRACK_LOCKS;
 128        h2 %= CONNTRACK_LOCKS;
 129        if (h1 <= h2) {
 130                nf_conntrack_lock(&nf_conntrack_locks[h1]);
 131                if (h1 != h2)
 132                        spin_lock_nested(&nf_conntrack_locks[h2],
 133                                         SINGLE_DEPTH_NESTING);
 134        } else {
 135                nf_conntrack_lock(&nf_conntrack_locks[h2]);
 136                spin_lock_nested(&nf_conntrack_locks[h1],
 137                                 SINGLE_DEPTH_NESTING);
 138        }
 139        if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
 140                nf_conntrack_double_unlock(h1, h2);
 141                return true;
 142        }
 143        return false;
 144}
 145
 146static void nf_conntrack_all_lock(void)
 147{
 148        int i;
 149
 150        spin_lock(&nf_conntrack_locks_all_lock);
 151        nf_conntrack_locks_all = true;
 152
 153        /*
 154         * Order the above store of 'nf_conntrack_locks_all' against
 155         * the spin_unlock_wait() loads below, such that if
 156         * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
 157         * we must observe nf_conntrack_locks[] held:
 158         */
 159        smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
 160
 161        for (i = 0; i < CONNTRACK_LOCKS; i++) {
 162                spin_unlock_wait(&nf_conntrack_locks[i]);
 163        }
 164}
 165
 166static void nf_conntrack_all_unlock(void)
 167{
 168        /*
 169         * All prior stores must be complete before we clear
 170         * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
 171         * might observe the false value but not the entire
 172         * critical section:
 173         */
 174        smp_store_release(&nf_conntrack_locks_all, false);
 175        spin_unlock(&nf_conntrack_locks_all_lock);
 176}
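
/* Illustrative example (hypothetical helper, only a sketch of how the two
 * locking levels above are meant to pair): the packet path takes a single
 * bucket lock via nf_conntrack_lock() and drops it with a plain
 * spin_unlock(), while a table-wide operation such as a resize brackets
 * its work with nf_conntrack_all_lock()/nf_conntrack_all_unlock().
 */
#if 0
static void conntrack_locking_example(unsigned int bucket)
{
        /* per-bucket critical section, as used by the insert/delete paths */
        nf_conntrack_lock(&nf_conntrack_locks[bucket % CONNTRACK_LOCKS]);
        /* ... modify one hash chain ... */
        spin_unlock(&nf_conntrack_locks[bucket % CONNTRACK_LOCKS]);

        /* table-wide critical section, excludes every per-bucket holder */
        nf_conntrack_all_lock();
        /* ... walk or rehash all chains ... */
        nf_conntrack_all_unlock();
}
#endif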
 177
 178unsigned int nf_conntrack_htable_size __read_mostly;
 179EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 180
 181unsigned int nf_conntrack_max __read_mostly;
 182seqcount_t nf_conntrack_generation __read_mostly;
 183
 184/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
 185 * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
 186 * alignment to enforce this.
 187 */
 188DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 189EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
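
/* Illustrative example (hypothetical helper): what the alignment rule
 * above enables.  skb->_nfct carries the conntrack pointer and the ctinfo
 * in one word; nf_ct_set() packs them and nf_ct_get() unpacks them, which
 * only works because the three low bits of a conntrack pointer are
 * guaranteed to be zero.
 */
#if 0
static void conntrack_nfctinfo_example(struct sk_buff *skb, struct nf_conn *ct)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *same_ct;

        nf_ct_set(skb, ct, IP_CT_NEW);          /* pointer | ctinfo */
        same_ct = nf_ct_get(skb, &ctinfo);      /* same_ct == ct, ctinfo == IP_CT_NEW */
}
#endif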
 190
 191static unsigned int nf_conntrack_hash_rnd __read_mostly;
 192
 193static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
 194                              const struct net *net)
 195{
 196        unsigned int n;
 197        u32 seed;
 198
 199        get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
 200
 201        /* The direction must be ignored, so we hash everything up to the
 202         * destination ports (which is a multiple of 4) and treat the last
 203         * three bytes manually.
 204         */
 205        seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
 206        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
 207        return jhash2((u32 *)tuple, n, seed ^
 208                      (((__force __u16)tuple->dst.u.all << 16) |
 209                      tuple->dst.protonum));
 210}
 211
 212static u32 scale_hash(u32 hash)
 213{
 214        return reciprocal_scale(hash, nf_conntrack_htable_size);
 215}
 216
 217static u32 __hash_conntrack(const struct net *net,
 218                            const struct nf_conntrack_tuple *tuple,
 219                            unsigned int size)
 220{
 221        return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
 222}
 223
 224static u32 hash_conntrack(const struct net *net,
 225                          const struct nf_conntrack_tuple *tuple)
 226{
 227        return scale_hash(hash_conntrack_raw(tuple, net));
 228}
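
/* Illustrative example (hypothetical helper): how the hash helpers above
 * relate.  hash_conntrack_raw() returns an unscaled 32-bit hash that is
 * independent of the table size; scale_hash() (or __hash_conntrack() for
 * an explicit size) folds it into a bucket index.  The allocation and
 * confirm paths below cache the raw value and rescale it instead of
 * rehashing the tuple.
 */
#if 0
static unsigned int conntrack_bucket_example(const struct net *net,
                                             const struct nf_conntrack_tuple *tuple)
{
        u32 raw = hash_conntrack_raw(tuple, net);

        return scale_hash(raw); /* equivalent to hash_conntrack(net, tuple) */
}
#endif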
 229
 230bool
 231nf_ct_get_tuple(const struct sk_buff *skb,
 232                unsigned int nhoff,
 233                unsigned int dataoff,
 234                u_int16_t l3num,
 235                u_int8_t protonum,
 236                struct net *net,
 237                struct nf_conntrack_tuple *tuple,
 238                const struct nf_conntrack_l3proto *l3proto,
 239                const struct nf_conntrack_l4proto *l4proto)
 240{
 241        memset(tuple, 0, sizeof(*tuple));
 242
 243        tuple->src.l3num = l3num;
 244        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
 245                return false;
 246
 247        tuple->dst.protonum = protonum;
 248        tuple->dst.dir = IP_CT_DIR_ORIGINAL;
 249
 250        return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
 251}
 252EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
 253
 254bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
 255                       u_int16_t l3num,
 256                       struct net *net, struct nf_conntrack_tuple *tuple)
 257{
 258        struct nf_conntrack_l3proto *l3proto;
 259        struct nf_conntrack_l4proto *l4proto;
 260        unsigned int protoff;
 261        u_int8_t protonum;
 262        int ret;
 263
 264        rcu_read_lock();
 265
 266        l3proto = __nf_ct_l3proto_find(l3num);
 267        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
 268        if (ret != NF_ACCEPT) {
 269                rcu_read_unlock();
 270                return false;
 271        }
 272
 273        l4proto = __nf_ct_l4proto_find(l3num, protonum);
 274
 275        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
 276                              l3proto, l4proto);
 277
 278        rcu_read_unlock();
 279        return ret;
 280}
 281EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
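
/* Illustrative example (hypothetical helper): a typical caller of
 * nf_ct_get_tuplepr(), building the original-direction tuple of an IPv4
 * packet starting from its network header.
 */
#if 0
static bool conntrack_tuple_example(struct net *net, const struct sk_buff *skb,
                                    struct nf_conntrack_tuple *tuple)
{
        return nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                                 NFPROTO_IPV4, net, tuple);
}
#endif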
 282
 283bool
 284nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 285                   const struct nf_conntrack_tuple *orig,
 286                   const struct nf_conntrack_l3proto *l3proto,
 287                   const struct nf_conntrack_l4proto *l4proto)
 288{
 289        memset(inverse, 0, sizeof(*inverse));
 290
 291        inverse->src.l3num = orig->src.l3num;
 292        if (l3proto->invert_tuple(inverse, orig) == 0)
 293                return false;
 294
 295        inverse->dst.dir = !orig->dst.dir;
 296
 297        inverse->dst.protonum = orig->dst.protonum;
 298        return l4proto->invert_tuple(inverse, orig);
 299}
 300EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 301
 302static void
 303clean_from_lists(struct nf_conn *ct)
 304{
 305        pr_debug("clean_from_lists(%p)\n", ct);
 306        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 307        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
 308
 309        /* Destroy all pending expectations */
 310        nf_ct_remove_expectations(ct);
 311}
 312
 313/* must be called with local_bh_disable */
 314static void nf_ct_add_to_dying_list(struct nf_conn *ct)
 315{
 316        struct ct_pcpu *pcpu;
 317
 318        /* add this conntrack to the (per cpu) dying list */
 319        ct->cpu = smp_processor_id();
 320        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
 321
 322        spin_lock(&pcpu->lock);
 323        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 324                             &pcpu->dying);
 325        spin_unlock(&pcpu->lock);
 326}
 327
 328/* must be called with local_bh_disable */
 329static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
 330{
 331        struct ct_pcpu *pcpu;
 332
 333        /* add this conntrack to the (per cpu) unconfirmed list */
 334        ct->cpu = smp_processor_id();
 335        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
 336
 337        spin_lock(&pcpu->lock);
 338        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 339                             &pcpu->unconfirmed);
 340        spin_unlock(&pcpu->lock);
 341}
 342
 343/* must be called with local_bh_disable */
 344static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 345{
 346        struct ct_pcpu *pcpu;
 347
 348        /* We overload the first tuple to link into the unconfirmed or dying list. */
 349        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
 350
 351        spin_lock(&pcpu->lock);
 352        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
 353        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 354        spin_unlock(&pcpu->lock);
 355}
 356
 357#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
 358
 359/* Released via destroy_conntrack() */
 360struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
 361                                 const struct nf_conntrack_zone *zone,
 362                                 gfp_t flags)
 363{
 364        struct nf_conn *tmpl, *p;
 365
 366        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
 367                tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
 368                if (!tmpl)
 369                        return NULL;
 370
 371                p = tmpl;
 372                tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
 373                if (tmpl != p) {
 374                        tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
 375                        tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
 376                }
 377        } else {
 378                tmpl = kzalloc(sizeof(*tmpl), flags);
 379                if (!tmpl)
 380                        return NULL;
 381        }
 382
 383        tmpl->status = IPS_TEMPLATE;
 384        write_pnet(&tmpl->ct_net, net);
 385        nf_ct_zone_add(tmpl, zone);
 386        atomic_set(&tmpl->ct_general.use, 0);
 387
 388        return tmpl;
 389}
 390EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
 391
 392void nf_ct_tmpl_free(struct nf_conn *tmpl)
 393{
 394        nf_ct_ext_destroy(tmpl);
 395        nf_ct_ext_free(tmpl);
 396
 397        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
 398                kfree((char *)tmpl - tmpl->proto.tmpl_padto);
 399        else
 400                kfree(tmpl);
 401}
 402EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
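
/* Illustrative example (hypothetical helper): the lifetime of a conntrack
 * template as a rule-attached object, roughly what the xt_CT target does.
 * Templates are never confirmed or hashed and are released through
 * nf_ct_tmpl_free() (directly, or via destroy_conntrack() once a
 * reference has been taken).
 */
#if 0
static int conntrack_tmpl_example(struct net *net)
{
        struct nf_conn *tmpl;

        tmpl = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
        if (!tmpl)
                return -ENOMEM;

        /* ... add extensions, stash the template in the rule ... */

        nf_ct_tmpl_free(tmpl);
        return 0;
}
#endif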
 403
 404static void
 405destroy_conntrack(struct nf_conntrack *nfct)
 406{
 407        struct nf_conn *ct = (struct nf_conn *)nfct;
 408        struct nf_conntrack_l4proto *l4proto;
 409
 410        pr_debug("destroy_conntrack(%p)\n", ct);
 411        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 412
 413        if (unlikely(nf_ct_is_template(ct))) {
 414                nf_ct_tmpl_free(ct);
 415                return;
 416        }
 417        rcu_read_lock();
 418        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
 419        if (l4proto->destroy)
 420                l4proto->destroy(ct);
 421
 422        rcu_read_unlock();
 423
 424        local_bh_disable();
 425        /* Expectations will have been removed in clean_from_lists,
 426         * except that TFTP can create an expectation on the first
 427         * packet, before the connection is in the list, so we need to
 428         * clean up here, too.
 429         */
 430        nf_ct_remove_expectations(ct);
 431
 432        nf_ct_del_from_dying_or_unconfirmed_list(ct);
 433
 434        local_bh_enable();
 435
 436        if (ct->master)
 437                nf_ct_put(ct->master);
 438
 439        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
 440        nf_conntrack_free(ct);
 441}
 442
 443static void nf_ct_delete_from_lists(struct nf_conn *ct)
 444{
 445        struct net *net = nf_ct_net(ct);
 446        unsigned int hash, reply_hash;
 447        unsigned int sequence;
 448
 449        nf_ct_helper_destroy(ct);
 450
 451        local_bh_disable();
 452        do {
 453                sequence = read_seqcount_begin(&nf_conntrack_generation);
 454                hash = hash_conntrack(net,
 455                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 456                reply_hash = hash_conntrack(net,
 457                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 458        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 459
 460        clean_from_lists(ct);
 461        nf_conntrack_double_unlock(hash, reply_hash);
 462
 463        nf_ct_add_to_dying_list(ct);
 464
 465        local_bh_enable();
 466}
 467
 468bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 469{
 470        struct nf_conn_tstamp *tstamp;
 471
 472        if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
 473                return false;
 474
 475        tstamp = nf_conn_tstamp_find(ct);
 476        if (tstamp && tstamp->stop == 0)
 477                tstamp->stop = ktime_get_real_ns();
 478
 479        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
 480                                    portid, report) < 0) {
 481                /* destroy event was not delivered. nf_ct_put will
 482                 * be done by event cache worker on redelivery.
 483                 */
 484                nf_ct_delete_from_lists(ct);
 485                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
 486                return false;
 487        }
 488
 489        nf_conntrack_ecache_work(nf_ct_net(ct));
 490        nf_ct_delete_from_lists(ct);
 491        nf_ct_put(ct);
 492        return true;
 493}
 494EXPORT_SYMBOL_GPL(nf_ct_delete);
 495
 496static inline bool
 497nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
 498                const struct nf_conntrack_tuple *tuple,
 499                const struct nf_conntrack_zone *zone,
 500                const struct net *net)
 501{
 502        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 503
 504        /* A conntrack can be recreated with an equal tuple,
 505         * so we need to check that the conntrack is confirmed.
 506         */
 507        return nf_ct_tuple_equal(tuple, &h->tuple) &&
 508               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
 509               nf_ct_is_confirmed(ct) &&
 510               net_eq(net, nf_ct_net(ct));
 511}
 512
 513/* caller must hold rcu readlock and none of the nf_conntrack_locks */
 514static void nf_ct_gc_expired(struct nf_conn *ct)
 515{
 516        if (!atomic_inc_not_zero(&ct->ct_general.use))
 517                return;
 518
 519        if (nf_ct_should_gc(ct))
 520                nf_ct_kill(ct);
 521
 522        nf_ct_put(ct);
 523}
 524
 525/*
 526 * Warning:
 527 * - Caller must take a reference on the returned object
 528 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 529 */
 530static struct nf_conntrack_tuple_hash *
 531____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
 532                      const struct nf_conntrack_tuple *tuple, u32 hash)
 533{
 534        struct nf_conntrack_tuple_hash *h;
 535        struct hlist_nulls_head *ct_hash;
 536        struct hlist_nulls_node *n;
 537        unsigned int bucket, hsize;
 538
 539begin:
 540        nf_conntrack_get_ht(&ct_hash, &hsize);
 541        bucket = reciprocal_scale(hash, hsize);
 542
 543        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
 544                struct nf_conn *ct;
 545
 546                ct = nf_ct_tuplehash_to_ctrack(h);
 547                if (nf_ct_is_expired(ct)) {
 548                        nf_ct_gc_expired(ct);
 549                        continue;
 550                }
 551
 552                if (nf_ct_is_dying(ct))
 553                        continue;
 554
 555                if (nf_ct_key_equal(h, tuple, zone, net))
 556                        return h;
 557        }
 558        /*
 559         * If the nulls value we got at the end of this lookup is
 560         * not the expected one, we must restart the lookup.
 561         * We probably met an item that was moved to another chain.
 562         */
 563        if (get_nulls_value(n) != bucket) {
 564                NF_CT_STAT_INC_ATOMIC(net, search_restart);
 565                goto begin;
 566        }
 567
 568        return NULL;
 569}
 570
 571/* Find a connection corresponding to a tuple. */
 572static struct nf_conntrack_tuple_hash *
 573__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 574                        const struct nf_conntrack_tuple *tuple, u32 hash)
 575{
 576        struct nf_conntrack_tuple_hash *h;
 577        struct nf_conn *ct;
 578
 579        rcu_read_lock();
 580begin:
 581        h = ____nf_conntrack_find(net, zone, tuple, hash);
 582        if (h) {
 583                ct = nf_ct_tuplehash_to_ctrack(h);
 584                if (unlikely(nf_ct_is_dying(ct) ||
 585                             !atomic_inc_not_zero(&ct->ct_general.use)))
 586                        h = NULL;
 587                else {
 588                        if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
 589                                nf_ct_put(ct);
 590                                goto begin;
 591                        }
 592                }
 593        }
 594        rcu_read_unlock();
 595
 596        return h;
 597}
 598
 599struct nf_conntrack_tuple_hash *
 600nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 601                      const struct nf_conntrack_tuple *tuple)
 602{
 603        return __nf_conntrack_find_get(net, zone, tuple,
 604                                       hash_conntrack_raw(tuple, net));
 605}
 606EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
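
/* Illustrative example (hypothetical helper): a lookup through the
 * exported API.  nf_conntrack_find_get() returns the entry with its
 * reference count already taken, so the caller owns a reference and must
 * drop it with nf_ct_put() when done.
 */
#if 0
static bool conntrack_lookup_example(struct net *net,
                                     const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, tuple);
        if (!h)
                return false;

        ct = nf_ct_tuplehash_to_ctrack(h);
        /* ... inspect ct ... */
        nf_ct_put(ct);
        return true;
}
#endif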
 607
 608static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 609                                       unsigned int hash,
 610                                       unsigned int reply_hash)
 611{
 612        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 613                           &nf_conntrack_hash[hash]);
 614        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
 615                           &nf_conntrack_hash[reply_hash]);
 616}
 617
 618int
 619nf_conntrack_hash_check_insert(struct nf_conn *ct)
 620{
 621        const struct nf_conntrack_zone *zone;
 622        struct net *net = nf_ct_net(ct);
 623        unsigned int hash, reply_hash;
 624        struct nf_conntrack_tuple_hash *h;
 625        struct hlist_nulls_node *n;
 626        unsigned int sequence;
 627
 628        zone = nf_ct_zone(ct);
 629
 630        local_bh_disable();
 631        do {
 632                sequence = read_seqcount_begin(&nf_conntrack_generation);
 633                hash = hash_conntrack(net,
 634                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 635                reply_hash = hash_conntrack(net,
 636                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 637        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 638
 639        /* See if there's one in the list already, including reverse */
 640        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
 641                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 642                                    zone, net))
 643                        goto out;
 644
 645        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
 646                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 647                                    zone, net))
 648                        goto out;
 649
 650        smp_wmb();
 651        /* The caller holds a reference to this object */
 652        atomic_set(&ct->ct_general.use, 2);
 653        __nf_conntrack_hash_insert(ct, hash, reply_hash);
 654        nf_conntrack_double_unlock(hash, reply_hash);
 655        NF_CT_STAT_INC(net, insert);
 656        local_bh_enable();
 657        return 0;
 658
 659out:
 660        nf_conntrack_double_unlock(hash, reply_hash);
 661        NF_CT_STAT_INC(net, insert_failed);
 662        local_bh_enable();
 663        return -EEXIST;
 664}
 665EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
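
/* Illustrative example (hypothetical helper): callers such as ctnetlink
 * use nf_conntrack_hash_check_insert() to place a fully initialized but
 * still unconfirmed entry into the hash, getting -EEXIST back if either
 * direction is already present.
 */
#if 0
static int conntrack_insert_example(struct nf_conn *ct)
{
        /* tuples, timeout and extensions of ct must already be set up */
        return nf_conntrack_hash_check_insert(ct);
}
#endif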
 666
 667static inline void nf_ct_acct_update(struct nf_conn *ct,
 668                                     enum ip_conntrack_info ctinfo,
 669                                     unsigned int len)
 670{
 671        struct nf_conn_acct *acct;
 672
 673        acct = nf_conn_acct_find(ct);
 674        if (acct) {
 675                struct nf_conn_counter *counter = acct->counter;
 676
 677                atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
 678                atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
 679        }
 680}
 681
 682static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 683                             const struct nf_conn *loser_ct)
 684{
 685        struct nf_conn_acct *acct;
 686
 687        acct = nf_conn_acct_find(loser_ct);
 688        if (acct) {
 689                struct nf_conn_counter *counter = acct->counter;
 690                unsigned int bytes;
 691
 692                /* u32 should be fine since we must have seen one packet. */
 693                bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
 694                nf_ct_acct_update(ct, ctinfo, bytes);
 695        }
 696}
 697
 698/* Resolve race on insertion if this protocol allows this. */
 699static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 700                               enum ip_conntrack_info ctinfo,
 701                               struct nf_conntrack_tuple_hash *h)
 702{
 703        /* This is the conntrack entry already in hashes that won race. */
 704        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 705        struct nf_conntrack_l4proto *l4proto;
 706
 707        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
 708        if (l4proto->allow_clash &&
 709            !nfct_nat(ct) &&
 710            !nf_ct_is_dying(ct) &&
 711            atomic_inc_not_zero(&ct->ct_general.use)) {
 712                enum ip_conntrack_info oldinfo;
 713                struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
 714
 715                nf_ct_acct_merge(ct, ctinfo, loser_ct);
 716                nf_conntrack_put(&loser_ct->ct_general);
 717                nf_ct_set(skb, ct, oldinfo);
 718                return NF_ACCEPT;
 719        }
 720        NF_CT_STAT_INC(net, drop);
 721        return NF_DROP;
 722}
 723
 724/* Confirm a connection given skb; places it in hash table */
 725int
 726__nf_conntrack_confirm(struct sk_buff *skb)
 727{
 728        const struct nf_conntrack_zone *zone;
 729        unsigned int hash, reply_hash;
 730        struct nf_conntrack_tuple_hash *h;
 731        struct nf_conn *ct;
 732        struct nf_conn_help *help;
 733        struct nf_conn_tstamp *tstamp;
 734        struct hlist_nulls_node *n;
 735        enum ip_conntrack_info ctinfo;
 736        struct net *net;
 737        unsigned int sequence;
 738        int ret = NF_DROP;
 739
 740        ct = nf_ct_get(skb, &ctinfo);
 741        net = nf_ct_net(ct);
 742
 743        /* ipt_REJECT uses nf_conntrack_attach to attach related
 744           ICMP/TCP RST packets in the other direction.  The actual
 745           packet which created the connection will be IP_CT_NEW, or
 746           IP_CT_RELATED for an expected connection. */
 747        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
 748                return NF_ACCEPT;
 749
 750        zone = nf_ct_zone(ct);
 751        local_bh_disable();
 752
 753        do {
 754                sequence = read_seqcount_begin(&nf_conntrack_generation);
 755                /* reuse the hash saved before */
 756                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
 757                hash = scale_hash(hash);
 758                reply_hash = hash_conntrack(net,
 759                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 760
 761        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 762
 763        /* We're not in hash table, and we refuse to set up related
 764         * connections for unconfirmed conns.  But packet copies and
 765         * REJECT will give spurious warnings here.
 766         */
 767        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
 768
 769        /* No external references means no one else could have
 770         * confirmed us.
 771         */
 772        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 773        pr_debug("Confirming conntrack %p\n", ct);
 774        /* We have to check the DYING flag after unlink to prevent
 775         * a race against nf_ct_get_next_corpse() possibly called from
 776         * user context, else we insert an already 'dead' hash, blocking
 777         * further use of that particular connection -JM.
 778         */
 779        nf_ct_del_from_dying_or_unconfirmed_list(ct);
 780
 781        if (unlikely(nf_ct_is_dying(ct))) {
 782                nf_ct_add_to_dying_list(ct);
 783                goto dying;
 784        }
 785
 786        /* See if there's one in the list already, including reverse:
 787           NAT could have grabbed it without realizing, since we're
 788           not in the hash.  If there is, we lost race. */
 789        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
 790                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 791                                    zone, net))
 792                        goto out;
 793
 794        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
 795                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 796                                    zone, net))
 797                        goto out;
 798
 799        /* Timer relative to confirmation time, not original
 800           setting time, otherwise we'd get timer wrap in
 801           weird delay cases. */
 802        ct->timeout += nfct_time_stamp;
 803        atomic_inc(&ct->ct_general.use);
 804        ct->status |= IPS_CONFIRMED;
 805
 806        /* set conntrack timestamp, if enabled. */
 807        tstamp = nf_conn_tstamp_find(ct);
 808        if (tstamp) {
 809                if (skb->tstamp == 0)
 810                        __net_timestamp(skb);
 811
 812                tstamp->start = ktime_to_ns(skb->tstamp);
 813        }
 814        /* Since the lookup is lockless, hash insertion must be done after
 815         * starting the timer and setting the CONFIRMED bit. The RCU barriers
 816         * guarantee that no other CPU can find the conntrack before the above
 817         * stores are visible.
 818         */
 819        __nf_conntrack_hash_insert(ct, hash, reply_hash);
 820        nf_conntrack_double_unlock(hash, reply_hash);
 821        local_bh_enable();
 822
 823        help = nfct_help(ct);
 824        if (help && help->helper)
 825                nf_conntrack_event_cache(IPCT_HELPER, ct);
 826
 827        nf_conntrack_event_cache(master_ct(ct) ?
 828                                 IPCT_RELATED : IPCT_NEW, ct);
 829        return NF_ACCEPT;
 830
 831out:
 832        nf_ct_add_to_dying_list(ct);
 833        ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
 834dying:
 835        nf_conntrack_double_unlock(hash, reply_hash);
 836        NF_CT_STAT_INC(net, insert_failed);
 837        local_bh_enable();
 838        return ret;
 839}
 840EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
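
/* Illustrative example (hypothetical hook function): the packet path does
 * not call __nf_conntrack_confirm() directly.  The nf_conntrack_confirm()
 * wrapper in nf_conntrack_core.h only calls it for still-unconfirmed
 * entries, from the last hook the packet traverses (postrouting or local
 * input), once it is clear the packet will not be dropped later.
 */
#if 0
static unsigned int confirm_hook_example(void *priv, struct sk_buff *skb,
                                         const struct nf_hook_state *state)
{
        /* typically the final step of the IPv4/IPv6 confirm hook */
        return nf_conntrack_confirm(skb);
}
#endif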
 841
 842/* Returns true if a connection corresponds to the tuple (required
 843   for NAT). */
 844int
 845nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 846                         const struct nf_conn *ignored_conntrack)
 847{
 848        struct net *net = nf_ct_net(ignored_conntrack);
 849        const struct nf_conntrack_zone *zone;
 850        struct nf_conntrack_tuple_hash *h;
 851        struct hlist_nulls_head *ct_hash;
 852        unsigned int hash, hsize;
 853        struct hlist_nulls_node *n;
 854        struct nf_conn *ct;
 855
 856        zone = nf_ct_zone(ignored_conntrack);
 857
 858        rcu_read_lock();
 859 begin:
 860        nf_conntrack_get_ht(&ct_hash, &hsize);
 861        hash = __hash_conntrack(net, tuple, hsize);
 862
 863        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
 864                ct = nf_ct_tuplehash_to_ctrack(h);
 865
 866                if (ct == ignored_conntrack)
 867                        continue;
 868
 869                if (nf_ct_is_expired(ct)) {
 870                        nf_ct_gc_expired(ct);
 871                        continue;
 872                }
 873
 874                if (nf_ct_key_equal(h, tuple, zone, net)) {
 875                        NF_CT_STAT_INC_ATOMIC(net, found);
 876                        rcu_read_unlock();
 877                        return 1;
 878                }
 879        }
 880
 881        if (get_nulls_value(n) != hash) {
 882                NF_CT_STAT_INC_ATOMIC(net, search_restart);
 883                goto begin;
 884        }
 885
 886        rcu_read_unlock();
 887
 888        return 0;
 889}
 890EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
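
/* Illustrative example (hypothetical helper, not the actual NAT algorithm
 * in nf_nat_core.c): a port-selection loop can use
 * nf_conntrack_tuple_taken() to find a tuple that no other connection
 * (apart from the one being NATed) already claims.
 */
#if 0
static bool nat_port_search_example(struct nf_conntrack_tuple *tuple,
                                    const struct nf_conn *ct,
                                    u16 first_port, u16 last_port)
{
        unsigned int port;

        for (port = first_port; port <= last_port; port++) {
                tuple->dst.u.all = htons(port);
                if (!nf_conntrack_tuple_taken(tuple, ct))
                        return true;    /* unused tuple found */
        }
        return false;
}
#endif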
 891
 892#define NF_CT_EVICTION_RANGE    8
 893
 894/* There's a small race here where we may free a just-assured
 895   connection.  Too bad: we're in trouble anyway. */
 896static unsigned int early_drop_list(struct net *net,
 897                                    struct hlist_nulls_head *head)
 898{
 899        struct nf_conntrack_tuple_hash *h;
 900        struct hlist_nulls_node *n;
 901        unsigned int drops = 0;
 902        struct nf_conn *tmp;
 903
 904        hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
 905                tmp = nf_ct_tuplehash_to_ctrack(h);
 906
 907                if (nf_ct_is_expired(tmp)) {
 908                        nf_ct_gc_expired(tmp);
 909                        continue;
 910                }
 911
 912                if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
 913                    !net_eq(nf_ct_net(tmp), net) ||
 914                    nf_ct_is_dying(tmp))
 915                        continue;
 916
 917                if (!atomic_inc_not_zero(&tmp->ct_general.use))
 918                        continue;
 919
 920                /* kill only if still in same netns -- might have moved due to
 921                 * SLAB_DESTROY_BY_RCU rules.
 922                 *
 923                 * We steal the timer reference.  If that fails timer has
 924                 * already fired or someone else deleted it. Just drop ref
 925                 * and move to next entry.
 926                 */
 927                if (net_eq(nf_ct_net(tmp), net) &&
 928                    nf_ct_is_confirmed(tmp) &&
 929                    nf_ct_delete(tmp, 0, 0))
 930                        drops++;
 931
 932                nf_ct_put(tmp);
 933        }
 934
 935        return drops;
 936}
 937
 938static noinline int early_drop(struct net *net, unsigned int _hash)
 939{
 940        unsigned int i;
 941
 942        for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
 943                struct hlist_nulls_head *ct_hash;
 944                unsigned int hash, hsize, drops;
 945
 946                rcu_read_lock();
 947                nf_conntrack_get_ht(&ct_hash, &hsize);
 948                hash = reciprocal_scale(_hash++, hsize);
 949
 950                drops = early_drop_list(net, &ct_hash[hash]);
 951                rcu_read_unlock();
 952
 953                if (drops) {
 954                        NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
 955                        return true;
 956                }
 957        }
 958
 959        return false;
 960}
 961
 962static void gc_worker(struct work_struct *work)
 963{
 964        unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
 965        unsigned int i, goal, buckets = 0, expired_count = 0;
 966        struct conntrack_gc_work *gc_work;
 967        unsigned int ratio, scanned = 0;
 968        unsigned long next_run;
 969
 970        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
 971
 972        goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
 973        i = gc_work->last_bucket;
 974
 975        do {
 976                struct nf_conntrack_tuple_hash *h;
 977                struct hlist_nulls_head *ct_hash;
 978                struct hlist_nulls_node *n;
 979                unsigned int hashsz;
 980                struct nf_conn *tmp;
 981
 982                i++;
 983                rcu_read_lock();
 984
 985                nf_conntrack_get_ht(&ct_hash, &hashsz);
 986                if (i >= hashsz)
 987                        i = 0;
 988
 989                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
 990                        tmp = nf_ct_tuplehash_to_ctrack(h);
 991
 992                        scanned++;
 993                        if (nf_ct_is_expired(tmp)) {
 994                                nf_ct_gc_expired(tmp);
 995                                expired_count++;
 996                                continue;
 997                        }
 998                }
 999
1000                /* could check get_nulls_value() here and restart if ct
1001                 * was moved to another chain.  But given gc is best-effort
1002                 * we will just continue with next hash slot.
1003                 */
1004                rcu_read_unlock();
1005                cond_resched_rcu_qs();
1006        } while (++buckets < goal);
1007
1008        if (gc_work->exiting)
1009                return;
1010
1011        /*
1012         * Eviction will normally happen from the packet path, and not
1013         * from this gc worker.
1014         *
1015         * This worker is only here to reap expired entries when the system
1016         * went idle after a busy period.
1017         *
1018         * The heuristics below are supposed to balance conflicting goals:
1019         *
1020         * 1. Minimize time until we notice a stale entry
1021         * 2. Maximize scan intervals to not waste cycles
1022         *
1023         * Normally, expire ratio will be close to 0.
1024         *
1025         * As soon as a sizeable fraction of the entries have expired,
1026         * increase the scan frequency.
1027         */
1028        ratio = scanned ? expired_count * 100 / scanned : 0;
1029        if (ratio > GC_EVICT_RATIO) {
1030                gc_work->next_gc_run = min_interval;
1031        } else {
1032                unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
1033
1034                BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1035
1036                gc_work->next_gc_run += min_interval;
1037                if (gc_work->next_gc_run > max)
1038                        gc_work->next_gc_run = max;
1039        }
1040
1041        next_run = gc_work->next_gc_run;
1042        gc_work->last_bucket = i;
1043        queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
1044}
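
/* Worked example of the heuristic above (illustrative, assuming HZ=250 so
 * min_interval = max(250 / 128, 1) = 1 jiffy): if 2000 entries were
 * scanned and 30 of them had expired, ratio = 30 * 100 / 2000 = 1, which
 * is below GC_EVICT_RATIO, so next_gc_run grows by one jiffy up to the
 * cap of GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV = (16 * 250) / 128 = 31
 * jiffies.  If instead 1200 of the 2000 entries had expired, ratio = 60
 * exceeds GC_EVICT_RATIO and the next run is scheduled after just
 * min_interval jiffies.
 */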
1045
1046static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1047{
1048        INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
1049        gc_work->next_gc_run = HZ;
1050        gc_work->exiting = false;
1051}
1052
1053static struct nf_conn *
1054__nf_conntrack_alloc(struct net *net,
1055                     const struct nf_conntrack_zone *zone,
1056                     const struct nf_conntrack_tuple *orig,
1057                     const struct nf_conntrack_tuple *repl,
1058                     gfp_t gfp, u32 hash)
1059{
1060        struct nf_conn *ct;
1061
1062        /* We don't want any race condition at early drop stage */
1063        atomic_inc(&net->ct.count);
1064
1065        if (nf_conntrack_max &&
1066            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
1067                if (!early_drop(net, hash)) {
1068                        atomic_dec(&net->ct.count);
1069                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
1070                        return ERR_PTR(-ENOMEM);
1071                }
1072        }
1073
1074        /*
1075         * Do not use kmem_cache_zalloc(), as this cache uses
1076         * SLAB_DESTROY_BY_RCU.
1077         */
1078        ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
1079        if (ct == NULL)
1080                goto out;
1081
1082        spin_lock_init(&ct->lock);
1083        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
1084        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
1085        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
1086        /* save hash for reusing when confirming */
1087        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
1088        ct->status = 0;
1089        write_pnet(&ct->ct_net, net);
1090        memset(&ct->__nfct_init_offset[0], 0,
1091               offsetof(struct nf_conn, proto) -
1092               offsetof(struct nf_conn, __nfct_init_offset[0]));
1093
1094        nf_ct_zone_add(ct, zone);
1095
1096        /* Because we use RCU lookups, we set ct_general.use to zero before
1097         * this is inserted in any list.
1098         */
1099        atomic_set(&ct->ct_general.use, 0);
1100        return ct;
1101out:
1102        atomic_dec(&net->ct.count);
1103        return ERR_PTR(-ENOMEM);
1104}
1105
1106struct nf_conn *nf_conntrack_alloc(struct net *net,
1107                                   const struct nf_conntrack_zone *zone,
1108                                   const struct nf_conntrack_tuple *orig,
1109                                   const struct nf_conntrack_tuple *repl,
1110                                   gfp_t gfp)
1111{
1112        return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
1113}
1114EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
1115
1116void nf_conntrack_free(struct nf_conn *ct)
1117{
1118        struct net *net = nf_ct_net(ct);
1119
1120        /* A freed object has refcnt == 0, that's
1121         * the golden rule for SLAB_DESTROY_BY_RCU
1122         */
1123        NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
1124
1125        nf_ct_ext_destroy(ct);
1126        nf_ct_ext_free(ct);
1127        kmem_cache_free(nf_conntrack_cachep, ct);
1128        smp_mb__before_atomic();
1129        atomic_dec(&net->ct.count);
1130}
1131EXPORT_SYMBOL_GPL(nf_conntrack_free);
1132
1133
1134/* Allocate a new conntrack: we return -ENOMEM if classification
1135   failed due to stress.  Otherwise it really is unclassifiable. */
1136static struct nf_conntrack_tuple_hash *
1137init_conntrack(struct net *net, struct nf_conn *tmpl,
1138               const struct nf_conntrack_tuple *tuple,
1139               struct nf_conntrack_l3proto *l3proto,
1140               struct nf_conntrack_l4proto *l4proto,
1141               struct sk_buff *skb,
1142               unsigned int dataoff, u32 hash)
1143{
1144        struct nf_conn *ct;
1145        struct nf_conn_help *help;
1146        struct nf_conntrack_tuple repl_tuple;
1147        struct nf_conntrack_ecache *ecache;
1148        struct nf_conntrack_expect *exp = NULL;
1149        const struct nf_conntrack_zone *zone;
1150        struct nf_conn_timeout *timeout_ext;
1151        struct nf_conntrack_zone tmp;
1152        unsigned int *timeouts;
1153
1154        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
1155                pr_debug("Can't invert tuple.\n");
1156                return NULL;
1157        }
1158
1159        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1160        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1161                                  hash);
1162        if (IS_ERR(ct))
1163                return (struct nf_conntrack_tuple_hash *)ct;
1164
1165        if (!nf_ct_add_synproxy(ct, tmpl)) {
1166                nf_conntrack_free(ct);
1167                return ERR_PTR(-ENOMEM);
1168        }
1169
1170        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1171        if (timeout_ext) {
1172                timeouts = nf_ct_timeout_data(timeout_ext);
1173                if (unlikely(!timeouts))
1174                        timeouts = l4proto->get_timeouts(net);
1175        } else {
1176                timeouts = l4proto->get_timeouts(net);
1177        }
1178
1179        if (!l4proto->new(ct, skb, dataoff, timeouts)) {
1180                nf_conntrack_free(ct);
1181                pr_debug("can't track with proto module\n");
1182                return NULL;
1183        }
1184
1185        if (timeout_ext)
1186                nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1187                                      GFP_ATOMIC);
1188
1189        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1190        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1191        nf_ct_labels_ext_add(ct);
1192
1193        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1194        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1195                                 ecache ? ecache->expmask : 0,
1196                             GFP_ATOMIC);
1197
1198        local_bh_disable();
1199        if (net->ct.expect_count) {
1200                spin_lock(&nf_conntrack_expect_lock);
1201                exp = nf_ct_find_expectation(net, zone, tuple);
1202                if (exp) {
1203                        pr_debug("expectation arrives ct=%p exp=%p\n",
1204                                 ct, exp);
1205                        /* Welcome, Mr. Bond.  We've been expecting you... */
1206                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
1207                        /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1208                        ct->master = exp->master;
1209                        if (exp->helper) {
1210                                help = nf_ct_helper_ext_add(ct, exp->helper,
1211                                                            GFP_ATOMIC);
1212                                if (help)
1213                                        rcu_assign_pointer(help->helper, exp->helper);
1214                        }
1215
1216#ifdef CONFIG_NF_CONNTRACK_MARK
1217                        ct->mark = exp->master->mark;
1218#endif
1219#ifdef CONFIG_NF_CONNTRACK_SECMARK
1220                        ct->secmark = exp->master->secmark;
1221#endif
1222                        NF_CT_STAT_INC(net, expect_new);
1223                }
1224                spin_unlock(&nf_conntrack_expect_lock);
1225        }
1226        if (!exp)
1227                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1228
1229        /* Now it is inserted into the unconfirmed list, bump refcount */
1230        nf_conntrack_get(&ct->ct_general);
1231        nf_ct_add_to_unconfirmed_list(ct);
1232
1233        local_bh_enable();
1234
1235        if (exp) {
1236                if (exp->expectfn)
1237                        exp->expectfn(ct, exp);
1238                nf_ct_expect_put(exp);
1239        }
1240
1241        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1242}
1243
1244/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */
1245static inline struct nf_conn *
1246resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1247                  struct sk_buff *skb,
1248                  unsigned int dataoff,
1249                  u_int16_t l3num,
1250                  u_int8_t protonum,
1251                  struct nf_conntrack_l3proto *l3proto,
1252                  struct nf_conntrack_l4proto *l4proto,
1253                  int *set_reply,
1254                  enum ip_conntrack_info *ctinfo)
1255{
1256        const struct nf_conntrack_zone *zone;
1257        struct nf_conntrack_tuple tuple;
1258        struct nf_conntrack_tuple_hash *h;
1259        struct nf_conntrack_zone tmp;
1260        struct nf_conn *ct;
1261        u32 hash;
1262
1263        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1264                             dataoff, l3num, protonum, net, &tuple, l3proto,
1265                             l4proto)) {
1266                pr_debug("Can't get tuple\n");
1267                return NULL;
1268        }
1269
1270        /* look for tuple match */
1271        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1272        hash = hash_conntrack_raw(&tuple, net);
1273        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
1274        if (!h) {
1275                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
1276                                   skb, dataoff, hash);
1277                if (!h)
1278                        return NULL;
1279                if (IS_ERR(h))
1280                        return (void *)h;
1281        }
1282        ct = nf_ct_tuplehash_to_ctrack(h);
1283
1284        /* It exists; we have (non-exclusive) reference. */
1285        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1286                *ctinfo = IP_CT_ESTABLISHED_REPLY;
1287                /* Please set reply bit if this packet OK */
1288                *set_reply = 1;
1289        } else {
1290                /* Once we've had two way comms, always ESTABLISHED. */
1291                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1292                        pr_debug("normal packet for %p\n", ct);
1293                        *ctinfo = IP_CT_ESTABLISHED;
1294                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1295                        pr_debug("related packet for %p\n", ct);
1296                        *ctinfo = IP_CT_RELATED;
1297                } else {
1298                        pr_debug("new packet for %p\n", ct);
1299                        *ctinfo = IP_CT_NEW;
1300                }
1301                *set_reply = 0;
1302        }
1303        nf_ct_set(skb, ct, *ctinfo);
1304        return ct;
1305}
1306
1307unsigned int
1308nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1309                struct sk_buff *skb)
1310{
1311        struct nf_conn *ct, *tmpl;
1312        enum ip_conntrack_info ctinfo;
1313        struct nf_conntrack_l3proto *l3proto;
1314        struct nf_conntrack_l4proto *l4proto;
1315        unsigned int *timeouts;
1316        unsigned int dataoff;
1317        u_int8_t protonum;
1318        int set_reply = 0;
1319        int ret;
1320
1321        tmpl = nf_ct_get(skb, &ctinfo);
1322        if (tmpl) {
1323                /* Previously seen (loopback or untracked)?  Ignore. */
1324                if (!nf_ct_is_template(tmpl)) {
1325                        NF_CT_STAT_INC_ATOMIC(net, ignore);
1326                        return NF_ACCEPT;
1327                }
1328                skb->_nfct = 0;
1329        }
1330
1331        /* rcu_read_lock()ed by nf_hook_thresh */
1332        l3proto = __nf_ct_l3proto_find(pf);
1333        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
1334                                   &dataoff, &protonum);
1335        if (ret <= 0) {
1336                pr_debug("not prepared to track yet or error occurred\n");
1337                NF_CT_STAT_INC_ATOMIC(net, error);
1338                NF_CT_STAT_INC_ATOMIC(net, invalid);
1339                ret = -ret;
1340                goto out;
1341        }
1342
1343        l4proto = __nf_ct_l4proto_find(pf, protonum);
1344
1345        /* It may be a special packet, error, unclean...
1346         * The inverse of the return code tells the netfilter
1347         * core what to do with the packet. */
1348        if (l4proto->error != NULL) {
1349                ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
1350                if (ret <= 0) {
1351                        NF_CT_STAT_INC_ATOMIC(net, error);
1352                        NF_CT_STAT_INC_ATOMIC(net, invalid);
1353                        ret = -ret;
1354                        goto out;
1355                }
1356                /* ICMP[v6] protocol trackers may assign one conntrack. */
1357                if (skb->_nfct)
1358                        goto out;
1359        }
1360repeat:
1361        ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
1362                               l3proto, l4proto, &set_reply, &ctinfo);
1363        if (!ct) {
1364                /* Not valid part of a connection */
1365                NF_CT_STAT_INC_ATOMIC(net, invalid);
1366                ret = NF_ACCEPT;
1367                goto out;
1368        }
1369
1370        if (IS_ERR(ct)) {
1371                /* Too stressed to deal. */
1372                NF_CT_STAT_INC_ATOMIC(net, drop);
1373                ret = NF_DROP;
1374                goto out;
1375        }
1376
1377        NF_CT_ASSERT(skb_nfct(skb));
1378
1379        /* Decide what timeout policy we want to apply to this flow. */
1380        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1381
1382        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1383        if (ret <= 0) {
1384                /* Invalid: inverse of the return code tells
1385                 * the netfilter core what to do */
1386                pr_debug("nf_conntrack_in: Can't track with proto module\n");
1387                nf_conntrack_put(&ct->ct_general);
1388                skb->_nfct = 0;
1389                NF_CT_STAT_INC_ATOMIC(net, invalid);
1390                if (ret == -NF_DROP)
1391                        NF_CT_STAT_INC_ATOMIC(net, drop);
1392                /* Special case: TCP tracker reports an attempt to reopen a
1393                 * closed/aborted connection. We have to go back and create a
1394                 * fresh conntrack.
1395                 */
1396                if (ret == -NF_REPEAT)
1397                        goto repeat;
1398                ret = -ret;
1399                goto out;
1400        }
1401
1402        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1403                nf_conntrack_event_cache(IPCT_REPLY, ct);
1404out:
1405        if (tmpl)
1406                nf_ct_put(tmpl);
1407
1408        return ret;
1409}
1410EXPORT_SYMBOL_GPL(nf_conntrack_in);
1411
1412bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1413                          const struct nf_conntrack_tuple *orig)
1414{
1415        bool ret;
1416
1417        rcu_read_lock();
1418        ret = nf_ct_invert_tuple(inverse, orig,
1419                                 __nf_ct_l3proto_find(orig->src.l3num),
1420                                 __nf_ct_l4proto_find(orig->src.l3num,
1421                                                      orig->dst.protonum));
1422        rcu_read_unlock();
1423        return ret;
1424}
1425EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
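
/* Illustrative example (hypothetical helper): inverting a tuple to look
 * up the same flow as seen from the opposite direction.
 */
#if 0
static struct nf_conntrack_tuple_hash *
reverse_lookup_example(struct net *net, const struct nf_conntrack_tuple *orig)
{
        struct nf_conntrack_tuple reply;

        if (!nf_ct_invert_tuplepr(&reply, orig))
                return NULL;

        return nf_conntrack_find_get(net, &nf_ct_zone_dflt, &reply);
}
#endif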
1426
1427/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1428   implicitly racy: see __nf_conntrack_confirm */
1429void nf_conntrack_alter_reply(struct nf_conn *ct,
1430                              const struct nf_conntrack_tuple *newreply)
1431{
1432        struct nf_conn_help *help = nfct_help(ct);
1433
1434        /* Should be unconfirmed, so not in hash table yet */
1435        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1436
1437        pr_debug("Altering reply tuple of %p to ", ct);
1438        nf_ct_dump_tuple(newreply);
1439
1440        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1441        if (ct->master || (help && !hlist_empty(&help->expectations)))
1442                return;
1443
1444        rcu_read_lock();
1445        __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1446        rcu_read_unlock();
1447}
1448EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1449
1450/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1451void __nf_ct_refresh_acct(struct nf_conn *ct,
1452                          enum ip_conntrack_info ctinfo,
1453                          const struct sk_buff *skb,
1454                          unsigned long extra_jiffies,
1455                          int do_acct)
1456{
1457        NF_CT_ASSERT(skb);
1458
1459        /* Only update if this is not a fixed timeout */
1460        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1461                goto acct;
1462
1463        /* If not in hash table, timer will not be active yet */
1464        if (nf_ct_is_confirmed(ct))
1465                extra_jiffies += nfct_time_stamp;
1466
1467        ct->timeout = extra_jiffies;
1468acct:
1469        if (do_acct)
1470                nf_ct_acct_update(ct, ctinfo, skb->len);
1471}
1472EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
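/* Since conntrack stopped using per-entry timers, ct->timeout is a plain
 * timestamp: for a confirmed entry the code above stores "now +
 * extra_jiffies" and expiry is checked lazily via nf_ct_is_expired(); an
 * unconfirmed entry keeps the relative value until confirmation adds the
 * timestamp.  L4 trackers normally call this through the inline wrapper in
 * nf_conntrack.h, roughly:
 *
 *      nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 *              // == __nf_ct_refresh_acct(ct, ctinfo, skb, timeout, 1)
 *
 * so every accepted packet both extends the lifetime and, when accounting
 * is enabled, updates the acct extension.
 */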
1473
1474bool nf_ct_kill_acct(struct nf_conn *ct,
1475                     enum ip_conntrack_info ctinfo,
1476                     const struct sk_buff *skb)
1477{
1478        nf_ct_acct_update(ct, ctinfo, skb->len);
1479
1480        return nf_ct_delete(ct, 0, 0);
1481}
1482EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1483
1484#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1485
1486#include <linux/netfilter/nfnetlink.h>
1487#include <linux/netfilter/nfnetlink_conntrack.h>
1488#include <linux/mutex.h>
1489
1490/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1491 * in ip_conntrack_core, since we don't want the protocols to autoload
1492 * or depend on ctnetlink */
1493int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1494                               const struct nf_conntrack_tuple *tuple)
1495{
1496        if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1497            nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1498                goto nla_put_failure;
1499        return 0;
1500
1501nla_put_failure:
1502        return -1;
1503}
1504EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1505
1506const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1507        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1508        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1509};
1510EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1511
1512int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1513                               struct nf_conntrack_tuple *t)
1514{
1515        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1516                return -EINVAL;
1517
1518        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1519        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1520
1521        return 0;
1522}
1523EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1524
1525int nf_ct_port_nlattr_tuple_size(void)
1526{
1527        return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1528}
1529EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
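/* A port-based L4 tracker typically plugs these helpers straight into its
 * struct nf_conntrack_l4proto (sketch, netlink-related fields only):
 *
 *      .tuple_to_nlattr   = nf_ct_port_tuple_to_nlattr,
 *      .nlattr_to_tuple   = nf_ct_port_nlattr_to_tuple,
 *      .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
 *      .nla_policy        = nf_ct_port_nla_policy,
 *
 * so ctnetlink can dump and parse CTA_PROTO_SRC_PORT/CTA_PROTO_DST_PORT
 * without the protocol module having to depend on nfnetlink itself.
 */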
1530#endif
1531
1532/* Used by ipt_REJECT and ip6t_REJECT. */
1533static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1534{
1535        struct nf_conn *ct;
1536        enum ip_conntrack_info ctinfo;
1537
1538        /* This ICMP is in reverse direction to the packet which caused it */
1539        ct = nf_ct_get(skb, &ctinfo);
1540        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1541                ctinfo = IP_CT_RELATED_REPLY;
1542        else
1543                ctinfo = IP_CT_RELATED;
1544
1545        /* Attach to new skbuff, and increment count */
1546        nf_ct_set(nskb, ct, ctinfo);
1547        nf_conntrack_get(skb_nfct(nskb));
1548}
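/* This function is installed as the ip_ct_attach hook in
 * nf_conntrack_init_end() below.  The REJECT targets reach it indirectly
 * through nf_ct_attach() in net/netfilter/core.c, roughly:
 *
 *      attach = rcu_dereference(ip_ct_attach);
 *      if (attach)
 *              attach(nskb, oldskb);   // ends up here
 *
 * so the generated ICMP error inherits the conntrack of the packet that
 * triggered it, marked as RELATED traffic in the opposite direction.
 */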
1549
1550/* Bring out ya dead! */
1551static struct nf_conn *
1552get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1553                void *data, unsigned int *bucket)
1554{
1555        struct nf_conntrack_tuple_hash *h;
1556        struct nf_conn *ct;
1557        struct hlist_nulls_node *n;
1558        int cpu;
1559        spinlock_t *lockp;
1560
1561        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1562                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1563                local_bh_disable();
1564                nf_conntrack_lock(lockp);
1565                if (*bucket < nf_conntrack_htable_size) {
1566                        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
1567                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1568                                        continue;
1569                                ct = nf_ct_tuplehash_to_ctrack(h);
1570                                if (net_eq(nf_ct_net(ct), net) &&
1571                                    iter(ct, data))
1572                                        goto found;
1573                        }
1574                }
1575                spin_unlock(lockp);
1576                local_bh_enable();
1577                cond_resched();
1578        }
1579
1580        for_each_possible_cpu(cpu) {
1581                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1582
1583                spin_lock_bh(&pcpu->lock);
1584                hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1585                        ct = nf_ct_tuplehash_to_ctrack(h);
1586                        if (iter(ct, data))
1587                                set_bit(IPS_DYING_BIT, &ct->status);
1588                }
1589                spin_unlock_bh(&pcpu->lock);
1590                cond_resched();
1591        }
1592        return NULL;
1593found:
1594        atomic_inc(&ct->ct_general.use);
1595        spin_unlock(lockp);
1596        local_bh_enable();
1597        return ct;
1598}
1599
1600void nf_ct_iterate_cleanup(struct net *net,
1601                           int (*iter)(struct nf_conn *i, void *data),
1602                           void *data, u32 portid, int report)
1603{
1604        struct nf_conn *ct;
1605        unsigned int bucket = 0;
1606
1607        might_sleep();
1608
1609        if (atomic_read(&net->ct.count) == 0)
1610                return;
1611
1612        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1613                /* Time to push up daisies... */
1614
1615                nf_ct_delete(ct, portid, report);
1616                nf_ct_put(ct);
1617                cond_resched();
1618        }
1619}
1620EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
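/* The iter callback decides per conntrack whether the entry should be
 * killed (non-zero) or kept (zero).  A hypothetical caller flushing all
 * entries with a given mark could look like this (sketch; the helper name
 * is made up, and ct->mark needs CONFIG_NF_CONNTRACK_MARK):
 *
 *      static int kill_by_mark(struct nf_conn *ct, void *data)
 *      {
 *              const u32 *mark = data;
 *
 *              return ct->mark == *mark;
 *      }
 *
 *      nf_ct_iterate_cleanup(net, kill_by_mark, &mark, 0, 0);
 *
 * kill_all() below is simply the "match everything" variant used when a
 * whole namespace is torn down.
 */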
1621
1622static int kill_all(struct nf_conn *i, void *data)
1623{
1624        return 1;
1625}
1626
1627void nf_ct_free_hashtable(void *hash, unsigned int size)
1628{
1629        if (is_vmalloc_addr(hash))
1630                vfree(hash);
1631        else
1632                free_pages((unsigned long)hash,
1633                           get_order(sizeof(struct hlist_head) * size));
1634}
1635EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1636
1637static int untrack_refs(void)
1638{
1639        int cnt = 0, cpu;
1640
1641        for_each_possible_cpu(cpu) {
1642                struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1643
1644                cnt += atomic_read(&ct->ct_general.use) - 1;
1645        }
1646        return cnt;
1647}
1648
1649void nf_conntrack_cleanup_start(void)
1650{
1651        conntrack_gc_work.exiting = true;
1652        RCU_INIT_POINTER(ip_ct_attach, NULL);
1653}
1654
1655void nf_conntrack_cleanup_end(void)
1656{
1657        RCU_INIT_POINTER(nf_ct_destroy, NULL);
1658        while (untrack_refs() > 0)
1659                schedule();
1660
1661        cancel_delayed_work_sync(&conntrack_gc_work.dwork);
1662        nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1663
1664        nf_conntrack_proto_fini();
1665        nf_conntrack_seqadj_fini();
1666        nf_conntrack_labels_fini();
1667        nf_conntrack_helper_fini();
1668        nf_conntrack_timeout_fini();
1669        nf_conntrack_ecache_fini();
1670        nf_conntrack_tstamp_fini();
1671        nf_conntrack_acct_fini();
1672        nf_conntrack_expect_fini();
1673
1674        kmem_cache_destroy(nf_conntrack_cachep);
1675}
1676
1677/*
1678 * Mishearing the voices in his head, our hero wonders how he's
1679 * supposed to kill the mall.
1680 */
1681void nf_conntrack_cleanup_net(struct net *net)
1682{
1683        LIST_HEAD(single);
1684
1685        list_add(&net->exit_list, &single);
1686        nf_conntrack_cleanup_net_list(&single);
1687}
1688
1689void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1690{
1691        int busy;
1692        struct net *net;
1693
1694        /*
1695         * This makes sure all current packets have passed through
1696         * the netfilter framework.  Roll on, two-stage module
1697         * delete...
1698         */
1699        synchronize_net();
1700i_see_dead_people:
1701        busy = 0;
1702        list_for_each_entry(net, net_exit_list, exit_list) {
1703                nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1704                if (atomic_read(&net->ct.count) != 0)
1705                        busy = 1;
1706        }
1707        if (busy) {
1708                schedule();
1709                goto i_see_dead_people;
1710        }
1711
1712        list_for_each_entry(net, net_exit_list, exit_list) {
1713                nf_conntrack_proto_pernet_fini(net);
1714                nf_conntrack_helper_pernet_fini(net);
1715                nf_conntrack_ecache_pernet_fini(net);
1716                nf_conntrack_tstamp_pernet_fini(net);
1717                nf_conntrack_acct_pernet_fini(net);
1718                nf_conntrack_expect_pernet_fini(net);
1719                free_percpu(net->ct.stat);
1720                free_percpu(net->ct.pcpu_lists);
1721        }
1722}
1723
1724void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1725{
1726        struct hlist_nulls_head *hash;
1727        unsigned int nr_slots, i;
1728        size_t sz;
1729
1730        if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1731                return NULL;
1732
1733        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1734        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1735
1736        if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1737                return NULL;
1738
1739        sz = nr_slots * sizeof(struct hlist_nulls_head);
1740        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1741                                        get_order(sz));
1742        if (!hash)
1743                hash = vzalloc(sz);
1744
1745        if (hash && nulls)
1746                for (i = 0; i < nr_slots; i++)
1747                        INIT_HLIST_NULLS_HEAD(&hash[i], i);
1748
1749        return hash;
1750}
1751EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
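/* Note the size contract: *sizep is rounded up so the table occupies whole
 * pages, and the caller must later free with the (possibly larger) size
 * that was written back.  Expected usage, as a sketch:
 *
 *      unsigned int size = requested_buckets;
 *      struct hlist_nulls_head *hash;
 *
 *      hash = nf_ct_alloc_hashtable(&size, 1);   // size may grow here
 *      ...
 *      nf_ct_free_hashtable(hash, size);         // pass the rounded size back
 */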
1752
1753int nf_conntrack_hash_resize(unsigned int hashsize)
1754{
1755        int i, bucket;
1756        unsigned int old_size;
1757        struct hlist_nulls_head *hash, *old_hash;
1758        struct nf_conntrack_tuple_hash *h;
1759        struct nf_conn *ct;
1760
1761        if (!hashsize)
1762                return -EINVAL;
1763
1764        hash = nf_ct_alloc_hashtable(&hashsize, 1);
1765        if (!hash)
1766                return -ENOMEM;
1767
1768        old_size = nf_conntrack_htable_size;
1769        if (old_size == hashsize) {
1770                nf_ct_free_hashtable(hash, hashsize);
1771                return 0;
1772        }
1773
1774        local_bh_disable();
1775        nf_conntrack_all_lock();
1776        write_seqcount_begin(&nf_conntrack_generation);
1777
1778        /* Lookups in the old hash might happen in parallel, which means we
1779         * might get false negatives during connection lookup. New connections
1780         * created because of a false negative won't make it into the hash
1781         * though, since that requires taking the locks.
1782         */
1783
1784        for (i = 0; i < nf_conntrack_htable_size; i++) {
1785                while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
1786                        h = hlist_nulls_entry(nf_conntrack_hash[i].first,
1787                                              struct nf_conntrack_tuple_hash, hnnode);
1788                        ct = nf_ct_tuplehash_to_ctrack(h);
1789                        hlist_nulls_del_rcu(&h->hnnode);
1790                        bucket = __hash_conntrack(nf_ct_net(ct),
1791                                                  &h->tuple, hashsize);
1792                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1793                }
1794        }
1795        old_size = nf_conntrack_htable_size;
1796        old_hash = nf_conntrack_hash;
1797
1798        nf_conntrack_hash = hash;
1799        nf_conntrack_htable_size = hashsize;
1800
1801        write_seqcount_end(&nf_conntrack_generation);
1802        nf_conntrack_all_unlock();
1803        local_bh_enable();
1804
1805        synchronize_net();
1806        nf_ct_free_hashtable(old_hash, old_size);
1807        return 0;
1808}
1809
1810int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1811{
1812        unsigned int hashsize;
1813        int rc;
1814
1815        if (current->nsproxy->net_ns != &init_net)
1816                return -EOPNOTSUPP;
1817
1818        /* On boot, we can set this without any fancy locking. */
1819        if (!nf_conntrack_htable_size)
1820                return param_set_uint(val, kp);
1821
1822        rc = kstrtouint(val, 0, &hashsize);
1823        if (rc)
1824                return rc;
1825
1826        return nf_conntrack_hash_resize(hashsize);
1827}
1828EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1829
1830module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1831                  &nf_conntrack_htable_size, 0600);
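/* With the 0600 permissions above, the table can be resized at runtime
 * from the initial network namespace, e.g. (illustrative):
 *
 *      echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which lands in nf_conntrack_set_hashsize() and from there in
 * nf_conntrack_hash_resize().  Setting nf_conntrack.hashsize= at boot
 * instead takes the early param_set_uint() path, since
 * nf_conntrack_htable_size is still zero at that point.
 */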
1832
1833void nf_ct_untracked_status_or(unsigned long bits)
1834{
1835        int cpu;
1836
1837        for_each_possible_cpu(cpu)
1838                per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1839}
1840EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1841
1842int nf_conntrack_init_start(void)
1843{
1844        int max_factor = 8;
1845        int ret = -ENOMEM;
1846        int i, cpu;
1847
1848        seqcount_init(&nf_conntrack_generation);
1849
1850        for (i = 0; i < CONNTRACK_LOCKS; i++)
1851                spin_lock_init(&nf_conntrack_locks[i]);
1852
1853        if (!nf_conntrack_htable_size) {
1854                /* Idea from tcp.c: use 1/16384 of memory.
1855                 * On i386: 32MB machine has 512 buckets.
1856                 * >= 1GB machines have 16384 buckets.
1857                 * >= 4GB machines have 65536 buckets.
1858                 */
1859                nf_conntrack_htable_size
1860                        = (((totalram_pages << PAGE_SHIFT) / 16384)
1861                           / sizeof(struct hlist_head));
1862                if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
1863                        nf_conntrack_htable_size = 65536;
1864                else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1865                        nf_conntrack_htable_size = 16384;
1866                if (nf_conntrack_htable_size < 32)
1867                        nf_conntrack_htable_size = 32;
1868
1869                /* Use a max. factor of four by default to get the same max as
1870                 * with the old struct list_heads. When a table size is given
1871                 * we use the old value of 8 to avoid reducing the max.
1872                 * entries. */
1873                max_factor = 4;
1874        }
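        /* Worked example of the sizing above, assuming 4 KiB pages and
         * 8-byte hlist heads: with roughly 8 GiB of RAM the 1/16384 rule
         * gives (2^33 / 16384) / 8 = 65536 buckets, the > 4 GiB clamp keeps
         * it at 65536, and with max_factor == 4:
         *
         *      nf_conntrack_max = 4 * 65536 = 262144 tracked connections
         *
         * A hashsize supplied by the administrator skips this block, so the
         * default max_factor of 8 is used instead.
         */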
1875
1876        nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
1877        if (!nf_conntrack_hash)
1878                return -ENOMEM;
1879
1880        nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1881
1882        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1883                                                sizeof(struct nf_conn),
1884                                                NFCT_INFOMASK + 1,
1885                                                SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
1886        if (!nf_conntrack_cachep)
1887                goto err_cachep;
1888
1889        printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1890               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1891               nf_conntrack_max);
1892
1893        ret = nf_conntrack_expect_init();
1894        if (ret < 0)
1895                goto err_expect;
1896
1897        ret = nf_conntrack_acct_init();
1898        if (ret < 0)
1899                goto err_acct;
1900
1901        ret = nf_conntrack_tstamp_init();
1902        if (ret < 0)
1903                goto err_tstamp;
1904
1905        ret = nf_conntrack_ecache_init();
1906        if (ret < 0)
1907                goto err_ecache;
1908
1909        ret = nf_conntrack_timeout_init();
1910        if (ret < 0)
1911                goto err_timeout;
1912
1913        ret = nf_conntrack_helper_init();
1914        if (ret < 0)
1915                goto err_helper;
1916
1917        ret = nf_conntrack_labels_init();
1918        if (ret < 0)
1919                goto err_labels;
1920
1921        ret = nf_conntrack_seqadj_init();
1922        if (ret < 0)
1923                goto err_seqadj;
1924
1925        ret = nf_conntrack_proto_init();
1926        if (ret < 0)
1927                goto err_proto;
1928
1929        /* Set up fake conntrack: to never be deleted, not in any hashes */
1930        for_each_possible_cpu(cpu) {
1931                struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1932                write_pnet(&ct->ct_net, &init_net);
1933                atomic_set(&ct->ct_general.use, 1);
1934        }
1935        /*  - and make it look like a confirmed connection */
1936        nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1937
1938        conntrack_gc_work_init(&conntrack_gc_work);
1939        queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
1940
1941        return 0;
1942
1943err_proto:
1944        nf_conntrack_seqadj_fini();
1945err_seqadj:
1946        nf_conntrack_labels_fini();
1947err_labels:
1948        nf_conntrack_helper_fini();
1949err_helper:
1950        nf_conntrack_timeout_fini();
1951err_timeout:
1952        nf_conntrack_ecache_fini();
1953err_ecache:
1954        nf_conntrack_tstamp_fini();
1955err_tstamp:
1956        nf_conntrack_acct_fini();
1957err_acct:
1958        nf_conntrack_expect_fini();
1959err_expect:
1960        kmem_cache_destroy(nf_conntrack_cachep);
1961err_cachep:
1962        nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1963        return ret;
1964}
1965
1966void nf_conntrack_init_end(void)
1967{
1968        /* For use by REJECT target */
1969        RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1970        RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1971}
1972
1973/*
1974 * We need to use special "null" values, not used in hash table
1975 */
1976#define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
1977#define DYING_NULLS_VAL         ((1<<30)+1)
1978#define TEMPLATE_NULLS_VAL      ((1<<30)+2)
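/* An hlist_nulls list terminates in a tagged "nulls" value rather than
 * NULL, so a lockless reader can tell which list it ended up on after a
 * concurrent delete/re-add.  The per-cpu lists use the three values above,
 * while the hash buckets use their bucket number (see
 * nf_ct_alloc_hashtable()).  A reader therefore checks, roughly:
 *
 *      if (get_nulls_value(n) != expected_value)
 *              goto restart;   // raced and walked onto another list
 *
 * as done in ____nf_conntrack_find() earlier in this file.
 */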
1979
1980int nf_conntrack_init_net(struct net *net)
1981{
1982        int ret = -ENOMEM;
1983        int cpu;
1984
1985        atomic_set(&net->ct.count, 0);
1986
1987        net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1988        if (!net->ct.pcpu_lists)
1989                goto err_stat;
1990
1991        for_each_possible_cpu(cpu) {
1992                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1993
1994                spin_lock_init(&pcpu->lock);
1995                INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1996                INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1997        }
1998
1999        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2000        if (!net->ct.stat)
2001                goto err_pcpu_lists;
2002
2003        ret = nf_conntrack_expect_pernet_init(net);
2004        if (ret < 0)
2005                goto err_expect;
2006        ret = nf_conntrack_acct_pernet_init(net);
2007        if (ret < 0)
2008                goto err_acct;
2009        ret = nf_conntrack_tstamp_pernet_init(net);
2010        if (ret < 0)
2011                goto err_tstamp;
2012        ret = nf_conntrack_ecache_pernet_init(net);
2013        if (ret < 0)
2014                goto err_ecache;
2015        ret = nf_conntrack_helper_pernet_init(net);
2016        if (ret < 0)
2017                goto err_helper;
2018        ret = nf_conntrack_proto_pernet_init(net);
2019        if (ret < 0)
2020                goto err_proto;
2021        return 0;
2022
2023err_proto:
2024        nf_conntrack_helper_pernet_fini(net);
2025err_helper:
2026        nf_conntrack_ecache_pernet_fini(net);
2027err_ecache:
2028        nf_conntrack_tstamp_pernet_fini(net);
2029err_tstamp:
2030        nf_conntrack_acct_pernet_fini(net);
2031err_acct:
2032        nf_conntrack_expect_pernet_fini(net);
2033err_expect:
2034        free_percpu(net->ct.stat);
2035err_pcpu_lists:
2036        free_percpu(net->ct.pcpu_lists);
2037err_stat:
2038        return ret;
2039}
2040