linux/include/net/netfilter/nf_conntrack.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Connection state tracking for netfilter.  This is separated from,
 * but required by, the (future) NAT layer; it can also be used by an iptables
 * extension.
 *
 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *      - generalize L3 protocol dependent part.
 *
 * Derived from include/linux/netfilter_ipv4/ip_conntrack.h
 */

#ifndef _NF_CONNTRACK_H
#define _NF_CONNTRACK_H

#include <linux/bitops.h>
#include <linux/compiler.h>

#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>

#include <net/netfilter/nf_conntrack_tuple.h>

struct nf_ct_udp {
        unsigned long   stream_ts;
};

/* per conntrack: protocol private data */
union nf_conntrack_proto {
        /* insert conntrack proto private data here */
        struct nf_ct_dccp dccp;
        struct ip_ct_sctp sctp;
        struct ip_ct_tcp tcp;
        struct nf_ct_udp udp;
        struct nf_ct_gre gre;
        unsigned int tmpl_padto;
};

union nf_conntrack_expect_proto {
        /* insert expect proto private data here */
};

struct nf_conntrack_net {
        /* only used when new connection is allocated: */
        atomic_t count;
        unsigned int expect_count;
        u8 sysctl_auto_assign_helper;
        bool auto_assign_helper_warned;

        /* only used from work queues, configuration plane, and so on: */
        unsigned int users4;
        unsigned int users6;
        unsigned int users_bridge;
#ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_header;
#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
        struct delayed_work ecache_dwork;
        struct netns_ct *ct_net;
#endif
};

#include <linux/types.h>
#include <linux/skbuff.h>

#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>

struct nf_conn {
        /* Usage count in here is 1 for hash table, 1 per skb,
         * plus 1 for any connection(s) we are `master' for
         *
         * Hint: SKBs reference this struct and its refcount via skb->_nfct
         * and the helpers nf_conntrack_get() and nf_conntrack_put().
         * nf_ct_put() is equivalent to nf_conntrack_put(): both decrement
         * the refcount.  Beware that nf_ct_get() is different and does not
         * increment the refcount.
         */
        struct nf_conntrack ct_general;

        spinlock_t      lock;
        /* jiffies32 when this ct is considered dead */
        u32 timeout;

#ifdef CONFIG_NF_CONNTRACK_ZONES
        struct nf_conntrack_zone zone;
#endif
        /* XXX should I move this to the tail ? - Y.K */
        /* These are my tuples; original and reply */
        struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];

        /* Have we seen traffic both ways yet? (bitset) */
        unsigned long status;

        u16             cpu;
        possible_net_t ct_net;

#if IS_ENABLED(CONFIG_NF_NAT)
        struct hlist_node       nat_bysource;
#endif
        /* all members below are initialized via memset */
        struct { } __nfct_init_offset;

        /* If we were expected by an expectation, this will be it */
        struct nf_conn *master;

#if defined(CONFIG_NF_CONNTRACK_MARK)
        u_int32_t mark;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
        u_int32_t secmark;
#endif

        /* Extensions */
        struct nf_ct_ext *ext;

        /* Storage reserved for other modules, must be the last member */
        union nf_conntrack_proto proto;
};

static inline struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
{
        return container_of(hash, struct nf_conn,
                            tuplehash[hash->tuple.dst.dir]);
}

static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
{
        return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
}

static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
{
        return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
}

#define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)

/* get master conntrack via master expectation */
#define master_ct(conntr) (conntr->master)
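
/* Illustrative sketch (not part of this header): reading the original
 * direction's addresses and ports from a conntrack entry via the
 * accessors above.  Assumes an IPv4/TCP connection; field names follow
 * struct nf_conntrack_tuple from <net/netfilter/nf_conntrack_tuple.h>.
 */
#if 0
static void example_dump_orig_tuple(const struct nf_conn *ct)
{
        const struct nf_conntrack_tuple *t = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);

        if (nf_ct_l3num(ct) == NFPROTO_IPV4 && nf_ct_protonum(ct) == IPPROTO_TCP)
                pr_debug("orig: %pI4:%u -> %pI4:%u\n",
                         &t->src.u3.ip, ntohs(t->src.u.tcp.port),
                         &t->dst.u3.ip, ntohs(t->dst.u.tcp.port));
}
#endif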

extern struct net init_net;

static inline struct net *nf_ct_net(const struct nf_conn *ct)
{
        return read_pnet(&ct->ct_net);
}

/* Alter reply tuple (maybe alter helper). */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply);

/* Is this tuple taken? (ignoring any belonging to the given conntrack). */
int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                             const struct nf_conn *ignored_conntrack);

/* Return the conntrack attached to the skb, and set *ctinfo. */
static inline struct nf_conn *
nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
{
        unsigned long nfct = skb_get_nfct(skb);

        *ctinfo = nfct & NFCT_INFOMASK;
        return (struct nf_conn *)(nfct & NFCT_PTRMASK);
}

/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
{
        WARN_ON(!ct);
        nf_conntrack_put(&ct->ct_general);
}
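
/* Illustrative sketch (not part of this header): typical use of
 * nf_ct_get() from a netfilter hook.  nf_ct_get() does not take a
 * reference, so no matching nf_ct_put() is needed here; the conntrack
 * stays valid for as long as the skb holds its own reference.
 */
#if 0
static unsigned int example_hook(void *priv, struct sk_buff *skb,
                                 const struct nf_hook_state *state)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

        if (!ct)        /* no conntrack attached (e.g. untracked) */
                return NF_ACCEPT;

        /* inspect ct / ctinfo here */
        return NF_ACCEPT;
}
#endif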

/* Protocol module loading */
int nf_ct_l3proto_try_module_get(unsigned short l3proto);
void nf_ct_l3proto_module_put(unsigned short l3proto);

/* load module; enable/disable conntrack in this namespace */
int nf_ct_netns_get(struct net *net, u8 nfproto);
void nf_ct_netns_put(struct net *net, u8 nfproto);

/*
 * Allocate a hashtable of hlist_head (if nulls == 0),
 * or hlist_nulls_head (if nulls == 1)
 */
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);

int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num, struct net *net,
                       struct nf_conntrack_tuple *tuple);

void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          u32 extra_jiffies, bool do_acct);

/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
                                      enum ip_conntrack_info ctinfo,
                                      const struct sk_buff *skb,
                                      u32 extra_jiffies)
{
        __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
}

/* Refresh conntrack for this many jiffies */
static inline void nf_ct_refresh(struct nf_conn *ct,
                                 const struct sk_buff *skb,
                                 u32 extra_jiffies)
{
        __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
}
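
/* Illustrative example (not part of this header): a protocol tracker or
 * helper would typically extend a connection's lifetime each time a
 * valid packet is seen.  extra_jiffies is relative to "now", so the
 * call below pushes the timeout roughly 30 seconds into the future and
 * also updates accounting counters.
 */
#if 0
static void example_seen_valid_packet(struct nf_conn *ct,
                                      enum ip_conntrack_info ctinfo,
                                      const struct sk_buff *skb)
{
        nf_ct_refresh_acct(ct, ctinfo, skb, 30 * HZ);
}
#endif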

/* kill conntrack and do accounting */
bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
                     const struct sk_buff *skb);

/* kill conntrack without accounting */
static inline bool nf_ct_kill(struct nf_conn *ct)
{
        return nf_ct_delete(ct, 0, 0);
}

/* Set all unconfirmed conntracks as dying */
void nf_ct_unconfirmed_destroy(struct net *);

/* Iterate over all conntracks: if iter returns true, it's deleted. */
void nf_ct_iterate_cleanup_net(struct net *net,
                               int (*iter)(struct nf_conn *i, void *data),
                               void *data, u32 portid, int report);

/* Also set unconfirmed conntracks as dying.  Only use in module exit path. */
void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
                           void *data);

struct nf_conntrack_zone;

void nf_conntrack_free(struct nf_conn *ct);
struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp);

static inline int nf_ct_is_template(const struct nf_conn *ct)
{
        return test_bit(IPS_TEMPLATE_BIT, &ct->status);
}

/* It's confirmed if it is, or has been, in the hash table. */
static inline int nf_ct_is_confirmed(const struct nf_conn *ct)
{
        return test_bit(IPS_CONFIRMED_BIT, &ct->status);
}

static inline int nf_ct_is_dying(const struct nf_conn *ct)
{
        return test_bit(IPS_DYING_BIT, &ct->status);
}

/* Packet is received from loopback */
static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
{
        return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}

#define nfct_time_stamp ((u32)(jiffies))

/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
        s32 timeout = ct->timeout - nfct_time_stamp;

        return timeout > 0 ? timeout : 0;
}

static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
        return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
}

/* use after obtaining a reference count */
static inline bool nf_ct_should_gc(const struct nf_conn *ct)
{
        return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
               !nf_ct_is_dying(ct);
}
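
/* Note on the arithmetic above (explanatory, not from the original
 * header): ct->timeout and nfct_time_stamp are both u32 jiffies values,
 * so the signed difference (s32)(ct->timeout - nfct_time_stamp) remains
 * correct across jiffies wraparound.  Worked example: with
 * nfct_time_stamp == 0xffffff00 and ct->timeout == 0x00000020 (deadline
 * just past the wrap), the u32 subtraction yields 0x120, which as an
 * s32 is +288 jiffies remaining rather than a huge negative value.
 */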

#define NF_CT_DAY       (86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire; this saves
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static inline void nf_ct_offload_timeout(struct nf_conn *ct)
{
        if (nf_ct_expires(ct) < NF_CT_DAY / 2)
                ct->timeout = nfct_time_stamp + NF_CT_DAY;
}

struct kernel_param;

int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);

extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
extern seqcount_spinlock_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;

/* must be called with rcu read lock held */
static inline void
nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
{
        struct hlist_nulls_head *hptr;
        unsigned int sequence, hsz;

        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hsz = nf_conntrack_htable_size;
                hptr = nf_conntrack_hash;
        } while (read_seqcount_retry(&nf_conntrack_generation, sequence));

        *hash = hptr;
        *hsize = hsz;
}
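
/* Illustrative sketch (not part of this header): walking the global
 * conntrack hash table.  The seqcount loop above guarantees that the
 * table pointer and its size are a consistent pair even if the table
 * is resized concurrently; the walk itself must run under
 * rcu_read_lock().
 */
#if 0
static void example_walk_conntrack_hash(void)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct hlist_nulls_head *hash;
        unsigned int hsize, i;

        rcu_read_lock();
        nf_conntrack_get_ht(&hash, &hsize);
        for (i = 0; i < hsize; i++) {
                hlist_nulls_for_each_entry_rcu(h, n, &hash[i], hnnode) {
                        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
                        /* inspect ct here; take a reference before using
                         * it outside the RCU read-side section */
                }
        }
        rcu_read_unlock();
}
#endif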

struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);

u32 nf_ct_get_id(const struct nf_conn *ct);
u32 nf_conntrack_count(const struct net *net);

static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
{
        skb_set_nfct(skb, (unsigned long)ct | info);
}
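
/* Illustrative example (not part of this header): nf_ct_set() is the
 * counterpart of nf_ct_get() and stores both the conntrack pointer and
 * the ctinfo in skb->_nfct.  For instance, a target that wants a packet
 * to bypass connection tracking can attach no conntrack at all and mark
 * the skb as untracked.
 */
#if 0
static void example_mark_untracked(struct sk_buff *skb)
{
        /* bypass connection tracking for this packet */
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
}
#endif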

extern unsigned int nf_conntrack_net_id;

static inline struct nf_conntrack_net *nf_ct_pernet(const struct net *net)
{
        return net_generic(net, nf_conntrack_net_id);
}

#define NF_CT_STAT_INC(net, count)        __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))

#define MODULE_ALIAS_NFCT_HELPER(helper) \
        MODULE_ALIAS("nfct-helper-" helper)

#endif /* _NF_CONNTRACK_H */