linux/include/linux/netfilter.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
        return -(verdict >> NF_VERDICT_QBITS);
}
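
/*
 * Usage sketch (illustrative, not part of the original file): a hook may
 * embed an errno in its drop verdict with NF_DROP_ERR() from the uapi
 * header; the core later recovers it via NF_DROP_GETERR().  The hook and
 * predicate below are hypothetical.
 *
 *      static unsigned int example_hook(void *priv, struct sk_buff *skb,
 *                                       const struct nf_hook_state *state)
 *      {
 *              if (example_policy_rejects(skb))
 *                      return NF_DROP_ERR(-EHOSTUNREACH);
 *              return NF_ACCEPT;
 *      }
 *
 * nf_hook_slow() then reports NF_DROP_GETERR(verdict) == -EHOSTUNREACH to
 * its caller instead of a generic -EPERM.
 */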

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
{
        return a1->all[0] == a2->all[0] &&
               a1->all[1] == a2->all[1] &&
               a1->all[2] == a2->all[2] &&
               a1->all[3] == a2->all[3];
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
                                     union nf_inet_addr *result,
                                     const union nf_inet_addr *mask)
{
        result->all[0] = a1->all[0] & mask->all[0];
        result->all[1] = a1->all[1] & mask->all[1];
        result->all[2] = a1->all[2] & mask->all[2];
        result->all[3] = a1->all[3] & mask->all[3];
}
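
/*
 * Usage sketch (illustrative, not part of the original file): matching an
 * address against a configured prefix, as a match extension might do.
 * "addr", "net" and "mask" are hypothetical nf_inet_addr values.
 *
 *      union nf_inet_addr masked;
 *
 *      nf_inet_addr_mask(&addr, &masked, &mask);
 *      if (nf_inet_addr_cmp(&masked, &net))
 *              return true;    // address lies within net/mask
 */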

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

struct nf_hook_state {
        unsigned int hook;
        u_int8_t pf;
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
        struct net *net;
        int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
                               struct sk_buff *skb,
                               const struct nf_hook_state *state);
struct nf_hook_ops {
        /* User fills in from here down. */
        nf_hookfn               *hook;
        struct net_device       *dev;
        void                    *priv;
        u_int8_t                pf;
        unsigned int            hooknum;
        /* Hooks are ordered in ascending priority. */
        int                     priority;
};

struct nf_hook_entry {
        nf_hookfn                       *hook;
        void                            *priv;
};

struct nf_hook_entries_rcu_head {
        struct rcu_head head;
        void    *allocation;
};

struct nf_hook_entries {
        u16                             num_hook_entries;
        /* padding */
        struct nf_hook_entry            hooks[];

        /* trailer: pointers to the original nf_hook_ops of each hook,
         * followed by rcu_head and scratch space used for freeing
         * the structure via call_rcu.
         *
         *   This is not part of struct nf_hook_entry since it's only
         *   needed in the slow path (hook register/unregister):
         * const struct nf_hook_ops     *orig_ops[]
         *
         *   For the same reason, we store this at the end -- it's
         *   only needed when a hook is deleted, not during
         *   packet path processing:
         * struct nf_hook_entries_rcu_head     head
         */
};

static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
        unsigned int n = e->num_hook_entries;
        const void *hook_end;

        hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */

        return (struct nf_hook_ops **)hook_end;
}
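
/*
 * Usage sketch (illustrative, not part of the original file): the orig_ops
 * trailer is only walked on the register/unregister slow path, e.g. to find
 * the slot belonging to a given nf_hook_ops ("unreg" is hypothetical):
 *
 *      struct nf_hook_ops **orig_ops = nf_hook_entries_get_hook_ops(e);
 *      unsigned int i;
 *
 *      for (i = 0; i < e->num_hook_entries; i++)
 *              if (orig_ops[i] == unreg)
 *                      break;          // i is the entry to remove
 */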

static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
                     struct nf_hook_state *state)
{
        return entry->hook(entry->priv, skb, state);
}

static inline void nf_hook_state_init(struct nf_hook_state *p,
                                      unsigned int hook,
                                      u_int8_t pf,
                                      struct net_device *indev,
                                      struct net_device *outdev,
                                      struct sock *sk,
                                      struct net *net,
                                      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        p->hook = hook;
        p->pf = pf;
        p->in = indev;
        p->out = outdev;
        p->sk = sk;
        p->net = net;
        p->okfn = okfn;
}

struct nf_sockopt_ops {
        struct list_head list;

        u_int8_t pf;

        /* Non-inclusive ranges: use 0/0/NULL to never get called. */
        int set_optmin;
        int set_optmax;
        int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
        int (*compat_set)(struct sock *sk, int optval,
                        void __user *user, unsigned int len);
#endif
        int get_optmin;
        int get_optmax;
        int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
        int (*compat_get)(struct sock *sk, int optval,
                        void __user *user, int *len);
#endif
        /* Use the module struct to lock set/get code in place */
        struct module *owner;
};

/* Functions to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n);
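
/*
 * Registration sketch (illustrative, not part of the original file): a
 * module fills in the user-visible part of struct nf_hook_ops and attaches
 * it to a network namespace.  The hook function and chosen priority are
 * examples only.
 *
 *      static const struct nf_hook_ops example_ops = {
 *              .hook           = example_hook,         // an nf_hookfn
 *              .pf             = NFPROTO_IPV4,
 *              .hooknum        = NF_INET_LOCAL_IN,
 *              .priority       = NF_IP_PRI_FILTER,
 *      };
 *
 *      err = nf_register_net_hook(net, &example_ops);
 *      ...
 *      nf_unregister_net_hook(net, &example_ops);
 */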

/* Functions to register get/setsockopt ranges (non-inclusive).  You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
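
/*
 * Registration sketch (illustrative, not part of the original file): a
 * protocol module claims half-open ranges [set_optmin, set_optmax) and
 * [get_optmin, get_optmax) of socket option numbers.  All EXAMPLE_*
 * constants and handlers are hypothetical.
 *
 *      static struct nf_sockopt_ops example_sockopts = {
 *              .pf             = PF_INET,
 *              .set_optmin     = EXAMPLE_BASE_CTL,
 *              .set_optmax     = EXAMPLE_SO_SET_MAX + 1,
 *              .set            = example_set_ctl,
 *              .get_optmin     = EXAMPLE_BASE_CTL,
 *              .get_optmax     = EXAMPLE_SO_GET_MAX + 1,
 *              .get            = example_get_ctl,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      err = nf_register_sockopt(&example_sockopts);
 */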

#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
                 const struct nf_hook_entries *e, unsigned int i);

/**
 *      nf_hook - call a netfilter hook
 *
 *      Returns 1 if the hook has allowed the packet to pass.  The function
 *      okfn must be invoked by the caller in this case.  Any other return
 *      value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        struct nf_hook_entries *hook_head = NULL;
        int ret = 1;

#ifdef HAVE_JUMP_LABEL
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook) &&
            !static_key_false(&nf_hooks_needed[pf][hook]))
                return 1;
#endif

        rcu_read_lock();
        switch (pf) {
        case NFPROTO_IPV4:
                hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
                break;
        case NFPROTO_IPV6:
                hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
                break;
        case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
                if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
                        break;
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
                break;
        case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
                hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
                break;
#if IS_ENABLED(CONFIG_DECNET)
        case NFPROTO_DECNET:
                hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
                break;
#endif
        default:
                WARN_ON_ONCE(1);
                break;
        }

        if (hook_head) {
                struct nf_hook_state state;

                nf_hook_state_init(&state, hook, pf, indev, outdev,
                                   sk, net, okfn);

                ret = nf_hook_slow(skb, &state, hook_head, 0);
        }
        rcu_read_unlock();

        return ret;
}

/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        int ret;

        if (!cond ||
            ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
                ret = okfn(net, sk, skb);
        return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
        struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
        if (ret == 1)
                ret = okfn(net, sk, skb);
        return ret;
}
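
/*
 * Usage sketch (illustrative, not part of the original file): protocol
 * input/output paths wrap their continuation in NF_HOOK(), in the style of
 * IPv4 receive processing; example_rcv_finish stands in for the real okfn.
 *
 *      return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
 *                     net, NULL, skb, skb->dev, NULL,
 *                     example_rcv_finish);
 *
 * If every hook accepts the packet, example_rcv_finish() is called and its
 * return value propagated; if a hook drops, queues or steals the skb,
 * NF_HOOK() returns without invoking it.
 */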

/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  int *len);
#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
                char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
                char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
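
/*
 * Usage sketch (illustrative, not part of the original file): a target that
 * rewrites the IPv4 header makes that many bytes writable first, dropping
 * the packet if this fails.
 *
 *      if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *              return NF_DROP;
 *      iph = ip_hdr(skb);
 *      // ... modify iph, then update the checksum ...
 */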

struct flowi;
struct nf_queue_entry;

__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
                    unsigned int dataoff, u_int8_t protocol,
                    unsigned short family);

__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
                            unsigned int dataoff, unsigned int len,
                            u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
             bool strict, unsigned short family);
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);

#include <net/flow.h>

struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;
enum ip_conntrack_dir;

struct nf_nat_hook {
        int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
                               const struct nlattr *attr);
        void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
        unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
                                  enum nf_nat_manip_type mtype,
                                  enum ip_conntrack_dir dir);
};

extern struct nf_nat_hook __rcu *nf_nat_hook;

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
        struct nf_nat_hook *nat_hook;

        rcu_read_lock();
        nat_hook = rcu_dereference(nf_nat_hook);
        if (nat_hook && nat_hook->decode_session)
                nat_hook->decode_session(skb, fl);
        rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
        struct sk_buff *skb, struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return okfn(net, sk, skb);
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return 1;
}
struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /*CONFIG_NETFILTER*/

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>

extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                         const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                                       const struct sk_buff *skb)
{
        return false;
}
#endif

struct nf_conn;
enum ip_conntrack_info;

struct nf_ct_hook {
        int (*update)(struct net *net, struct sk_buff *skb);
        void (*destroy)(struct nf_conntrack *);
        bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
                              const struct sk_buff *);
};
extern struct nf_ct_hook __rcu *nf_ct_hook;

struct nlattr;

struct nfnl_ct_hook {
        struct nf_conn *(*get_ct)(const struct sk_buff *skb,
                                  enum ip_conntrack_info *ctinfo);
        size_t (*build_size)(const struct nf_conn *ct);
        int (*build)(struct sk_buff *skb, struct nf_conn *ct,
                     enum ip_conntrack_info ctinfo,
                     u_int16_t ct_attr, u_int16_t ct_info_attr);
        int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
        int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
                             u32 portid, u32 report);
        void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;

/**
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by the xtables TEE target to prevent the duplicated skb from
 * being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);
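
/*
 * Usage sketch (illustrative, simplified from how a duplication target may
 * use the flag): bail out if this skb is already a duplicate, and mark the
 * window in which the copy is re-injected so the recursive hook traversal
 * does not duplicate it again.  "copy" is a local sk_buff pointer.
 *
 *      if (this_cpu_read(nf_skb_duplicated))
 *              return;                         // never duplicate a duplicate
 *      copy = pskb_copy(skb, GFP_ATOMIC);
 *      if (!copy)
 *              return;
 *      __this_cpu_write(nf_skb_duplicated, true);
 *      ip_local_out(net, copy->sk, copy);      // re-enters OUTPUT/POSTROUTING
 *      __this_cpu_write(nf_skb_duplicated, false);
 */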

#endif /*__LINUX_NETFILTER_H*/