linux/include/linux/netfilter/x_tables.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X_TABLES_H
#define _X_TABLES_H


#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <linux/netfilter.h>
#include <uapi/linux/netfilter/x_tables.h>

/* Test a struct->invflags and a boolean for inequality */
#define NF_INVF(ptr, flag, boolean)                                     \
        ((boolean) ^ !!((ptr)->invflags & (flag)))

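/*
 * Example (illustrative sketch, not part of the original header): a match
 * extension typically folds its "!"-style inversion flag into the verdict
 * with NF_INVF().  The example_mtinfo layout and the EXAMPLE_INV_SRC flag
 * below are assumptions made only for this example.
 *
 *      static bool example_mt(const struct sk_buff *skb,
 *                             struct xt_action_param *par)
 *      {
 *              const struct example_mtinfo *info = par->matchinfo;
 *              bool hit = ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &info->addr);
 *
 *              // true when the address matched and the rule is not negated,
 *              // or when it did not match and the rule carries the "!" flag
 *              return NF_INVF(info, EXAMPLE_INV_SRC, hit);
 *      }
 */
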
/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:      the match extension
 * @target:     the target extension
 * @matchinfo:  per-match data
 * @targinfo:   per-target data
 * @state:      pointer to hook state this packet came from
 * @fragoff:    packet is a fragment, this is the data offset
 * @thoff:      position of transport header relative to skb->data
 *
 * Fields written to by extensions:
 *
 * @hotdrop:    drop packet if we had inspection problems
 */
struct xt_action_param {
        union {
                const struct xt_match *match;
                const struct xt_target *target;
        };
        union {
                const void *matchinfo, *targinfo;
        };
        const struct nf_hook_state *state;
        int fragoff;
        unsigned int thoff;
        bool hotdrop;
};

static inline struct net *xt_net(const struct xt_action_param *par)
{
        return par->state->net;
}

static inline struct net_device *xt_in(const struct xt_action_param *par)
{
        return par->state->in;
}

static inline const char *xt_inname(const struct xt_action_param *par)
{
        return par->state->in->name;
}

static inline struct net_device *xt_out(const struct xt_action_param *par)
{
        return par->state->out;
}

static inline const char *xt_outname(const struct xt_action_param *par)
{
        return par->state->out->name;
}

static inline unsigned int xt_hooknum(const struct xt_action_param *par)
{
        return par->state->hook;
}

static inline u_int8_t xt_family(const struct xt_action_param *par)
{
        return par->state->pf;
}

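/*
 * Example (illustrative sketch, not part of the original header): inside a
 * match or target callback, the hook state is reached through the accessors
 * above rather than by touching par->state directly.  The restriction to
 * NFPROTO_IPV4 and the debug output are only illustrative.
 *
 *      static bool example_mt(const struct sk_buff *skb,
 *                             struct xt_action_param *par)
 *      {
 *              // this hypothetical match only applies to IPv4 rules
 *              if (xt_family(par) != NFPROTO_IPV4)
 *                      return false;
 *
 *              // state->in/out may be absent depending on the hook, so
 *              // check before using xt_inname()/xt_outname()
 *              if (!xt_in(par))
 *                      return false;
 *
 *              pr_debug("hook %u dev %s net %p\n", xt_hooknum(par),
 *                       xt_inname(par), xt_net(par));
 *              return true;
 *      }
 */
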
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:        network namespace through which the check was invoked
 * @table:      name of the table the rule is being inserted into
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:      struct xt_match through which this function was invoked
 * @matchinfo:  per-match data
 * @hook_mask:  via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_match *match;
        void *matchinfo;
        unsigned int hook_mask;
        u_int8_t family;
        bool nft_compat;
};

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
        struct net *net;
        const struct xt_match *match;
        void *matchinfo;
        u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields: see above.
 */
struct xt_tgchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_target *target;
        void *targinfo;
        unsigned int hook_mask;
        u_int8_t family;
        bool nft_compat;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
        struct net *net;
        const struct xt_target *target;
        void *targinfo;
        u_int8_t family;
};

struct xt_match {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Return true or false: return false and set par->hotdrop = true
           to force an immediate packet drop. */
        /* Arguments changed since 2.6.9, as this must now handle
           non-linear skbs, using skb_header_pointer and
           skb_ip_make_writable. */
        bool (*match)(const struct sk_buff *skb,
                      struct xt_action_param *);

        /* Called when the user tries to insert an entry of this type. */
        int (*checkentry)(const struct xt_mtchk_param *);

        /* Called when an entry of this type is deleted. */
        void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
        /* Called when the userspace alignment differs from the kernel one */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int matchsize;
        unsigned int usersize;
#ifdef CONFIG_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};

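/*
 * Example (illustrative sketch, not part of the original header): the
 * skeleton of a match extension.  The "example" name, the example_mtinfo
 * userspace structure and the behaviour of the match are assumptions made
 * for the sake of the example, not an existing extension.
 *
 *      struct example_mtinfo {
 *              __u8 invert;    // layout shared with the iptables userspace part
 *      };
 *
 *      static bool example_mt(const struct sk_buff *skb,
 *                             struct xt_action_param *par)
 *      {
 *              const struct example_mtinfo *info = par->matchinfo;
 *              bool hit = skb->mark != 0;      // hypothetical test
 *
 *              return hit ^ !!info->invert;
 *      }
 *
 *      static int example_mt_check(const struct xt_mtchk_param *par)
 *      {
 *              const struct example_mtinfo *info = par->matchinfo;
 *
 *              // reject nonsensical rules; return 0 on success,
 *              // a negative errno otherwise
 *              return info->invert > 1 ? -EINVAL : 0;
 *      }
 *
 *      static struct xt_match example_mt_reg __read_mostly = {
 *              .name       = "example",
 *              .revision   = 0,
 *              .family     = NFPROTO_UNSPEC,
 *              .match      = example_mt,
 *              .checkentry = example_mt_check,
 *              .matchsize  = sizeof(struct example_mtinfo),
 *              .me         = THIS_MODULE,
 *      };
 */
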
/* Registration hooks for targets. */
struct xt_target {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Returns verdict. Argument order changed since 2.6.9, as this
           must now handle non-linear skbs, using skb_copy_bits and
           skb_ip_make_writable. */
        unsigned int (*target)(struct sk_buff *skb,
                               const struct xt_action_param *);

        /* Called when the user tries to insert an entry of this type:
           hook_mask is a bitmask of hooks from which it can be
           called. */
        /* Should return 0 on success or an error code otherwise (-Exxxx). */
        int (*checkentry)(const struct xt_tgchk_param *);

        /* Called when an entry of this type is deleted. */
        void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
        /* Called when the userspace alignment differs from the kernel one */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int targetsize;
        unsigned int usersize;
#ifdef CONFIG_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};

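/*
 * Example (illustrative sketch, not part of the original header): the
 * skeleton of a target extension.  A target returns an xtables verdict
 * (XT_CONTINUE, NF_DROP, NF_ACCEPT, ...).  The "EXMARK" name, the
 * example_tginfo layout and the mangle-table restriction are assumptions
 * made for this example only.
 *
 *      struct example_tginfo {
 *              __u32 mark;
 *      };
 *
 *      static unsigned int example_tg(struct sk_buff *skb,
 *                                     const struct xt_action_param *par)
 *      {
 *              const struct example_tginfo *info = par->targinfo;
 *
 *              skb->mark = info->mark;         // mangle the packet
 *              return XT_CONTINUE;             // keep traversing the chain
 *      }
 *
 *      static struct xt_target example_tg_reg __read_mostly = {
 *              .name       = "EXMARK",
 *              .revision   = 0,
 *              .family     = NFPROTO_UNSPEC,
 *              .table      = "mangle",
 *              .target     = example_tg,
 *              .targetsize = sizeof(struct example_tginfo),
 *              .me         = THIS_MODULE,
 *      };
 */
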
/* Furniture shopping... */
struct xt_table {
        struct list_head list;

        /* What hooks you will enter on */
        unsigned int valid_hooks;

        /* Man behind the curtain... */
        struct xt_table_info *private;

        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        u_int8_t af;            /* address/protocol family */
        int priority;           /* hook order */

        /* called when table is needed in the given netns */
        int (*table_init)(struct net *net);

        /* A unique name... */
        const char name[XT_TABLE_MAXNAMELEN];
};

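/*
 * Example (illustrative sketch, not part of the original header): an
 * iptables-style table is usually declared as a constant template and
 * brought to life per network namespace via xt_register_table() further
 * below.  The "example" name, hook mask and init callback are assumptions.
 *
 *      static const struct xt_table example_table = {
 *              .name        = "example",
 *              .valid_hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_LOCAL_OUT),
 *              .me          = THIS_MODULE,
 *              .af          = NFPROTO_IPV4,
 *              .priority    = NF_IP_PRI_FILTER,
 *              .table_init  = example_table_init,  // hypothetical per-netns init
 *      };
 */
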
#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
        /* Size per table */
        unsigned int size;
        /* Number of entries: FIXME. --RR */
        unsigned int number;
        /* Initial number of entries. Needed for module usage count */
        unsigned int initial_entries;

        /* Entry points and underflows */
        unsigned int hook_entry[NF_INET_NUMHOOKS];
        unsigned int underflow[NF_INET_NUMHOOKS];

        /*
         * Number of user chains. Since tables cannot have loops, at most
         * @stacksize jumps (number of user chains) can possibly be made.
         */
        unsigned int stacksize;
        void ***jumpstack;

        unsigned char entries[0] __aligned(8);
};

int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

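/*
 * Example (illustrative sketch, not part of the original header): a module
 * that provides several extensions typically registers them as an array in
 * its init hook and unregisters the same array on exit.  example_mt_reg[]
 * is a hypothetical array of xt_match definitions like the skeleton shown
 * earlier.
 *
 *      static int __init example_mt_init(void)
 *      {
 *              return xt_register_matches(example_mt_reg,
 *                                         ARRAY_SIZE(example_mt_reg));
 *      }
 *
 *      static void __exit example_mt_exit(void)
 *      {
 *              xt_unregister_matches(example_mt_reg, ARRAY_SIZE(example_mt_reg));
 *      }
 *
 *      module_init(example_mt_init);
 *      module_exit(example_mt_exit);
 */
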
int xt_check_entry_offsets(const void *base, const char *elems,
                           unsigned int target_offset,
                           unsigned int next_offset);

unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
                         unsigned int target, unsigned int size);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
                   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
                    bool inv_proto);

int xt_match_to_user(const struct xt_entry_match *m,
                     struct xt_entry_match __user *u);
int xt_target_to_user(const struct xt_entry_target *t,
                      struct xt_entry_target __user *u);
int xt_data_to_user(void __user *dst, const void *src,
                    int usersize, int size, int aligned_size);

void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                                 struct xt_counters_info *info, bool compat);

struct xt_table *xt_register_table(struct net *net,
                                   const struct xt_table *table,
                                   struct xt_table_info *bootstrap,
                                   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
                                       unsigned int num_counters,
                                       struct xt_table_info *newinfo,
                                       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
                     int *err);

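/*
 * Example (illustrative sketch, not part of the original header): the
 * xt_request_find_*() variants attempt a module autoload and, as used by
 * the iptables core, report failure via ERR_PTR() rather than NULL.  The
 * surrounding translate-table context ("m" being a rule's entry_match) is
 * assumed here.
 *
 *      struct xt_match *match;
 *
 *      match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
 *                                    m->u.user.revision);
 *      if (IS_ERR(match))
 *              return PTR_ERR(match);
 *      // ... fill in the rule, call xt_check_match(), etc.
 */
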
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
                                    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * the low order bit is set to 1 while a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);

/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if the current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait for the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this CPU
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
        unsigned int addend;

        /*
         * Low order bit of sequence is set if we already
         * called xt_write_recseq_begin().
         */
        addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

        /*
         * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
         * We don't check the addend value to avoid a test and conditional
         * jump, since addend is most likely 1.
         */
        __this_cpu_add(xt_recseq.sequence, addend);
        smp_wmb();

        return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
        /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
        smp_wmb();
        __this_cpu_add(xt_recseq.sequence, addend);
}

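/*
 * Example (illustrative sketch, not part of the original header): the
 * packet-path users of this pair bracket their rule traversal with it,
 * with softirqs disabled, roughly like the ip(6)tables core does:
 *
 *      unsigned int addend;
 *
 *      local_bh_disable();
 *      addend = xt_write_recseq_begin();
 *      // ... walk the ruleset and bump per-CPU counters ...
 *      xt_write_recseq_end(addend);
 *      local_bh_enable();
 *
 * A reader such as get_counters() can then pair read_seqcount_begin()/
 * read_seqcount_retry() on xt_recseq to obtain a stable snapshot.
 */
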
/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
                                                   const char *_b,
                                                   const char *_mask)
{
        const unsigned long *a = (const unsigned long *)_a;
        const unsigned long *b = (const unsigned long *)_b;
        const unsigned long *mask = (const unsigned long *)_mask;
        unsigned long ret;

        ret = (a[0] ^ b[0]) & mask[0];
        if (IFNAMSIZ > sizeof(unsigned long))
                ret |= (a[1] ^ b[1]) & mask[1];
        if (IFNAMSIZ > 2 * sizeof(unsigned long))
                ret |= (a[2] ^ b[2]) & mask[2];
        if (IFNAMSIZ > 3 * sizeof(unsigned long))
                ret |= (a[3] ^ b[3]) & mask[3];
        BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
        return ret;
}

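/*
 * Example (illustrative sketch, not part of the original header): rule
 * matching code compares the packet's device name against the interface
 * name/mask stored in the rule; a nonzero result means "different".  The
 * locals "indev" (an IFNAMSIZ-aligned name buffer) and "info" (a rule
 * layout with iniface/iniface_mask fields) are assumed for the example.
 *
 *      unsigned long ret = ifname_compare_aligned(indev, info->iniface,
 *                                                 info->iniface_mask);
 *      if (ret != 0)
 *              return false;   // input device name does not match the rule
 */
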
struct xt_percpu_counter_alloc_state {
        unsigned int off;
        const char __percpu *mem;
};

bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
                             struct xt_counters *counter);
void xt_percpu_counter_free(struct xt_counters *cnt);

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
        if (nr_cpu_ids > 1)
                return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

        return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
        if (nr_cpu_ids > 1)
                return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

        return cnt;
}

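/*
 * Example (illustrative sketch, not part of the original header): on SMP the
 * rule counters live in per-CPU memory, so the packet path resolves them
 * through xt_get_this_cpu_counter() before adding to them (the iptables core
 * does this via its ADD_COUNTER() helper); "e" is an assumed rule entry.
 *
 *      struct xt_counters *counter;
 *
 *      counter = xt_get_this_cpu_counter(&e->counters);
 *      counter->bcnt += skb->len;      // bytes
 *      counter->pcnt += 1;             // packets
 */
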
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
        union {
                struct {
                        u_int16_t match_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t match_size;
                        compat_uptr_t match;
                } kernel;
                u_int16_t match_size;
        } u;
        unsigned char data[0];
};

struct compat_xt_entry_target {
        union {
                struct {
                        u_int16_t target_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t target_size;
                        compat_uptr_t target;
                } kernel;
                u_int16_t target_size;
        } u;
        unsigned char data[0];
};

/* FIXME: this works only for 32-bit tasks; the whole approach needs to be
 * changed in order to calculate the alignment as a function of the current
 * task's alignment. */

struct compat_xt_counters {
        compat_u64 pcnt, bcnt;                  /* Packet and byte counters */
};

struct compat_xt_counters_info {
        char name[XT_TABLE_MAXNAMELEN];
        compat_uint_t num_counters;
        struct compat_xt_counters counters[0];
};

struct _compat_xt_align {
        __u8 u8;
        __u16 u16;
        __u32 u32;
        compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                               unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
                             void __user **dstptr, unsigned int *size);
int xt_compat_check_entry_offsets(const void *base, const char *elems,
                                  unsigned int target_offset,
                                  unsigned int next_offset);

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */