linux/include/linux/netfilter/x_tables.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X_TABLES_H
#define _X_TABLES_H


#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <linux/netfilter.h>
#include <uapi/linux/netfilter/x_tables.h>

/* Test a struct->invflags and a boolean for inequality */
#define NF_INVF(ptr, flag, boolean)					\
        ((boolean) ^ !!((ptr)->invflags & (flag)))

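/*
 * Illustrative use of NF_INVF() (a sketch, not part of this header):
 * a match extension typically keeps a flags/invflags pair in its
 * per-rule data and folds rule inversion ("!") through NF_INVF().
 * The "baz" structure and the BAZ_INV_PORT bit below are invented for
 * the example.
 *
 *      struct baz_mtinfo {
 *              __u16 port;
 *              __u8  flags;
 *              __u8  invflags;
 *      };
 *
 *      static bool baz_port_ok(const struct baz_mtinfo *info, __u16 port)
 *      {
 *              return NF_INVF(info, BAZ_INV_PORT, port == info->port);
 *      }
 *
 * BAZ_INV_PORT would be a bit set in @invflags by the userspace parser
 * whenever the option was prefixed with "!".
 */
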
/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:      the match extension
 * @target:     the target extension
 * @matchinfo:  per-match data
 * @targinfo:   per-target data
 * @state:      pointer to hook state this packet came from
 * @fragoff:    packet is a fragment, this is the data offset
 * @thoff:      position of transport header relative to skb->data
 *
 * Fields written to by extensions:
 *
 * @hotdrop:    drop packet if we had inspection problems
 */
struct xt_action_param {
        union {
                const struct xt_match *match;
                const struct xt_target *target;
        };
        union {
                const void *matchinfo, *targinfo;
        };
        const struct nf_hook_state *state;
        unsigned int thoff;
        u16 fragoff;
        bool hotdrop;
};

static inline struct net *xt_net(const struct xt_action_param *par)
{
        return par->state->net;
}

static inline struct net_device *xt_in(const struct xt_action_param *par)
{
        return par->state->in;
}

static inline const char *xt_inname(const struct xt_action_param *par)
{
        return par->state->in->name;
}

static inline struct net_device *xt_out(const struct xt_action_param *par)
{
        return par->state->out;
}

static inline const char *xt_outname(const struct xt_action_param *par)
{
        return par->state->out->name;
}

static inline unsigned int xt_hooknum(const struct xt_action_param *par)
{
        return par->state->hook;
}

static inline u_int8_t xt_family(const struct xt_action_param *par)
{
        return par->state->pf;
}

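/*
 * Continuing the hypothetical "baz" example: a sketch of a match
 * routine consuming struct xt_action_param.  It shows the usual
 * pattern of reading per-rule data via par->matchinfo, using the
 * helpers above for hook context, and setting par->hotdrop when the
 * packet cannot be inspected.
 *
 *      static bool baz_mt(const struct sk_buff *skb,
 *                         struct xt_action_param *par)
 *      {
 *              const struct baz_mtinfo *info = par->matchinfo;
 *              struct udphdr _udp;
 *              const struct udphdr *uh;
 *
 *              if (par->fragoff != 0)
 *                      return false;
 *
 *              uh = skb_header_pointer(skb, par->thoff, sizeof(_udp), &_udp);
 *              if (!uh) {
 *                      par->hotdrop = true;
 *                      return false;
 *              }
 *              return baz_port_ok(info, ntohs(uh->dest));
 *      }
 */
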
/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:        network namespace through which the check was invoked
 * @table:      table into which the rule is being inserted
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:      struct xt_match through which this function was invoked
 * @matchinfo:  per-match data
 * @hook_mask:  via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_match *match;
        void *matchinfo;
        unsigned int hook_mask;
        u_int8_t family;
        bool nft_compat;
};

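/*
 * A checkentry sketch for the same hypothetical "baz" match: validate
 * the per-rule data when the rule is inserted and return 0 or a
 * negative errno (BAZ_VALID_FLAGS is again made up).
 *
 *      static int baz_mt_check(const struct xt_mtchk_param *par)
 *      {
 *              const struct baz_mtinfo *info = par->matchinfo;
 *
 *              if (info->flags & ~BAZ_VALID_FLAGS)
 *                      return -EINVAL;
 *              if (par->hook_mask & ~((1 << NF_INET_LOCAL_IN) |
 *                                     (1 << NF_INET_FORWARD)))
 *                      return -EINVAL;
 *              return 0;
 *      }
 */
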
/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
        struct net *net;
        const struct xt_match *match;
        void *matchinfo;
        u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields: see above.
 */
struct xt_tgchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_target *target;
        void *targinfo;
        unsigned int hook_mask;
        u_int8_t family;
        bool nft_compat;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
        struct net *net;
        const struct xt_target *target;
        void *targinfo;
        u_int8_t family;
};

struct xt_match {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Return true or false: return false and set par->hotdrop = true
           to force immediate packet drop. */
        /* Arguments changed since 2.6.9, as this must now handle
           non-linear skbs, using skb_header_pointer and
           skb_ip_make_writable. */
        bool (*match)(const struct sk_buff *skb,
                      struct xt_action_param *);

        /* Called when user tries to insert an entry of this type. */
        int (*checkentry)(const struct xt_mtchk_param *);

        /* Called when an entry of this type is deleted. */
        void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
        /* Called when the userspace alignment differs from the kernel's */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int matchsize;
        unsigned int usersize;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};

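/*
 * How a match extension built on struct xt_match typically hangs
 * together (a hedged sketch in module context, not part of this header;
 * the "baz_" names are invented):
 *
 *      static struct xt_match baz_mt_reg __read_mostly = {
 *              .name           = "baz",
 *              .revision       = 0,
 *              .family         = NFPROTO_IPV4,
 *              .match          = baz_mt,
 *              .checkentry     = baz_mt_check,
 *              .matchsize      = sizeof(struct baz_mtinfo),
 *              .me             = THIS_MODULE,
 *      };
 *
 *      static int __init baz_mt_init(void)
 *      {
 *              return xt_register_match(&baz_mt_reg);
 *      }
 *
 *      static void __exit baz_mt_exit(void)
 *      {
 *              xt_unregister_match(&baz_mt_reg);
 *      }
 *      module_init(baz_mt_init);
 *      module_exit(baz_mt_exit);
 */
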
/* Registration hooks for targets. */
struct xt_target {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Returns verdict. Argument order changed since 2.6.9, as this
           must now handle non-linear skbs, using skb_copy_bits and
           skb_ip_make_writable. */
        unsigned int (*target)(struct sk_buff *skb,
                               const struct xt_action_param *);

        /* Called when user tries to insert an entry of this type:
           hook_mask is a bitmask of hooks from which it can be
           called. */
        /* Should return 0 on success or an error code otherwise (-Exxxx). */
        int (*checkentry)(const struct xt_tgchk_param *);

        /* Called when an entry of this type is deleted. */
        void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
        /* Called when the userspace alignment differs from the kernel's */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int targetsize;
        unsigned int usersize;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};

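/*
 * Corresponding sketch for a target extension (again hypothetical,
 * "qux_"): the target callback returns a netfilter verdict such as
 * NF_DROP, or XT_CONTINUE to keep traversing the rule list.
 *
 *      static unsigned int qux_tg(struct sk_buff *skb,
 *                                 const struct xt_action_param *par)
 *      {
 *              const struct qux_tginfo *info = par->targinfo;
 *
 *              if (info->mark)
 *                      skb->mark = info->mark;
 *              return XT_CONTINUE;
 *      }
 *
 *      static struct xt_target qux_tg_reg __read_mostly = {
 *              .name           = "qux",
 *              .family         = NFPROTO_UNSPEC,
 *              .target         = qux_tg,
 *              .targetsize     = sizeof(struct qux_tginfo),
 *              .me             = THIS_MODULE,
 *      };
 *
 * Registration mirrors the match case, via xt_register_target().
 */
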
/* Furniture shopping... */
struct xt_table {
        struct list_head list;

        /* What hooks you will enter on */
        unsigned int valid_hooks;

        /* Man behind the curtain... */
        struct xt_table_info *private;

        /* hook ops that register the table with the netfilter core */
        struct nf_hook_ops *ops;

        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        u_int8_t af;            /* address/protocol family */
        int priority;           /* hook order */

        /* A unique name... */
        const char name[XT_TABLE_MAXNAMELEN];
};

#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
        /* Size per table */
        unsigned int size;
        /* Number of entries: FIXME. --RR */
        unsigned int number;
        /* Initial number of entries. Needed for module usage count */
        unsigned int initial_entries;

        /* Entry points and underflows */
        unsigned int hook_entry[NF_INET_NUMHOOKS];
        unsigned int underflow[NF_INET_NUMHOOKS];

        /*
         * Number of user chains. Since tables cannot have loops, at most
         * @stacksize jumps (number of user chains) can possibly be made.
         */
        unsigned int stacksize;
        void ***jumpstack;

        unsigned char entries[] __aligned(8);
};

int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

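/*
 * Extensions that support several families or revisions usually
 * register an array in one go (sketch; the array contents are
 * placeholders for the hypothetical "baz" match):
 *
 *      static struct xt_match baz_mt_regs[] __read_mostly = {
 *              {
 *                      .name           = "baz",
 *                      .revision       = 0,
 *                      .family         = NFPROTO_IPV4,
 *                      .match          = baz_mt,
 *                      .matchsize      = sizeof(struct baz_mtinfo),
 *                      .me             = THIS_MODULE,
 *              },
 *              {
 *                      .name           = "baz",
 *                      .revision       = 0,
 *                      .family         = NFPROTO_IPV6,
 *                      .match          = baz_mt,
 *                      .matchsize      = sizeof(struct baz_mtinfo),
 *                      .me             = THIS_MODULE,
 *              },
 *      };
 *
 *      err = xt_register_matches(baz_mt_regs, ARRAY_SIZE(baz_mt_regs));
 *
 * and the matching xt_unregister_matches() call on module exit.
 */
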
int xt_check_entry_offsets(const void *base, const char *elems,
                           unsigned int target_offset,
                           unsigned int next_offset);

int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks);

unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
                         unsigned int target, unsigned int size);

int xt_check_proc_name(const char *name, unsigned int size);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u16 proto,
                   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u16 proto,
                    bool inv_proto);

int xt_match_to_user(const struct xt_entry_match *m,
                     struct xt_entry_match __user *u);
int xt_target_to_user(const struct xt_entry_target *t,
                      struct xt_entry_target __user *u);
int xt_data_to_user(void __user *dst, const void *src,
                    int usersize, int size, int aligned_size);

void *xt_copy_counters(sockptr_t arg, unsigned int len,
                       struct xt_counters_info *info);
struct xt_counters *xt_counters_alloc(unsigned int counters);

struct xt_table *xt_register_table(struct net *net,
                                   const struct xt_table *table,
                                   struct xt_table_info *bootstrap,
                                   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
                                       unsigned int num_counters,
                                       struct xt_table_info *newinfo,
                                       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
                     int *err);

struct xt_table *xt_find_table(struct net *net, u8 af, const char *name);
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
                                    const char *name);
struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
                                            const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);

/* xt_tee_enabled - true if x_tables needs to handle reentrancy
 *
 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
 */
extern struct static_key xt_tee_enabled;

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait for the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
        unsigned int addend;

        /*
         * Low order bit of sequence is set if we already
         * called xt_write_recseq_begin().
         */
        addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

        /*
         * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
         * We don't check the addend value to avoid a test and conditional
         * jump, since addend is most likely 1.
         */
        __this_cpu_add(xt_recseq.sequence, addend);
        smp_mb();

        return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
        /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
        smp_wmb();
        __this_cpu_add(xt_recseq.sequence, addend);
}

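/*
 * Usage pattern for the two helpers above, as seen from a table
 * traverser (a sketch of the ipt_do_table()-style sequence; the rule
 * walk itself is elided):
 *
 *      unsigned int addend;
 *
 *      local_bh_disable();
 *      addend = xt_write_recseq_begin();
 *      ... walk the rules, update per-cpu counters ...
 *      xt_write_recseq_end(addend);
 *      local_bh_enable();
 *
 * Readers such as get_counters() spin with read_seqcount_begin()/
 * read_seqcount_retry() on xt_recseq until the low bit is clear again.
 */
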
/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
                                                   const char *_b,
                                                   const char *_mask)
{
        const unsigned long *a = (const unsigned long *)_a;
        const unsigned long *b = (const unsigned long *)_b;
        const unsigned long *mask = (const unsigned long *)_mask;
        unsigned long ret;

        ret = (a[0] ^ b[0]) & mask[0];
        if (IFNAMSIZ > sizeof(unsigned long))
                ret |= (a[1] ^ b[1]) & mask[1];
        if (IFNAMSIZ > 2 * sizeof(unsigned long))
                ret |= (a[2] ^ b[2]) & mask[2];
        if (IFNAMSIZ > 3 * sizeof(unsigned long))
                ret |= (a[3] ^ b[3]) & mask[3];
        BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
        return ret;
}

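/*
 * Typical caller (a sketch modelled on the ip_tables rule matcher):
 * the kernel-side interface name and the rule's masked name are
 * compared word-by-word, and the zero/non-zero result is folded
 * through NF_INVF() to honour "! -i ethX".  Field names follow
 * struct ipt_ip.
 *
 *      ret = ifname_compare_aligned(indev, ipinfo->iniface,
 *                                   ipinfo->iniface_mask);
 *      if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
 *              return false;
 *
 * A non-zero ret means at least one masked byte differed.
 */
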
struct xt_percpu_counter_alloc_state {
        unsigned int off;
        const char __percpu *mem;
};

bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
                             struct xt_counters *counter);
void xt_percpu_counter_free(struct xt_counters *cnt);

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
        if (nr_cpu_ids > 1)
                return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);

        return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
        if (nr_cpu_ids > 1)
                return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);

        return cnt;
}

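/*
 * How the per-cpu counter helpers are meant to be combined (a sketch;
 * "e" stands for a rule entry carrying a struct xt_counters member).
 * On the packet path, resolve the per-cpu slot and bump it with the
 * ADD_COUNTER() macro from the uapi header:
 *
 *      struct xt_counters *counter;
 *
 *      counter = xt_get_this_cpu_counter(&e->counters);
 *      ADD_COUNTER(*counter, skb->len, 1);
 *
 * When collecting totals, xt_get_per_cpu_counter() is used instead to
 * sum the slot of every possible CPU for one rule.
 */
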
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);

int xt_register_template(const struct xt_table *t, int (*table_init)(struct net *net));
void xt_unregister_template(const struct xt_table *t);

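/*
 * A rough sketch of how a table provider wires these up (modelled on
 * iptable_filter; the IPv4-specific pieces such as ipt_do_table() and
 * FILTER_VALID_HOOKS live in the ip_tables layer, not in this header):
 *
 *      static const struct xt_table packet_filter = {
 *              .name           = "filter",
 *              .valid_hooks    = FILTER_VALID_HOOKS,
 *              .me             = THIS_MODULE,
 *              .af             = NFPROTO_IPV4,
 *              .priority       = NF_IP_PRI_FILTER,
 *      };
 *
 *      filter_ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
 *      err = xt_register_template(&packet_filter,
 *                                 iptable_filter_table_init);
 *
 * xt_register_template() records the per-netns init callback, and the
 * allocated nf_hook_ops are later passed to the table registration for
 * each namespace.
 */
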
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
        union {
                struct {
                        u_int16_t match_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t match_size;
                        compat_uptr_t match;
                } kernel;
                u_int16_t match_size;
        } u;
        unsigned char data[];
};

struct compat_xt_entry_target {
        union {
                struct {
                        u_int16_t target_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t target_size;
                        compat_uptr_t target;
                } kernel;
                u_int16_t target_size;
        } u;
        unsigned char data[];
};

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
        compat_u64 pcnt, bcnt;                  /* Packet and byte counters */
};

struct compat_xt_counters_info {
        char name[XT_TABLE_MAXNAMELEN];
        compat_uint_t num_counters;
        struct compat_xt_counters counters[];
};

struct _compat_xt_align {
        __u8 u8;
        __u16 u16;
        __u32 u32;
        compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
int xt_compat_init_offsets(u8 af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                               unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
                             void __user **dstptr, unsigned int *size);
int xt_compat_check_entry_offsets(const void *base, const char *elems,
                                  unsigned int target_offset,
                                  unsigned int next_offset);

#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
#endif /* _X_TABLES_H */