linux/include/linux/netfilter/x_tables.h
#ifndef _X_TABLES_H
#define _X_TABLES_H


#include <linux/netdevice.h>
#include <uapi/linux/netfilter/x_tables.h>

#include <linux/rh_kabi.h>

/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:      the match extension
 * @target:     the target extension
 * @matchinfo:  per-match data
 * @targinfo:   per-target data
 * @in:         input netdevice
 * @out:        output netdevice
 * @fragoff:    packet is a fragment, this is the data offset
 * @thoff:      position of transport header relative to skb->data
 * @hooknum:    hook number the packet came from
 * @family:     actual NFPROTO_* through which the function is invoked
 *              (helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:    drop packet if we had inspection problems
 *
 * The network namespace is obtainable using dev_net(in/out).
 */
struct xt_action_param {
        union {
                const struct xt_match *match;
                const struct xt_target *target;
        };
        union {
                const void *matchinfo, *targinfo;
        };
        const struct net_device *in, *out;
        int fragoff;
        unsigned int thoff;
        unsigned int hooknum;
        u_int8_t family;
        bool hotdrop;
};

/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:        network namespace through which the check was invoked
 * @table:      the table into which the rule is being inserted
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:      struct xt_match through which this function was invoked
 * @matchinfo:  per-match data
 * @hook_mask:  the hooks through which the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_match *match;
        void *matchinfo;
        unsigned int hook_mask;
        u_int8_t family;
        RH_KABI_EXTEND(bool nft_compat)
};

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
        struct net *net;
        const struct xt_match *match;
        void *matchinfo;
        u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields: see above.
 */
struct xt_tgchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_target *target;
        void *targinfo;
        unsigned int hook_mask;
        u_int8_t family;
        RH_KABI_EXTEND(bool nft_compat)
};

/* Target destructor parameters */
struct xt_tgdtor_param {
        struct net *net;
        const struct xt_target *target;
        void *targinfo;
        u_int8_t family;
};

struct xt_match {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Return true or false: return false and set par->hotdrop = true
           to force an immediate packet drop. */
        /* Arguments changed since 2.6.9, as this must now handle
           non-linear skbs, using skb_header_pointer and
           skb_ip_make_writable. */
        bool (*match)(const struct sk_buff *skb,
                      struct xt_action_param *);

        /* Called when a user tries to insert an entry of this type. */
        int (*checkentry)(const struct xt_mtchk_param *);

        /* Called when an entry of this type is deleted. */
        void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
        /* Called when the userspace alignment differs from the kernel's */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int matchsize;
#ifdef CONFIG_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};
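
/*
 * Example: a minimal match extension built on the structures above.
 * This is an illustrative sketch only (kept out of the build with
 * "#if 0"): the "foo" names and struct xt_foo_info are hypothetical,
 * and it assumes <linux/udp.h> for struct udphdr.  The match callback
 * runs in packet context and must not sleep; checkentry runs at rule
 * insertion time and rejects bad rules with -Exxxx.
 */
#if 0
struct xt_foo_info {
        __aligned_u64 quota;
        __u8 flags;
};

static bool foo_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
        const struct xt_foo_info *info = par->matchinfo;
        struct udphdr _udph;
        const struct udphdr *uh;

        /* Non-first fragments carry no transport header to look at. */
        if (par->fragoff != 0)
                return false;

        /* skb_header_pointer() copes with non-linear skbs. */
        uh = skb_header_pointer(skb, par->thoff, sizeof(_udph), &_udph);
        if (uh == NULL) {
                /* Truncated header: ask the core to drop the packet. */
                par->hotdrop = true;
                return false;
        }
        return (info->flags & 1) != 0;
}

static int foo_mt_check(const struct xt_mtchk_param *par)
{
        const struct xt_foo_info *info = par->matchinfo;

        /* Reject flag bits this revision does not understand. */
        return (info->flags & ~1) ? -EINVAL : 0;
}

static struct xt_match foo_mt_reg __read_mostly = {
        .name       = "foo",
        .revision   = 0,
        .family     = NFPROTO_UNSPEC,
        .match      = foo_mt,
        .checkentry = foo_mt_check,
        .matchsize  = sizeof(struct xt_foo_info),
        .me         = THIS_MODULE,
};
#endif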
 146
 147/* Registration hooks for targets. */
 148struct xt_target {
 149        struct list_head list;
 150
 151        const char name[XT_EXTENSION_MAXNAMELEN];
 152        u_int8_t revision;
 153
 154        /* Returns verdict. Argument order changed since 2.6.9, as this
 155           must now handle non-linear skbs, using skb_copy_bits and
 156           skb_ip_make_writable. */
 157        unsigned int (*target)(struct sk_buff *skb,
 158                               const struct xt_action_param *);
 159
 160        /* Called when user tries to insert an entry of this type:
 161           hook_mask is a bitmask of hooks from which it can be
 162           called. */
 163        /* Should return 0 on success or an error code otherwise (-Exxxx). */
 164        int (*checkentry)(const struct xt_tgchk_param *);
 165
 166        /* Called when entry of this type deleted. */
 167        void (*destroy)(const struct xt_tgdtor_param *);
 168#ifdef CONFIG_COMPAT
 169        /* Called when userspace align differs from kernel space one */
 170        void (*compat_from_user)(void *dst, const void *src);
 171        int (*compat_to_user)(void __user *dst, const void *src);
 172#endif
 173        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
 174        struct module *me;
 175
 176        const char *table;
 177        unsigned int targetsize;
 178#ifdef CONFIG_COMPAT
 179        unsigned int compatsize;
 180#endif
 181        unsigned int hooks;
 182        unsigned short proto;
 183
 184        unsigned short family;
 185
 186        RH_KABI_RESERVE(1)
 187        RH_KABI_RESERVE(2)
 188        RH_KABI_RESERVE(3)
 189        RH_KABI_RESERVE(4)
 190};
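
/*
 * Example: the companion target extension (same hypothetical "foo"
 * sketch as above, not compiled).  Unlike a match, which answers
 * true/false, the target callback returns a verdict: XT_CONTINUE to
 * keep evaluating rules, or a final verdict such as NF_DROP.
 */
#if 0
static unsigned int foo_tg(struct sk_buff *skb,
                           const struct xt_action_param *par)
{
        const struct xt_foo_info *info = par->targinfo;

        if (info->flags & 1)
                return NF_DROP;
        return XT_CONTINUE;
}

static struct xt_target foo_tg_reg __read_mostly = {
        .name       = "FOO",
        .revision   = 0,
        .family     = NFPROTO_UNSPEC,
        .target     = foo_tg,
        .targetsize = sizeof(struct xt_foo_info),
        .me         = THIS_MODULE,
};
#endif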

/* Furniture shopping... */
struct xt_table {
        struct list_head list;

        /* What hooks you will enter on */
        unsigned int valid_hooks;

        /* Man behind the curtain... */
        struct xt_table_info *private;

        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        u_int8_t af;            /* address/protocol family */
        int priority;           /* hook order */

        /* A unique name... */
        const char name[XT_TABLE_MAXNAMELEN];
};

#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
        /* Size per table */
        unsigned int size;
        /* Number of entries: FIXME. --RR */
        unsigned int number;
        /* Initial number of entries. Needed for module usage count */
        unsigned int initial_entries;

        /* Entry points and underflows */
        unsigned int hook_entry[NF_INET_NUMHOOKS];
        unsigned int underflow[NF_INET_NUMHOOKS];

        /*
         * Number of user chains. Since tables cannot have loops, at most
         * @stacksize jumps (number of user chains) can possibly be made.
         */
        unsigned int stacksize;
        unsigned int __percpu *stackptr;
        void ***jumpstack;

        unsigned char entries[0] __aligned(8);
};

int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

int xt_register_match(struct xt_match *match);
void xt_unregister_match(struct xt_match *match);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
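
/*
 * Example: registering the hypothetical "foo" extensions from module
 * init/exit (sketch, not compiled).  The plural variants take arrays
 * and unwind already-registered entries when one registration fails.
 */
#if 0
static int __init foo_init(void)
{
        int ret;

        ret = xt_register_match(&foo_mt_reg);
        if (ret < 0)
                return ret;

        ret = xt_register_target(&foo_tg_reg);
        if (ret < 0)
                xt_unregister_match(&foo_mt_reg);
        return ret;
}

static void __exit foo_exit(void)
{
        xt_unregister_target(&foo_tg_reg);
        xt_unregister_match(&foo_mt_reg);
}

module_init(foo_init);
module_exit(foo_exit);
#endif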

int xt_check_entry_offsets(const void *base, const char *elems,
                           unsigned int target_offset,
                           unsigned int next_offset);

unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
                         unsigned int target, unsigned int size);

int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
                   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
                    bool inv_proto);

void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                                 struct xt_counters_info *info, bool compat);

struct xt_table *xt_register_table(struct net *net,
                                   const struct xt_table *table,
                                   struct xt_table_info *bootstrap,
                                   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

struct xt_table_info *xt_replace_table(struct xt_table *table,
                                       unsigned int num_counters,
                                       struct xt_table_info *newinfo,
                                       int *error);

struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
                     int *err);
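
/*
 * Example: looking up an extension by family/name/revision, e.g. when
 * translating a ruleset (sketch, not compiled).  The request variants
 * additionally try module autoloading; failures are reported via
 * ERR_PTR().  On success the caller holds a module reference and
 * drops it with module_put() when done.
 */
#if 0
        struct xt_match *m;

        m = xt_request_find_match(NFPROTO_IPV4, "foo", 0);
        if (IS_ERR(m))
                return PTR_ERR(m);
        /* ... use m ... */
        module_put(m->me);
#endif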

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
                                    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened.
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention:
 * the low order bit is set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing: all readers must wait for the end.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns:
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
        unsigned int addend;

        /*
         * Low order bit of sequence is set if we already
         * called xt_write_recseq_begin().
         */
        addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

        /*
         * This is kind of a write_seqcount_begin(), but addend is 0 or 1.
         * We don't check the addend value, to avoid a test and conditional
         * jump, since addend is most likely 1.
         */
        __this_cpu_add(xt_recseq.sequence, addend);
        smp_wmb();

        return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing: all readers can proceed.
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
        /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
        smp_wmb();
        __this_cpu_add(xt_recseq.sequence, addend);
}
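
/*
 * Example: how the recursive seqcount brackets counter updates
 * (sketch modelled on the table traversal and counter readout code,
 * not compiled).  The writer runs per packet with BHs off; a reader
 * on another CPU retries until it sees a stable, even sequence.
 */
#if 0
static void example_writer(void)
{
        unsigned int addend;

        local_bh_disable();
        addend = xt_write_recseq_begin();
        /* ... walk the rules, updating struct xt_counters ... */
        xt_write_recseq_end(addend);
        local_bh_enable();
}

static u64 example_read_bytes(const struct xt_counters *counter, int cpu)
{
        seqcount_t *s = &per_cpu(xt_recseq, cpu);
        unsigned int start;
        u64 bcnt;

        do {
                start = read_seqcount_begin(s);
                bcnt = counter->bcnt;
        } while (read_seqcount_retry(s, start));
        return bcnt;
}
#endif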

/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
                                                   const char *_b,
                                                   const char *_mask)
{
        const unsigned long *a = (const unsigned long *)_a;
        const unsigned long *b = (const unsigned long *)_b;
        const unsigned long *mask = (const unsigned long *)_mask;
        unsigned long ret;

        ret = (a[0] ^ b[0]) & mask[0];
        if (IFNAMSIZ > sizeof(unsigned long))
                ret |= (a[1] ^ b[1]) & mask[1];
        if (IFNAMSIZ > 2 * sizeof(unsigned long))
                ret |= (a[2] ^ b[2]) & mask[2];
        if (IFNAMSIZ > 3 * sizeof(unsigned long))
                ret |= (a[3] ^ b[3]) & mask[3];
        BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
        return ret;
}
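
/*
 * Example: rule evaluation compares device names with this helper
 * (sketch, not compiled).  All three buffers must be IFNAMSIZ bytes
 * and unsigned-long aligned; a zero result means the masked names
 * are equal.  "indev", "iniface" and "iniface_mask" are hypothetical
 * stand-ins for the hook device name and the rule's fields.
 */
#if 0
        unsigned long ret;

        ret = ifname_compare_aligned(indev, iniface, iniface_mask);
        if (ret != 0)
                return false;   /* masked interface names differ */
#endif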

struct xt_percpu_counter_alloc_state {
        unsigned int off;
        const char __percpu *mem;
};

bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
                             struct xt_counters *counter);
void xt_percpu_counter_free(struct xt_counters *cnt);

static inline struct xt_counters *
xt_get_this_cpu_counter(struct xt_counters *cnt)
{
        if (nr_cpu_ids > 1)
                return this_cpu_ptr((void __percpu *) cnt->pcnt);

        return cnt;
}

static inline struct xt_counters *
xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
{
        if (nr_cpu_ids > 1)
                return per_cpu_ptr((void __percpu *) cnt->pcnt, cpu);

        return cnt;
}
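
/*
 * Example: per-packet counter update and a fold loop (sketch, not
 * compiled; "e" stands for a hypothetical rule entry).  On SMP,
 * cnt->pcnt holds a percpu pointer, so the hot path bumps only this
 * CPU's copy; on UP both helpers just return the struct itself.
 */
#if 0
        /* Hot path, inside an xt_write_recseq section: */
        struct xt_counters *ctr = xt_get_this_cpu_counter(&e->counters);

        ctr->bcnt += skb->len;
        ctr->pcnt += 1;

        /* Slow path: fold every CPU's copy into a total. */
        u64 bcnt = 0, pcnt = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct xt_counters *c;

                c = xt_get_per_cpu_counter(&e->counters, cpu);
                bcnt += c->bcnt;
                pcnt += c->pcnt;
        }
#endif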

struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
        union {
                struct {
                        u_int16_t match_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t match_size;
                        compat_uptr_t match;
                } kernel;
                u_int16_t match_size;
        } u;
        unsigned char data[0];
};

struct compat_xt_entry_target {
        union {
                struct {
                        u_int16_t target_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t target_size;
                        compat_uptr_t target;
                } kernel;
                u_int16_t target_size;
        } u;
        unsigned char data[0];
};

/* FIXME: this works only for 32-bit tasks; the whole approach needs to
 * change so that the alignment is calculated as a function of the
 * current task's alignment. */

struct compat_xt_counters {
        compat_u64 pcnt, bcnt;                  /* Packet and byte counters */
};

struct compat_xt_counters_info {
        char name[XT_TABLE_MAXNAMELEN];
        compat_uint_t num_counters;
        struct compat_xt_counters counters[0];
};

struct _compat_xt_align {
        __u8 u8;
        __u16 u16;
        __u32 u32;
        compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

int xt_compat_match_offset(const struct xt_match *match);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                               unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
                             void __user **dstptr, unsigned int *size);
int xt_compat_check_entry_offsets(const void *base, const char *elems,
                                  unsigned int target_offset,
                                  unsigned int next_offset);
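
/*
 * Example: COMPAT handlers for a match whose private data contains a
 * u64, which is 8-byte aligned natively but only 4-byte aligned for
 * ia32 tasks (sketch, not compiled; reuses the hypothetical "foo"
 * structures from above).  The match would set .compatsize to
 * sizeof(struct compat_xt_foo_info) next to these callbacks.
 */
#if 0
struct compat_xt_foo_info {
        compat_u64 quota;
        __u8 flags;
};

static void foo_mt_compat_from_user(void *dst, const void *src)
{
        const struct compat_xt_foo_info *cm = src;
        struct xt_foo_info m = {
                .quota = cm->quota,
                .flags = cm->flags,
        };

        memcpy(dst, &m, sizeof(m));
}

static int foo_mt_compat_to_user(void __user *dst, const void *src)
{
        const struct xt_foo_info *m = src;
        struct compat_xt_foo_info cm = {
                .quota = m->quota,
                .flags = m->flags,
        };

        return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
}
#endif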

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */