linux/include/linux/netfilter/x_tables.h
#ifndef _X_TABLES_H
#define _X_TABLES_H
#include <linux/kernel.h>
#include <linux/types.h>

#define XT_FUNCTION_MAXNAMELEN 30
#define XT_EXTENSION_MAXNAMELEN 29
#define XT_TABLE_MAXNAMELEN 32

struct xt_entry_match {
        union {
                struct {
                        __u16 match_size;

                        /* Used by userspace */
                        char name[XT_EXTENSION_MAXNAMELEN];
                        __u8 revision;
                } user;
                struct {
                        __u16 match_size;

                        /* Used inside the kernel */
                        struct xt_match *match;
                } kernel;

                /* Total length */
                __u16 match_size;
        } u;

        unsigned char data[0];
};

struct xt_entry_target {
        union {
                struct {
                        __u16 target_size;

                        /* Used by userspace */
                        char name[XT_EXTENSION_MAXNAMELEN];
                        __u8 revision;
                } user;
                struct {
                        __u16 target_size;

                        /* Used inside the kernel */
                        struct xt_target *target;
                } kernel;

                /* Total length */
                __u16 target_size;
        } u;

        unsigned char data[0];
};

#define XT_TARGET_INIT(__name, __size)                                         \
{                                                                              \
        .target.u.user = {                                                     \
                .target_size    = XT_ALIGN(__size),                            \
                .name           = __name,                                      \
        },                                                                     \
}

struct xt_standard_target {
        struct xt_entry_target target;
        int verdict;
};

struct xt_error_target {
        struct xt_entry_target target;
        char errorname[XT_FUNCTION_MAXNAMELEN];
};
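
/*
 * Illustrative sketch (not part of this header): XT_TARGET_INIT initializes
 * the embedded xt_entry_target of a larger structure such as a standard
 * verdict target.  The verdict encoding below follows the usual
 * "-(verdict) - 1" convention used by the per-family rule initializers.
 *
 *      struct xt_standard_target st =
 *              XT_TARGET_INIT(XT_STANDARD_TARGET,
 *                             sizeof(struct xt_standard_target));
 *
 *      st.verdict = -NF_ACCEPT - 1;
 */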

/* The argument to IPT_SO_GET_REVISION_*.  Returns the highest revision the
 * kernel supports, if >= revision. */
struct xt_get_revision {
        char name[XT_EXTENSION_MAXNAMELEN];
        __u8 revision;
};
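
/*
 * Illustrative userspace sketch, assuming an AF_INET socket and the IPv4
 * socket option names from linux/netfilter_ipv4/ip_tables.h; the match name
 * "conntrack" is only an example.  On success the kernel reports the highest
 * revision it supports for that extension.
 *
 *      struct xt_get_revision rev = { .name = "conntrack", .revision = 1 };
 *      socklen_t len = sizeof(rev);
 *
 *      getsockopt(sockfd, IPPROTO_IP, IPT_SO_GET_REVISION_MATCH, &rev, &len);
 */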

/* CONTINUE verdict for targets */
#define XT_CONTINUE 0xFFFFFFFF

/* For standard target */
#define XT_RETURN (-NF_REPEAT - 1)

/* this is a dummy structure to find out the alignment requirement for a struct
 * containing all the fundamental data types that are used in ipt_entry,
 * ip6t_entry and arpt_entry.  This sucks, and it is a hack.  It will be my
 * personal pleasure to remove it -HW
 */
struct _xt_align {
        __u8 u8;
        __u16 u16;
        __u32 u32;
        __u64 u64;
};

#define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align))
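
/*
 * For example, on a platform where __alignof__(struct _xt_align) is 8,
 * XT_ALIGN(13) evaluates to 16: match and target payloads are padded so
 * that whatever structure follows them starts on a boundary that is safe
 * for all the fundamental types above.
 */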

/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
/* Error verdict. */
#define XT_ERROR_TARGET "ERROR"

#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

struct xt_counters {
        __u64 pcnt, bcnt;                       /* Packet and byte counters */
};
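
/*
 * Illustrative use, roughly what the per-family table walkers do for a
 * rule "e" that a packet traverses (the IPv4 walker uses the IP total
 * length rather than skb->len):
 *
 *      ADD_COUNTER(e->counters, skb->len, 1);
 */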

/* The argument to IPT_SO_ADD_COUNTERS. */
struct xt_counters_info {
        /* Which table. */
        char name[XT_TABLE_MAXNAMELEN];

        unsigned int num_counters;

        /* The counters (actually `num_counters' of these). */
        struct xt_counters counters[0];
};

#define XT_INV_PROTO            0x40    /* Invert the sense of PROTO. */

#ifndef __KERNEL__
/* fn returns 0 to continue iteration */
#define XT_MATCH_ITERATE(type, e, fn, args...)                  \
({                                                              \
        unsigned int __i;                                       \
        int __ret = 0;                                          \
        struct xt_entry_match *__m;                             \
                                                                \
        for (__i = sizeof(type);                                \
             __i < (e)->target_offset;                          \
             __i += __m->u.match_size) {                        \
                __m = (void *)e + __i;                          \
                                                                \
                __ret = fn(__m , ## args);                      \
                if (__ret != 0)                                 \
                        break;                                  \
        }                                                       \
        __ret;                                                  \
})
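
/*
 * Illustrative userspace sketch: the callback receives each xt_entry_match
 * in turn and returns 0 to keep iterating.  "dump_match" is a hypothetical
 * helper, not something defined by this header.
 *
 *      static int dump_match(const struct xt_entry_match *m)
 *      {
 *              printf("%s rev %u (%u bytes)\n", m->u.user.name,
 *                     m->u.user.revision, m->u.user.match_size);
 *              return 0;
 *      }
 *
 *      XT_MATCH_ITERATE(struct ipt_entry, e, dump_match);
 */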

/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
({                                                              \
        unsigned int __i, __n;                                  \
        int __ret = 0;                                          \
        type *__entry;                                          \
                                                                \
        for (__i = 0, __n = 0; __i < (size);                    \
             __i += __entry->next_offset, __n++) {              \
                __entry = (void *)(entries) + __i;              \
                if (__n < n)                                    \
                        continue;                               \
                                                                \
                __ret = fn(__entry , ## args);                  \
                if (__ret != 0)                                 \
                        break;                                  \
        }                                                       \
        __ret;                                                  \
})

/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
        XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)

#endif /* !__KERNEL__ */

/* pos is normally a struct ipt_entry/ip6t_entry/etc. */
#define xt_entry_foreach(pos, ehead, esize) \
        for ((pos) = (typeof(pos))(ehead); \
             (pos) < (typeof(pos))((char *)(ehead) + (esize)); \
             (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset))

/* can only be xt_entry_match, so no use of typeof here */
#define xt_ematch_foreach(pos, entry) \
        for ((pos) = (struct xt_entry_match *)entry->elems; \
             (pos) < (struct xt_entry_match *)((char *)(entry) + \
                     (entry)->target_offset); \
             (pos) = (struct xt_entry_match *)((char *)(pos) + \
                     (pos)->u.match_size))
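
/*
 * Illustrative sketch of how kernel-side table code walks a rule blob with
 * these helpers; "entry0" and "size" stand for the start and length of the
 * entry area:
 *
 *      struct ipt_entry *iter;
 *      struct xt_entry_match *ematch;
 *
 *      xt_entry_foreach(iter, entry0, size) {
 *              xt_ematch_foreach(ematch, iter) {
 *                      ... inspect ematch->u.kernel.match ...
 *              }
 *      }
 */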

#ifdef __KERNEL__

#include <linux/netdevice.h>

/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:      the match extension
 * @target:     the target extension
 * @matchinfo:  per-match data
 * @targinfo:   per-target data
 * @in:         input netdevice
 * @out:        output netdevice
 * @fragoff:    packet is a fragment, this is the data offset
 * @thoff:      position of transport header relative to skb->data
 * @hooknum:    hook number the packet came from
 * @family:     Actual NFPROTO_* through which the function is invoked
 *              (helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:    drop packet if we had inspection problems
 * Network namespace obtainable using dev_net(in/out)
 */
struct xt_action_param {
        union {
                const struct xt_match *match;
                const struct xt_target *target;
        };
        union {
                const void *matchinfo, *targinfo;
        };
        const struct net_device *in, *out;
        int fragoff;
        unsigned int thoff;
        unsigned int hooknum;
        u_int8_t family;
        bool hotdrop;
};

/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:        network namespace through which the check was invoked
 * @table:      table the rule is being inserted into
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:      struct xt_match through which this function was invoked
 * @matchinfo:  per-match data
 * @hook_mask:  via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_match *match;
        void *matchinfo;
        unsigned int hook_mask;
        u_int8_t family;
};

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
        struct net *net;
        const struct xt_match *match;
        void *matchinfo;
        u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:  the family-specific rule data
 *              (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields as above.
 */
struct xt_tgchk_param {
        struct net *net;
        const char *table;
        const void *entryinfo;
        const struct xt_target *target;
        void *targinfo;
        unsigned int hook_mask;
        u_int8_t family;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
        struct net *net;
        const struct xt_target *target;
        void *targinfo;
        u_int8_t family;
};

struct xt_match {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Return true or false: return false and set ->hotdrop in the
           xt_action_param to force immediate packet drop. */
        /* Arguments changed since 2.6.9, as this must now handle
           non-linear skbs, using skb_header_pointer and
           skb_make_writable. */
        bool (*match)(const struct sk_buff *skb,
                      struct xt_action_param *);

        /* Called when user tries to insert an entry of this type. */
        int (*checkentry)(const struct xt_mtchk_param *);

        /* Called when entry of this type deleted. */
        void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
        /* Called when userspace alignment differs from kernel alignment */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int matchsize;
#ifdef CONFIG_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};
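
/*
 * Illustrative sketch of a minimal match extension; "foo" and
 * struct xt_foo_info are hypothetical names, not defined by this header:
 *
 *      static bool foo_mt(const struct sk_buff *skb,
 *                         struct xt_action_param *par)
 *      {
 *              const struct xt_foo_info *info = par->matchinfo;
 *
 *              return (skb->mark & info->mask) == info->value;
 *      }
 *
 *      static struct xt_match foo_mt_reg __read_mostly = {
 *              .name           = "foo",
 *              .revision       = 0,
 *              .family         = NFPROTO_UNSPEC,
 *              .match          = foo_mt,
 *              .matchsize      = sizeof(struct xt_foo_info),
 *              .me             = THIS_MODULE,
 *      };
 *
 * xt_register_match(&foo_mt_reg) and xt_unregister_match(&foo_mt_reg)
 * (declared below) are then called from the module's init/exit functions.
 */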

/* Registration hooks for targets. */
struct xt_target {
        struct list_head list;

        const char name[XT_EXTENSION_MAXNAMELEN];
        u_int8_t revision;

        /* Returns verdict. Argument order changed since 2.6.9, as this
           must now handle non-linear skbs, using skb_copy_bits and
           skb_make_writable. */
        unsigned int (*target)(struct sk_buff *skb,
                               const struct xt_action_param *);

        /* Called when user tries to insert an entry of this type:
           hook_mask is a bitmask of hooks from which it can be
           called. */
        /* Should return 0 on success or an error code otherwise (-Exxxx). */
        int (*checkentry)(const struct xt_tgchk_param *);

        /* Called when entry of this type deleted. */
        void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
        /* Called when userspace alignment differs from kernel alignment */
        void (*compat_from_user)(void *dst, const void *src);
        int (*compat_to_user)(void __user *dst, const void *src);
#endif
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        const char *table;
        unsigned int targetsize;
#ifdef CONFIG_COMPAT
        unsigned int compatsize;
#endif
        unsigned int hooks;
        unsigned short proto;

        unsigned short family;
};
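
/*
 * Illustrative sketch of a minimal target extension; "BAR" and
 * struct xt_bar_info are hypothetical.  A target returns a netfilter
 * verdict such as NF_DROP or NF_ACCEPT, or XT_CONTINUE to fall through
 * to the next rule:
 *
 *      static unsigned int bar_tg(struct sk_buff *skb,
 *                                 const struct xt_action_param *par)
 *      {
 *              const struct xt_bar_info *info = par->targinfo;
 *
 *              skb->mark = info->mark;
 *              return XT_CONTINUE;
 *      }
 *
 *      static struct xt_target bar_tg_reg __read_mostly = {
 *              .name           = "BAR",
 *              .revision       = 0,
 *              .family         = NFPROTO_UNSPEC,
 *              .target         = bar_tg,
 *              .targetsize     = sizeof(struct xt_bar_info),
 *              .me             = THIS_MODULE,
 *      };
 */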

/* Furniture shopping... */
struct xt_table {
        struct list_head list;

        /* What hooks you will enter on */
        unsigned int valid_hooks;

        /* Man behind the curtain... */
        struct xt_table_info *private;

        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;

        u_int8_t af;            /* address/protocol family */
        int priority;           /* hook order */

        /* A unique name... */
        const char name[XT_TABLE_MAXNAMELEN];
};

#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
        /* Size per table */
        unsigned int size;
        /* Number of entries: FIXME. --RR */
        unsigned int number;
        /* Initial number of entries. Needed for module usage count */
        unsigned int initial_entries;

        /* Entry points and underflows */
        unsigned int hook_entry[NF_INET_NUMHOOKS];
        unsigned int underflow[NF_INET_NUMHOOKS];

        /*
         * Number of user chains. Since tables cannot have loops, at most
         * @stacksize jumps (number of user chains) can possibly be made.
         */
        unsigned int stacksize;
        unsigned int __percpu *stackptr;
        void ***jumpstack;
        /* ipt_entry tables: one per CPU */
        /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
        void *entries[1];
};

#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
                          + nr_cpu_ids * sizeof(char *))
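
/*
 * entries[] holds one pointer per possible CPU, hence the XT_TABLE_INFO_SZ
 * calculation above; e.g. the rule blob used while processing a packet on
 * the local CPU is private->entries[smp_processor_id()].
 */
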
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_targets(struct xt_target *target, unsigned int n);
extern void xt_unregister_targets(struct xt_target *target, unsigned int n);

extern int xt_register_match(struct xt_match *target);
extern void xt_unregister_match(struct xt_match *target);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);

extern int xt_check_match(struct xt_mtchk_param *,
                          unsigned int size, u_int8_t proto, bool inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
                           unsigned int size, u_int8_t proto, bool inv_proto);

extern struct xt_table *xt_register_table(struct net *net,
                                          const struct xt_table *table,
                                          struct xt_table_info *bootstrap,
                                          struct xt_table_info *newinfo);
extern void *xt_unregister_table(struct xt_table *table);

extern struct xt_table_info *xt_replace_table(struct xt_table *table,
                                              unsigned int num_counters,
                                              struct xt_table_info *newinfo,
                                              int *error);

extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_match *xt_request_find_match(u8 af, const char *name,
                                              u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
                                                u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
                            int target, int *err);

extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
                                           const char *name);
extern void xt_table_unlock(struct xt_table *t);

extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);

extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);

/*
 * Per-CPU spinlock associated with per-cpu table entries, and
 * with a counter for the "reading" side that allows a recursive
 * reader to avoid taking the lock and deadlocking.
 *
 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
 * It needs to ensure that the rules are not being changed while the packet
 * is being processed. In some cases, the read lock will be acquired
 * twice on the same CPU; this is okay because of the count.
 *
 * "writing" is used when reading counters.
 *  During replace any readers that are using the old tables have to complete
 *  before freeing the old table. This is handled by the write locking
 *  necessary for reading the counters.
 */
struct xt_info_lock {
        seqlock_t lock;
        unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);

/*
 * Note: we need to ensure that preemption is disabled before acquiring
 * the per-cpu-variable, so we do it as a two step process rather than
 * using "spin_lock_bh()".
 *
 * We _also_ need to disable bottom half processing before updating our
 * nesting count, to make sure that the only kind of re-entrancy is this
 * code being called by itself: since the count+lock is not an atomic
 * operation, we can allow no races.
 *
 * _Only_ that special combination of being per-cpu and never getting
 * re-entered asynchronously means that the count is safe.
 */
static inline void xt_info_rdlock_bh(void)
{
        struct xt_info_lock *lock;

        local_bh_disable();
        lock = &__get_cpu_var(xt_info_locks);
        if (likely(!lock->readers++))
                write_seqlock(&lock->lock);
}

static inline void xt_info_rdunlock_bh(void)
{
        struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

        if (likely(!--lock->readers))
                write_sequnlock(&lock->lock);
        local_bh_enable();
}
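
/*
 * Illustrative sketch of the "reader" side, roughly what the per-family
 * table walkers (e.g. ipt_do_table) do for every packet:
 *
 *      xt_info_rdlock_bh();
 *      private = table->private;
 *      table_base = private->entries[smp_processor_id()];
 *      ... traverse the rules, bump per-cpu counters ...
 *      xt_info_rdunlock_bh();
 */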

/*
 * The "writer" side needs to get exclusive access to the lock,
 * regardless of readers.  This must be called with bottom half
 * processing (and thus also preemption) disabled.
 */
static inline void xt_info_wrlock(unsigned int cpu)
{
        write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
}

static inline void xt_info_wrunlock(unsigned int cpu)
{
        write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
}
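
/*
 * Illustrative sketch of the "writer" side, roughly how counters are
 * snapshotted for userspace: bottom halves are disabled, then each CPU's
 * entries are walked under that CPU's lock:
 *
 *      local_bh_disable();
 *      for_each_possible_cpu(cpu) {
 *              xt_info_wrlock(cpu);
 *              ... add up the counters in private->entries[cpu] ...
 *              xt_info_wrunlock(cpu);
 *      }
 *      local_bh_enable();
 */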

/*
 * This helper is performance critical and must be inlined
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
                                                   const char *_b,
                                                   const char *_mask)
{
        const unsigned long *a = (const unsigned long *)_a;
        const unsigned long *b = (const unsigned long *)_b;
        const unsigned long *mask = (const unsigned long *)_mask;
        unsigned long ret;

        ret = (a[0] ^ b[0]) & mask[0];
        if (IFNAMSIZ > sizeof(unsigned long))
                ret |= (a[1] ^ b[1]) & mask[1];
        if (IFNAMSIZ > 2 * sizeof(unsigned long))
                ret |= (a[2] ^ b[2]) & mask[2];
        if (IFNAMSIZ > 3 * sizeof(unsigned long))
                ret |= (a[3] ^ b[3]) & mask[3];
        BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
        return ret;
}
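
/*
 * Illustrative use: the per-family packet matchers compare the incoming
 * device name against a rule's interface name and mask, e.g. with the
 * field names from the IPv4 rule structure:
 *
 *      ret = ifname_compare_aligned(indev, ipinfo->iniface,
 *                                   ipinfo->iniface_mask);
 *
 * A non-zero result means the masked names differ.
 */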

extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_xt_entry_match {
        union {
                struct {
                        u_int16_t match_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t match_size;
                        compat_uptr_t match;
                } kernel;
                u_int16_t match_size;
        } u;
        unsigned char data[0];
};

struct compat_xt_entry_target {
        union {
                struct {
                        u_int16_t target_size;
                        char name[XT_FUNCTION_MAXNAMELEN - 1];
                        u_int8_t revision;
                } user;
                struct {
                        u_int16_t target_size;
                        compat_uptr_t target;
                } kernel;
                u_int16_t target_size;
        } u;
        unsigned char data[0];
};

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
        compat_u64 pcnt, bcnt;                  /* Packet and byte counters */
};

struct compat_xt_counters_info {
        char name[XT_TABLE_MAXNAMELEN];
        compat_uint_t num_counters;
        struct compat_xt_counters counters[0];
};

struct _compat_xt_align {
        __u8 u8;
        __u16 u16;
        __u32 u32;
        compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);

extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
                                     void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(const struct xt_entry_match *m,
                                   void __user **dstptr, unsigned int *size);

extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
                                       void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(const struct xt_entry_target *t,
                                    void __user **dstptr, unsigned int *size);

#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */

#endif /* _X_TABLES_H */