linux/net/netfilter/core.c
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);

int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
        int err;

        err = mutex_lock_interruptible(&afinfo_mutex);
        if (err < 0)
                return err;
        rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
        mutex_unlock(&afinfo_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
        mutex_lock(&afinfo_mutex);
        rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
        mutex_unlock(&afinfo_mutex);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
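
/* Illustrative use (not part of this file): a protocol family registers its
 * nf_afinfo helpers once at init time, roughly as
 *
 *        static const struct nf_afinfo my_afinfo = {
 *                .family = NFPROTO_IPV4,
 *                ... checksum/route callbacks elided ...
 *        };
 *
 *        err = nf_register_afinfo(&my_afinfo);
 *
 * "my_afinfo" is a made-up name for the sketch; see net/ipv4/netfilter.c for
 * the real IPv4 registration.
 */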

struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);
static DEFINE_MUTEX(nf_hook_mutex);

int nf_register_hook(struct nf_hook_ops *reg)
{
        struct nf_hook_ops *elem;
        int err;

        err = mutex_lock_interruptible(&nf_hook_mutex);
        if (err < 0)
                return err;
        list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) {
                if (reg->priority < elem->priority)
                        break;
        }
        list_add_rcu(&reg->list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
        return 0;
}
EXPORT_SYMBOL(nf_register_hook);

void nf_unregister_hook(struct nf_hook_ops *reg)
{
        mutex_lock(&nf_hook_mutex);
        list_del_rcu(&reg->list);
        mutex_unlock(&nf_hook_mutex);

        synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);

int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_hook(&reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_hooks(reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        while (n-- > 0)
                nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);
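
/* Illustrative use (not part of this file): a module attaches to the IPv4
 * input path roughly as below ("my_hook" and "my_ops" are made-up names):
 *
 *        static unsigned int my_hook(unsigned int hooknum, struct sk_buff *skb,
 *                                    const struct net_device *in,
 *                                    const struct net_device *out,
 *                                    int (*okfn)(struct sk_buff *))
 *        {
 *                return NF_ACCEPT;
 *        }
 *
 *        static struct nf_hook_ops my_ops = {
 *                .hook           = my_hook,
 *                .pf             = NFPROTO_IPV4,
 *                .hooknum        = NF_INET_PRE_ROUTING,
 *                .priority       = NF_IP_PRI_FIRST,
 *        };
 *
 *        err = nf_register_hook(&my_ops);
 *
 * and calls nf_unregister_hook(&my_ops) (or the array variants above) on
 * module exit.  Hooks on the same list run in ascending priority order.
 */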

unsigned int nf_iterate(struct list_head *head,
                        struct sk_buff *skb,
                        unsigned int hook,
                        const struct net_device *indev,
                        const struct net_device *outdev,
                        struct list_head **i,
                        int (*okfn)(struct sk_buff *),
                        int hook_thresh)
{
        unsigned int verdict;

        /*
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
        list_for_each_continue_rcu(*i, head) {
                struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

                if (hook_thresh > elem->priority)
                        continue;

                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
repeat:
                verdict = elem->hook(hook, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
                                        elem->hook, hook);
                                continue;
                        }
#endif
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
        }
        return NF_ACCEPT;
}


/* Returns 1 if okfn() needs to be executed by the caller, a negative errno
 * (-EPERM unless the hook supplied one) for NF_DROP, 0 otherwise. */
int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
                 struct net_device *indev,
                 struct net_device *outdev,
                 int (*okfn)(struct sk_buff *),
                 int hook_thresh)
{
        struct list_head *elem;
        unsigned int verdict;
        int ret = 0;

        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();

        elem = &nf_hooks[pf][hook];
next_hook:
        verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
                             outdev, &elem, okfn, hook_thresh);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
                kfree_skb(skb);
                ret = -(verdict >> NF_VERDICT_BITS);
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
                if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
                              verdict >> NF_VERDICT_BITS))
                        goto next_hook;
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
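
/* Illustrative caller pattern (not part of this file): the NF_HOOK() and
 * NF_HOOK_THRESH() wrappers in <linux/netfilter.h> consume the return value
 * roughly as
 *
 *        ret = nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
 *        if (ret == 1)
 *                ret = okfn(skb);
 *
 * i.e. 1 means the hooks accepted the packet and the caller hands it on via
 * okfn(), 0 means it was queued or stolen and must not be touched again, and
 * a negative value is the error to propagate (e.g. -EPERM for a drop).
 */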


int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
        if (writable_len > skb->len)
                return 0;

        /* Not exclusive use of packet?  Must copy. */
        if (!skb_cloned(skb)) {
                if (writable_len <= skb_headlen(skb))
                        return 1;
        } else if (skb_clone_writable(skb, writable_len))
                return 1;

        if (writable_len <= skb_headlen(skb))
                writable_len = 0;
        else
                writable_len -= skb_headlen(skb);

        return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);
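
/* Illustrative caller pattern (not part of this file): a hook or target that
 * wants to modify the IPv4 header typically does
 *
 *        if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *                return NF_DROP;
 *        iph = ip_hdr(skb);
 *        ... modify iph, then update the checksum ...
 *
 * so that a cloned or shared skb is copied before it is written to.
 */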

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be in
   the hash table, and hence manufactured ICMP or RST packets will not be
   associated with it. */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
EXPORT_SYMBOL(ip_ct_attach);

void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
        void (*attach)(struct sk_buff *, struct sk_buff *);

        if (skb->nfct) {
                rcu_read_lock();
                attach = rcu_dereference(ip_ct_attach);
                if (attach)
                        attach(new, skb);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(nf_ct_attach);
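
/* Illustrative use (not part of this file): code that manufactures a reply
 * or error packet for an existing one can carry the conntrack reference over
 * with
 *
 *        nf_ct_attach(reply_skb, orig_skb);
 *
 * so the generated packet is associated with the original connection (the
 * ICMP error path is the typical user).
 */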

void (*nf_ct_destroy)(struct nf_conntrack *);
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
        void (*destroy)(struct nf_conntrack *);

        rcu_read_lock();
        destroy = rcu_dereference(nf_ct_destroy);
        BUG_ON(destroy == NULL);
        destroy(nfct);
        rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_net_netfilter;
EXPORT_SYMBOL(proc_net_netfilter);
#endif

void __init netfilter_init(void)
{
        int i, h;
        for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&nf_hooks[i][h]);
        }

#ifdef CONFIG_PROC_FS
        proc_net_netfilter = proc_mkdir("netfilter", init_net.proc_net);
        if (!proc_net_netfilter)
                panic("cannot create netfilter proc entry");
#endif

        if (netfilter_queue_init() < 0)
                panic("cannot initialize nf_queue");
        if (netfilter_log_init() < 0)
                panic("cannot initialize nf_log");
}

#ifdef CONFIG_SYSCTL
struct ctl_path nf_net_netfilter_sysctl_path[] = {
        { .procname = "net", },
        { .procname = "netfilter", },
        { }
};
EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
#endif /* CONFIG_SYSCTL */
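
/* Illustrative use (not part of this file): a netfilter module can hang its
 * own sysctls under "net.netfilter" by reusing this path, e.g.
 *
 *        static struct ctl_table my_table[] = {
 *                { .procname = "my_tunable", ... },
 *                { }
 *        };
 *
 *        hdr = register_sysctl_paths(nf_net_netfilter_sysctl_path, my_table);
 *
 * "my_table" and "my_tunable" are made-up names for the example.
 */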