linux/net/netfilter/core.c
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);

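/*
 * Publish the address-family specific helpers for afinfo->family.
 * The pointer is installed with rcu_assign_pointer() under afinfo_mutex,
 * so readers that use rcu_dereference() always see a fully initialised
 * structure.
 */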
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	int err;

	err = mutex_lock_interruptible(&afinfo_mutex);
	if (err < 0)
		return err;
	rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

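/*
 * Withdraw the per-family helpers again.  synchronize_rcu() guarantees
 * that no RCU reader still holds the old pointer once this returns, so
 * the caller may free the structure afterwards.
 */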
void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);
static DEFINE_MUTEX(nf_hook_mutex);

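/*
 * Add a hook to the list for (reg->pf, reg->hooknum), keeping the list
 * sorted by ascending priority.  list_add_rcu() lets packets that are
 * concurrently traversing the list in nf_iterate() see either the old
 * or the new list, both of which are consistent.
 *
 * Minimal usage sketch (hypothetical module code, not part of this file):
 *
 *	static struct nf_hook_ops my_ops = {
 *		.hook     = my_hook_fn,
 *		.pf       = NFPROTO_IPV4,
 *		.hooknum  = NF_INET_LOCAL_IN,
 *		.priority = NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_hook(&my_ops);
 */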
int nf_register_hook(struct nf_hook_ops *reg)
{
	struct nf_hook_ops *elem;
	int err;

	err = mutex_lock_interruptible(&nf_hook_mutex);
	if (err < 0)
		return err;
	list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) {
		if (reg->priority < elem->priority)
			break;
	}
	list_add_rcu(&reg->list, elem->list.prev);
	mutex_unlock(&nf_hook_mutex);
	return 0;
}
EXPORT_SYMBOL(nf_register_hook);

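/*
 * Remove a hook from its list.  synchronize_net() waits for packets
 * that are still walking the old list under rcu_read_lock() to finish,
 * so the caller can safely free the hook's private data afterwards.
 */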
void nf_unregister_hook(struct nf_hook_ops *reg)
{
	mutex_lock(&nf_hook_mutex);
	list_del_rcu(&reg->list);
	mutex_unlock(&nf_hook_mutex);

	synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);

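/*
 * Register an array of n hooks.  On the first failure the hooks that
 * were already registered are unwound and the error is returned.
 */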
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_hooks);

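/* Undo a successful nf_register_hooks() call for the same array. */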
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		nf_unregister_hook(&reg[i]);
}
EXPORT_SYMBOL(nf_unregister_hooks);

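/*
 * Walk the hooks on head, continuing after *i and skipping entries
 * whose priority is below hook_thresh.  NF_REPEAT re-runs the same
 * hook; any other verdict except NF_ACCEPT is returned to the caller.
 * *i is updated as we go, so nf_hook_slow() can resume from the same
 * element after an NF_QUEUE verdict.
 */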
unsigned int nf_iterate(struct list_head *head,
			struct sk_buff *skb,
			unsigned int hook,
			const struct net_device *indev,
			const struct net_device *outdev,
			struct list_head **i,
			int (*okfn)(struct sk_buff *),
			int hook_thresh)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_continue_rcu(*i, head) {
		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

		if (hook_thresh > elem->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
		verdict = elem->hook(hook, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					elem->hook, hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			*i = (*i)->prev;
		}
	}
	return NF_ACCEPT;
}


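/* Slow path of the hook traversal: run the registered hooks for
 * (pf, hook) under rcu_read_lock().  An NF_QUEUE verdict hands the skb
 * to nf_queue(); if queueing fails, iteration resumes at the next hook.
 */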
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
		 struct net_device *indev,
		 struct net_device *outdev,
		 int (*okfn)(struct sk_buff *),
		 int hook_thresh)
{
	struct list_head *elem;
	unsigned int verdict;
	int ret = 0;

	/* We may already have this, but read-locks nest anyway */
	rcu_read_lock();

	elem = &nf_hooks[pf][hook];
next_hook:
	verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
			     outdev, &elem, okfn, hook_thresh);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
	} else if (verdict == NF_DROP) {
		kfree_skb(skb);
		ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);


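/*
 * Make sure the first writable_len bytes of skb are in the linear data
 * area and safe to modify in place.  Returns 1 on success, 0 if the skb
 * is too short or reallocation fails.
 *
 * Typical caller pattern (hypothetical example):
 *
 *	if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *		return NF_DROP;
 *	iph = ip_hdr(skb);	(header fields may now be rewritten)
 */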
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
	if (writable_len > skb->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (!skb_cloned(skb)) {
		if (writable_len <= skb_headlen(skb))
			return 1;
	} else if (skb_clone_writable(skb, writable_len))
		return 1;

	if (writable_len <= skb_headlen(skb))
		writable_len = 0;
	else
		writable_len -= skb_headlen(skb);

	return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be
   in the hash table, and hence manufactured ICMP or RST packets will not
   be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
EXPORT_SYMBOL(ip_ct_attach);

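/*
 * Attach the conntrack entry of skb to the locally generated reply new
 * (e.g. a manufactured ICMP error or TCP RST), going indirectly through
 * ip_ct_attach so that this works when conntrack is built as a module.
 */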
void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, struct sk_buff *);

	if (skb->nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void (*nf_ct_destroy)(struct nf_conntrack *);
EXPORT_SYMBOL(nf_ct_destroy);

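/*
 * Drop a conntrack object via the destructor installed by the conntrack
 * module in nf_ct_destroy; the destructor must have been registered by
 * the time any conntrack is destroyed, hence the BUG_ON().
 */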
void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	BUG_ON(destroy == NULL);
	destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_net_netfilter;
EXPORT_SYMBOL(proc_net_netfilter);
#endif

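/*
 * Boot-time initialisation: set up every per-protocol hook list head
 * and the /proc/net/netfilter directory, then bring up the queue and
 * logging infrastructure.  Any failure here is fatal.
 */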
void __init netfilter_init(void)
{
	int i, h;
	for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			INIT_LIST_HEAD(&nf_hooks[i][h]);
	}

#ifdef CONFIG_PROC_FS
	proc_net_netfilter = proc_mkdir("netfilter", init_net.proc_net);
	if (!proc_net_netfilter)
		panic("cannot create netfilter proc entry");
#endif

	if (netfilter_queue_init() < 0)
		panic("cannot initialize nf_queue");
	if (netfilter_log_init() < 0)
		panic("cannot initialize nf_log");
}

#ifdef CONFIG_SYSCTL
struct ctl_path nf_net_netfilter_sysctl_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "netfilter", .ctl_name = NET_NETFILTER, },
	{ }
};
EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
#endif /* CONFIG_SYSCTL */