linux/net/netfilter/nf_queue.c
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler (the nfnetlink_queue backend) can be registered
 * per netns; registering a second one is a bug. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
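
/*
 * Illustrative sketch (not part of this file): how a queueing backend
 * such as nfnetlink_queue plugs into the registration API above.  The
 * field names .outfn and .nf_hook_drop match how this file invokes the
 * handler; the example_* identifiers are hypothetical, and the callback
 * return types assume this tree's struct nf_queue_handler layout.
 */
#if 0
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* Hand the packet to userspace; nf_reinject() must eventually
         * be called for this entry, no matter what.
         */
        return 0;
}

static void example_hook_drop(struct net *net)
{
        /* Drop queued entries whose hooks are going away. */
}

static const struct nf_queue_handler example_qh = {
        .outfn          = example_outfn,
        .nf_hook_drop   = example_hook_drop,
};

static void example_init(struct net *net)
{
        nf_register_queue_handler(net, &example_qh);
}
#endif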

static void nf_queue_entry_release_br_nf_refs(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

        if (nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);

        nf_queue_entry_release_br_nf_refs(entry->skb);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

        if (nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);

        nf_queue_entry_get_br_nf_refs(entry->skb);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
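
/*
 * Illustrative sketch of how the get/release pair above is balanced;
 * this mirrors what __nf_queue() below actually does.  References are
 * taken just before the entry leaves the stack, and dropped again if
 * the hand-off to the backend fails (otherwise nf_reinject() drops
 * them when the packet comes back).
 */
#if 0
        nf_queue_entry_get_refs(entry);         /* packet leaves the stack */
        status = qh->outfn(entry, queuenum);    /* hand off to the backend */
        if (status < 0)
                nf_queue_entry_release_refs(entry);     /* hand-off failed */
#endif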

void nf_queue_nf_hook_drop(struct net *net)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

static void nf_ip_saveroute(const struct sk_buff *skb,
                            struct nf_queue_entry *entry)
{
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                rt_info->tos = iph->tos;
                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
                             struct nf_queue_entry *entry)
{
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);

                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}
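
/*
 * Illustrative sketch: what the route keys saved above are for.  At
 * reinject time, nf_reroute() compares them against the (possibly
 * rewritten) header and re-routes LOCAL_OUT packets whose addresses,
 * tos or mark changed while queued.  The real IPv4 check lives in
 * net/ipv4/netfilter.c; this sketch assumes the three-argument
 * ip_route_me_harder() signature of this era.
 */
#if 0
static int example_ip_reroute(struct sk_buff *skb,
                              const struct nf_queue_entry *entry)
{
        const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                if (iph->tos != rt_info->tos ||
                    iph->daddr != rt_info->daddr ||
                    iph->saddr != rt_info->saddr ||
                    skb->mark != rt_info->mark)
                        return ip_route_me_harder(entry->state.net, skb,
                                                  RTN_UNSPEC);
        }
        return 0;
}
#endif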

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      unsigned int index, unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;
        unsigned int route_key_size;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        switch (state->pf) {
        case AF_INET:
                route_key_size = sizeof(struct ip_rt_info);
                break;
        case AF_INET6:
                route_key_size = sizeof(struct ip6_rt_info);
                break;
        default:
                route_key_size = 0;
                break;
        }

        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        if (skb_dst(skb) && !skb_dst_force(skb)) {
                status = -ENETDOWN;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .state  = *state,
                .hook_index = index,
                .size   = sizeof(*entry) + route_key_size,
        };

        nf_queue_entry_get_refs(entry);

        switch (entry->state.pf) {
        case AF_INET:
                nf_ip_saveroute(skb, entry);
                break;
        case AF_INET6:
                nf_ip6_saveroute(skb, entry);
                break;
        }

        status = qh->outfn(entry, queuenum);

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             unsigned int index, unsigned int verdict)
{
        int ret;

        ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                        return 1;
                kfree_skb(skb);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
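
/*
 * Illustrative sketch: how a hook owner produces the verdict consumed
 * above.  NF_QUEUE_NR() packs the queue number into the upper verdict
 * bits (recovered via the NF_VERDICT_QBITS shift in nf_queue()), and
 * the bypass flag turns a missing listener (-ESRCH) into "continue
 * traversal" instead of a drop.  The hook function itself is
 * hypothetical.
 */
#if 0
static unsigned int example_hookfn(void *priv, struct sk_buff *skb,
                                   const struct nf_hook_state *state)
{
        /* Queue to userspace queue 7; accept if nobody is listening. */
        return NF_QUEUE_NR(7) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}
#endif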

static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               const struct nf_hook_entries *hooks,
                               unsigned int *index)
{
        const struct nf_hook_entry *hook;
        unsigned int verdict, i = *index;

        while (i < hooks->num_hook_entries) {
                hook = &hooks->hooks[i];
repeat:
                verdict = nf_hook_entry_hookfn(hook, skb, state);
                if (verdict != NF_ACCEPT) {
                        *index = i;
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                i++;
        }

        *index = i;
        return NF_ACCEPT;
}
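
/*
 * Illustrative: what NF_REPEAT means to the loop above.  A hook that
 * returns NF_REPEAT is invoked again at the same position (the repeat
 * label) rather than advancing; any verdict other than NF_ACCEPT stops
 * traversal with *index left at the current hook.  Contrived example,
 * hypothetical names:
 */
#if 0
static unsigned int example_repeat_hookfn(void *priv, struct sk_buff *skb,
                                          const struct nf_hook_state *state)
{
        if (!skb->mark) {
                skb->mark = 1;
                return NF_REPEAT;       /* run this same hook once more */
        }
        return NF_ACCEPT;
}
#endif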

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
        switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
        case NFPROTO_BRIDGE:
                return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
        case NFPROTO_IPV4:
                return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
        case NFPROTO_IPV6:
                return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
        default:
                WARN_ON_ONCE(1);
                return NULL;
        }
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        const struct nf_hook_entry *hook_entry;
        const struct nf_hook_entries *hooks;
        struct sk_buff *skb = entry->skb;
        const struct net *net;
        unsigned int i;
        int err;
        u8 pf;

        net = entry->state.net;
        pf = entry->state.pf;

        hooks = nf_hook_entries_head(net, pf, entry->state.hook);

        nf_queue_entry_release_refs(entry);

        i = entry->hook_index;
        if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
                kfree_skb(skb);
                kfree(entry);
                return;
        }

        hook_entry = &hooks->hooks[i];

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                if (nf_reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
next_hook:
                ++i;
                verdict = nf_iterate(skb, &entry->state, hooks, &i);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, i, verdict);
                if (err == 1)
                        goto next_hook;
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
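
/*
 * Illustrative sketch of the backend half of the contract: when
 * userspace delivers a verdict for a queued packet, the backend hands
 * the entry back through nf_reinject() exactly once, under the RCU
 * read-side lock (see the comment above nf_reinject()).  The function
 * name is hypothetical; in-tree, nfnetlink_queue does this from its
 * verdict handler.
 */
#if 0
static void example_deliver_verdict(struct nf_queue_entry *entry,
                                    unsigned int verdict)
{
        rcu_read_lock();
        nf_reinject(entry, verdict);    /* consumes the entry */
        rcu_read_unlock();
}
#endif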