/* net/netfilter/nf_queue.c */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long-term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* Return -EBUSY if a different handler is already registered, -EEXIST if
 * the same handler is already registered, or 0 on success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
        int ret;
        const struct nf_queue_handler *old;

        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;

        mutex_lock(&queue_handler_mutex);
        old = rcu_dereference_protected(queue_handler[pf],
                                        lockdep_is_held(&queue_handler_mutex));
        if (old == qh)
                ret = -EEXIST;
        else if (old)
                ret = -EBUSY;
        else {
                RCU_INIT_POINTER(queue_handler[pf], qh);
                ret = 0;
        }
        mutex_unlock(&queue_handler_mutex);

        return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
        const struct nf_queue_handler *old;

        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;

        mutex_lock(&queue_handler_mutex);
        old = rcu_dereference_protected(queue_handler[pf],
                                        lockdep_is_held(&queue_handler_mutex));
        if (old && old != qh) {
                mutex_unlock(&queue_handler_mutex);
                return -EINVAL;
        }

        RCU_INIT_POINTER(queue_handler[pf], NULL);
        mutex_unlock(&queue_handler_mutex);

        synchronize_rcu();

        return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
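
/*
 * Illustrative sketch (not part of the original file): a minimal queue
 * handler pairing the register/unregister calls above.  All example_*
 * names are hypothetical; a real handler such as nfnetlink_queue hands
 * packets to user space instead of parking them on a list.
 */
static LIST_HEAD(example_entries);
static DEFINE_SPINLOCK(example_lock);

static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* Park the entry (struct nf_queue_entry is chained via ->list);
         * it is reinjected later by example_flush_queue(), see the sketch
         * after nf_reinject() below.  Returning 0 means "queued". */
        spin_lock_bh(&example_lock);
        list_add_tail(&entry->list, &example_entries);
        spin_unlock_bh(&example_lock);
        return 0;
}

static const struct nf_queue_handler example_handler = {
        .name   = "example",
        .outfn  = example_outfn,
};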

void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
        u_int8_t pf;

        mutex_lock(&queue_handler_mutex);
        for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
                if (rcu_dereference_protected(
                                queue_handler[pf],
                                lockdep_is_held(&queue_handler_mutex)
                                ) == qh)
                        RCU_INIT_POINTER(queue_handler[pf], NULL);
        }
        mutex_unlock(&queue_handler_mutex);

        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
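
/*
 * Illustrative usage (hypothetical, continuing the example above): a module
 * that bound the same handler to several protocol families can drop every
 * binding at once on unload with
 *
 *      nf_unregister_queue_handlers(&example_handler);
 */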

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        /* Release those devices we held, or Alexey will kill me. */
        if (entry->indev)
                dev_put(entry->indev);
        if (entry->outdev)
                dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

                if (nf_bridge->physindev)
                        dev_put(nf_bridge->physindev);
                if (nf_bridge->physoutdev)
                        dev_put(nf_bridge->physoutdev);
        }
#endif
        /* Drop reference to owner of hook which queued us. */
        module_put(entry->elem->owner);
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
                      struct list_head *elem,
                      u_int8_t pf, unsigned int hook,
                      struct net_device *indev,
                      struct net_device *outdev,
                      int (*okfn)(struct sk_buff *),
                      unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev;
        struct net_device *physoutdev;
#endif
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        rcu_read_lock();

        qh = rcu_dereference(queue_handler[pf]);
        if (!qh) {
                status = -ESRCH;
                goto err_unlock;
        }

        afinfo = nf_get_afinfo(pf);
        if (!afinfo)
                goto err_unlock;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err_unlock;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .elem   = list_entry(elem, struct nf_hook_ops, list),
                .pf     = pf,
                .hook   = hook,
                .indev  = indev,
                .outdev = outdev,
                .okfn   = okfn,
        };

        /* If it's going away, ignore hook. */
        if (!try_module_get(entry->elem->owner)) {
                status = -ECANCELED;
                goto err_unlock;
        }
        /* Bump dev refs so they don't vanish while packet is out */
        if (indev)
                dev_hold(indev);
        if (outdev)
                dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                physindev = skb->nf_bridge->physindev;
                if (physindev)
                        dev_hold(physindev);
                physoutdev = skb->nf_bridge->physoutdev;
                if (physoutdev)
                        dev_hold(physoutdev);
        }
#endif
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);

        rcu_read_unlock();

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err_unlock:
        rcu_read_unlock();
err:
        kfree(entry);
        return status;
}

int nf_queue(struct sk_buff *skb,
             struct list_head *elem,
             u_int8_t pf, unsigned int hook,
             struct net_device *indev,
             struct net_device *outdev,
             int (*okfn)(struct sk_buff *),
             unsigned int queuenum)
{
        struct sk_buff *segs;
        int err;
        unsigned int queued;

        if (!skb_is_gso(skb))
                return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
                                  queuenum);

        /* A GSO skb is segmented and each segment is queued separately;
         * skb->protocol must be set first for skb_gso_segment(). */
        switch (pf) {
        case NFPROTO_IPV4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case NFPROTO_IPV6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        }
        segs = skb_gso_segment(skb, 0);
        /* PTR_ERR(segs) is deliberately not propagated, to limit the number
         * of error codes nf_queue can return; for instance, callers rely on
         * -ECANCELED to mean 'ignore this hook'.
         */
        if (IS_ERR(segs))
                return -EINVAL;

        queued = 0;
        err = 0;
        do {
                struct sk_buff *nskb = segs->next;

                segs->next = NULL;
                if (err == 0)
                        err = __nf_queue(segs, elem, pf, hook, indev,
                                         outdev, okfn, queuenum);
                if (err == 0)
                        queued++;
                else
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);

        /* Also free the original skb if at least some segments were queued. */
        if (unlikely(err && queued))
                err = 0;
        if (err == 0)
                kfree_skb(skb);
        return err;
}
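
/*
 * Illustrative sketch (not part of the original file): packets reach
 * nf_queue() when a netfilter hook returns an NF_QUEUE verdict.
 * NF_QUEUE_NR() encodes the target queue number in the upper verdict
 * bits.  The hook and ops below are hypothetical; a module would enable
 * them with nf_register_hook(&example_ops).
 */
static unsigned int example_queue_hook(unsigned int hooknum,
                                       struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
                                       int (*okfn)(struct sk_buff *))
{
        /* Divert everything to queue 0; the bypass flag makes the packet
         * be accepted instead of dropped if no handler is registered. */
        return NF_QUEUE_NR(0) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}

static struct nf_hook_ops example_ops = {
        .hook     = example_queue_hook,
        .owner    = THIS_MODULE,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_LOCAL_IN,
        .priority = 0,
};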

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct sk_buff *skb = entry->skb;
        struct list_head *elem = &entry->elem->list;
        const struct nf_afinfo *afinfo;
        int err;

        rcu_read_lock();

        nf_queue_entry_release_refs(entry);

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT) {
                elem = elem->prev;
                verdict = NF_ACCEPT;
        }

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->pf);
                if (!afinfo || afinfo->reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
                                     skb, entry->hook,
                                     entry->indev, entry->outdev, &elem,
                                     entry->okfn, INT_MIN);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->okfn(skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = __nf_queue(skb, elem, entry->pf, entry->hook,
                                 entry->indev, entry->outdev, entry->okfn,
                                 verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
                        if (err == -ESRCH &&
                           (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
                        kfree_skb(skb);
                }
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }
        rcu_read_unlock();
        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
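
/*
 * Illustrative sketch (continuing the hypothetical example_* handler above):
 * every parked entry must eventually come back through nf_reinject(), so the
 * handler drains its backlog, e.g. with NF_DROP, before unregistering.
 */
static void example_flush_queue(void)
{
        struct nf_queue_entry *entry, *next;
        LIST_HEAD(batch);

        /* Detach the backlog under the lock, then reinject lock-free. */
        spin_lock_bh(&example_lock);
        list_splice_init(&example_entries, &batch);
        spin_unlock_bh(&example_lock);

        list_for_each_entry_safe(entry, next, &batch, list) {
                list_del(&entry->list);
                nf_reinject(entry, NF_DROP);    /* frees the entry */
        }
}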

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos >= ARRAY_SIZE(queue_handler))
                return NULL;

        return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;

        if (*pos >= ARRAY_SIZE(queue_handler))
                return NULL;

        return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
        int ret;
        loff_t *pos = v;
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(queue_handler[*pos]);
        if (!qh)
                ret = seq_printf(s, "%2lld NONE\n", *pos);
        else
                ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
        rcu_read_unlock();

        return ret;
}

static const struct seq_operations nfqueue_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqueue_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
#endif /* PROC_FS */
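
/*
 * Example (contents depend on which handlers are currently bound): reading
 * /proc/net/netfilter/nf_queue prints one line per protocol family, e.g.
 * after user space has bound nfnetlink_queue to AF_INET:
 *
 *       0 NONE
 *       1 NONE
 *       2 nf_queue
 *       3 NONE
 *      ...
 *      12 NONE
 */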

int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
        if (!proc_create("nf_queue", S_IRUGO,
                         proc_net_netfilter, &nfqueue_file_ops))
                return -1;
#endif
        return 0;
}