linux/net/netfilter/nfnetlink_log.c
   1/*
   2 * This is a module which is used for logging packets to userspace via
   3 * nfnetlink.
   4 *
   5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
   6 *
   7 * Based on the old ipv4-only ipt_ULOG.c:
   8 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
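     /*
      * Illustrative usage (not part of this file): packets are typically
      * selected with an iptables rule such as
      *     iptables -A INPUT -j NFLOG --nflog-group 1 --nflog-prefix "input: "
      * and read back from log group 1 over nfnetlink, e.g. with
      * libnetfilter_log or ulogd.  Exact option names depend on the
      * iptables/ulogd versions installed.
      */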
  14#include <linux/module.h>
  15#include <linux/skbuff.h>
  16#include <linux/init.h>
  17#include <linux/ip.h>
  18#include <linux/ipv6.h>
  19#include <linux/netdevice.h>
  20#include <linux/netfilter.h>
  21#include <linux/netlink.h>
  22#include <linux/netfilter/nfnetlink.h>
  23#include <linux/netfilter/nfnetlink_log.h>
  24#include <linux/spinlock.h>
  25#include <linux/sysctl.h>
  26#include <linux/proc_fs.h>
  27#include <linux/security.h>
  28#include <linux/list.h>
  29#include <linux/jhash.h>
  30#include <linux/random.h>
  31#include <net/sock.h>
  32#include <net/netfilter/nf_log.h>
  33#include <net/netfilter/nfnetlink_log.h>
  34
  35#include <asm/atomic.h>
  36
  37#ifdef CONFIG_BRIDGE_NETFILTER
  38#include "../bridge/br_private.h"
  39#endif
  40
  41#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
   42#define NFULNL_TIMEOUT_DEFAULT  100     /* 1 second (timeout unit is 1/100 s) */
  43#define NFULNL_QTHRESH_DEFAULT  100     /* 100 packets */
   44#define NFULNL_COPY_RANGE_MAX   0xFFFF  /* max packet size is limited by the 16-bit struct nlattr nla_len field */
  45
  46#define PRINTR(x, args...)      do { if (net_ratelimit()) \
   47                                     printk(x, ## args); } while (0)
  48
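     /* One nfulnl_instance exists per log group that a userspace listener has
      * bound to.  Packet messages are batched into the pre-allocated skb and
      * pushed to the peer once qthreshold messages have accumulated or the
      * flush timer expires. */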
  49struct nfulnl_instance {
  50        struct hlist_node hlist;        /* global list of instances */
  51        spinlock_t lock;
  52        atomic_t use;                   /* use count */
  53
  54        unsigned int qlen;              /* number of nlmsgs in skb */
   55        struct sk_buff *skb;            /* pre-allocated skb */
  56        struct timer_list timer;
  57        int peer_pid;                   /* PID of the peer process */
  58
  59        /* configurable parameters */
  60        unsigned int flushtimeout;      /* timeout until queue flush */
  61        unsigned int nlbufsiz;          /* netlink buffer allocation size */
  62        unsigned int qthreshold;        /* threshold of the queue */
  63        u_int32_t copy_range;
  64        u_int32_t seq;                  /* instance-local sequential counter */
  65        u_int16_t group_num;            /* number of this queue */
  66        u_int16_t flags;
  67        u_int8_t copy_mode;
  68};
  69
  70static DEFINE_RWLOCK(instances_lock);
  71static atomic_t global_seq;
  72
  73#define INSTANCE_BUCKETS        16
  74static struct hlist_head instance_table[INSTANCE_BUCKETS];
  75static unsigned int hash_init;
  76
  77static inline u_int8_t instance_hashfn(u_int16_t group_num)
  78{
  79        return ((group_num & 0xff) % INSTANCE_BUCKETS);
  80}
  81
  82static struct nfulnl_instance *
  83__instance_lookup(u_int16_t group_num)
  84{
  85        struct hlist_head *head;
  86        struct hlist_node *pos;
  87        struct nfulnl_instance *inst;
  88
  89        head = &instance_table[instance_hashfn(group_num)];
  90        hlist_for_each_entry(inst, pos, head, hlist) {
  91                if (inst->group_num == group_num)
  92                        return inst;
  93        }
  94        return NULL;
  95}
  96
  97static inline void
  98instance_get(struct nfulnl_instance *inst)
  99{
 100        atomic_inc(&inst->use);
 101}
 102
 103static struct nfulnl_instance *
 104instance_lookup_get(u_int16_t group_num)
 105{
 106        struct nfulnl_instance *inst;
 107
 108        read_lock_bh(&instances_lock);
 109        inst = __instance_lookup(group_num);
 110        if (inst)
 111                instance_get(inst);
 112        read_unlock_bh(&instances_lock);
 113
 114        return inst;
 115}
 116
 117static void
 118instance_put(struct nfulnl_instance *inst)
 119{
 120        if (inst && atomic_dec_and_test(&inst->use)) {
 121                kfree(inst);
 122                module_put(THIS_MODULE);
 123        }
 124}
 125
 126static void nfulnl_timer(unsigned long data);
 127
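     /* Allocate and register a new instance for group_num.  The use count
      * starts at two: one reference is held by the instance table, the other
      * is returned to the caller and dropped again with instance_put().  A
      * module reference pins nfnetlink_log while the instance exists. */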
 128static struct nfulnl_instance *
 129instance_create(u_int16_t group_num, int pid)
 130{
 131        struct nfulnl_instance *inst;
 132        int err;
 133
 134        write_lock_bh(&instances_lock);
 135        if (__instance_lookup(group_num)) {
 136                err = -EEXIST;
 137                goto out_unlock;
 138        }
 139
 140        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
 141        if (!inst) {
 142                err = -ENOMEM;
 143                goto out_unlock;
 144        }
 145
 146        if (!try_module_get(THIS_MODULE)) {
 147                kfree(inst);
 148                err = -EAGAIN;
 149                goto out_unlock;
 150        }
 151
 152        INIT_HLIST_NODE(&inst->hlist);
 153        spin_lock_init(&inst->lock);
 154        /* needs to be two, since we _put() after creation */
 155        atomic_set(&inst->use, 2);
 156
 157        setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 158
 159        inst->peer_pid = pid;
 160        inst->group_num = group_num;
 161
 162        inst->qthreshold        = NFULNL_QTHRESH_DEFAULT;
 163        inst->flushtimeout      = NFULNL_TIMEOUT_DEFAULT;
 164        inst->nlbufsiz          = NFULNL_NLBUFSIZ_DEFAULT;
 165        inst->copy_mode         = NFULNL_COPY_PACKET;
 166        inst->copy_range        = NFULNL_COPY_RANGE_MAX;
 167
 168        hlist_add_head(&inst->hlist,
 169                       &instance_table[instance_hashfn(group_num)]);
 170
 171        write_unlock_bh(&instances_lock);
 172
 173        return inst;
 174
 175out_unlock:
 176        write_unlock_bh(&instances_lock);
 177        return ERR_PTR(err);
 178}
 179
 180static void __nfulnl_flush(struct nfulnl_instance *inst);
 181
 182static void
 183__instance_destroy(struct nfulnl_instance *inst)
 184{
 185        /* first pull it out of the global list */
 186        hlist_del(&inst->hlist);
 187
 188        /* then flush all pending packets from skb */
 189
 190        spin_lock_bh(&inst->lock);
 191        if (inst->skb)
 192                __nfulnl_flush(inst);
 193        spin_unlock_bh(&inst->lock);
 194
 195        /* and finally put the refcount */
 196        instance_put(inst);
 197}
 198
 199static inline void
 200instance_destroy(struct nfulnl_instance *inst)
 201{
 202        write_lock_bh(&instances_lock);
 203        __instance_destroy(inst);
 204        write_unlock_bh(&instances_lock);
 205}
 206
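     /* Switch the copy mode of an instance.  NFULNL_COPY_NONE and
      * NFULNL_COPY_META include no packet payload; NFULNL_COPY_PACKET copies
      * up to @range bytes of payload, clamped to NFULNL_COPY_RANGE_MAX. */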
 207static int
 208nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
 209                  unsigned int range)
 210{
 211        int status = 0;
 212
 213        spin_lock_bh(&inst->lock);
 214
 215        switch (mode) {
 216        case NFULNL_COPY_NONE:
 217        case NFULNL_COPY_META:
 218                inst->copy_mode = mode;
 219                inst->copy_range = 0;
 220                break;
 221
 222        case NFULNL_COPY_PACKET:
 223                inst->copy_mode = mode;
 224                inst->copy_range = min_t(unsigned int,
 225                                         range, NFULNL_COPY_RANGE_MAX);
 226                break;
 227
 228        default:
 229                status = -EINVAL;
 230                break;
 231        }
 232
 233        spin_unlock_bh(&inst->lock);
 234
 235        return status;
 236}
 237
 238static int
 239nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
 240{
 241        int status;
 242
 243        spin_lock_bh(&inst->lock);
 244        if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
 245                status = -ERANGE;
 246        else if (nlbufsiz > 131072)
 247                status = -ERANGE;
 248        else {
 249                inst->nlbufsiz = nlbufsiz;
 250                status = 0;
 251        }
 252        spin_unlock_bh(&inst->lock);
 253
 254        return status;
 255}
 256
 257static int
 258nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
 259{
 260        spin_lock_bh(&inst->lock);
 261        inst->flushtimeout = timeout;
 262        spin_unlock_bh(&inst->lock);
 263
 264        return 0;
 265}
 266
 267static int
 268nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
 269{
 270        spin_lock_bh(&inst->lock);
 271        inst->qthreshold = qthresh;
 272        spin_unlock_bh(&inst->lock);
 273
 274        return 0;
 275}
 276
 277static int
 278nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
 279{
 280        spin_lock_bh(&inst->lock);
 281        inst->flags = flags;
 282        spin_unlock_bh(&inst->lock);
 283
 284        return 0;
 285}
 286
 287static struct sk_buff *
 288nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
 289{
 290        struct sk_buff *skb;
 291        unsigned int n;
 292
 293        /* alloc skb which should be big enough for a whole multipart
 294         * message.  WARNING: has to be <= 128k due to slab restrictions */
 295
 296        n = max(inst_size, pkt_size);
 297        skb = alloc_skb(n, GFP_ATOMIC);
 298        if (!skb) {
 299                PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
  300                        n);
 301
 302                if (n > pkt_size) {
 303                        /* try to allocate only as much as we need for current
 304                         * packet */
 305
 306                        skb = alloc_skb(pkt_size, GFP_ATOMIC);
 307                        if (!skb)
 308                                PRINTR("nfnetlink_log: can't even alloc %u "
 309                                       "bytes\n", pkt_size);
 310                }
 311        }
 312
 313        return skb;
 314}
 315
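     /* Hand the batched skb to nfnetlink_unicast() and reset the batch state.
      * If more than one packet message was queued, a trailing NLMSG_DONE
      * header terminates the multipart message.  Called with inst->lock held. */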
 316static int
 317__nfulnl_send(struct nfulnl_instance *inst)
 318{
 319        int status = -1;
 320
 321        if (inst->qlen > 1)
 322                NLMSG_PUT(inst->skb, 0, 0,
 323                          NLMSG_DONE,
 324                          sizeof(struct nfgenmsg));
 325
 326        status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
 327
 328        inst->qlen = 0;
 329        inst->skb = NULL;
 330
 331nlmsg_failure:
 332        return status;
 333}
 334
 335static void
 336__nfulnl_flush(struct nfulnl_instance *inst)
 337{
 338        /* timer holds a reference */
 339        if (del_timer(&inst->timer))
 340                instance_put(inst);
 341        if (inst->skb)
 342                __nfulnl_send(inst);
 343}
 344
 345static void
 346nfulnl_timer(unsigned long data)
 347{
 348        struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
 349
 350        spin_lock_bh(&inst->lock);
 351        if (inst->skb)
 352                __nfulnl_send(inst);
 353        spin_unlock_bh(&inst->lock);
 354        instance_put(inst);
 355}
 356
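     /* Append one NFULNL_MSG_PACKET message to the instance's skb, including
      * the packet header, interface indices, mark, hardware header, timestamp,
      * uid/gid, sequence numbers and up to data_len bytes of payload. */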
  357/* This is an inline function, so we don't really care about the long
  358 * list of arguments */
 359static inline int
 360__build_packet_message(struct nfulnl_instance *inst,
 361                        const struct sk_buff *skb,
 362                        unsigned int data_len,
 363                        u_int8_t pf,
 364                        unsigned int hooknum,
 365                        const struct net_device *indev,
 366                        const struct net_device *outdev,
 367                        const struct nf_loginfo *li,
 368                        const char *prefix, unsigned int plen)
 369{
 370        struct nfulnl_msg_packet_hdr pmsg;
 371        struct nlmsghdr *nlh;
 372        struct nfgenmsg *nfmsg;
 373        __be32 tmp_uint;
 374        sk_buff_data_t old_tail = inst->skb->tail;
 375
 376        nlh = NLMSG_PUT(inst->skb, 0, 0,
 377                        NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
 378                        sizeof(struct nfgenmsg));
 379        nfmsg = NLMSG_DATA(nlh);
 380        nfmsg->nfgen_family = pf;
 381        nfmsg->version = NFNETLINK_V0;
 382        nfmsg->res_id = htons(inst->group_num);
 383
 384        pmsg.hw_protocol        = skb->protocol;
 385        pmsg.hook               = hooknum;
 386
 387        NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
 388
 389        if (prefix)
 390                NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);
 391
 392        if (indev) {
 393#ifndef CONFIG_BRIDGE_NETFILTER
 394                NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
 395                             htonl(indev->ifindex));
 396#else
 397                if (pf == PF_BRIDGE) {
  398                        /* Case 1: indev is the physical input device, we
  399                         * need to look up the bridge group (when called
  400                         * from netfilter_bridge) */
 401                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
 402                                     htonl(indev->ifindex));
 403                        /* this is the bridge group "brX" */
 404                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
 405                                     htonl(indev->br_port->br->dev->ifindex));
 406                } else {
 407                        /* Case 2: indev is bridge group, we need to look for
 408                         * physical device (when called from ipv4) */
 409                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
 410                                     htonl(indev->ifindex));
 411                        if (skb->nf_bridge && skb->nf_bridge->physindev)
 412                                NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
 413                                             htonl(skb->nf_bridge->physindev->ifindex));
 414                }
 415#endif
 416        }
 417
 418        if (outdev) {
 419                tmp_uint = htonl(outdev->ifindex);
 420#ifndef CONFIG_BRIDGE_NETFILTER
 421                NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
 422                             htonl(outdev->ifindex));
 423#else
 424                if (pf == PF_BRIDGE) {
 425                        /* Case 1: outdev is physical output device, we need to
 426                         * look for bridge group (when called from
 427                         * netfilter_bridge) */
 428                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
 429                                     htonl(outdev->ifindex));
 430                        /* this is the bridge group "brX" */
 431                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
 432                                     htonl(outdev->br_port->br->dev->ifindex));
 433                } else {
  434                        /* Case 2: outdev is a bridge group, we need to look
  435                         * for the physical output device (when called from ipv4) */
 436                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
 437                                     htonl(outdev->ifindex));
 438                        if (skb->nf_bridge && skb->nf_bridge->physoutdev)
 439                                NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
 440                                             htonl(skb->nf_bridge->physoutdev->ifindex));
 441                }
 442#endif
 443        }
 444
 445        if (skb->mark)
 446                NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
 447
 448        if (indev && skb->dev) {
 449                struct nfulnl_msg_packet_hw phw;
 450                int len = dev_parse_header(skb, phw.hw_addr);
 451                if (len > 0) {
 452                        phw.hw_addrlen = htons(len);
 453                        NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
 454                }
 455        }
 456
 457        if (indev && skb_mac_header_was_set(skb)) {
 458                NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
 459                NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
 460                             htons(skb->dev->hard_header_len));
 461                NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
 462                        skb_mac_header(skb));
 463        }
 464
 465        if (skb->tstamp.tv64) {
 466                struct nfulnl_msg_packet_timestamp ts;
 467                struct timeval tv = ktime_to_timeval(skb->tstamp);
 468                ts.sec = cpu_to_be64(tv.tv_sec);
 469                ts.usec = cpu_to_be64(tv.tv_usec);
 470
 471                NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
 472        }
 473
 474        /* UID */
 475        if (skb->sk) {
 476                read_lock_bh(&skb->sk->sk_callback_lock);
 477                if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
 478                        struct file *file = skb->sk->sk_socket->file;
 479                        __be32 uid = htonl(file->f_cred->fsuid);
 480                        __be32 gid = htonl(file->f_cred->fsgid);
 481                        /* need to unlock here since NLA_PUT may goto */
 482                        read_unlock_bh(&skb->sk->sk_callback_lock);
 483                        NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
 484                        NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
 485                } else
 486                        read_unlock_bh(&skb->sk->sk_callback_lock);
 487        }
 488
 489        /* local sequence number */
 490        if (inst->flags & NFULNL_CFG_F_SEQ)
 491                NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));
 492
 493        /* global sequence number */
 494        if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
 495                NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
 496                             htonl(atomic_inc_return(&global_seq)));
 497
 498        if (data_len) {
 499                struct nlattr *nla;
 500                int size = nla_attr_size(data_len);
 501
 502                if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
 503                        printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
 504                        goto nlmsg_failure;
 505                }
 506
 507                nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
 508                nla->nla_type = NFULA_PAYLOAD;
 509                nla->nla_len = size;
 510
 511                if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
 512                        BUG();
 513        }
 514
 515        nlh->nlmsg_len = inst->skb->tail - old_tail;
 516        return 0;
 517
 518nlmsg_failure:
 519nla_put_failure:
 520        PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
 521        return -1;
 522}
 523
 524#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
 525
 526static struct nf_loginfo default_loginfo = {
 527        .type =         NF_LOG_TYPE_ULOG,
 528        .u = {
 529                .ulog = {
 530                        .copy_len       = 0xffff,
 531                        .group          = 0,
 532                        .qthreshold     = 1,
 533                },
 534        },
 535};
 536
 537/* log handler for internal netfilter logging api */
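     /* Invoked through the nf_log API (and exported for direct callers such
      * as the xt_NFLOG target) for every packet a rule logs to this group.
      * The message size is computed up front; if it no longer fits into the
      * currently batched skb, the batch is flushed to userspace first. */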
 538void
 539nfulnl_log_packet(u_int8_t pf,
 540                  unsigned int hooknum,
 541                  const struct sk_buff *skb,
 542                  const struct net_device *in,
 543                  const struct net_device *out,
 544                  const struct nf_loginfo *li_user,
 545                  const char *prefix)
 546{
 547        unsigned int size, data_len;
 548        struct nfulnl_instance *inst;
 549        const struct nf_loginfo *li;
 550        unsigned int qthreshold;
 551        unsigned int plen;
 552
 553        if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
 554                li = li_user;
 555        else
 556                li = &default_loginfo;
 557
 558        inst = instance_lookup_get(li->u.ulog.group);
 559        if (!inst)
 560                return;
 561
 562        plen = 0;
 563        if (prefix)
 564                plen = strlen(prefix) + 1;
 565
 566        /* FIXME: do we want to make the size calculation conditional based on
 567         * what is actually present?  way more branches and checks, but more
 568         * memory efficient... */
 569        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
 570                + nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
 571                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
 572                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
 573#ifdef CONFIG_BRIDGE_NETFILTER
 574                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
 575                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
 576#endif
 577                + nla_total_size(sizeof(u_int32_t))     /* mark */
 578                + nla_total_size(sizeof(u_int32_t))     /* uid */
 579                + nla_total_size(sizeof(u_int32_t))     /* gid */
 580                + nla_total_size(plen)                  /* prefix */
 581                + nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
 582                + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
 583
 584        if (in && skb_mac_header_was_set(skb)) {
 585                size +=   nla_total_size(skb->dev->hard_header_len)
 586                        + nla_total_size(sizeof(u_int16_t))     /* hwtype */
 587                        + nla_total_size(sizeof(u_int16_t));    /* hwlen */
 588        }
 589
 590        spin_lock_bh(&inst->lock);
 591
 592        if (inst->flags & NFULNL_CFG_F_SEQ)
 593                size += nla_total_size(sizeof(u_int32_t));
 594        if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
 595                size += nla_total_size(sizeof(u_int32_t));
 596
 597        qthreshold = inst->qthreshold;
  598        /* a smaller per-rule qthreshold overrides the per-instance one */
 599        if (li->u.ulog.qthreshold)
 600                if (qthreshold > li->u.ulog.qthreshold)
 601                        qthreshold = li->u.ulog.qthreshold;
 602
 603
 604        switch (inst->copy_mode) {
 605        case NFULNL_COPY_META:
 606        case NFULNL_COPY_NONE:
 607                data_len = 0;
 608                break;
 609
 610        case NFULNL_COPY_PACKET:
 611                if (inst->copy_range == 0
 612                    || inst->copy_range > skb->len)
 613                        data_len = skb->len;
 614                else
 615                        data_len = inst->copy_range;
 616
 617                size += nla_total_size(data_len);
 618                break;
 619
 620        default:
 621                goto unlock_and_release;
 622        }
 623
 624        if (inst->skb &&
 625            size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
  626                /* not enough room left in the pre-allocated skb for this
  627                 * message; flush what we have to userspace first. */
 628                __nfulnl_flush(inst);
 629        }
 630
 631        if (!inst->skb) {
 632                inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);
 633                if (!inst->skb)
 634                        goto alloc_failure;
 635        }
 636
 637        inst->qlen++;
 638
 639        __build_packet_message(inst, skb, data_len, pf,
 640                                hooknum, in, out, li, prefix, plen);
 641
 642        if (inst->qlen >= qthreshold)
 643                __nfulnl_flush(inst);
  644        /* timer_pending() is always called with inst->lock held, so
  645         * there is no chance of a race here */
 646        else if (!timer_pending(&inst->timer)) {
 647                instance_get(inst);
 648                inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
 649                add_timer(&inst->timer);
 650        }
 651
 652unlock_and_release:
 653        spin_unlock_bh(&inst->lock);
 654        instance_put(inst);
 655        return;
 656
 657alloc_failure:
 658        /* FIXME: statistics */
 659        goto unlock_and_release;
 660}
 661EXPORT_SYMBOL_GPL(nfulnl_log_packet);
 662
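     /* Netlink notifier callback: when a NETLINK_NETFILTER socket is
      * released, destroy every instance bound by that process so that no
      * stale peer_pid is left behind. */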
 663static int
 664nfulnl_rcv_nl_event(struct notifier_block *this,
 665                   unsigned long event, void *ptr)
 666{
 667        struct netlink_notify *n = ptr;
 668
 669        if (event == NETLINK_URELEASE &&
 670            n->protocol == NETLINK_NETFILTER && n->pid) {
 671                int i;
 672
 673                /* destroy all instances for this pid */
 674                write_lock_bh(&instances_lock);
 675                for  (i = 0; i < INSTANCE_BUCKETS; i++) {
 676                        struct hlist_node *tmp, *t2;
 677                        struct nfulnl_instance *inst;
 678                        struct hlist_head *head = &instance_table[i];
 679
 680                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
 681                                if ((n->net == &init_net) &&
 682                                    (n->pid == inst->peer_pid))
 683                                        __instance_destroy(inst);
 684                        }
 685                }
 686                write_unlock_bh(&instances_lock);
 687        }
 688        return NOTIFY_DONE;
 689}
 690
 691static struct notifier_block nfulnl_rtnl_notifier = {
 692        .notifier_call  = nfulnl_rcv_nl_event,
 693};
 694
 695static int
 696nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
 697                   const struct nlmsghdr *nlh,
 698                   const struct nlattr * const nfqa[])
 699{
 700        return -ENOTSUPP;
 701}
 702
 703static struct nf_logger nfulnl_logger __read_mostly = {
 704        .name   = "nfnetlink_log",
 705        .logfn  = &nfulnl_log_packet,
 706        .me     = THIS_MODULE,
 707};
 708
 709static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
 710        [NFULA_CFG_CMD]         = { .len = sizeof(struct nfulnl_msg_config_cmd) },
 711        [NFULA_CFG_MODE]        = { .len = sizeof(struct nfulnl_msg_config_mode) },
 712        [NFULA_CFG_TIMEOUT]     = { .type = NLA_U32 },
 713        [NFULA_CFG_QTHRESH]     = { .type = NLA_U32 },
 714        [NFULA_CFG_NLBUFSIZ]    = { .type = NLA_U32 },
 715        [NFULA_CFG_FLAGS]       = { .type = NLA_U16 },
 716};
 717
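     /* Handle an NFULNL_MSG_CONFIG request: PF_(UN)BIND attaches or detaches
      * the logger for a protocol family, (UN)BIND creates or destroys the
      * per-group instance, and the remaining attributes tune copy mode,
      * timeout, buffer size, queue threshold and flags of an existing
      * instance. */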
 718static int
 719nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 720                   const struct nlmsghdr *nlh,
 721                   const struct nlattr * const nfula[])
 722{
 723        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
 724        u_int16_t group_num = ntohs(nfmsg->res_id);
 725        struct nfulnl_instance *inst;
 726        struct nfulnl_msg_config_cmd *cmd = NULL;
 727        int ret = 0;
 728
 729        if (nfula[NFULA_CFG_CMD]) {
 730                u_int8_t pf = nfmsg->nfgen_family;
 731                cmd = nla_data(nfula[NFULA_CFG_CMD]);
 732
 733                /* Commands without queue context */
 734                switch (cmd->command) {
 735                case NFULNL_CFG_CMD_PF_BIND:
 736                        return nf_log_bind_pf(pf, &nfulnl_logger);
 737                case NFULNL_CFG_CMD_PF_UNBIND:
 738                        nf_log_unbind_pf(pf);
 739                        return 0;
 740                }
 741        }
 742
 743        inst = instance_lookup_get(group_num);
 744        if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
 745                ret = -EPERM;
 746                goto out_put;
 747        }
 748
 749        if (cmd != NULL) {
 750                switch (cmd->command) {
 751                case NFULNL_CFG_CMD_BIND:
 752                        if (inst) {
 753                                ret = -EBUSY;
 754                                goto out_put;
 755                        }
 756
 757                        inst = instance_create(group_num,
 758                                               NETLINK_CB(skb).pid);
 759                        if (IS_ERR(inst)) {
 760                                ret = PTR_ERR(inst);
 761                                goto out;
 762                        }
 763                        break;
 764                case NFULNL_CFG_CMD_UNBIND:
 765                        if (!inst) {
 766                                ret = -ENODEV;
 767                                goto out;
 768                        }
 769
 770                        instance_destroy(inst);
 771                        goto out;
 772                default:
 773                        ret = -ENOTSUPP;
 774                        break;
 775                }
 776        }
 777
 778        if (nfula[NFULA_CFG_MODE]) {
 779                struct nfulnl_msg_config_mode *params;
 780                params = nla_data(nfula[NFULA_CFG_MODE]);
 781
 782                if (!inst) {
 783                        ret = -ENODEV;
 784                        goto out;
 785                }
 786                nfulnl_set_mode(inst, params->copy_mode,
 787                                ntohl(params->copy_range));
 788        }
 789
 790        if (nfula[NFULA_CFG_TIMEOUT]) {
 791                __be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);
 792
 793                if (!inst) {
 794                        ret = -ENODEV;
 795                        goto out;
 796                }
 797                nfulnl_set_timeout(inst, ntohl(timeout));
 798        }
 799
 800        if (nfula[NFULA_CFG_NLBUFSIZ]) {
 801                __be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);
 802
 803                if (!inst) {
 804                        ret = -ENODEV;
 805                        goto out;
 806                }
 807                nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
 808        }
 809
 810        if (nfula[NFULA_CFG_QTHRESH]) {
 811                __be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);
 812
 813                if (!inst) {
 814                        ret = -ENODEV;
 815                        goto out;
 816                }
 817                nfulnl_set_qthresh(inst, ntohl(qthresh));
 818        }
 819
 820        if (nfula[NFULA_CFG_FLAGS]) {
 821                __be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);
 822
 823                if (!inst) {
 824                        ret = -ENODEV;
 825                        goto out;
 826                }
 827                nfulnl_set_flags(inst, ntohs(flags));
 828        }
 829
 830out_put:
 831        instance_put(inst);
 832out:
 833        return ret;
 834}
 835
 836static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
 837        [NFULNL_MSG_PACKET]     = { .call = nfulnl_recv_unsupp,
 838                                    .attr_count = NFULA_MAX, },
 839        [NFULNL_MSG_CONFIG]     = { .call = nfulnl_recv_config,
 840                                    .attr_count = NFULA_CFG_MAX,
 841                                    .policy = nfula_cfg_policy },
 842};
 843
 844static const struct nfnetlink_subsystem nfulnl_subsys = {
 845        .name           = "log",
 846        .subsys_id      = NFNL_SUBSYS_ULOG,
 847        .cb_count       = NFULNL_MSG_MAX,
 848        .cb             = nfulnl_cb,
 849};
 850
 851#ifdef CONFIG_PROC_FS
 852struct iter_state {
 853        unsigned int bucket;
 854};
 855
 856static struct hlist_node *get_first(struct iter_state *st)
 857{
 858        if (!st)
 859                return NULL;
 860
 861        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
 862                if (!hlist_empty(&instance_table[st->bucket]))
 863                        return instance_table[st->bucket].first;
 864        }
 865        return NULL;
 866}
 867
 868static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 869{
 870        h = h->next;
 871        while (!h) {
 872                if (++st->bucket >= INSTANCE_BUCKETS)
 873                        return NULL;
 874
 875                h = instance_table[st->bucket].first;
 876        }
 877        return h;
 878}
 879
 880static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
 881{
 882        struct hlist_node *head;
 883        head = get_first(st);
 884
 885        if (head)
 886                while (pos && (head = get_next(st, head)))
 887                        pos--;
 888        return pos ? NULL : head;
 889}
 890
 891static void *seq_start(struct seq_file *seq, loff_t *pos)
 892        __acquires(instances_lock)
 893{
 894        read_lock_bh(&instances_lock);
 895        return get_idx(seq->private, *pos);
 896}
 897
 898static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 899{
 900        (*pos)++;
 901        return get_next(s->private, v);
 902}
 903
 904static void seq_stop(struct seq_file *s, void *v)
 905        __releases(instances_lock)
 906{
 907        read_unlock_bh(&instances_lock);
 908}
 909
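     /* One line per instance in /proc/net/netfilter/nfnetlink_log:
      * group_num peer_pid qlen copy_mode copy_range flushtimeout use-count */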
 910static int seq_show(struct seq_file *s, void *v)
 911{
 912        const struct nfulnl_instance *inst = v;
 913
 914        return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
 915                          inst->group_num,
 916                          inst->peer_pid, inst->qlen,
 917                          inst->copy_mode, inst->copy_range,
 918                          inst->flushtimeout, atomic_read(&inst->use));
 919}
 920
 921static const struct seq_operations nful_seq_ops = {
 922        .start  = seq_start,
 923        .next   = seq_next,
 924        .stop   = seq_stop,
 925        .show   = seq_show,
 926};
 927
 928static int nful_open(struct inode *inode, struct file *file)
 929{
 930        return seq_open_private(file, &nful_seq_ops,
 931                        sizeof(struct iter_state));
 932}
 933
 934static const struct file_operations nful_file_ops = {
 935        .owner   = THIS_MODULE,
 936        .open    = nful_open,
 937        .read    = seq_read,
 938        .llseek  = seq_lseek,
 939        .release = seq_release_private,
 940};
 941
  942#endif /* CONFIG_PROC_FS */
 943
 944static int __init nfnetlink_log_init(void)
 945{
 946        int i, status = -ENOMEM;
 947
 948        for (i = 0; i < INSTANCE_BUCKETS; i++)
 949                INIT_HLIST_HEAD(&instance_table[i]);
 950
 951        /* it's not really all that important to have a random value, so
 952         * we can do this from the init function, even if there hasn't
 953         * been that much entropy yet */
 954        get_random_bytes(&hash_init, sizeof(hash_init));
 955
 956        netlink_register_notifier(&nfulnl_rtnl_notifier);
 957        status = nfnetlink_subsys_register(&nfulnl_subsys);
 958        if (status < 0) {
 959                printk(KERN_ERR "log: failed to create netlink socket\n");
 960                goto cleanup_netlink_notifier;
 961        }
 962
 963        status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
 964        if (status < 0) {
 965                printk(KERN_ERR "log: failed to register logger\n");
 966                goto cleanup_subsys;
 967        }
 968
 969#ifdef CONFIG_PROC_FS
 970        if (!proc_create("nfnetlink_log", 0440,
 971                         proc_net_netfilter, &nful_file_ops))
 972                goto cleanup_logger;
 973#endif
 974        return status;
 975
 976#ifdef CONFIG_PROC_FS
 977cleanup_logger:
 978        nf_log_unregister(&nfulnl_logger);
 979#endif
 980cleanup_subsys:
 981        nfnetlink_subsys_unregister(&nfulnl_subsys);
 982cleanup_netlink_notifier:
 983        netlink_unregister_notifier(&nfulnl_rtnl_notifier);
 984        return status;
 985}
 986
 987static void __exit nfnetlink_log_fini(void)
 988{
 989        nf_log_unregister(&nfulnl_logger);
 990#ifdef CONFIG_PROC_FS
 991        remove_proc_entry("nfnetlink_log", proc_net_netfilter);
 992#endif
 993        nfnetlink_subsys_unregister(&nfulnl_subsys);
 994        netlink_unregister_notifier(&nfulnl_rtnl_notifier);
 995}
 996
 997MODULE_DESCRIPTION("netfilter userspace logging");
 998MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 999MODULE_LICENSE("GPL");
1000MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
1001
1002module_init(nfnetlink_log_init);
1003module_exit(nfnetlink_log_fini);
1004