linux/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/security.h>
#include <net/net_namespace.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <linux/rculist_nulls.h>
#include <linux/export.h>

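/*
 * Per-open iterator state for /proc/net/ip_conntrack: the seq_net_private
 * header gives access to the network namespace, and 'bucket' tracks which
 * slot of the conntrack hash table the walk is currently in.
 */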
struct ct_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

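/*
 * Return the first entry of the first non-empty conntrack hash bucket,
 * or NULL if the whole table is empty. Runs under rcu_read_lock(), which
 * is taken in ct_seq_start() below.
 */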
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;
        struct hlist_nulls_node *n;

        for (st->bucket = 0;
             st->bucket < net->ct.htable_size;
             st->bucket++) {
                n = rcu_dereference(
                        hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;
        }
        return NULL;
}

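/*
 * Advance to the next entry, crossing bucket boundaries as needed. The
 * chains are nulls-terminated: if the nulls value we hit does not match
 * the bucket we think we are walking, the entry was moved to another
 * chain under us, so the current bucket is rescanned from its head.
 */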
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
                                      struct hlist_nulls_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_iter_state *st = seq->private;

        head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= net->ct.htable_size)
                                return NULL;
                }
                head = rcu_dereference(
                        hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
        }
        return head;
}

static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_nulls_node *head = ct_get_first(seq);

        if (head)
                while (pos && (head = ct_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

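/*
 * With CONFIG_NF_CONNTRACK_SECMARK the security context bound to the
 * connection's secmark is printed as "secctx=..."; otherwise this is a
 * no-op stub so that ct_seq_show() needs no ifdefs of its own.
 */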
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
        int ret;
        u32 len;
        char *secctx;

        ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
        if (ret)
                return;

        seq_printf(s, "secctx=%s ", secctx);

        security_release_secctx(secctx, len);
}
#else
static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
}
#endif

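/*
 * Emit one line per conntrack entry in the legacy ip_conntrack format:
 * protocol name and number, remaining timeout in seconds, per-protocol
 * state, both tuples with optional accounting, and the status/mark/use
 * trailers. Only the ORIGINAL direction of IPv4 entries is shown, and a
 * reference is held on the entry for the duration of the dump.
 */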
static int ct_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_tuple_hash *hash = v;
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
        int ret = 0;

        NF_CT_ASSERT(ct);
        if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                return 0;

        /* we only want to print DIR_ORIGINAL */
        if (NF_CT_DIRECTION(hash))
                goto release;
        if (nf_ct_l3num(ct) != AF_INET)
                goto release;

        l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
        NF_CT_ASSERT(l3proto);
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        NF_CT_ASSERT(l4proto);

        ret = -ENOSPC;
        seq_printf(s, "%-8s %u %ld ",
                   l4proto->name, nf_ct_protonum(ct),
                   timer_pending(&ct->timeout)
                   ? (long)(ct->timeout.expires - jiffies)/HZ : 0);

        if (l4proto->print_conntrack)
                l4proto->print_conntrack(s, ct);

        if (seq_has_overflowed(s))
                goto release;

        print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                    l3proto, l4proto);

        if (seq_has_overflowed(s))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
                goto release;

        if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
                seq_printf(s, "[UNREPLIED] ");

        print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                    l3proto, l4proto);

        if (seq_has_overflowed(s))
                goto release;

        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                goto release;

        if (test_bit(IPS_ASSURED_BIT, &ct->status))
                seq_printf(s, "[ASSURED] ");

#ifdef CONFIG_NF_CONNTRACK_MARK
        seq_printf(s, "mark=%u ", ct->mark);
#endif

        ct_show_secctx(s, ct);

        seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));

        if (seq_has_overflowed(s))
                goto release;

        ret = 0;
release:
        nf_ct_put(ct);
        return ret;
}

static const struct seq_operations ct_seq_ops = {
        .start = ct_seq_start,
        .next  = ct_seq_next,
        .stop  = ct_seq_stop,
        .show  = ct_seq_show
};

static int ct_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_seq_ops,
                            sizeof(struct ct_iter_state));
}

static const struct file_operations ct_file_ops = {
        .owner   = THIS_MODULE,
        .open    = ct_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};

/* expects */
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

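/*
 * Expectation dump for /proc/net/ip_conntrack_expect: find the first entry
 * in the first non-empty bucket of the expectation hash. Unlike the
 * conntrack table, these are plain RCU hlists, so NULL marks a chain end.
 */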
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(
                        hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

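/* Step to the next expectation, moving on to later buckets when a chain runs out. */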
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(
                        hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

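/*
 * One line per IPv4 expectation: remaining timeout in seconds (or "-" if
 * no timer is set up), the expected protocol number and the expected tuple.
 */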
static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *n = v;

        exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (exp->tuple.src.l3num != AF_INET)
                return 0;

        if (exp->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&exp->timeout)
                           ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");

        seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

        print_tuple(s, &exp->tuple,
                    __nf_ct_l3proto_find(exp->tuple.src.l3num),
                    __nf_ct_l4proto_find(exp->tuple.src.l3num,
                                         exp->tuple.dst.protonum));
        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations ip_exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};

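/*
 * /proc/net/stat/ip_conntrack: one row of per-CPU statistics per possible
 * CPU. *pos encodes "CPU index + 1", with 0 reserved for SEQ_START_TOKEN
 * so that the header line is printed first.
 */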
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }

        return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

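/*
 * Print the header row for SEQ_START_TOKEN, otherwise one row of hex
 * counters for this CPU. The first column is the global entry count and
 * is repeated on every row; the "icmp_error" column maps to st->error.
 */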
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
        struct net *net = seq_file_net(seq);
        unsigned int nr_conntracks = atomic_read(&net->ct.count);
        const struct ip_conntrack_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete search_restart\n");
                return 0;
        }

        seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
                        "%08x %08x %08x %08x %08x  %08x %08x %08x %08x\n",
                   nr_conntracks,
                   st->searched,
                   st->found,
                   st->new,
                   st->invalid,
                   st->ignore,
                   st->delete,
                   st->delete_list,
                   st->insert,
                   st->insert_failed,
                   st->drop,
                   st->early_drop,
                   st->error,

                   st->expect_new,
                   st->expect_create,
                   st->expect_delete,
                   st->search_restart
                );
        return 0;
}

static const struct seq_operations ct_cpu_seq_ops = {
        .start  = ct_cpu_seq_start,
        .next   = ct_cpu_seq_next,
        .stop   = ct_cpu_seq_stop,
        .show   = ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &ct_cpu_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations ct_cpu_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = ct_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};

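/*
 * Per-namespace setup: register the three compat proc files, unwinding
 * the already-created entries on failure. Any proc_create() failure is
 * reported as -ENOMEM.
 */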
static int __net_init ip_conntrack_net_init(struct net *net)
{
        struct proc_dir_entry *proc, *proc_exp, *proc_stat;

        proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
        if (!proc)
                goto err1;

        proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
                               &ip_exp_file_ops);
        if (!proc_exp)
                goto err2;

        proc_stat = proc_create("ip_conntrack", S_IRUGO,
                                net->proc_net_stat, &ct_cpu_seq_fops);
        if (!proc_stat)
                goto err3;
        return 0;

err3:
        remove_proc_entry("ip_conntrack_expect", net->proc_net);
err2:
        remove_proc_entry("ip_conntrack", net->proc_net);
err1:
        return -ENOMEM;
}

static void __net_exit ip_conntrack_net_exit(struct net *net)
{
        remove_proc_entry("ip_conntrack", net->proc_net_stat);
        remove_proc_entry("ip_conntrack_expect", net->proc_net);
        remove_proc_entry("ip_conntrack", net->proc_net);
}

static struct pernet_operations ip_conntrack_net_ops = {
        .init = ip_conntrack_net_init,
        .exit = ip_conntrack_net_exit,
};

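/*
 * Module entry points: the compat proc files are created and torn down
 * per network namespace through the pernet operations registered here.
 */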
int __init nf_conntrack_ipv4_compat_init(void)
{
        return register_pernet_subsys(&ip_conntrack_net_ops);
}

void __exit nf_conntrack_ipv4_compat_fini(void)
{
        unregister_pernet_subsys(&ip_conntrack_net_ops);
}