linux/net/sched/act_nat.c
/*
 * Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
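/*
 * Descriptive note: this action performs stateless 1:1 address translation
 * on matched packets.  With TCA_NAT_FLAG_EGRESS set the source address is
 * rewritten; without it the destination address is rewritten.  Only
 * addresses that match old_addr under the configured mask are translated,
 * host bits outside the mask are preserved, and the IP/TCP/UDP/ICMP
 * checksums are adjusted accordingly.  Unlike netfilter NAT, no connection
 * state is kept.
 */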
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>


static unsigned int nat_net_id;
static struct tc_action_ops act_nat_ops;

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
        [TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
};

static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        struct tc_action **a, int ovr, int bind,
                        struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, nat_net_id);
        struct nlattr *tb[TCA_NAT_MAX + 1];
        struct tc_nat *parm;
        int ret = 0, err;
        struct tcf_nat *p;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_NAT_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_NAT_PARMS]);

        if (!tcf_idr_check(tn, parm->index, a, bind)) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_nat_ops, bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind)
                        return 0;
                tcf_idr_release(*a, bind);
                if (!ovr)
                        return -EEXIST;
        }
        p = to_tcf_nat(*a);

        spin_lock_bh(&p->tcf_lock);
        p->old_addr = parm->old_addr;
        p->new_addr = parm->new_addr;
        p->mask = parm->mask;
        p->flags = parm->flags;

        p->tcf_action = parm->action;
        spin_unlock_bh(&p->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);

        return ret;
}
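/*
 * Illustrative userspace configuration (a sketch using iproute2's "nat"
 * action syntax; the device name and classifier match are placeholders):
 *
 *   # rewrite sources 10.0.0.0/24 -> 192.168.1.0/24 on egress
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *       match ip src 10.0.0.0/24 \
 *       action nat egress 10.0.0.0/24 192.168.1.0
 *
 * The OLD/NEW addresses and mask arrive here as a struct tc_nat in
 * TCA_NAT_PARMS and are copied into struct tcf_nat by tcf_nat_init() above.
 */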

static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                   struct tcf_result *res)
{
        struct tcf_nat *p = to_tcf_nat(a);
        struct iphdr *iph;
        __be32 old_addr;
        __be32 new_addr;
        __be32 mask;
        __be32 addr;
        int egress;
        int action;
        int ihl;
        int noff;

        spin_lock(&p->tcf_lock);

        tcf_lastuse_update(&p->tcf_tm);
        old_addr = p->old_addr;
        new_addr = p->new_addr;
        mask = p->mask;
        egress = p->flags & TCA_NAT_FLAG_EGRESS;
        action = p->tcf_action;

        bstats_update(&p->tcf_bstats, skb);

        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        noff = skb_network_offset(skb);
        if (!pskb_may_pull(skb, sizeof(*iph) + noff))
                goto drop;

        iph = ip_hdr(skb);

        if (egress)
                addr = iph->saddr;
        else
                addr = iph->daddr;

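        /*
         * The packet is translated only if the selected address matches
         * old_addr in the bits covered by mask; bits outside the mask are
         * carried over from the original address.  E.g. (illustrative
         * values) old 10.0.0.0, new 192.168.1.0, mask 255.255.255.0 maps
         * 10.0.0.7 to 192.168.1.7.
         */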
        if (!((old_addr ^ addr) & mask)) {
                if (skb_try_make_writable(skb, sizeof(*iph) + noff))
                        goto drop;

                new_addr &= mask;
                new_addr |= addr & ~mask;

                /* Rewrite IP header */
                iph = ip_hdr(skb);
                if (egress)
                        iph->saddr = new_addr;
                else
                        iph->daddr = new_addr;

                csum_replace4(&iph->check, addr, new_addr);
        } else if ((iph->frag_off & htons(IP_OFFSET)) ||
                   iph->protocol != IPPROTO_ICMP) {
                goto out;
        }

        ihl = iph->ihl * 4;

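        /*
         * TCP and UDP checksums cover the addresses via the pseudo-header,
         * so they must be adjusted as well.  Non-first fragments carry no
         * transport header: for them the switch key below evaluates to 0
         * and they fall through to the default case untouched.
         */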
        /* It would be nice to share code with stateful NAT. */
        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_TCP:
        {
                struct tcphdr *tcph;

                if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
                    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
                        goto drop;

                tcph = (void *)(skb_network_header(skb) + ihl);
                inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
                                         true);
                break;
        }
        case IPPROTO_UDP:
        {
                struct udphdr *udph;

                if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
                    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
                        goto drop;

                udph = (void *)(skb_network_header(skb) + ihl);
                if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                        inet_proto_csum_replace4(&udph->check, skb, addr,
                                                 new_addr, true);
                        if (!udph->check)
                                udph->check = CSUM_MANGLED_0;
                }
                break;
        }
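        /*
         * For ICMP errors the quoted packet inside the error travelled in
         * the opposite direction, so the address to translate is the inner
         * daddr on egress and the inner saddr on ingress.  The ICMP
         * checksum has no pseudo-header, hence the final "false" argument
         * to inet_proto_csum_replace4() below.
         */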
        case IPPROTO_ICMP:
        {
                struct icmphdr *icmph;

                if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
                        goto drop;

                icmph = (void *)(skb_network_header(skb) + ihl);

                if ((icmph->type != ICMP_DEST_UNREACH) &&
                    (icmph->type != ICMP_TIME_EXCEEDED) &&
                    (icmph->type != ICMP_PARAMETERPROB))
                        break;

                if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
                                        noff))
                        goto drop;

                icmph = (void *)(skb_network_header(skb) + ihl);
                iph = (void *)(icmph + 1);
                if (egress)
                        addr = iph->daddr;
                else
                        addr = iph->saddr;

                if ((old_addr ^ addr) & mask)
                        break;

                if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
                                          sizeof(*iph) + noff))
                        goto drop;

                icmph = (void *)(skb_network_header(skb) + ihl);
                iph = (void *)(icmph + 1);

                new_addr &= mask;
                new_addr |= addr & ~mask;

                /* XXX Fix up the inner checksums. */
                if (egress)
                        iph->daddr = new_addr;
                else
                        iph->saddr = new_addr;

                inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
                                         false);
                break;
        }
        default:
                break;
        }

out:
        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}

static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
                        int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_nat *p = to_tcf_nat(a);
        struct tc_nat opt = {
                .old_addr = p->old_addr,
                .new_addr = p->new_addr,
                .mask     = p->mask,
                .flags    = p->flags,

                .index    = p->tcf_index,
                .action   = p->tcf_action,
                .refcnt   = p->tcf_refcnt - ref,
                .bindcnt  = p->tcf_bindcnt - bind,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
                          struct netlink_callback *cb, int type,
                          const struct tc_action_ops *ops,
                          struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, nat_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
                          struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, nat_net_id);

        return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_nat_ops = {
        .kind           =       "nat",
        .type           =       TCA_ACT_NAT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_nat,
        .dump           =       tcf_nat_dump,
        .init           =       tcf_nat_init,
        .walk           =       tcf_nat_walker,
        .lookup         =       tcf_nat_search,
        .size           =       sizeof(struct tcf_nat),
};

static __net_init int nat_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, nat_net_id);

        return tc_action_net_init(tn, &act_nat_ops);
}

static void __net_exit nat_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, nat_net_id);
}

static struct pernet_operations nat_net_ops = {
        .init = nat_init_net,
        .exit_batch = nat_exit_net,
        .id   = &nat_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
        return tcf_register_action(&act_nat_ops, &nat_net_ops);
}

static void __exit nat_cleanup_module(void)
{
        tcf_unregister_action(&act_nat_ops, &nat_net_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);