1
2
3
4
5
6
7
8
9#include <linux/types.h>
10#include <linux/module.h>
11#include <linux/atomic.h>
12#include <linux/inetdevice.h>
13#include <linux/ip.h>
14#include <linux/timer.h>
15#include <linux/netfilter.h>
16#include <net/protocol.h>
17#include <net/ip.h>
18#include <net/checksum.h>
19#include <net/route.h>
20#include <linux/netfilter_ipv4.h>
21#include <linux/netfilter/x_tables.h>
22#include <net/netfilter/nf_nat.h>
23#include <net/netfilter/ipv4/nf_nat_masquerade.h>
24
25unsigned int
26nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
27 const struct nf_nat_range *range,
28 const struct net_device *out)
29{
30 struct nf_conn *ct;
31 struct nf_conn_nat *nat;
32 enum ip_conntrack_info ctinfo;
33 struct nf_nat_range newrange;
34 const struct rtable *rt;
35 __be32 newsrc, nh;
36
37 NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING);
38
39 ct = nf_ct_get(skb, &ctinfo);
40 nat = nfct_nat(ct);
41
42 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
43 ctinfo == IP_CT_RELATED_REPLY));
44
45
46
47
48 if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
49 return NF_ACCEPT;
50
51 rt = skb_rtable(skb);
52 nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
53 newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE);
54 if (!newsrc) {
55 pr_info("%s ate my IP address\n", out->name);
56 return NF_DROP;
57 }
58
59 nat->masq_index = out->ifindex;
60
61
62 memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
63 memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
64 newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
65 newrange.min_addr.ip = newsrc;
66 newrange.max_addr.ip = newsrc;
67 newrange.min_proto = range->min_proto;
68 newrange.max_proto = range->max_proto;
69
70
71 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
72}
73EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
74
75static int device_cmp(struct nf_conn *i, void *ifindex)
76{
77 const struct nf_conn_nat *nat = nfct_nat(i);
78
79 if (!nat)
80 return 0;
81 if (nf_ct_l3num(i) != NFPROTO_IPV4)
82 return 0;
83 return nat->masq_index == (int)(long)ifindex;
84}
85
86static int masq_device_event(struct notifier_block *this,
87 unsigned long event,
88 void *ptr)
89{
90 const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
91 struct net *net = dev_net(dev);
92
93 if (event == NETDEV_DOWN) {
94
95
96
97
98 NF_CT_ASSERT(dev->ifindex != 0);
99
100 nf_ct_iterate_cleanup(net, device_cmp,
101 (void *)(long)dev->ifindex, 0, 0);
102 }
103
104 return NOTIFY_DONE;
105}
106
/* Inet address notifier: treat an address change on a device the same
 * way as the device itself changing state, by forwarding to
 * masq_device_event() with a synthesized netdev notifier info.
 */
static int masq_inet_event(struct notifier_block *this,
			   unsigned long event,
			   void *ptr)
{
	struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
	struct netdev_notifier_info info;

	/* If the in_device is dead the whole device is going down and
	 * masq_device_event() will (or already did) handle the cleanup
	 * via the netdevice notifier - nothing to do here.
	 * NOTE(review): original comment stripped; this rationale is
	 * inferred from the idev->dead check - confirm against the
	 * notifier ordering in the tree this came from.
	 */
	if (idev->dead)
		return NOTIFY_DONE;

	netdev_notifier_info_init(&info, idev->dev);
	return masq_device_event(this, event, &info);
}
125
/* Flush masqueraded conntracks on interface down. */
static struct notifier_block masq_dev_notifier = {
	.notifier_call = masq_device_event,
};

/* Flush masqueraded conntracks on address removal. */
static struct notifier_block masq_inet_notifier = {
	.notifier_call = masq_inet_event,
};

/* Number of users (e.g. iptables/nft masquerade modules) that have
 * requested the notifiers; they are registered only for the first user
 * and unregistered when the last one goes away.
 */
static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
135
/* Register the device/address notifiers on first use; later callers
 * only bump the refcount.
 */
void nf_nat_masquerade_ipv4_register_notifier(void)
{
	/* check if the notifier was already set */
	if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
		return;

	/* NOTE(review): there is a window between the refcount check and
	 * the register calls below; a concurrent caller could return
	 * before registration completes.  Callers are presumed to be
	 * serialized (module init paths) - confirm, or protect with a
	 * mutex as later upstream kernels do.
	 */
	/* Register for device down reports */
	register_netdevice_notifier(&masq_dev_notifier);
	/* Register IP address change reports */
	register_inetaddr_notifier(&masq_inet_notifier);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
147EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
148
/* Drop one user's reference; unregister both notifiers when the last
 * user goes away.  Must pair with a prior call to
 * nf_nat_masquerade_ipv4_register_notifier().
 */
void nf_nat_masquerade_ipv4_unregister_notifier(void)
{
	/* check if the notifier still has clients */
	if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
		return;

	unregister_netdevice_notifier(&masq_dev_notifier);
	unregister_inetaddr_notifier(&masq_inet_notifier);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
158EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
159
160MODULE_LICENSE("GPL");
161MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
162