#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>

#include <linux/tcp.h>
#include <linux/udp.h>

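/* Tear down the flow and bail out to the classic forwarding path when a
 * TCP FIN or RST is seen, so that conntrack handles the connection shutdown.
 */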
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

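/* Layer 4 checksum fixups after an IPv4 address has been rewritten. A UDP
 * checksum of zero means "no checksum" and is left alone unless the checksum
 * is still to be computed by hardware (CHECKSUM_PARTIAL).
 */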
static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);

	return 0;
}

static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				  unsigned int thoff, __be32 addr,
				  __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

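/* IPv4 SNAT/DNAT: replace the address with the one recorded in the opposite
 * direction's tuple, then fix up the IP header and transport checksums.
 */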
static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

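/* Apply the SNAT/DNAT mangling recorded in the flow entry to an IPv4
 * packet: ports first, then addresses, for the given tuple direction.
 */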
static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	struct iphdr *iph = ip_hdr(skb);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;

	return 0;
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

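/* Build the IPv4 lookup tuple from the packet. Fragments, packets carrying
 * IP options and protocols other than TCP/UDP are left to the slow path.
 */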
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -1;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return -1;

	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;

	return 0;
}

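/* A packet larger than the cached path MTU cannot be offloaded; for GSO
 * packets, check that the resulting segments will fit instead.
 */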
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

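/* IPv4 fast path: packets matching an offloaded flow are NATed if needed,
 * get their TTL decremented and are transmitted directly via neigh_xmit(),
 * bypassing the classic forwarding path. Anything that does not match, is
 * oversized or carries TCP FIN/RST falls back to the standard path.
 */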
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	unsigned int thoff;
	struct iphdr *iph;
	__be32 nexthop;

	if (skb->protocol != htons(ETH_P_IP))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
	if (!outdev)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)) &&
	    (ip_hdr(skb)->frag_off & htons(IP_DF)) != 0)
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*iph)))
		return NF_DROP;

	thoff = ip_hdr(skb)->ihl * 4;
	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
		return NF_ACCEPT;

	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
	    nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	iph = ip_hdr(skb);
	ip_decrease_ttl(iph);

	skb->dev = outdev;
	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

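/* IPv6 counterparts of the layer 4 checksum fixups above, applied after an
 * IPv6 address has been rewritten.
 */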
static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);

	return 0;
}

static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				    unsigned int thoff, struct in6_addr *addr,
				    struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

static int nf_flow_snat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

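/* Apply the SNAT/DNAT mangling recorded in the flow entry to an IPv6
 * packet: ports first, then addresses, for the given tuple direction.
 */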
static int nf_flow_nat_ipv6(const struct flow_offload *flow,
			    struct sk_buff *skb,
			    enum flow_offload_tuple_dir dir)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	unsigned int thoff = sizeof(*ip6h);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;

	return 0;
}

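/* Build the IPv6 lookup tuple from the packet; only plain TCP/UDP without
 * extension headers is handled here, everything else takes the slow path.
 */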
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -1;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return -1;

	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;

	return 0;
}

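/* IPv6 fast path: mirrors nf_flow_offload_ip_hook(), decrementing the hop
 * limit instead of the TTL and resolving the next hop via neighbour
 * discovery.
 */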
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;

	if (skb->protocol != htons(ETH_P_IPV6))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
	if (!outdev)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
				sizeof(*ip6h)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*ip6h)))
		return NF_DROP;

	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	ip6h = ipv6_hdr(skb);
	ip6h->hop_limit--;

	skb->dev = outdev;
	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);