1
2
3
4
5
6
7
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netlink.h>
13#include <linux/netfilter.h>
14#include <linux/netfilter/nf_tables.h>
15#include <net/netfilter/nf_tables.h>
16#include <net/netfilter/nft_reject.h>
17#include <net/netfilter/ipv4/nf_reject.h>
18#include <net/netfilter/ipv6/nf_reject.h>
19#include <linux/ip.h>
20#include <net/ip.h>
21#include <net/ip6_checksum.h>
22#include <linux/netfilter_bridge.h>
23#include <linux/netfilter_ipv6.h>
24#include "../br_private.h"
25
26static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
27 struct sk_buff *nskb)
28{
29 struct ethhdr *eth;
30
31 eth = skb_push(nskb, ETH_HLEN);
32 skb_reset_mac_header(nskb);
33 ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
34 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
35 eth->h_proto = eth_hdr(oldskb)->h_proto;
36 skb_pull(nskb, ETH_HLEN);
37}
38
39static int nft_bridge_iphdr_validate(struct sk_buff *skb)
40{
41 struct iphdr *iph;
42 u32 len;
43
44 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
45 return 0;
46
47 iph = ip_hdr(skb);
48 if (iph->ihl < 5 || iph->version != 4)
49 return 0;
50
51 len = ntohs(iph->tot_len);
52 if (skb->len < len)
53 return 0;
54 else if (len < (iph->ihl*4))
55 return 0;
56
57 if (!pskb_may_pull(skb, iph->ihl*4))
58 return 0;
59
60 return 1;
61}
62
63
64
65
/* Send a TCP RST in reply to @oldskb, a TCP packet received on a bridge
 * port.  The reset is injected directly back out of the ingress bridge
 * port via br_forward() rather than through the IPv4 routing stack.
 */
static void nft_reject_br_send_v4_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	/* NULL means "do not reply" (e.g. header unusable) — presumably
	 * also covers packets that already carry RST; see
	 * nf_reject_ip_tcphdr_get() for the exact conditions. */
	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   net->ipv4.sysctl_ip_default_ttl);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	/* Headers are complete now: fix up total length and IP checksum. */
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
99
/* Send an ICMP destination-unreachable (with @code) in reply to @oldskb,
 * injected directly on the bridge port the packet arrived on.
 */
static void nft_reject_br_send_v4_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	__wsum csum;
	u8 proto;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	/* Never reply to non-first fragments. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* Quote at most 536 bytes of the offending packet in the reply. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Drop any padding beyond the IP total length. */
	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return;

	/* nf_ip_checksum() is given the real protocol only for TCP/UDP;
	 * everything else is passed as 0. */
	if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
	    ip_hdr(oldskb)->protocol == IPPROTO_UDP)
		proto = ip_hdr(oldskb)->protocol;
	else
		proto = 0;

	/* Do not reply to packets with a broken transport checksum. */
	if (!skb_csum_unnecessary(oldskb) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   net->ipv4.sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	/* Append the quoted original packet, then checksum ICMP header
	 * plus payload. */
	skb_put_data(nskb, skb_network_header(oldskb), len);

	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
164
165static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
166{
167 struct ipv6hdr *hdr;
168 u32 pkt_len;
169
170 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
171 return 0;
172
173 hdr = ipv6_hdr(skb);
174 if (hdr->version != 6)
175 return 0;
176
177 pkt_len = ntohs(hdr->payload_len);
178 if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
179 return 0;
180
181 return 1;
182}
183
/* Send a TCP RST in reply to @oldskb, an IPv6 TCP packet received on a
 * bridge port.  Injected directly on the ingress port via br_forward().
 */
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	const struct tcphdr *oth;
	struct tcphdr _oth;
	unsigned int otcplen;
	struct ipv6hdr *nip6h;

	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* NULL means "do not reply"; otcplen receives the original TCP
	 * length — see nf_reject_ip6_tcphdr_get() for exact semantics. */
	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				     net->ipv6.devconf_all->hop_limit);
	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
	/* IPv6 payload_len excludes the fixed 40-byte IPv6 header. */
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
217
/* Verify the transport checksum of @skb before replying to it.  Returns
 * true when the checksum is already known good or verifies correctly;
 * false on trim failure, bad extension headers, fragments with a
 * non-zero offset, or a checksum mismatch.
 */
static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	int thoff;
	__be16 fo;
	u8 proto = ip6h->nexthdr;

	if (skb_csum_unnecessary(skb))
		return true;

	/* Trim any padding beyond the declared payload length.  The trim
	 * is skipped when payload_len is 0 — NOTE(review): presumably the
	 * jumbogram case; confirm. */
	if (ip6h->payload_len &&
	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
		return false;

	/* pskb_trim_rcsum() may reallocate header memory; reload pointer. */
	ip6h = ipv6_hdr(skb);
	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
	/* Reject bad/out-of-range transport offsets and non-first fragments
	 * (fragment offset bits set in fo). */
	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
		return false;

	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
239
/* Send an ICMPv6 destination-unreachable (with @code) in reply to
 * @oldskb, injected directly on the bridge port the packet arrived on.
 */
static void nft_reject_br_send_v6_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct ipv6hdr *nip6h;
	struct icmp6hdr *icmp6h;
	unsigned int len;

	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* Quote at most 1220 bytes of the offending packet in the reply. */
	len = min_t(unsigned int, 1220, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Do not reply to packets with a broken transport checksum. */
	if (!reject6_br_csum_ok(oldskb, hook))
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
				     net->ipv6.devconf_all->hop_limit);

	skb_reset_transport_header(nskb);
	icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
	icmp6h->icmp6_code = code;

	/* Append the quoted original packet, fix up payload_len, then
	 * compute the ICMPv6 checksum over the pseudo-header + message. */
	skb_put_data(nskb, skb_network_header(oldskb), len);
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	icmp6h->icmp6_cksum =
		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
				nskb->len - sizeof(struct ipv6hdr),
				IPPROTO_ICMPV6,
				csum_partial(icmp6h,
					     nskb->len - sizeof(struct ipv6hdr),
					     0));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
293
/* Expression evaluation: emit the configured reject packet (ICMP(v6)
 * unreachable or TCP RST) for IPv4/IPv6 frames, then unconditionally
 * drop the original packet.
 */
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_regs *regs,
				   const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);
	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;

	/* Never send a reject reply to broadcast/multicast frames. */
	if (is_broadcast_ether_addr(dest) ||
	    is_multicast_ether_addr(dest))
		goto out;

	switch (eth_hdr(pkt->skb)->h_proto) {
	case htons(ETH_P_IP):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Translate the family-neutral code to ICMP. */
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmp_code(priv->icmp_code));
			break;
		}
		break;
	case htons(ETH_P_IPV6):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Translate the family-neutral code to ICMPv6. */
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmpv6_code(priv->icmp_code));
			break;
		}
		break;
	default:
		/* No reject message defined for other ethertypes; the
		 * packet is still dropped below. */
		break;
	}
out:
	regs->verdict.code = NF_DROP;
}
355
356static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
357 const struct nft_expr *expr,
358 const struct nft_data **data)
359{
360 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
361 (1 << NF_BR_LOCAL_IN));
362}
363
364static int nft_reject_bridge_init(const struct nft_ctx *ctx,
365 const struct nft_expr *expr,
366 const struct nlattr * const tb[])
367{
368 struct nft_reject *priv = nft_expr_priv(expr);
369 int icmp_code;
370
371 if (tb[NFTA_REJECT_TYPE] == NULL)
372 return -EINVAL;
373
374 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
375 switch (priv->type) {
376 case NFT_REJECT_ICMP_UNREACH:
377 case NFT_REJECT_ICMPX_UNREACH:
378 if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
379 return -EINVAL;
380
381 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
382 if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
383 icmp_code > NFT_REJECT_ICMPX_MAX)
384 return -EINVAL;
385
386 priv->icmp_code = icmp_code;
387 break;
388 case NFT_REJECT_TCP_RST:
389 break;
390 default:
391 return -EINVAL;
392 }
393 return 0;
394}
395
396static int nft_reject_bridge_dump(struct sk_buff *skb,
397 const struct nft_expr *expr)
398{
399 const struct nft_reject *priv = nft_expr_priv(expr);
400
401 if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
402 goto nla_put_failure;
403
404 switch (priv->type) {
405 case NFT_REJECT_ICMP_UNREACH:
406 case NFT_REJECT_ICMPX_UNREACH:
407 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
408 goto nla_put_failure;
409 break;
410 default:
411 break;
412 }
413
414 return 0;
415
416nla_put_failure:
417 return -1;
418}
419
/* Forward declaration: the ops table below references the type, which in
 * turn references the ops. */
static struct nft_expr_type nft_reject_bridge_type;

/* Expression callbacks wired into the nf_tables core. */
static const struct nft_expr_ops nft_reject_bridge_ops = {
	.type		= &nft_reject_bridge_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval		= nft_reject_bridge_eval,
	.init		= nft_reject_bridge_init,
	.dump		= nft_reject_bridge_dump,
	.validate	= nft_reject_bridge_validate,
};
429
/* Registration record for the "reject" expression in the bridge family. */
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
	.family		= NFPROTO_BRIDGE,
	.name		= "reject",
	.ops		= &nft_reject_bridge_ops,
	.policy		= nft_reject_policy,	/* shared with other families */
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};
438
/* Module entry point: register the expression type with nf_tables. */
static int __init nft_reject_bridge_module_init(void)
{
	return nft_register_expr(&nft_reject_bridge_type);
}
443
/* Module exit point: unregister the expression type. */
static void __exit nft_reject_bridge_module_exit(void)
{
	nft_unregister_expr(&nft_reject_bridge_type);
}
448
module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
/* Allow automatic module loading when a bridge "reject" expression is used. */
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
455