// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the complement of BSD Sockets.
 *
 *		The Internet Protocol (IP) module.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/indirect_call_wrapper.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}

INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
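/* Deliver skb to the transport handler registered for @protocol, under RCU.
 * A negative return from the handler requests resubmission with the new
 * protocol number encoded as -ret (used e.g. after IPsec decapsulation).
 */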
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
	const struct net_protocol *ipprot;
	int raw, ret;

resubmit:
	raw = raw_local_deliver(skb, protocol);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot) {
		if (!ipprot->no_policy) {
			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				kfree_skb(skb);
				return;
			}
			nf_reset_ct(skb);
		}
		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
				      skb);
		if (ret < 0) {
			protocol = -ret;
			goto resubmit;
		}
		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PROT_UNREACH, 0);
			}
			kfree_skb(skb);
		} else {
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
}

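/* Strip the IP header and hand the payload to the transport layer. */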
static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
	rcu_read_unlock();

	return 0;
}

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}

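/* Parse and validate IP options; returns true if the packet must be dropped. */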
static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_options *opt;
	const struct iphdr *iph;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb, dev))
			goto drop;
	}

	return false;
drop:
	return true;
}

static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
			    const struct sk_buff *hint)
{
	return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
	       ip_hdr(hint)->tos == iph->tos;
}

INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
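/* Core of the receive-finish step: attach a route to the skb (via the batch
 * hint, early demux or a full FIB lookup), handle IP options and account
 * multicast/broadcast statistics.
 */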
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
			      struct sk_buff *skb, struct net_device *dev,
			      const struct sk_buff *hint)
{
	const struct iphdr *iph = ip_hdr(skb);
	int (*edemux)(struct sk_buff *skb);
	struct rtable *rt;
	int err;

	if (ip_can_use_hint(skb, iph, hint)) {
		err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
					dev, hint);
		if (unlikely(err))
			goto drop_error;
	}

	if (net->ipv4.sysctl_ip_early_demux &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
			err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
					      udp_v4_early_demux, skb);
			if (unlikely(err))
				goto drop_error;
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
			goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}

static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}

/*
 * 	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NULL;
}

/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}

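/* List-receive path: batch packets that share a device, netns and dst so
 * the route lookup and netfilter traversal costs are amortised across the
 * whole batch.
 */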
static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

static struct sk_buff *ip_extract_route_hint(const struct net *net,
					     struct sk_buff *skb, int rt_type)
{
	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
		return NULL;

	return skb;
}

static void ip_list_rcv_finish(struct net *net, struct sock *sk,
			       struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			hint = ip_extract_route_hint(net, skb,
					       ((struct rtable *)dst)->rt_type);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new, empty sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}

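/* Run the PRE_ROUTING netfilter hook once over the sublist, then finish
 * reception for whatever the hook let through.
 */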
static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip_rcv_core(skb, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}

	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip_sublist_rcv(&sublist, curr_dev, curr_net);
}