1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65#include <linux/module.h>
66#include <linux/types.h>
67#include <linux/jiffies.h>
68#include <linux/kernel.h>
69#include <linux/fcntl.h>
70#include <linux/socket.h>
71#include <linux/in.h>
72#include <linux/inet.h>
73#include <linux/inetdevice.h>
74#include <linux/netdevice.h>
75#include <linux/string.h>
76#include <linux/netfilter_ipv4.h>
77#include <linux/slab.h>
78#include <net/snmp.h>
79#include <net/ip.h>
80#include <net/route.h>
81#include <net/protocol.h>
82#include <net/icmp.h>
83#include <net/tcp.h>
84#include <net/udp.h>
85#include <net/raw.h>
86#include <linux/skbuff.h>
87#include <net/sock.h>
88#include <linux/errno.h>
89#include <linux/timer.h>
90#include <linux/init.h>
91#include <asm/system.h>
92#include <asm/uaccess.h>
93#include <net/checksum.h>
94#include <net/xfrm.h>
95#include <net/inet_common.h>
96
97
98
99
100
/*
 *	Build xmit assembly blocks: everything needed to construct and
 *	transmit one ICMP message (reply or error).
 */
struct icmp_bxm {
	struct sk_buff *skb;		/* packet we are replying to / quoting */
	int offset;			/* offset into skb of the data to quote */
	int data_len;			/* number of bytes of skb data to copy */

	struct {
		struct icmphdr icmph;	/* ICMP header to emit */
		__be32 times[3];	/* timestamp fields (ICMP_TIMESTAMPREPLY) */
	} data;
	int head_len;			/* bytes of ->data used as the header */
	struct ip_options replyopts;	/* IP options echoed back to the sender */
	unsigned char optbuf[40];	/* backing store for replyopts (40 = max IP options) */
};
114
115
116
117
/*
 *	Map ICMP_DEST_UNREACH codes (array index) to the errno reported to
 *	the socket layer, and whether the error is fatal to the connection.
 */
const struct icmp_err icmp_err_convert[] = {
	{	/* 0: ICMP_NET_UNREACH */
		.errno = ENETUNREACH,
		.fatal = 0,
	},
	{	/* 1: ICMP_HOST_UNREACH */
		.errno = EHOSTUNREACH,
		.fatal = 0,
	},
	{	/* 2: ICMP_PROT_UNREACH */
		.errno = ENOPROTOOPT ,
		.fatal = 1,
	},
	{	/* 3: ICMP_PORT_UNREACH */
		.errno = ECONNREFUSED,
		.fatal = 1,
	},
	{	/* 4: ICMP_FRAG_NEEDED */
		.errno = EMSGSIZE,
		.fatal = 0,
	},
	{	/* 5: ICMP_SR_FAILED */
		.errno = EOPNOTSUPP,
		.fatal = 0,
	},
	{	/* 6: ICMP_NET_UNKNOWN */
		.errno = ENETUNREACH,
		.fatal = 1,
	},
	{	/* 7: ICMP_HOST_UNKNOWN */
		.errno = EHOSTDOWN,
		.fatal = 1,
	},
	{	/* 8: ICMP_HOST_ISOLATED */
		.errno = ENONET,
		.fatal = 1,
	},
	{	/* 9: ICMP_NET_ANO */
		.errno = ENETUNREACH,
		.fatal = 1,
	},
	{	/* 10: ICMP_HOST_ANO */
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
	{	/* 11: ICMP_NET_UNR_TOS */
		.errno = ENETUNREACH,
		.fatal = 0,
	},
	{	/* 12: ICMP_HOST_UNR_TOS */
		.errno = EHOSTUNREACH,
		.fatal = 0,
	},
	{	/* 13: ICMP_PKT_FILTERED */
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
	{	/* 14: ICMP_PREC_VIOLATION */
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
	{	/* 15: ICMP_PREC_CUTOFF */
		.errno = EHOSTUNREACH,
		.fatal = 1,
	},
};
EXPORT_SYMBOL(icmp_err_convert);
185
186
187
188
189
/*
 *	Per-ICMP-type dispatch entry: the receive handler for the type and
 *	whether the type represents an error message (error messages must
 *	never be generated in response to other error messages).
 */
struct icmp_control {
	void (*handler)(struct sk_buff *skb);	/* receive handler for this type */
	short error;				/* 1 if this type is an ICMP error */
};

/* Table defined at the bottom of this file, indexed by ICMP type. */
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
196
197
198
199
200
201
202
203
204static struct sock *icmp_sk(struct net *net)
205{
206 return net->ipv4.icmp_sk[smp_processor_id()];
207}
208
/*
 *	Grab this CPU's ICMP transmit socket with BHs disabled.
 *
 *	Returns the locked socket, or NULL if the lock is already held on
 *	this CPU — in that case we are presumably recursing into the ICMP
 *	output path (e.g. an error triggered while sending ICMP), so the
 *	caller must drop the message rather than deadlock.
 */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
	struct sock *sk;

	local_bh_disable();

	sk = icmp_sk(net);

	/* trylock, not lock: never spin/deadlock against ourselves. */
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {

		/* Lock taken on this CPU already: bail out, re-enabling BHs. */

		local_bh_enable();
		return NULL;
	}
	return sk;
}
226
/*
 *	Release the socket lock taken by icmp_xmit_lock() and re-enable BHs
 *	(the _bh variant pairs with the local_bh_disable() in the lock path).
 */
static inline void icmp_xmit_unlock(struct sock *sk)
{
	spin_unlock_bh(&sk->sk_lock.slock);
}
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253#define XRLIM_BURST_FACTOR 6
254int xrlim_allow(struct dst_entry *dst, int timeout)
255{
256 unsigned long now, token = dst->rate_tokens;
257 int rc = 0;
258
259 now = jiffies;
260 token += now - dst->rate_last;
261 dst->rate_last = now;
262 if (token > XRLIM_BURST_FACTOR * timeout)
263 token = XRLIM_BURST_FACTOR * timeout;
264 if (token >= timeout) {
265 token -= timeout;
266 rc = 1;
267 }
268 dst->rate_tokens = token;
269 return rc;
270}
271EXPORT_SYMBOL(xrlim_allow);
272
273static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
274 int type, int code)
275{
276 struct dst_entry *dst = &rt->dst;
277 int rc = 1;
278
279 if (type > NR_ICMP_TYPES)
280 goto out;
281
282
283 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
284 goto out;
285
286
287 if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
288 goto out;
289
290
291 if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
292 rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
293out:
294 return rc;
295}
296
297
298
299
/*
 *	Bump the outgoing ICMP SNMP counters: the per-type counter and the
 *	aggregate OutMsgs counter.
 */
void icmp_out_count(struct net *net, unsigned char type)
{
	ICMPMSGOUT_INC_STATS(net, type);
	ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}
305
306
307
308
309
/*
 *	Checksum-copying getfrag callback for ip_append_data(): copies @len
 *	bytes of the quoted packet into the new skb while folding the
 *	partial checksum into skb->csum.  @from is really a struct icmp_bxm.
 */
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
			  struct sk_buff *skb)
{
	struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(icmp_param->skb,
				      icmp_param->offset + offset,
				      to, len, 0);

	skb->csum = csum_block_add(skb->csum, csum, odd);
	/* For error messages, link the conntrack of the offending packet. */
	if (icmp_pointers[icmp_param->data.icmph.type].error)
		nf_ct_attach(skb, icmp_param->skb);
	return 0;
}
325
/*
 *	Append the ICMP message described by @icmp_param to the control
 *	socket's write queue and transmit it, computing the ICMP checksum
 *	over header and all queued fragments.  Caller holds the xmit lock.
 */
static void icmp_push_reply(struct icmp_bxm *icmp_param,
			    struct ipcm_cookie *ipc, struct rtable **rt)
{
	struct sock *sk;
	struct sk_buff *skb;

	sk = icmp_sk(dev_net((*rt)->dst.dev));
	if (ip_append_data(sk, icmp_glue_bits, icmp_param,
			   icmp_param->data_len+icmp_param->head_len,
			   icmp_param->head_len,
			   ipc, rt, MSG_DONTWAIT) < 0) {
		ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
		ip_flush_pending_frames(sk);
	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		struct icmphdr *icmph = icmp_hdr(skb);
		__wsum csum = 0;
		struct sk_buff *skb1;

		/* Sum the partial checksums of every queued fragment... */
		skb_queue_walk(&sk->sk_write_queue, skb1) {
			csum = csum_add(csum, skb1->csum);
		}
		/* ...then fold in the header while copying it into place. */
		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
						 (char *)icmph,
						 icmp_param->head_len, csum);
		icmph->checksum = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}
}
355
356
357
358
359
/*
 *	Send an ICMP reply (echo/timestamp reply) back to the sender of
 *	@skb, echoing any IP options.  Silently drops the reply if the
 *	options cannot be echoed, the xmit lock is busy, routing fails, or
 *	the rate limiter refuses.
 */
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct ipcm_cookie ipc;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct sock *sk;
	struct inet_sock *inet;
	__be32 daddr;

	if (ip_options_echo(&icmp_param->replyopts, skb))
		return;

	sk = icmp_xmit_lock(net);
	if (sk == NULL)
		return;
	inet = inet_sk(sk);

	icmp_param->data.icmph.checksum = 0;

	/* Reply with the same TOS the request used. */
	inet->tos = ip_hdr(skb)->tos;
	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	if (icmp_param->replyopts.optlen) {
		ipc.opt = &icmp_param->replyopts;
		/* Source-routed request: reply to the first-hop address. */
		if (ipc.opt->srr)
			daddr = icmp_param->replyopts.faddr;
	}
	{
		struct flowi fl = { .fl4_dst= daddr,
				    .fl4_src = rt->rt_spec_dst,
				    .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
				    .proto = IPPROTO_ICMP };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(net, &rt, &fl))
			goto out_unlock;
	}
	if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
			       icmp_param->data.icmph.code))
		icmp_push_reply(icmp_param, &ipc, &rt);
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
}
404
405
406
407
408
409
410
411
412
413
414
415
416
/*
 *	icmp_send - build and transmit an ICMP error message.
 *	@skb_in: the packet that provoked the error (quoted in the reply)
 *	@type:   ICMP type of the error
 *	@code:   ICMP code of the error
 *	@info:   type-dependent extra data (e.g. MTU for FRAG_NEEDED),
 *		 placed in the header's gateway/un field
 *
 *	Applies the RFC 1122 suppression rules (no errors about broadcasts,
 *	multicasts, non-initial fragments, or other ICMP errors), performs
 *	the route/xfrm lookup for the reverse path, rate limits, and quotes
 *	as much of the offending packet as fits in 576 bytes.
 */
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
	struct iphdr *iph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = skb_rtable(skb_in);
	struct ipcm_cookie ipc;
	__be32 saddr;
	u8 tos;
	struct net *net;
	struct sock *sk;

	/* No route attached: we have no idea where to send the error. */
	if (!rt)
		goto out;
	net = dev_net(rt->dst.dev);

	/*
	 *	Sanity-check that the IP header the offending skb claims
	 *	actually lies inside its linear data.
	 */
	iph = ip_hdr(skb_in);

	if ((u8 *)iph < skb_in->head ||
	    (skb_in->network_header + sizeof(*iph)) > skb_in->tail)
		goto out;

	/*
	 *	Only reply to packets unicast to this host.
	 */
	if (skb_in->pkt_type != PACKET_HOST)
		goto out;

	/*
	 *	Never answer a packet sent to a broadcast/multicast address.
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto out;

	/*
	 *	Only reply to the first fragment (non-zero fragment offset
	 *	means a later fragment).
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		goto out;

	/*
	 *	If we are generating an error, make sure the offending packet
	 *	was not itself an ICMP error — never send errors about errors.
	 */
	if (icmp_pointers[type].error) {

		/*
		 *	Peek at the inner ICMP type of the quoted packet.
		 */

		if (iph->protocol == IPPROTO_ICMP) {
			u8 _inner_type, *itp;

			itp = skb_header_pointer(skb_in,
						 skb_network_header(skb_in) +
						 (iph->ihl << 2) +
						 offsetof(struct icmphdr,
							  type) -
						 skb_in->data,
						 sizeof(_inner_type),
						 &_inner_type);
			if (itp == NULL)
				goto out;

			/*
			 *	Assume any unknown ICMP type is an error and
			 *	suppress; error-typed inner packets likewise.
			 */

			if (*itp > NR_ICMP_TYPES ||
			    icmp_pointers[*itp].error)
				goto out;
		}
	}

	/* NULL means the lock is busy on this CPU: drop, do not deadlock. */
	sk = icmp_xmit_lock(net);
	if (sk == NULL)
		return;

	/*
	 *	Choose the source address: the destination of the offending
	 *	packet if it was addressed to us, otherwise an address of the
	 *	inbound interface (sysctl) or let routing pick (saddr = 0).
	 */
	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL)) {
		struct net_device *dev = NULL;

		rcu_read_lock();
		if (rt_is_input_route(rt) &&
		    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
			dev = dev_get_by_index_rcu(net, rt->fl.iif);

		if (dev)
			saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
		else
			saddr = 0;
		rcu_read_unlock();
	}

	/* Errors go out with internet-control precedence, keeping the TOS
	 * bits of the offending packet; non-errors echo its TOS exactly. */
	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
					   IPTOS_PREC_INTERNETCONTROL) :
					  iph->tos;

	if (ip_options_echo(&icmp_param.replyopts, skb_in))
		goto out_unlock;


	/*
	 *	Prepare the ICMP header and the quote parameters.
	 */

	icmp_param.data.icmph.type = type;
	icmp_param.data.icmph.code = code;
	icmp_param.data.icmph.un.gateway = info;
	icmp_param.data.icmph.checksum = 0;
	icmp_param.skb = skb_in;
	icmp_param.offset = skb_network_offset(skb_in);
	inet_sk(sk)->tos = tos;
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts;
	ipc.tx_flags = 0;

	{
		struct flowi fl = {
			.fl4_dst = icmp_param.replyopts.srr ?
				   icmp_param.replyopts.faddr : iph->saddr,
			.fl4_src = saddr,
			.fl4_tos = RT_TOS(tos),
			.proto = IPPROTO_ICMP,
			.fl_icmp_type = type,
			.fl_icmp_code = code,
		};
		int err;
		struct rtable *rt2;

		security_skb_classify_flow(skb_in, &fl);
		if (__ip_route_output_key(net, &rt, &fl))
			goto out_unlock;

		/* Keep the original route so we can tell whether the xfrm
		 * lookup transformed it. */
		rt2 = rt;

		if (!fl.nl_u.ip4_u.saddr)
			fl.nl_u.ip4_u.saddr = rt->rt_src;

		err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
		switch (err) {
		case 0:
			if (rt != rt2)
				goto route_done;
			break;
		case -EPERM:
			rt = NULL;
			break;
		default:
			goto out_unlock;
		}

		/* Plain lookup failed or was a no-op: retry using the
		 * reverse of the offending packet's xfrm session. */
		if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
			goto relookup_failed;

		if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
			err = __ip_route_output_key(net, &rt2, &fl);
		else {
			struct flowi fl2 = {};
			unsigned long orefdst;

			fl2.fl4_dst = fl.fl4_src;
			if (ip_route_output_key(net, &rt2, &fl2))
				goto relookup_failed;

			/* Temporarily borrow skb_in's dst slot to run an
			 * input-route lookup, then restore it. */
			orefdst = skb_in->_skb_refdst;
			err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
					     RT_TOS(tos), rt2->dst.dev);

			dst_release(&rt2->dst);
			rt2 = skb_rtable(skb_in);
			skb_in->_skb_refdst = orefdst;
		}

		if (err)
			goto relookup_failed;

		err = xfrm_lookup(net, (struct dst_entry **)&rt2, &fl, NULL,
				  XFRM_LOOKUP_ICMP);
		switch (err) {
		case 0:
			dst_release(&rt->dst);
			rt = rt2;
			break;
		case -EPERM:
			goto ende;
		default:
relookup_failed:
			/* Fall back to the first lookup's route if we had one. */
			if (!rt)
				goto out_unlock;
			break;
		}
	}

route_done:
	if (!icmpv4_xrlim_allow(net, rt, type, code))
		goto ende;

	/*	RFC says return as much as we can without exceeding
	 *	576 bytes. */
	room = dst_mtu(&rt->dst);
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
	room -= sizeof(struct icmphdr);

	icmp_param.data_len = skb_in->len - icmp_param.offset;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	icmp_param.head_len = sizeof(struct icmphdr);

	icmp_push_reply(&icmp_param, &ipc, &rt);
ende:
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out:;
}
EXPORT_SYMBOL(icmp_send);
646
647
648
649
650
651
/*
 *	Handle incoming ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED,
 *	ICMP_SOURCE_QUENCH and ICMP_PARAMETERPROB messages: validate the
 *	quoted header, extract type-specific info (e.g. the PMTU), then hand
 *	the error to raw sockets and the embedded protocol's err_handler.
 */
static void icmp_unreach(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct icmphdr *icmph;
	int hash, protocol;
	const struct net_protocol *ipprot;
	u32 info = 0;
	struct net *net;

	net = dev_net(skb_dst(skb)->dev);

	/*
	 *	The ICMP header has already been pulled; skb->data now points
	 *	at the quoted IP header.  Make sure it is present and sane.
	 */

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out_err;

	icmph = icmp_hdr(skb);
	iph = (struct iphdr *)skb->data;

	/* 5 is the minimum legal IHL (20-byte header). */
	if (iph->ihl < 5)
		goto out_err;

	if (icmph->type == ICMP_DEST_UNREACH) {
		switch (icmph->code & 15) {
		case ICMP_NET_UNREACH:
		case ICMP_HOST_UNREACH:
		case ICMP_PROT_UNREACH:
		case ICMP_PORT_UNREACH:
			break;
		case ICMP_FRAG_NEEDED:
			if (ipv4_config.no_pmtu_disc) {
				LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n",
					       &iph->daddr);
			} else {
				/* Update the cached PMTU; info becomes the
				 * new MTU passed on to err_handlers. */
				info = ip_rt_frag_needed(net, iph,
							 ntohs(icmph->un.frag.mtu),
							 skb->dev);
				if (!info)
					goto out;
			}
			break;
		case ICMP_SR_FAILED:
			LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n",
				       &iph->daddr);
			break;
		default:
			break;
		}
		if (icmph->code > NR_ICMP_UNREACH)
			goto out;
	} else if (icmph->type == ICMP_PARAMETERPROB)
		/* info = offending octet's offset (top byte of the field). */
		info = ntohl(icmph->un.gateway) >> 24;

	/*
	 *	Some broken routers send errors about packets they received
	 *	on a broadcast address.  RFC 1122 hosts should not see such
	 *	quoted destinations; log (unless suppressed by sysctl) and
	 *	drop.
	 */

	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
	    inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
		if (net_ratelimit())
			printk(KERN_WARNING "%pI4 sent an invalid ICMP "
					    "type %u, code %u "
					    "error to a broadcast: %pI4 on %s\n",
			       &ip_hdr(skb)->saddr,
			       icmph->type, icmph->code,
			       &iph->daddr,
			       skb->dev->name);
		goto out;
	}

	/*
	 *	Need the quoted IP header plus 8 bytes of its payload
	 *	(enough for the transport ports).
	 */
	if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
		goto out;

	/* pskb_may_pull() may have reallocated; re-read the header. */
	iph = (struct iphdr *)skb->data;
	protocol = iph->protocol;

	/*
	 *	Deliver to any raw sockets bound to this protocol first.
	 */
	raw_icmp_error(skb, protocol, info);

	hash = protocol & (MAX_INET_PROTOS - 1);
	rcu_read_lock();
	ipprot = rcu_dereference(inet_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, info);
	rcu_read_unlock();

out:
	return;
out_err:
	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
	goto out;
}
767
768
769
770
771
772
773static void icmp_redirect(struct sk_buff *skb)
774{
775 struct iphdr *iph;
776
777 if (skb->len < sizeof(struct iphdr))
778 goto out_err;
779
780
781
782
783 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
784 goto out;
785
786 iph = (struct iphdr *)skb->data;
787
788 switch (icmp_hdr(skb)->code & 7) {
789 case ICMP_REDIR_NET:
790 case ICMP_REDIR_NETTOS:
791
792
793
794 case ICMP_REDIR_HOST:
795 case ICMP_REDIR_HOSTTOS:
796 ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
797 icmp_hdr(skb)->un.gateway,
798 iph->saddr, skb->dev);
799 break;
800 }
801out:
802 return;
803out_err:
804 ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
805 goto out;
806}
807
808
809
810
811
812
813
814
815
816
817
818
819
820static void icmp_echo(struct sk_buff *skb)
821{
822 struct net *net;
823
824 net = dev_net(skb_dst(skb)->dev);
825 if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
826 struct icmp_bxm icmp_param;
827
828 icmp_param.data.icmph = *icmp_hdr(skb);
829 icmp_param.data.icmph.type = ICMP_ECHOREPLY;
830 icmp_param.skb = skb;
831 icmp_param.offset = 0;
832 icmp_param.data_len = skb->len;
833 icmp_param.head_len = sizeof(struct icmphdr);
834 icmp_reply(&icmp_param, skb);
835 }
836}
837
838
839
840
841
842
843
844
/*
 *	Handle an incoming ICMP_TIMESTAMP request and send an
 *	ICMP_TIMESTAMPREPLY.  times[0] = originate stamp copied from the
 *	request, times[1] = receive stamp, times[2] = transmit stamp
 *	(set equal to the receive stamp).
 */
static void icmp_timestamp(struct sk_buff *skb)
{
	struct timespec tv;
	struct icmp_bxm icmp_param;

	/* Need at least the 4-byte originate timestamp in the payload. */
	if (skb->len < 4)
		goto out_err;

	/*
	 *	Fill in the current time as milliseconds since midnight UT.
	 */
	getnstimeofday(&tv);
	icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
					 tv.tv_nsec / NSEC_PER_MSEC);
	icmp_param.data.times[2] = icmp_param.data.times[1];
	/* Cannot fail: skb->len >= 4 was checked above. */
	if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
		BUG();
	icmp_param.data.icmph = *icmp_hdr(skb);
	icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
	icmp_param.data.icmph.code = 0;
	icmp_param.skb = skb;
	icmp_param.offset = 0;
	icmp_param.data_len = 0;
	/* Header = icmphdr + the three 4-byte timestamps. */
	icmp_param.head_len = sizeof(struct icmphdr) + 12;
	icmp_reply(&icmp_param, skb);
out:
	return;
out_err:
	ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
	goto out;
}
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
/*
 *	ICMP_ADDRESS (address mask request) handler: deliberately does
 *	nothing — we never answer address mask requests.  The disabled
 *	debug printk below only notes that someone asked.
 */
static void icmp_address(struct sk_buff *skb)
{
#if 0
	if (net_ratelimit())
		printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
#endif
}
920
921
922
923
924
925
926
/*
 *	ICMP_ADDRESSREPLY handler: we never reply, but when forwarding with
 *	martian logging enabled, sanity-check the advertised mask against
 *	our own addresses on the receiving interface and log mismatches.
 */
static void icmp_address_reply(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	/* Need the 4-byte mask and a directly-connected source. */
	if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	if (in_dev->ifa_list &&
	    IN_DEV_LOG_MARTIANS(in_dev) &&
	    IN_DEV_FORWARD(in_dev)) {
		__be32 _mask, *mp;

		mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
		/* Cannot fail: skb->len >= 4 was checked above. */
		BUG_ON(mp == NULL);
		/* Look for a local address whose mask matches the reply. */
		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
			if (*mp == ifa->ifa_mask &&
			    inet_ifa_match(rt->rt_src, ifa))
				break;
		}
		if (!ifa && net_ratelimit()) {
			printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
			       mp, dev->name, &rt->rt_src);
		}
	}
}
959
/* Handler for ICMP types we deliberately ignore: do nothing. */
static void icmp_discard(struct sk_buff *skb)
{
}
963
964
965
966
/*
 *	Main ICMP receive entry point: xfrm policy check, checksum
 *	verification, broadcast/multicast filtering per RFC 1122, then
 *	dispatch through icmp_pointers[type].handler.  Always consumes skb;
 *	returns 0.
 */
int icmp_rcv(struct sk_buff *skb)
{
	struct icmphdr *icmph;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		/* Only ICMP-flagged xfrm states may bypass the policy. */
		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP))
			goto drop;

		if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
			goto drop;

		/* Re-run the policy check against the quoted inner packet,
		 * temporarily pointing the network header past the icmphdr. */
		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*icmph));

		if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		skb_set_network_header(skb, nh);
	}

	ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb->csum))
			break;
		/* fall through - hardware sum did not verify, recompute */
	case CHECKSUM_NONE:
		skb->csum = 0;
		if (__skb_checksum_complete(skb))
			goto error;
	}

	if (!pskb_pull(skb, sizeof(*icmph)))
		goto error;

	icmph = icmp_hdr(skb);

	ICMPMSGIN_INC_STATS_BH(net, icmph->type);

	/*
	 *	Parse the message; unknown types are an error
	 *	(RFC 1122 says silently discard, but we also count them).
	 */
	if (icmph->type > NR_ICMP_TYPES)
		goto error;

	/*
	 *	Broadcast/multicast destinations: only a few types are
	 *	acceptable at all, and echo/timestamp may additionally be
	 *	suppressed by sysctl (smurf mitigation).
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		/*
		 *	RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
		 *	silently ignored.
		 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
		 *	discarded if to broadcast/multicast.
		 */
		if ((icmph->type == ICMP_ECHO ||
		     icmph->type == ICMP_TIMESTAMP) &&
		    net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
			goto error;
		}
		if (icmph->type != ICMP_ECHO &&
		    icmph->type != ICMP_TIMESTAMP &&
		    icmph->type != ICMP_ADDRESS &&
		    icmph->type != ICMP_ADDRESSREPLY) {
			goto error;
		}
	}

	icmp_pointers[icmph->type].handler(skb);

drop:
	kfree_skb(skb);
	return 0;
error:
	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
	goto drop;
}
1055
1056
1057
1058
/*
 *	Per-ICMP-type dispatch table, indexed by type (0..NR_ICMP_TYPES).
 *	Numeric indices are types with no symbolic name here (reserved or
 *	unhandled); they get icmp_discard, with .error set where the type
 *	is treated as an error message.
 */
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
	[ICMP_ECHOREPLY] = {
		.handler = icmp_discard,
	},
	[1] = {			/* reserved */
		.handler = icmp_discard,
		.error = 1,
	},
	[2] = {			/* reserved */
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_DEST_UNREACH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_SOURCE_QUENCH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_REDIRECT] = {
		.handler = icmp_redirect,
		.error = 1,
	},
	[6] = {			/* unassigned */
		.handler = icmp_discard,
		.error = 1,
	},
	[7] = {			/* unassigned */
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_ECHO] = {
		.handler = icmp_echo,
	},
	[9] = {			/* router advertisement - ignored */
		.handler = icmp_discard,
		.error = 1,
	},
	[10] = {		/* router solicitation - ignored */
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_TIME_EXCEEDED] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_PARAMETERPROB] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_TIMESTAMP] = {
		.handler = icmp_timestamp,
	},
	[ICMP_TIMESTAMPREPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REQUEST] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESS] = {
		.handler = icmp_address,
	},
	[ICMP_ADDRESSREPLY] = {
		.handler = icmp_address_reply,
	},
};
1129
/*
 *	Per-netns teardown: destroy every per-CPU ICMP control socket and
 *	free the pointer array.  Only called after icmp_sk_init() succeeded,
 *	so every slot holds a valid socket.
 */
static void __net_exit icmp_sk_exit(struct net *net)
{
	int i;

	for_each_possible_cpu(i)
		inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
	kfree(net->ipv4.icmp_sk);
	net->ipv4.icmp_sk = NULL;
}
1139
1140static int __net_init icmp_sk_init(struct net *net)
1141{
1142 int i, err;
1143
1144 net->ipv4.icmp_sk =
1145 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
1146 if (net->ipv4.icmp_sk == NULL)
1147 return -ENOMEM;
1148
1149 for_each_possible_cpu(i) {
1150 struct sock *sk;
1151
1152 err = inet_ctl_sock_create(&sk, PF_INET,
1153 SOCK_RAW, IPPROTO_ICMP, net);
1154 if (err < 0)
1155 goto fail;
1156
1157 net->ipv4.icmp_sk[i] = sk;
1158
1159
1160
1161
1162 sk->sk_sndbuf =
1163 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
1164
1165
1166
1167
1168 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1169 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
1170 }
1171
1172
1173 net->ipv4.sysctl_icmp_echo_ignore_all = 0;
1174 net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
1175
1176
1177 net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191 net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
1192 net->ipv4.sysctl_icmp_ratemask = 0x1818;
1193 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
1194
1195 return 0;
1196
1197fail:
1198 for_each_possible_cpu(i)
1199 inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
1200 kfree(net->ipv4.icmp_sk);
1201 return err;
1202}
1203
/* Register per-network-namespace init/exit hooks for the ICMP sockets. */
static struct pernet_operations __net_initdata icmp_sk_ops = {
	.init = icmp_sk_init,
	.exit = icmp_sk_exit,
};
1208
/* Boot-time ICMP initialisation: hook into the pernet subsystem. */
int __init icmp_init(void)
{
	return register_pernet_subsys(&icmp_sk_ops);
}
1213